/*
 *  sep_main.c - Security Processor Driver main group of functions
 *
 *  Copyright(c) 2009-2011 Intel Corporation. All rights reserved.
 *  Contributions(c) 2009-2011 Discretix. All rights reserved.
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the Free
 *  Software Foundation; version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 *  more details.
 *
 *  You should have received a copy of the GNU General Public License along with
 *  this program; if not, write to the Free Software Foundation, Inc., 59
 *  Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 *  CHANGES:
 *
 *  2009.06.26	Initial publish
 *  2010.09.14	Upgrade to Medfield
 *  2011.01.21	Move to sep_main.c to allow for sep_crypto.c
 *  2011.02.22	Enable kernel crypto operation
 *
 *  Please note that this driver is based on information in the Discretix
 *  CryptoCell 5.2 Driver Implementation Guide; the Discretix CryptoCell 5.2
 *  Integration Intel Medfield appendix; the Discretix CryptoCell 5.2
 *  Linux Driver Integration Guide; and the Discretix CryptoCell 5.2 System
 *  Overview and Integration Guide.
 */
/* #define SEP_PERF_DEBUG */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/cdev.h>
#include <linux/kdev_t.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/ioctl.h>
#include <asm/current.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/async.h>
#include <linux/crypto.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/hash.h>

#include "sep_driver_hw_defs.h"
#include "sep_driver_config.h"
#include "sep_driver_api.h"
#include "sep_crypto.h"

#define CREATE_TRACE_POINTS
#include "sep_trace_events.h"
/*
 * Let's not spend cycles iterating over message
 * area contents if debugging not enabled
 */
#ifdef DEBUG
#define sep_dump_message(sep)	_sep_dump_message(sep)
#else
#define sep_dump_message(sep)
#endif
/*
 * Currently, there is only one SEP device per platform;
 * in the event that future platforms have more than one
 * SEP device, this will become a linked list.
 */
struct sep_device *sep_dev;
/**
 * sep_queue_status_remove - Removes transaction from status queue
 * @sep: SEP device
 * @queue_elem: pointer to the status queue element to remove
 *
 * This function will remove information about a transaction from the queue.
 */
void sep_queue_status_remove(struct sep_device *sep,
				      struct sep_queue_info **queue_elem)
{
	unsigned long lck_flags;

	dev_dbg(&sep->pdev->dev, "[PID%d] sep_queue_status_remove\n",
		current->pid);

	if (!queue_elem || !(*queue_elem)) {
		dev_dbg(&sep->pdev->dev, "PID%d %s null\n",
					current->pid, __func__);
		return;
	}

	spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);
	list_del(&(*queue_elem)->list);
	sep->sep_queue_num--;
	spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);

	kfree(*queue_elem);
	*queue_elem = NULL;

	dev_dbg(&sep->pdev->dev, "[PID%d] sep_queue_status_remove return\n",
		current->pid);
}
/**
 * sep_queue_status_add - Adds transaction to status queue
 * @sep: SEP device
 * @opcode: transaction opcode
 * @size: input data size
 * @pid: pid of current process
 * @name: current process name
 * @name_len: length of name (current process)
 *
 * This function adds information about a transaction that has started to
 * the status queue.
 */
struct sep_queue_info *sep_queue_status_add(
						struct sep_device *sep,
						u32 opcode,
						u32 size,
						u32 pid,
						u8 *name, size_t name_len)
{
	unsigned long lck_flags;
	struct sep_queue_info *my_elem = NULL;

	my_elem = kzalloc(sizeof(struct sep_queue_info), GFP_KERNEL);

	if (!my_elem)
		return NULL;

	dev_dbg(&sep->pdev->dev, "[PID%d] kzalloc ok\n", current->pid);

	my_elem->data.opcode = opcode;
	my_elem->data.size = size;
	my_elem->data.pid = pid;

	if (name_len > TASK_COMM_LEN)
		name_len = TASK_COMM_LEN;

	memcpy(&my_elem->data.name, name, name_len);

	spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);

	list_add_tail(&my_elem->list, &sep->sep_queue_status);
	sep->sep_queue_num++;

	spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);

	return my_elem;
}
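/*
 * Illustrative sketch (not part of the driver): a transaction path
 * would typically bracket its work with the two queue helpers above,
 * e.g. (opcode and size are placeholders):
 *
 *	struct sep_queue_info *elem;
 *
 *	elem = sep_queue_status_add(sep, opcode, size, current->pid,
 *				    current->comm, strlen(current->comm));
 *	if (elem) {
 *		... run the transaction ...
 *		sep_queue_status_remove(sep, &elem);
 *	}
 *
 * sep_queue_status_remove() takes &elem so it can both free the element
 * and NULL the caller's pointer, making a repeated removal harmless.
 */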
/**
 * sep_allocate_dmatables_region - Allocates buf for the MLLI/DMA tables
 * @sep: SEP device
 * @dmatables_region: Destination pointer for the buffer
 * @dma_ctx: DMA context for the transaction
 * @table_count: Number of MLLI/DMA tables to create
 *
 * The buffer created will not work as-is for DMA operations;
 * it needs to be copied over to the appropriate place in the
 * shared area.
 */
static int sep_allocate_dmatables_region(struct sep_device *sep,
					 void **dmatables_region,
					 struct sep_dma_context *dma_ctx,
					 const u32 table_count)
{
	const size_t new_len =
		SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES - 1;

	void *tmp_region = NULL;

	dev_dbg(&sep->pdev->dev, "[PID%d] dma_ctx = 0x%p\n",
				current->pid, dma_ctx);
	dev_dbg(&sep->pdev->dev, "[PID%d] dmatables_region = 0x%p\n",
				current->pid, dmatables_region);

	if (!dma_ctx || !dmatables_region) {
		dev_warn(&sep->pdev->dev,
			"[PID%d] dma context/region uninitialized\n",
			current->pid);
		return -EINVAL;
	}

	dev_dbg(&sep->pdev->dev, "[PID%d] newlen = 0x%08zX\n",
				current->pid, new_len);
	dev_dbg(&sep->pdev->dev, "[PID%d] oldlen = 0x%08X\n", current->pid,
				dma_ctx->dmatables_len);

	tmp_region = kzalloc(new_len + dma_ctx->dmatables_len, GFP_KERNEL);
	if (!tmp_region)
		return -ENOMEM;

	/* Were there any previous tables that need to be preserved ? */
	if (*dmatables_region) {
		memcpy(tmp_region, *dmatables_region, dma_ctx->dmatables_len);
		kfree(*dmatables_region);
		*dmatables_region = NULL;
	}

	*dmatables_region = tmp_region;

	dma_ctx->dmatables_len += new_len;

	return 0;
}
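/*
 * Illustrative sketch (not part of the driver): because the helper
 * above copies any existing contents into the newly allocated buffer,
 * a caller can grow the same region across successive calls, e.g. once
 * for an input table and again for an output table:
 *
 *	void *region = NULL;
 *
 *	if (sep_allocate_dmatables_region(sep, &region, dma_ctx, 1))
 *		return -ENOMEM;
 *	... build the first table in region ...
 *	if (sep_allocate_dmatables_region(sep, &region, dma_ctx, 1))
 *		return -ENOMEM;	  // first table contents are preserved
 *
 * dma_ctx->dmatables_len tracks the running total so each reallocation
 * knows how many bytes to carry over.
 */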
/**
 * sep_wait_transaction - Used for synchronizing transactions
 * @sep: SEP device
 */
int sep_wait_transaction(struct sep_device *sep)
{
	int error = 0;
	DEFINE_WAIT(wait);

	if (0 == test_and_set_bit(SEP_TRANSACTION_STARTED_LOCK_BIT,
				&sep->in_use_flags)) {
		dev_dbg(&sep->pdev->dev,
			"[PID%d] no transactions, returning\n",
				current->pid);
		goto end_function_setpid;
	}

	/*
	 * Looping needed even for exclusive waitq entries
	 * due to process wakeup latencies, previous process
	 * might have already created another transaction.
	 */
	for (;;) {
		/*
		 * Exclusive waitq entry, so that only one process is
		 * woken up from the queue at a time.
		 */
		prepare_to_wait_exclusive(&sep->event_transactions,
					  &wait,
					  TASK_INTERRUPTIBLE);
		if (0 == test_and_set_bit(SEP_TRANSACTION_STARTED_LOCK_BIT,
					  &sep->in_use_flags)) {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] no transactions, breaking\n",
					current->pid);
			break;
		}
		dev_dbg(&sep->pdev->dev,
			"[PID%d] transactions ongoing, sleeping\n",
				current->pid);
		schedule();
		dev_dbg(&sep->pdev->dev, "[PID%d] woken up\n", current->pid);

		if (signal_pending(current)) {
			dev_dbg(&sep->pdev->dev, "[PID%d] received signal\n",
							current->pid);
			error = -EINTR;
			goto end_function;
		}
	}
end_function_setpid:
	/*
	 * The pid_doing_transaction indicates that this process
	 * now owns the facilities to perform a transaction with
	 * the SEP. While this process is performing a transaction,
	 * no other process who has the SEP device open can perform
	 * any transactions. This method allows more than one process
	 * to have the device open at any given time, which provides
	 * finer granularity for device utilization by multiple
	 * processes.
	 */
	/* Only one process is able to progress here at a time */
	sep->pid_doing_transaction = current->pid;

end_function:
	finish_wait(&sep->event_transactions, &wait);

	return error;
}
/**
 * sep_check_transaction_owner - Checks if current process owns transaction
 * @sep: SEP device
 */
static inline int sep_check_transaction_owner(struct sep_device *sep)
{
	dev_dbg(&sep->pdev->dev, "[PID%d] transaction pid = %d\n",
		current->pid,
		sep->pid_doing_transaction);

	if ((sep->pid_doing_transaction == 0) ||
		(current->pid != sep->pid_doing_transaction))
		return -EACCES;

	/* We own the transaction */
	return 0;
}
/**
 * sep_dump_message - dump the message that is pending
 * @sep: SEP device
 *
 * This will only print dump if DEBUG is set; it does
 * follow kernel debug print enabling
 */
#ifdef DEBUG
static void _sep_dump_message(struct sep_device *sep)
{
	int count;

	u32 *p = sep->shared_addr;

	for (count = 0; count < 10 * 4; count += 4)
		dev_dbg(&sep->pdev->dev,
			"[PID%d] Word %d of the message is %x\n",
				current->pid, count/4, *p++);
}
#endif
/**
 * sep_map_and_alloc_shared_area - allocate shared block
 * @sep: security processor
 *
 * The size of the block is taken from sep->shared_size.
 */
static int sep_map_and_alloc_shared_area(struct sep_device *sep)
{
	sep->shared_addr = dma_alloc_coherent(&sep->pdev->dev,
		sep->shared_size,
		&sep->shared_bus, GFP_KERNEL);

	if (!sep->shared_addr) {
		dev_dbg(&sep->pdev->dev,
			"[PID%d] shared memory dma_alloc_coherent failed\n",
				current->pid);
		return -ENOMEM;
	}
	dev_dbg(&sep->pdev->dev,
		"[PID%d] shared_addr %zx bytes @%p (bus %llx)\n",
				current->pid,
				sep->shared_size, sep->shared_addr,
				(unsigned long long)sep->shared_bus);
	return 0;
}
/**
 * sep_unmap_and_free_shared_area - free shared block
 * @sep: security processor
 */
static void sep_unmap_and_free_shared_area(struct sep_device *sep)
{
	dma_free_coherent(&sep->pdev->dev, sep->shared_size,
				sep->shared_addr, sep->shared_bus);
}
/**
 * sep_shared_bus_to_virt - convert bus/virt addresses
 * @sep: pointer to struct sep_device
 * @bus_address: address to convert
 *
 * Returns virtual address inside the shared area according
 * to the bus address.
 */
static void *sep_shared_bus_to_virt(struct sep_device *sep,
						dma_addr_t bus_address)
{
	return sep->shared_addr + (bus_address - sep->shared_bus);
}
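/*
 * Worked example (values made up for illustration): if the shared area
 * was allocated with shared_bus == 0x1fe00000 and mapped at
 * shared_addr == 0xf8200000, a SEP-visible bus address of 0x1fe00040
 * converts to the kernel pointer
 *
 *	0xf8200000 + (0x1fe00040 - 0x1fe00000) == 0xf8200040
 *
 * i.e. the conversion is pure offset arithmetic within the single
 * dma_alloc_coherent() block, and is only valid for addresses that lie
 * inside the shared area.
 */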
/**
 * sep_open - device open method
 * @inode: inode of SEP device
 * @filp: file handle to SEP device
 *
 * Open method for the SEP device. Called when userspace opens
 * the SEP device node.
 *
 * Returns zero on success otherwise an error code.
 */
static int sep_open(struct inode *inode, struct file *filp)
{
	struct sep_device *sep;
	struct sep_private_data *priv;

	dev_dbg(&sep_dev->pdev->dev, "[PID%d] open\n", current->pid);

	if (filp->f_flags & O_NONBLOCK)
		return -ENOTSUPP;

	/*
	 * Get the SEP device structure and use it for the
	 * private_data field in filp for other methods
	 */
	sep = sep_dev;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->device = sep;
	filp->private_data = priv;

	dev_dbg(&sep_dev->pdev->dev, "[PID%d] priv is 0x%p\n",
					current->pid, priv);

	/* Anyone can open; locking takes place at transaction level */
	return 0;
}
/**
 * sep_free_dma_table_data_handler - free DMA table
 * @sep: pointer to struct sep_device
 * @dma_ctx: dma context
 *
 * Handles the request to free DMA table for synchronic actions
 */
int sep_free_dma_table_data_handler(struct sep_device *sep,
					   struct sep_dma_context **dma_ctx)
{
	int count;
	int dcb_counter;
	/* Pointer to the current dma_resource struct */
	struct sep_dma_resource *dma;

	dev_dbg(&sep->pdev->dev,
		"[PID%d] sep_free_dma_table_data_handler\n",
			current->pid);

	if (!dma_ctx || !(*dma_ctx)) {
		/* No context or context already freed */
		dev_dbg(&sep->pdev->dev,
			"[PID%d] no DMA context or context already freed\n",
				current->pid);

		return 0;
	}

	dev_dbg(&sep->pdev->dev, "[PID%d] (*dma_ctx)->nr_dcb_creat 0x%x\n",
					current->pid,
					(*dma_ctx)->nr_dcb_creat);

	for (dcb_counter = 0;
	     dcb_counter < (*dma_ctx)->nr_dcb_creat; dcb_counter++) {
		dma = &(*dma_ctx)->dma_res_arr[dcb_counter];

		/* Unmap and free input map array */
		if (dma->in_map_array) {
			for (count = 0; count < dma->in_num_pages; count++) {
				dma_unmap_page(&sep->pdev->dev,
					dma->in_map_array[count].dma_addr,
					dma->in_map_array[count].size,
					DMA_TO_DEVICE);
			}
			kfree(dma->in_map_array);
		}

		/*
		 * Output is handled differently. If
		 * this was a secure dma into restricted memory,
		 * then we skip this step altogether as restricted
		 * memory is not available to the o/s at all.
		 */
		if (!(*dma_ctx)->secure_dma && dma->out_map_array) {

			for (count = 0; count < dma->out_num_pages; count++) {
				dma_unmap_page(&sep->pdev->dev,
					dma->out_map_array[count].dma_addr,
					dma->out_map_array[count].size,
					DMA_FROM_DEVICE);
			}
			kfree(dma->out_map_array);
		}

		/* Free page cache for output */
		if (dma->in_page_array) {
			for (count = 0; count < dma->in_num_pages; count++) {
				flush_dcache_page(dma->in_page_array[count]);
				page_cache_release(dma->in_page_array[count]);
			}
			kfree(dma->in_page_array);
		}

		/* Again, we do this only for non secure dma */
		if (!(*dma_ctx)->secure_dma && dma->out_page_array) {

			for (count = 0; count < dma->out_num_pages; count++) {
				if (!PageReserved(dma->out_page_array[count]))
					SetPageDirty(dma->
							out_page_array[count]);

				flush_dcache_page(dma->out_page_array[count]);
				page_cache_release(dma->out_page_array[count]);
			}
			kfree(dma->out_page_array);
		}

		/*
		 * Note that here we use in_map_num_entries because we
		 * don't have a page array; the page array is generated
		 * only in the lock_user_pages, which is not called
		 * for kernel crypto, which is what the sg (scatter gather)
		 * is used for exclusively.
		 */
		if (dma->src_sg) {
			dma_unmap_sg(&sep->pdev->dev, dma->src_sg,
				dma->in_map_num_entries, DMA_TO_DEVICE);
			dma->src_sg = NULL;
		}

		if (dma->dst_sg) {
			dma_unmap_sg(&sep->pdev->dev, dma->dst_sg,
				dma->in_map_num_entries, DMA_FROM_DEVICE);
			dma->dst_sg = NULL;
		}

		/* Reset all the values */
		dma->in_page_array = NULL;
		dma->out_page_array = NULL;
		dma->in_num_pages = 0;
		dma->out_num_pages = 0;
		dma->in_map_array = NULL;
		dma->out_map_array = NULL;
		dma->in_map_num_entries = 0;
		dma->out_map_num_entries = 0;
	}

	(*dma_ctx)->nr_dcb_creat = 0;
	(*dma_ctx)->num_lli_tables_created = 0;

	kfree(*dma_ctx);
	*dma_ctx = NULL;

	dev_dbg(&sep->pdev->dev,
		"[PID%d] sep_free_dma_table_data_handler end\n",
			current->pid);

	return 0;
}
/**
 * sep_end_transaction_handler - end transaction
 * @sep: pointer to struct sep_device
 * @dma_ctx: DMA context
 * @call_status: Call status
 * @my_queue_elem: SEP status queue element
 *
 * This API handles the end transaction request.
 */
static int sep_end_transaction_handler(struct sep_device *sep,
					struct sep_dma_context **dma_ctx,
					struct sep_call_status *call_status,
					struct sep_queue_info **my_queue_elem)
{
	dev_dbg(&sep->pdev->dev, "[PID%d] ending transaction\n", current->pid);

	/*
	 * Extraneous transaction clearing would mess up PM
	 * device usage counters and SEP would get suspended
	 * just before we send a command to SEP in the next
	 * transaction.
	 */
	if (sep_check_transaction_owner(sep)) {
		dev_dbg(&sep->pdev->dev, "[PID%d] not transaction owner\n",
						current->pid);
		return 0;
	}

	/* Update queue status */
	sep_queue_status_remove(sep, my_queue_elem);

	/* Check that all the DMA resources were freed */
	if (dma_ctx)
		sep_free_dma_table_data_handler(sep, dma_ctx);

	/* Reset call status for next transaction */
	if (call_status)
		call_status->status = 0;

	/* Clear the message area to avoid next transaction reading
	 * sensitive results from previous transaction */
	memset(sep->shared_addr, 0,
	       SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);

	/* start suspend delay */
#ifdef SEP_ENABLE_RUNTIME_PM
	if (sep->in_use) {
		sep->in_use = 0;
		pm_runtime_mark_last_busy(&sep->pdev->dev);
		pm_runtime_put_autosuspend(&sep->pdev->dev);
	}
#endif

	clear_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags);
	sep->pid_doing_transaction = 0;

	/* Now it's safe for next process to proceed */
	dev_dbg(&sep->pdev->dev, "[PID%d] waking up next transaction\n",
					current->pid);
	clear_bit(SEP_TRANSACTION_STARTED_LOCK_BIT, &sep->in_use_flags);
	wake_up(&sep->event_transactions);

	return 0;
}
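/*
 * Illustrative sketch (not part of the driver): the transaction
 * lifecycle implemented by the helpers above is
 *
 *	error = sep_wait_transaction(sep);	// take ownership, may sleep
 *	if (error)
 *		return error;			// interrupted by a signal
 *	... build tables, send command, poll for the reply ...
 *	sep_end_transaction_handler(sep, &dma_ctx, &call_status,
 *				    &my_queue_elem);
 *
 * sep_end_transaction_handler() is safe to call from a non-owner (it
 * checks sep_check_transaction_owner() first), which is why
 * sep_release() below can call it unconditionally on close.
 */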
/**
 * sep_release - close a SEP device
 * @inode: inode of SEP device
 * @filp: file handle being closed
 *
 * Called on the final close of a SEP device.
 */
static int sep_release(struct inode *inode, struct file *filp)
{
	struct sep_private_data * const private_data = filp->private_data;
	struct sep_call_status *call_status = &private_data->call_status;
	struct sep_device *sep = private_data->device;
	struct sep_dma_context **dma_ctx = &private_data->dma_ctx;
	struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;

	dev_dbg(&sep->pdev->dev, "[PID%d] release\n", current->pid);

	sep_end_transaction_handler(sep, dma_ctx, call_status,
		my_queue_elem);

	kfree(filp->private_data);

	return 0;
}
/**
 * sep_mmap - maps the shared area to user space
 * @filp: pointer to struct file
 * @vma: pointer to vm_area_struct
 *
 * Called on an mmap of our space via the normal SEP device
 */
static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct sep_private_data * const private_data = filp->private_data;
	struct sep_call_status *call_status = &private_data->call_status;
	struct sep_device *sep = private_data->device;
	struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
	dma_addr_t bus_addr;
	unsigned long error = 0;

	dev_dbg(&sep->pdev->dev, "[PID%d] sep_mmap\n", current->pid);

	/* Set the transaction busy (own the device) */
	/*
	 * Problem for multithreaded applications is that here we're
	 * possibly going to sleep while holding a write lock on
	 * current->mm->mmap_sem, which will cause deadlock for ongoing
	 * transaction trying to create DMA tables
	 */
	error = sep_wait_transaction(sep);
	if (error)
		/* Interrupted by signal, don't clear transaction */
		goto end_function;

	/* Clear the message area to avoid next transaction reading
	 * sensitive results from previous transaction */
	memset(sep->shared_addr, 0,
	       SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);

	/*
	 * Check that the size of the mapped range is as the size of the message
	 * shared area
	 */
	if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
		error = -EINVAL;
		goto end_function_with_error;
	}

	dev_dbg(&sep->pdev->dev, "[PID%d] shared_addr is %p\n",
					current->pid, sep->shared_addr);

	/* Get bus address */
	bus_addr = sep->shared_bus;

	if (remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT,
		vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
		dev_dbg(&sep->pdev->dev, "[PID%d] remap_pfn_range failed\n",
						current->pid);
		error = -EAGAIN;
		goto end_function_with_error;
	}

	/* Update call status */
	set_bit(SEP_LEGACY_MMAP_DONE_OFFSET, &call_status->status);

	goto end_function;

end_function_with_error:
	/* Clear our transaction */
	sep_end_transaction_handler(sep, NULL, call_status,
		my_queue_elem);

end_function:
	return error;
}
/**
 * sep_poll - poll handler
 * @filp: pointer to struct file
 * @wait: pointer to poll_table
 *
 * Called by the OS when the kernel is asked to do a poll on
 * a SEP file handle
 */
static unsigned int sep_poll(struct file *filp, poll_table *wait)
{
	struct sep_private_data * const private_data = filp->private_data;
	struct sep_call_status *call_status = &private_data->call_status;
	struct sep_device *sep = private_data->device;
	u32 mask = 0;
	u32 retval = 0;
	u32 retval2 = 0;
	unsigned long lock_irq_flag;

	/* Am I the process that owns the transaction? */
	if (sep_check_transaction_owner(sep)) {
		dev_dbg(&sep->pdev->dev, "[PID%d] poll pid not owner\n",
						current->pid);
		mask = POLLERR;
		goto end_function;
	}

	/* Check if send command or send_reply were activated previously */
	if (0 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
			  &call_status->status)) {
		dev_warn(&sep->pdev->dev, "[PID%d] sendmsg not called\n",
						current->pid);
		mask = POLLERR;
		goto end_function;
	}

	/* Add the event to the polling wait table */
	dev_dbg(&sep->pdev->dev, "[PID%d] poll: calling wait sep_event\n",
					current->pid);

	poll_wait(filp, &sep->event_interrupt, wait);

	dev_dbg(&sep->pdev->dev,
		"[PID%d] poll: send_ct is %lx reply ct is %lx\n",
			current->pid, sep->send_ct, sep->reply_ct);

	/* Check if error occurred during poll */
	retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
	if ((retval2 != 0x0) && (retval2 != 0x8)) {
		dev_dbg(&sep->pdev->dev, "[PID%d] poll; poll error %x\n",
						current->pid, retval2);
		mask |= POLLERR;
		goto end_function;
	}

	spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);

	if (sep->send_ct == sep->reply_ct) {
		spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
		retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] poll: data ready check (GPR2) %x\n",
				current->pid, retval);

		/* Check if printf request */
		if ((retval >> 30) & 0x1) {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] poll: SEP printf request\n",
					current->pid);
			goto end_function;
		}

		/* Check if this is a SEP reply or request */
		if (retval >> 31) {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] poll: SEP request\n",
					current->pid);
		} else {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] poll: normal return\n",
					current->pid);
			sep_dump_message(sep);
			dev_dbg(&sep->pdev->dev,
				"[PID%d] poll; SEP reply POLLIN|POLLRDNORM\n",
					current->pid);
			mask |= POLLIN | POLLRDNORM;
			set_bit(SEP_LEGACY_POLL_DONE_OFFSET,
				&call_status->status);
		}
	} else {
		spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] poll; no reply; returning mask of 0\n",
				current->pid);
		mask = 0;
	}

end_function:
	return mask;
}
/**
 * sep_time_address - address in SEP memory of time
 * @sep: SEP device we want the address from
 *
 * Return the address of the two dwords in memory used for time
 * insertion.
 */
static u32 *sep_time_address(struct sep_device *sep)
{
	return sep->shared_addr +
		SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;
}
/**
 * sep_set_time - set the SEP time
 * @sep: the SEP we are setting the time for
 *
 * Calculates time and sets it at the predefined address.
 * Called with the SEP mutex held.
 */
static unsigned long sep_set_time(struct sep_device *sep)
{
	struct timeval time;
	u32 *time_addr;	/* Address of time as seen by the kernel */

	do_gettimeofday(&time);

	/* Set value in the SYSTEM MEMORY offset */
	time_addr = sep_time_address(sep);

	time_addr[0] = SEP_TIME_VAL_TOKEN;
	time_addr[1] = time.tv_sec;

	dev_dbg(&sep->pdev->dev, "[PID%d] time.tv_sec is %lu\n",
					current->pid, time.tv_sec);
	dev_dbg(&sep->pdev->dev, "[PID%d] time_addr is %p\n",
					current->pid, time_addr);
	dev_dbg(&sep->pdev->dev, "[PID%d] sep->shared_addr is %p\n",
					current->pid, sep->shared_addr);

	return time.tv_sec;
}
/**
 * sep_send_command_handler - kick off a command
 * @sep: SEP being signalled
 *
 * This function raises interrupt to SEP that signals that it has a new
 * command from the host
 *
 * Note that this function does fall under the ioctl lock
 */
int sep_send_command_handler(struct sep_device *sep)
{
	unsigned long lock_irq_flag;
	u32 *msg_pool;
	int error = 0;

	/* Basic sanity check; set msg pool to start of shared area */
	msg_pool = (u32 *)sep->shared_addr;
	msg_pool += 2;

	/* Look for start msg token */
	if (*msg_pool != SEP_START_MSG_TOKEN) {
		dev_warn(&sep->pdev->dev, "start message token not present\n");
		error = -EPROTO;
		goto end_function;
	}

	/* Do we have a reasonable size? */
	msg_pool += 1;
	if ((*msg_pool < 2) ||
		(*msg_pool > SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES)) {

		dev_warn(&sep->pdev->dev, "invalid message size\n");
		error = -EPROTO;
		goto end_function;
	}

	/* Does the command look reasonable? */
	msg_pool += 1;
	if (*msg_pool < 2) {
		dev_warn(&sep->pdev->dev, "invalid message opcode\n");
		error = -EPROTO;
		goto end_function;
	}

#if defined(CONFIG_PM_RUNTIME) && defined(SEP_ENABLE_RUNTIME_PM)
	dev_dbg(&sep->pdev->dev, "[PID%d] before pm sync status 0x%X\n",
					current->pid,
					sep->pdev->dev.power.runtime_status);
	sep->in_use = 1; /* device is about to be used */
	pm_runtime_get_sync(&sep->pdev->dev);
#endif

	if (test_and_set_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags)) {
		error = -EPROTO;
		goto end_function;
	}
	sep->in_use = 1; /* device is about to be used */

	sep_set_time(sep);

	sep_dump_message(sep);

	/* Update counter */
	spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);
	sep->send_ct++;
	spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);

	dev_dbg(&sep->pdev->dev,
		"[PID%d] sep_send_command_handler send_ct %lx reply_ct %lx\n",
			current->pid, sep->send_ct, sep->reply_ct);

	/* Send interrupt to SEP */
	sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);

end_function:
	return error;
}
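/*
 * Illustrative sketch of the message header checked above (word
 * offsets into the shared area as reconstructed here; the exact
 * header format is defined by the SEP firmware interface):
 *
 *	word 0..1: reserved for the driver
 *	word 2:    SEP_START_MSG_TOKEN
 *	word 3:    total message size in bytes
 *	word 4:    opcode
 *
 * The handler only sanity-checks these fields; the message body itself
 * is written by userspace (or by sep_crypto.c) before the request that
 * ends up here.
 */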
/**
 * sep_crypto_dma - DMA map a scatterlist for SEP
 * @sep: pointer to struct sep_device
 * @sg: pointer to struct scatterlist
 * @direction: DMA mapping direction
 * @dma_maps: pointer to place a pointer to array of dma maps
 *	This is filled in; anything previous there will be lost
 *	The structure for dma maps is sep_dma_map
 * @returns number of dma maps on success; negative on error
 *
 * This creates the dma table from the scatterlist
 * It is used only for kernel crypto as it works with scatterlists
 * representation of data buffers
 */
static int sep_crypto_dma(
	struct sep_device *sep,
	struct scatterlist *sg,
	struct sep_dma_map **dma_maps,
	enum dma_data_direction direction)
{
	struct scatterlist *temp_sg;

	u32 count_segment;
	u32 count_mapped;
	struct sep_dma_map *sep_dma;
	int ct1;

	if (sg->length == 0)
		return 0;

	/* Count the segments */
	count_segment = 0;
	temp_sg = sg;
	while (temp_sg) {
		count_segment += 1;
		temp_sg = scatterwalk_sg_next(temp_sg);
	}
	dev_dbg(&sep->pdev->dev,
		"There are (hex) %x segments in sg\n", count_segment);

	/* DMA map segments */
	count_mapped = dma_map_sg(&sep->pdev->dev, sg,
		count_segment, direction);

	dev_dbg(&sep->pdev->dev,
		"There are (hex) %x maps in sg\n", count_mapped);

	if (count_mapped == 0) {
		dev_dbg(&sep->pdev->dev, "Cannot dma_map_sg\n");
		return -ENOMEM;
	}

	sep_dma = kmalloc(sizeof(struct sep_dma_map) *
		count_mapped, GFP_ATOMIC);

	if (sep_dma == NULL) {
		dev_dbg(&sep->pdev->dev, "Cannot allocate dma_maps\n");
		return -ENOMEM;
	}

	for_each_sg(sg, temp_sg, count_mapped, ct1) {
		sep_dma[ct1].dma_addr = sg_dma_address(temp_sg);
		sep_dma[ct1].size = sg_dma_len(temp_sg);
		dev_dbg(&sep->pdev->dev, "(all hex) map %x dma %lx len %lx\n",
			ct1, (unsigned long)sep_dma[ct1].dma_addr,
			(unsigned long)sep_dma[ct1].size);
	}

	*dma_maps = sep_dma;
	return count_mapped;
}
/**
 * sep_crypto_lli - build an lli array from a scatterlist
 * @sep: pointer to struct sep_device
 * @sg: pointer to struct scatterlist
 * @data_size: total data size
 * @direction: DMA mapping direction
 * @maps: pointer to place a pointer to array of dma maps
 *	This is filled in; anything previous there will be lost
 *	The structure for dma maps is sep_dma_map
 * @llis: pointer to place a pointer to array of lli entries
 *	This is filled in; anything previous there will be lost
 *	The structure for lli entries is sep_lli_entry
 * @returns number of dma maps on success; negative on error
 *
 * This creates the LLI table from the scatterlist
 * It is only used for kernel crypto as it works exclusively
 * with scatterlists (struct scatterlist) representation of
 * data buffers
 */
static int sep_crypto_lli(
	struct sep_device *sep,
	struct scatterlist *sg,
	struct sep_dma_map **maps,
	struct sep_lli_entry **llis,
	u32 data_size,
	enum dma_data_direction direction)
{
	int ct1;
	int nbr_ents;
	struct sep_lli_entry *sep_lli;
	struct sep_dma_map *sep_map;

	nbr_ents = sep_crypto_dma(sep, sg, maps, direction);
	if (nbr_ents <= 0) {
		dev_dbg(&sep->pdev->dev, "crypto_dma failed %x\n",
			nbr_ents);
		return nbr_ents;
	}

	sep_map = *maps;

	sep_lli = kmalloc(sizeof(struct sep_lli_entry) * nbr_ents, GFP_ATOMIC);

	if (sep_lli == NULL) {
		dev_dbg(&sep->pdev->dev, "Cannot allocate lli_maps\n");

		kfree(*maps);
		*maps = NULL;
		return -ENOMEM;
	}

	for (ct1 = 0; ct1 < nbr_ents; ct1 += 1) {
		sep_lli[ct1].bus_address = (u32)sep_map[ct1].dma_addr;

		/* Maximum for page is total data size */
		if (sep_map[ct1].size > data_size)
			sep_map[ct1].size = data_size;

		sep_lli[ct1].block_size = (u32)sep_map[ct1].size;

		data_size -= sep_map[ct1].size;
	}

	*llis = sep_lli;
	return nbr_ents;
}
/**
 * sep_lock_kernel_pages - map kernel pages for DMA
 * @sep: pointer to struct sep_device
 * @kernel_virt_addr: address of data buffer in kernel
 * @data_size: size of data
 * @lli_array_ptr: lli array
 * @in_out_flag: input into device or output from device
 * @dma_ctx: pointer to struct sep_dma_context
 *
 * This function locks all the physical pages of the kernel virtual buffer
 * and constructs a basic lli array, where each entry holds the physical
 * page address and the size that application data holds in this page
 * This function is used only during kernel crypto mod calls from within
 * the kernel (when ioctl is not used)
 *
 * This is used only for kernel crypto. Kernel pages
 * are handled differently as they are done via
 * scatter gather lists (struct scatterlist)
 */
static int sep_lock_kernel_pages(struct sep_device *sep,
	unsigned long kernel_virt_addr,
	u32 data_size,
	struct sep_lli_entry **lli_array_ptr,
	int in_out_flag,
	struct sep_dma_context *dma_ctx)
{
	int num_pages;
	struct scatterlist *sg;

	/* Array of lli */
	struct sep_lli_entry *lli_array;
	/* Map array */
	struct sep_dma_map *map_array;

	enum dma_data_direction direction;

	lli_array = NULL;
	map_array = NULL;

	if (in_out_flag == SEP_DRIVER_IN_FLAG) {
		direction = DMA_TO_DEVICE;
		sg = dma_ctx->src_sg;
	} else {
		direction = DMA_FROM_DEVICE;
		sg = dma_ctx->dst_sg;
	}

	num_pages = sep_crypto_lli(sep, sg, &map_array, &lli_array,
		data_size, direction);

	if (num_pages <= 0) {
		dev_dbg(&sep->pdev->dev, "sep_crypto_lli returned error %x\n",
			num_pages);
		return -ENOMEM;
	}

	/* Put mapped kernel sg into kernel resource array */

	/* Set output params according to the in_out flag */
	if (in_out_flag == SEP_DRIVER_IN_FLAG) {
		*lli_array_ptr = lli_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages =
								num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array =
								NULL;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array =
								map_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_num_entries =
								num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].src_sg =
								dma_ctx->src_sg;
	} else {
		*lli_array_ptr = lli_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages =
								num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array =
								NULL;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array =
								map_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
			out_map_num_entries = num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].dst_sg =
								dma_ctx->dst_sg;
	}

	return 0;
}
/**
 * sep_lock_user_pages - lock and map user pages for DMA
 * @sep: pointer to struct sep_device
 * @app_virt_addr: user memory data buffer
 * @data_size: size of data buffer
 * @lli_array_ptr: lli array
 * @in_out_flag: input or output to device
 * @dma_ctx: pointer to struct sep_dma_context
 *
 * This function locks all the physical pages of the application
 * virtual buffer and constructs a basic lli array, where each entry
 * holds the physical page address and the size that application
 * data holds in this physical page
 */
static int sep_lock_user_pages(struct sep_device *sep,
	u32 app_virt_addr,
	u32 data_size,
	struct sep_lli_entry **lli_array_ptr,
	int in_out_flag,
	struct sep_dma_context *dma_ctx)
{
	int error = 0;
	u32 count;
	int result;
	/* The page of the end address of the user space buffer */
	u32 end_page;
	/* The page of the start address of the user space buffer */
	u32 start_page;
	/* The range in pages */
	u32 num_pages;
	/* Array of pointers to page */
	struct page **page_array;
	/* Array of lli */
	struct sep_lli_entry *lli_array;
	/* Map array */
	struct sep_dma_map *map_array;

	/* Set start and end pages and num pages */
	end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
	start_page = app_virt_addr >> PAGE_SHIFT;
	num_pages = end_page - start_page + 1;

	dev_dbg(&sep->pdev->dev,
		"[PID%d] lock user pages app_virt_addr is %x\n",
			current->pid, app_virt_addr);

	dev_dbg(&sep->pdev->dev, "[PID%d] data_size is (hex) %x\n",
					current->pid, data_size);
	dev_dbg(&sep->pdev->dev, "[PID%d] start_page is (hex) %x\n",
					current->pid, start_page);
	dev_dbg(&sep->pdev->dev, "[PID%d] end_page is (hex) %x\n",
					current->pid, end_page);
	dev_dbg(&sep->pdev->dev, "[PID%d] num_pages is (hex) %x\n",
					current->pid, num_pages);

	/* Allocate array of pages structure pointers */
	page_array = kmalloc_array(num_pages, sizeof(struct page *),
				   GFP_ATOMIC);
	if (!page_array) {
		error = -ENOMEM;
		goto end_function;
	}

	map_array = kmalloc_array(num_pages, sizeof(struct sep_dma_map),
				  GFP_ATOMIC);
	if (!map_array) {
		error = -ENOMEM;
		goto end_function_with_error1;
	}

	lli_array = kmalloc_array(num_pages, sizeof(struct sep_lli_entry),
				  GFP_ATOMIC);
	if (!lli_array) {
		error = -ENOMEM;
		goto end_function_with_error2;
	}

	/* Convert the application virtual address into a set of physical */
	result = get_user_pages_fast(app_virt_addr, num_pages,
		((in_out_flag == SEP_DRIVER_IN_FLAG) ? 0 : 1), page_array);

	/* Check the number of pages locked - if not all then exit with error */
	if (result != num_pages) {
		dev_warn(&sep->pdev->dev,
			"[PID%d] not all pages locked by get_user_pages, "
			"result 0x%X, num_pages 0x%X\n",
				current->pid, result, num_pages);
		error = -ENOMEM;
		goto end_function_with_error3;
	}

	dev_dbg(&sep->pdev->dev, "[PID%d] get_user_pages succeeded\n",
					current->pid);

	/*
	 * Fill the array using page array data and
	 * map the pages - this action will also flush the cache as needed
	 */
	for (count = 0; count < num_pages; count++) {
		/* Fill the map array */
		map_array[count].dma_addr =
			dma_map_page(&sep->pdev->dev, page_array[count],
			0, PAGE_SIZE, DMA_BIDIRECTIONAL);

		map_array[count].size = PAGE_SIZE;

		/* Fill the lli array entry */
		lli_array[count].bus_address = (u32)map_array[count].dma_addr;
		lli_array[count].block_size = PAGE_SIZE;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_array[%x].bus_address is %08lx, "
			"lli_array[%x].block_size is (hex) %x\n", current->pid,
			count, (unsigned long)lli_array[count].bus_address,
			count, lli_array[count].block_size);
	}

	/* Check the offset for the first page */
	lli_array[0].bus_address =
		lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK));

	/* Check that not all the data is in the first page only */
	if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
		lli_array[0].block_size = data_size;
	else
		lli_array[0].block_size =
			PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));

	dev_dbg(&sep->pdev->dev,
		"[PID%d] After check if page 0 has all data\n",
		current->pid);
	dev_dbg(&sep->pdev->dev,
		"[PID%d] lli_array[0].bus_address is (hex) %08lx, "
		"lli_array[0].block_size is (hex) %x\n",
			current->pid,
			(unsigned long)lli_array[0].bus_address,
			lli_array[0].block_size);

	/* Check the size of the last page */
	if (num_pages > 1) {
		lli_array[num_pages - 1].block_size =
			(app_virt_addr + data_size) & (~PAGE_MASK);
		if (lli_array[num_pages - 1].block_size == 0)
			lli_array[num_pages - 1].block_size = PAGE_SIZE;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] After last page size adjustment\n",
			current->pid);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_array[%x].bus_address is (hex) %08lx, "
			"lli_array[%x].block_size is (hex) %x\n",
				current->pid,
				num_pages - 1,
				(unsigned long)lli_array[num_pages - 1].bus_address,
				num_pages - 1,
				lli_array[num_pages - 1].block_size);
	}

	/* Set output params according to the in_out flag */
	if (in_out_flag == SEP_DRIVER_IN_FLAG) {
		*lli_array_ptr = lli_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages =
								num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array =
								page_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array =
								map_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_num_entries =
								num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].src_sg = NULL;
	} else {
		*lli_array_ptr = lli_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages =
								num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array =
								page_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array =
								map_array;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
			out_map_num_entries = num_pages;
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].dst_sg = NULL;
	}
	goto end_function;

end_function_with_error3:
	/* Free lli array */
	kfree(lli_array);

end_function_with_error2:
	kfree(map_array);

end_function_with_error1:
	/* Free page array */
	kfree(page_array);

end_function:
	return error;
}
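/*
 * Worked example of the first/last page fix-ups above (made-up
 * numbers, 4 KB pages): for app_virt_addr == 0x10000f00 and
 * data_size == 0x1200, the buffer starts in page 0x10000 and ends in
 * page 0x10002, so num_pages == 3 and the array becomes
 *
 *	lli_array[0]: bus + 0xf00, block_size = 0x100  (tail of page 0)
 *	lli_array[1]: bus,         block_size = 0x1000 (whole page)
 *	lli_array[2]: bus,         block_size = 0x100  (head of page 2)
 *
 * 0x100 + 0x1000 + 0x100 == 0x1200 == data_size, as required.
 */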
/**
 * sep_lli_table_secure_dma - get lli array for IMR addresses
 * @sep: pointer to struct sep_device
 * @app_virt_addr: user memory data buffer
 * @data_size: size of data buffer
 * @lli_array_ptr: lli array
 * @in_out_flag: not used
 * @dma_ctx: pointer to struct sep_dma_context
 *
 * This function creates lli tables for outputting data to
 * IMR memory, which is memory that cannot be accessed by the
 * x86 processor.
 */
static int sep_lli_table_secure_dma(struct sep_device *sep,
	u32 app_virt_addr,
	u32 data_size,
	struct sep_lli_entry **lli_array_ptr,
	int in_out_flag,
	struct sep_dma_context *dma_ctx)
{
	int error = 0;
	u32 count;
	/* The page of the end address of the user space buffer */
	u32 end_page;
	/* The page of the start address of the user space buffer */
	u32 start_page;
	/* The range in pages */
	u32 num_pages;
	/* Array of lli */
	struct sep_lli_entry *lli_array;

	/* Set start and end pages and num pages */
	end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
	start_page = app_virt_addr >> PAGE_SHIFT;
	num_pages = end_page - start_page + 1;

	dev_dbg(&sep->pdev->dev,
		"[PID%d] lock user pages app_virt_addr is %x\n",
			current->pid, app_virt_addr);

	dev_dbg(&sep->pdev->dev, "[PID%d] data_size is (hex) %x\n",
		current->pid, data_size);
	dev_dbg(&sep->pdev->dev, "[PID%d] start_page is (hex) %x\n",
		current->pid, start_page);
	dev_dbg(&sep->pdev->dev, "[PID%d] end_page is (hex) %x\n",
		current->pid, end_page);
	dev_dbg(&sep->pdev->dev, "[PID%d] num_pages is (hex) %x\n",
		current->pid, num_pages);

	lli_array = kmalloc_array(num_pages, sizeof(struct sep_lli_entry),
				  GFP_ATOMIC);
	if (!lli_array)
		return -ENOMEM;

	/*
	 * Fill the lli_array
	 */
	start_page = start_page << PAGE_SHIFT;
	for (count = 0; count < num_pages; count++) {
		/* Fill the lli array entry */
		lli_array[count].bus_address = start_page;
		lli_array[count].block_size = PAGE_SIZE;

		start_page += PAGE_SIZE;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_array[%x].bus_address is %08lx, "
			"lli_array[%x].block_size is (hex) %x\n",
			current->pid,
			count, (unsigned long)lli_array[count].bus_address,
			count, lli_array[count].block_size);
	}

	/* Check the offset for the first page */
	lli_array[0].bus_address =
		lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK));

	/* Check that not all the data is in the first page only */
	if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
		lli_array[0].block_size = data_size;
	else
		lli_array[0].block_size =
			PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));

	dev_dbg(&sep->pdev->dev,
		"[PID%d] After check if page 0 has all data\n"
		"lli_array[0].bus_address is (hex) %08lx, "
		"lli_array[0].block_size is (hex) %x\n",
		current->pid,
		(unsigned long)lli_array[0].bus_address,
		lli_array[0].block_size);

	/* Check the size of the last page */
	if (num_pages > 1) {
		lli_array[num_pages - 1].block_size =
			(app_virt_addr + data_size) & (~PAGE_MASK);
		if (lli_array[num_pages - 1].block_size == 0)
			lli_array[num_pages - 1].block_size = PAGE_SIZE;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] After last page size adjustment\n"
			"lli_array[%x].bus_address is (hex) %08lx, "
			"lli_array[%x].block_size is (hex) %x\n",
			current->pid, num_pages - 1,
			(unsigned long)lli_array[num_pages - 1].bus_address,
			num_pages - 1,
			lli_array[num_pages - 1].block_size);
	}
	*lli_array_ptr = lli_array;
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages = num_pages;
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array = NULL;
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_num_entries = 0;

	return error;
}
/**
 * sep_calculate_lli_table_max_size - size the LLI table
 * @sep: pointer to struct sep_device
 * @lli_in_array_ptr: pointer to the lli array
 * @num_array_entries: number of entries in the lli array
 * @last_table_flag: set when this is the last table to build
 *
 * This function calculates the size of data that can be inserted into
 * the lli table from this array, such that either the table is full
 * (all entries are entered), or there are no more entries in the
 * lli array
 */
static u32 sep_calculate_lli_table_max_size(struct sep_device *sep,
	struct sep_lli_entry *lli_in_array_ptr,
	u32 num_array_entries,
	u32 *last_table_flag)
{
	u32 counter;
	/* Table data size */
	u32 table_data_size = 0;
	/* Data size for the next table */
	u32 next_table_data_size;

	*last_table_flag = 0;

	/*
	 * Calculate the data in the out lli table till we fill the whole
	 * table or till the data has ended
	 */
	for (counter = 0;
		(counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) &&
			(counter < num_array_entries); counter++)
		table_data_size += lli_in_array_ptr[counter].block_size;

	/*
	 * Check if we reached the last entry,
	 * meaning this is the last table to build,
	 * and no need to check the block alignment
	 */
	if (counter == num_array_entries) {
		/* Set the last table flag */
		*last_table_flag = 1;
		goto end_function;
	}

	/*
	 * Calculate the data size of the next table.
	 * Stop if no entries left or if data size is more the DMA restriction
	 */
	next_table_data_size = 0;
	for (; counter < num_array_entries; counter++) {
		next_table_data_size += lli_in_array_ptr[counter].block_size;
		if (next_table_data_size >= SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)
			break;
	}

	/*
	 * Check if the next table data size is less than the DMA restriction.
	 * If it is - recalculate the current table size, so that the next
	 * table data size will be adequate for DMA
	 */
	if (next_table_data_size &&
		next_table_data_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)

		table_data_size -= (SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE -
			next_table_data_size);

end_function:
	return table_data_size;
}
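/*
 * Worked example (constants are hypothetical: assume 32 entries per
 * table and SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE == 16): with four
 * remaining lli entries of 0x1000 bytes each, the first loop consumes
 * all four (4 < 31), counter == num_array_entries, and the function
 * returns 0x4000 with *last_table_flag set. If instead 40 entries
 * remained, the first loop would take 31 of them and the look-ahead
 * loop would then check that the 9 left-over entries still hold at
 * least the DMA minimum, shrinking the current table if they do not.
 */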
/**
 * sep_build_lli_table - build an lli array for the given table
 * @sep: pointer to struct sep_device
 * @lli_array_ptr: pointer to lli array
 * @lli_table_ptr: pointer to lli table
 * @num_processed_entries_ptr: pointer to number of entries
 * @num_table_entries_ptr: pointer to number of tables
 * @table_data_size: total data size
 *
 * Builds an lli table from the lli_array according to
 * the given size of data
 */
static void sep_build_lli_table(struct sep_device *sep,
	struct sep_lli_entry	*lli_array_ptr,
	struct sep_lli_entry	*lli_table_ptr,
	u32 *num_processed_entries_ptr,
	u32 *num_table_entries_ptr,
	u32 table_data_size)
{
	/* Current table data size */
	u32 curr_table_data_size;
	/* Counter of lli array entry */
	u32 array_counter;

	/* Init current table data size and lli array entry counter */
	curr_table_data_size = 0;
	array_counter = 0;
	*num_table_entries_ptr = 1;

	dev_dbg(&sep->pdev->dev,
		"[PID%d] build lli table table_data_size: (hex) %x\n",
			current->pid, table_data_size);

	/* Fill the table till table size reaches the needed amount */
	while (curr_table_data_size < table_data_size) {
		/* Update the number of entries in table */
		(*num_table_entries_ptr)++;

		lli_table_ptr->bus_address =
			cpu_to_le32(lli_array_ptr[array_counter].bus_address);

		lli_table_ptr->block_size =
			cpu_to_le32(lli_array_ptr[array_counter].block_size);

		curr_table_data_size += lli_array_ptr[array_counter].block_size;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_table_ptr is %p\n",
				current->pid, lli_table_ptr);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_table_ptr->bus_address: %08lx\n",
				current->pid,
				(unsigned long)lli_table_ptr->bus_address);

		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_table_ptr->block_size is (hex) %x\n",
				current->pid, lli_table_ptr->block_size);

		/* Check for overflow of the table data */
		if (curr_table_data_size > table_data_size) {
			dev_dbg(&sep->pdev->dev,
				"[PID%d] curr_table_data_size too large\n",
					current->pid);

			/* Update the size of block in the table */
			lli_table_ptr->block_size =
				cpu_to_le32(lli_table_ptr->block_size) -
				(curr_table_data_size - table_data_size);

			/* Update the physical address in the lli array */
			lli_array_ptr[array_counter].bus_address +=
				cpu_to_le32(lli_table_ptr->block_size);

			/* Update the block size left in the lli array */
			lli_array_ptr[array_counter].block_size =
				(curr_table_data_size - table_data_size);
		} else {
			/* Advance to the next entry in the lli_array */
			array_counter++;
		}

		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_table_ptr->bus_address is %08lx\n",
				current->pid,
				(unsigned long)lli_table_ptr->bus_address);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli_table_ptr->block_size is (hex) %x\n",
				current->pid,
				lli_table_ptr->block_size);

		/* Move to the next entry in table */
		lli_table_ptr++;
	}

	/* Set the info entry to default */
	lli_table_ptr->bus_address = 0xffffffff;
	lli_table_ptr->block_size = 0;

	/* Set the output parameter */
	*num_processed_entries_ptr += array_counter;
}
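/*
 * Worked example of the overflow split above (made-up sizes): if the
 * current lli array entry holds 0x3000 bytes but only 0x2800 more
 * bytes fit within table_data_size, the entry is written into the
 * table with block_size trimmed to 0x2800, its bus_address is advanced
 * by 0x2800, and 0x800 bytes remain in the array entry; array_counter
 * is not advanced, so the remainder becomes the first entry of the
 * next table.
 */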
/**
 * sep_shared_area_virt_to_bus - map shared area to bus address
 * @sep: pointer to struct sep_device
 * @virt_address: virtual address to convert
 *
 * This function returns the physical address inside shared area according
 * to the virtual address. It can be either on the external RAM device
 * (ioremapped), or on the system RAM
 * This implementation is for the external RAM
 */
static dma_addr_t sep_shared_area_virt_to_bus(struct sep_device *sep,
	void *virt_address)
{
	dev_dbg(&sep->pdev->dev, "[PID%d] sh virt to phys v %p\n",
					current->pid, virt_address);
	dev_dbg(&sep->pdev->dev, "[PID%d] sh virt to phys p %08lx\n",
		current->pid,
		(unsigned long)
		(sep->shared_bus + (virt_address - sep->shared_addr)));

	return sep->shared_bus + (size_t)(virt_address - sep->shared_addr);
}
/**
 * sep_shared_area_bus_to_virt - map shared area bus address to kernel
 * @sep: pointer to struct sep_device
 * @bus_address: bus address to convert
 *
 * This function returns the virtual address inside shared area
 * according to the physical address. It can be either on the
 * external RAM device (ioremapped), or on the system RAM
 * This implementation is for the external RAM
 */
static void *sep_shared_area_bus_to_virt(struct sep_device *sep,
	dma_addr_t bus_address)
{
	dev_dbg(&sep->pdev->dev, "[PID%d] shared bus to virt b=%lx v=%lx\n",
		current->pid,
		(unsigned long)bus_address, (unsigned long)(sep->shared_addr +
		(size_t)(bus_address - sep->shared_bus)));

	return sep->shared_addr + (size_t)(bus_address - sep->shared_bus);
}
/**
 * sep_debug_print_lli_tables - dump LLI table
 * @sep: pointer to struct sep_device
 * @lli_table_ptr: pointer to sep_lli_entry
 * @num_table_entries: number of entries
 * @table_data_size: total data size
 *
 * Walk the list of the created tables and print all the data
 */
static void sep_debug_print_lli_tables(struct sep_device *sep,
	struct sep_lli_entry *lli_table_ptr,
	unsigned long num_table_entries,
	unsigned long table_data_size)
{
	unsigned long table_count = 1;
	unsigned long entries_count = 0;

	dev_dbg(&sep->pdev->dev, "[PID%d] sep_debug_print_lli_tables start\n",
					current->pid);
	if (num_table_entries == 0) {
		dev_dbg(&sep->pdev->dev, "[PID%d] no table to print\n",
			current->pid);
		return;
	}

	while ((unsigned long) lli_table_ptr->bus_address != 0xffffffff) {
		dev_dbg(&sep->pdev->dev,
			"[PID%d] lli table %08lx, "
			"table_data_size is (hex) %lx\n",
				current->pid, table_count, table_data_size);
		dev_dbg(&sep->pdev->dev,
			"[PID%d] num_table_entries is (hex) %lx\n",
				current->pid, num_table_entries);

		/* Print entries of the table (without info entry) */
		for (entries_count = 0; entries_count < num_table_entries;
			entries_count++, lli_table_ptr++) {

			dev_dbg(&sep->pdev->dev,
				"[PID%d] lli_table_ptr address is %08lx\n",
					current->pid,
					(unsigned long) lli_table_ptr);

			dev_dbg(&sep->pdev->dev,
				"[PID%d] phys address is %08lx "
				"block size is (hex) %x\n", current->pid,
				(unsigned long)lli_table_ptr->bus_address,
				lli_table_ptr->block_size);
		}

		/* Point to the info entry */
		lli_table_ptr--;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] phys lli_table_ptr->block_size "
			"is (hex) %x\n",
				current->pid,
				lli_table_ptr->block_size);

		dev_dbg(&sep->pdev->dev,
			"[PID%d] phys lli_table_ptr->physical_address "
			"is %08lx\n",
				current->pid,
				(unsigned long)lli_table_ptr->bus_address);

		table_data_size = lli_table_ptr->block_size & 0xffffff;
		num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] phys table_data_size is "
			"(hex) %lx num_table_entries is"
			" %lx bus_address is %lx\n",
				current->pid,
				table_data_size,
				num_table_entries,
				(unsigned long)lli_table_ptr->bus_address);

		if ((unsigned long)lli_table_ptr->bus_address != 0xffffffff)
			lli_table_ptr = (struct sep_lli_entry *)
				sep_shared_bus_to_virt(sep,
				(unsigned long)lli_table_ptr->bus_address);

		table_count++;
	}
	dev_dbg(&sep->pdev->dev, "[PID%d] sep_debug_print_lli_tables end\n",
					current->pid);
}
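/*
 * Note on the info entry decoded above: the last entry of every table
 * is not a data descriptor but a link. Its bus_address points at the
 * next table (0xffffffff terminates the chain) and its block_size
 * packs two fields:
 *
 *	bits 31..24: number of entries in the next table
 *	bits 23..0:  data size of the next table in bytes
 *
 * So, for example, a block_size of 0x05001000 describes a next table
 * with 5 entries covering 0x1000 bytes of data.
 */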
/**
 * sep_prepare_empty_lli_table - create a blank LLI table
 * @sep: pointer to struct sep_device
 * @lli_table_addr_ptr: pointer to lli table
 * @num_entries_ptr: pointer to number of entries
 * @table_data_size_ptr: point to table data size
 * @dmatables_region: Optional buffer for DMA tables
 * @dma_ctx: DMA context
 *
 * This function creates empty lli tables when there is no data
 */
static void sep_prepare_empty_lli_table(struct sep_device *sep,
		dma_addr_t *lli_table_addr_ptr,
		u32 *num_entries_ptr,
		u32 *table_data_size_ptr,
		void **dmatables_region,
		struct sep_dma_context *dma_ctx)
{
	struct sep_lli_entry *lli_table_ptr;

	/* Find the area for new table */
	lli_table_ptr =
		(struct sep_lli_entry *)(sep->shared_addr +
		SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
		dma_ctx->num_lli_tables_created * sizeof(struct sep_lli_entry) *
			SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);

	if (dmatables_region && *dmatables_region)
		lli_table_ptr = *dmatables_region;

	lli_table_ptr->bus_address = 0;
	lli_table_ptr->block_size = 0;

	lli_table_ptr++;
	lli_table_ptr->bus_address = 0xFFFFFFFF;
	lli_table_ptr->block_size = 0;

	/* Set the output parameter value */
	*lli_table_addr_ptr = sep->shared_bus +
		SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
		dma_ctx->num_lli_tables_created *
		sizeof(struct sep_lli_entry) *
		SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;

	/* Set the num of entries and table data size for empty table */
	*num_entries_ptr = 2;
	*table_data_size_ptr = 0;

	/* Update the number of created tables */
	dma_ctx->num_lli_tables_created++;
}
/**
 * sep_prepare_input_dma_table - prepare input DMA mappings
 * @sep: pointer to struct sep_device
 * @app_virt_addr: user application memory buffer
 * @data_size: size of the data
 * @block_size: block size of the operation
 * @lli_table_ptr: returned bus address of the first lli table
 * @num_entries_ptr: returned number of entries in the first table
 * @table_data_size_ptr: returned data size of the first table
 * @is_kva: set for kernel data (kernel crypto call)
 *
 * This function prepares only input DMA table for synchronic symmetric
 * operations (HASH)
 * Note that all bus addresses that are passed to the SEP
 * are in 32 bit format; the SEP is a 32 bit device
 */
static int sep_prepare_input_dma_table(struct sep_device *sep,
	unsigned long app_virt_addr,
	u32 data_size,
	u32 block_size,
	dma_addr_t *lli_table_ptr,
	u32 *num_entries_ptr,
	u32 *table_data_size_ptr,
	bool is_kva,
	void **dmatables_region,
	struct sep_dma_context *dma_ctx
)
{
	int error = 0;
	/* Pointer to the info entry of the table - the last entry */
	struct sep_lli_entry *info_entry_ptr;
	/* Array of pointers to page */
	struct sep_lli_entry *lli_array_ptr;
	/* Points to the first entry to be processed in the lli_in_array */
	u32 current_entry = 0;
	/* Num entries in the virtual buffer */
	u32 sep_lli_entries = 0;
	/* Lli table pointer */
	struct sep_lli_entry *in_lli_table_ptr;
	/* The total data in one table */
	u32 table_data_size = 0;
	/* Flag for last table */
	u32 last_table_flag = 0;
	/* Number of entries in lli table */
	u32 num_entries_in_table = 0;
	/* Next table address */
	void *lli_table_alloc_addr = NULL;
	void *dma_lli_table_alloc_addr = NULL;
	void *dma_in_lli_table_ptr = NULL;

	dev_dbg(&sep->pdev->dev,
		"[PID%d] prepare input dma tbl data size: (hex) %x\n",
		current->pid, data_size);

	dev_dbg(&sep->pdev->dev, "[PID%d] block_size is (hex) %x\n",
					current->pid, block_size);

	/* Initialize the pages pointers */
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages = 0;

	/* Set the kernel address for first table to be allocated */
	lli_table_alloc_addr = (void *)(sep->shared_addr +
		SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
		dma_ctx->num_lli_tables_created * sizeof(struct sep_lli_entry) *
		SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);

	if (data_size == 0) {
		if (dmatables_region) {
			error = sep_allocate_dmatables_region(sep,
						dmatables_region,
						dma_ctx,
						1);
			if (error)
				return error;
		}
		/* Special case - create empty table - 2 entries, zero data */
		sep_prepare_empty_lli_table(sep, lli_table_ptr,
				num_entries_ptr, table_data_size_ptr,
				dmatables_region, dma_ctx);
		goto update_dcb_counter;
	}

	/* Check if the pages are in Kernel Virtual Address layout */
	if (is_kva == true)
		error = sep_lock_kernel_pages(sep, app_virt_addr,
			data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG,
			dma_ctx);
	else
		/*
		 * Lock the pages of the user buffer
		 * and translate them to pages
		 */
		error = sep_lock_user_pages(sep, app_virt_addr,
			data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG,
			dma_ctx);

	if (error)
		goto end_function;

	dev_dbg(&sep->pdev->dev,
		"[PID%d] output sep_in_num_pages is (hex) %x\n",
		current->pid,
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages);

	current_entry = 0;
	info_entry_ptr = NULL;

	sep_lli_entries =
		dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages;

	dma_lli_table_alloc_addr = lli_table_alloc_addr;
	if (dmatables_region) {
		error = sep_allocate_dmatables_region(sep,
					dmatables_region,
					dma_ctx,
					sep_lli_entries);
		if (error)
			goto end_function_error;
		lli_table_alloc_addr = *dmatables_region;
	}

	/* Loop till all the entries in in array are processed */
	while (current_entry < sep_lli_entries) {

		/* Set the new input and output tables */
		in_lli_table_ptr =
			(struct sep_lli_entry *)lli_table_alloc_addr;
		dma_in_lli_table_ptr =
			(struct sep_lli_entry *)dma_lli_table_alloc_addr;

		lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
			SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
		dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
			SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;

		if (dma_lli_table_alloc_addr >
			((void *)sep->shared_addr +
			SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
			SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {

			error = -ENOMEM;
			goto end_function_error;
		}

		/* Update the number of created tables */
		dma_ctx->num_lli_tables_created++;

		/* Calculate the maximum size of data for input table */
		table_data_size = sep_calculate_lli_table_max_size(sep,
			&lli_array_ptr[current_entry],
			(sep_lli_entries - current_entry),
			&last_table_flag);

		/*
		 * If this is not the last table -
		 * then align it to the block size
		 */
		if (!last_table_flag)
			table_data_size =
				(table_data_size / block_size) * block_size;

		dev_dbg(&sep->pdev->dev,
			"[PID%d] output table_data_size is (hex) %x\n",
				current->pid,
				table_data_size);

		/* Construct input lli table */
		sep_build_lli_table(sep, &lli_array_ptr[current_entry],
			in_lli_table_ptr,
			&current_entry, &num_entries_in_table,
			table_data_size);

		if (info_entry_ptr == NULL) {

			/* Set the output parameters to physical addresses */
			*lli_table_ptr = sep_shared_area_virt_to_bus(sep,
				dma_in_lli_table_ptr);
			*num_entries_ptr = num_entries_in_table;
			*table_data_size_ptr = table_data_size;

			dev_dbg(&sep->pdev->dev,
				"[PID%d] output lli_table_in_ptr is %08lx\n",
				current->pid,
				(unsigned long)*lli_table_ptr);

		} else {
			/* Update the info entry of the previous in table */
			info_entry_ptr->bus_address =
				sep_shared_area_virt_to_bus(sep,
					dma_in_lli_table_ptr);
			info_entry_ptr->block_size =
				((num_entries_in_table) << 24) |
				(table_data_size);
		}
		/* Save the pointer to the info entry of the current tables */
		info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
	}
	/* Print input tables */
	if (!dmatables_region) {
		sep_debug_print_lli_tables(sep, (struct sep_lli_entry *)
			sep_shared_area_bus_to_virt(sep, *lli_table_ptr),
			*num_entries_ptr, *table_data_size_ptr);
	}

	/* The array of the pages */
	kfree(lli_array_ptr);

update_dcb_counter:
	/* Update DCB counter */
	dma_ctx->nr_dcb_creat++;
	return 0;

end_function_error:
	/* Free all the allocated resources */
	kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array);
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array = NULL;
	kfree(lli_array_ptr);
	kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array);
	dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;

end_function:
	return error;
}
2090 * sep_construct_dma_tables_from_lli - prepare AES/DES mappings
2091 * @sep: pointer to struct sep_device
2093 * @sep_in_lli_entries:
2095 * @sep_out_lli_entries
2098 * @lli_table_out_ptr
2099 * @in_num_entries_ptr
2100 * @out_num_entries_ptr
2101 * @table_data_size_ptr
2103 * This function creates the input and output DMA tables for
2104 * symmetric operations (AES/DES) according to the block
2105 * size from LLI arays
2106 * Note that all bus addresses that are passed to the SEP
2107 * are in 32 bit format; the SEP is a 32 bit device
2109 static int sep_construct_dma_tables_from_lli(
2110 struct sep_device *sep,
2111 struct sep_lli_entry *lli_in_array,
2112 u32 sep_in_lli_entries,
2113 struct sep_lli_entry *lli_out_array,
2114 u32 sep_out_lli_entries,
2116 dma_addr_t *lli_table_in_ptr,
2117 dma_addr_t *lli_table_out_ptr,
2118 u32 *in_num_entries_ptr,
2119 u32 *out_num_entries_ptr,
2120 u32 *table_data_size_ptr,
2121 void **dmatables_region,
2122 struct sep_dma_context *dma_ctx)
2124 /* Points to the area where next lli table can be allocated */
2125 void *lli_table_alloc_addr = NULL;
2127 * Points to the area in shared region where next lli table
2130 void *dma_lli_table_alloc_addr = NULL;
2131 /* Input lli table in dmatables_region or shared region */
2132 struct sep_lli_entry *in_lli_table_ptr = NULL;
2133 /* Input lli table location in the shared region */
2134 struct sep_lli_entry *dma_in_lli_table_ptr = NULL;
2135 /* Output lli table in dmatables_region or shared region */
2136 struct sep_lli_entry *out_lli_table_ptr = NULL;
2137 /* Output lli table location in the shared region */
2138 struct sep_lli_entry *dma_out_lli_table_ptr = NULL;
2139 /* Pointer to the info entry of the table - the last entry */
2140 struct sep_lli_entry *info_in_entry_ptr = NULL;
2141 /* Pointer to the info entry of the table - the last entry */
2142 struct sep_lli_entry *info_out_entry_ptr = NULL;
2143 /* Points to the first entry to be processed in the lli_in_array */
2144 u32 current_in_entry = 0;
2145 /* Points to the first entry to be processed in the lli_out_array */
2146 u32 current_out_entry = 0;
2147 /* Max size of the input table */
2148 u32 in_table_data_size = 0;
2149 /* Max size of the output table */
2150 u32 out_table_data_size = 0;
2151 	/* Flag that signifies whether this is the last table build */
2152 u32 last_table_flag = 0;
2153 /* The data size that should be in table */
2154 u32 table_data_size = 0;
2155 /* Number of entries in the input table */
2156 u32 num_entries_in_table = 0;
2157 /* Number of entries in the output table */
2158 u32 num_entries_out_table = 0;
2161 dev_warn(&sep->pdev->dev, "DMA context uninitialized\n");
2165 	/* Initialize to point after the message area */
2166 lli_table_alloc_addr = (void *)(sep->shared_addr +
2167 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
2168 (dma_ctx->num_lli_tables_created *
2169 (sizeof(struct sep_lli_entry) *
2170 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP)));
2171 dma_lli_table_alloc_addr = lli_table_alloc_addr;
2173 if (dmatables_region) {
2174 /* 2 for both in+out table */
2175 if (sep_allocate_dmatables_region(sep,
2178 2*sep_in_lli_entries))
2180 lli_table_alloc_addr = *dmatables_region;
2183 	/* Loop until all the entries in the input array are processed */
2184 while (current_in_entry < sep_in_lli_entries) {
2185 /* Set the new input and output tables */
2187 (struct sep_lli_entry *)lli_table_alloc_addr;
2188 dma_in_lli_table_ptr =
2189 (struct sep_lli_entry *)dma_lli_table_alloc_addr;
2191 lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2192 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2193 dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2194 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2196 /* Set the first output tables */
2198 (struct sep_lli_entry *)lli_table_alloc_addr;
2199 dma_out_lli_table_ptr =
2200 (struct sep_lli_entry *)dma_lli_table_alloc_addr;
2202 /* Check if the DMA table area limit was overrun */
2203 if ((dma_lli_table_alloc_addr + sizeof(struct sep_lli_entry) *
2204 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP) >
2205 ((void *)sep->shared_addr +
2206 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
2207 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {
2209 dev_warn(&sep->pdev->dev, "dma table limit overrun\n");
2213 /* Update the number of the lli tables created */
2214 dma_ctx->num_lli_tables_created += 2;
2216 lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2217 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2218 dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2219 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2221 /* Calculate the maximum size of data for input table */
2222 in_table_data_size =
2223 sep_calculate_lli_table_max_size(sep,
2224 &lli_in_array[current_in_entry],
2225 (sep_in_lli_entries - current_in_entry),
2228 /* Calculate the maximum size of data for output table */
2229 out_table_data_size =
2230 sep_calculate_lli_table_max_size(sep,
2231 &lli_out_array[current_out_entry],
2232 (sep_out_lli_entries - current_out_entry),
2235 if (!last_table_flag) {
2236 in_table_data_size = (in_table_data_size /
2237 block_size) * block_size;
2238 out_table_data_size = (out_table_data_size /
2239 block_size) * block_size;
2242 table_data_size = in_table_data_size;
2243 if (table_data_size > out_table_data_size)
2244 table_data_size = out_table_data_size;
2246 dev_dbg(&sep->pdev->dev,
2247 "[PID%d] construct tables from lli"
2248 " in_table_data_size is (hex) %x\n", current->pid,
2249 in_table_data_size);
2251 dev_dbg(&sep->pdev->dev,
2252 "[PID%d] construct tables from lli"
2253 "out_table_data_size is (hex) %x\n", current->pid,
2254 out_table_data_size);
2256 /* Construct input lli table */
2257 sep_build_lli_table(sep, &lli_in_array[current_in_entry],
2260 &num_entries_in_table,
2263 /* Construct output lli table */
2264 sep_build_lli_table(sep, &lli_out_array[current_out_entry],
2267 &num_entries_out_table,
2270 /* If info entry is null - this is the first table built */
2271 if (info_in_entry_ptr == NULL || info_out_entry_ptr == NULL) {
2272 /* Set the output parameters to physical addresses */
2274 sep_shared_area_virt_to_bus(sep, dma_in_lli_table_ptr);
2276 *in_num_entries_ptr = num_entries_in_table;
2278 *lli_table_out_ptr =
2279 sep_shared_area_virt_to_bus(sep,
2280 dma_out_lli_table_ptr);
2282 *out_num_entries_ptr = num_entries_out_table;
2283 *table_data_size_ptr = table_data_size;
2285 dev_dbg(&sep->pdev->dev,
2286 "[PID%d] output lli_table_in_ptr is %08lx\n",
2288 (unsigned long)*lli_table_in_ptr);
2289 dev_dbg(&sep->pdev->dev,
2290 "[PID%d] output lli_table_out_ptr is %08lx\n",
2292 (unsigned long)*lli_table_out_ptr);
2294 /* Update the info entry of the previous in table */
2295 info_in_entry_ptr->bus_address =
2296 sep_shared_area_virt_to_bus(sep,
2297 dma_in_lli_table_ptr);
2299 info_in_entry_ptr->block_size =
2300 ((num_entries_in_table) << 24) |
2303 			/* Update the info entry of the previous out table */
2304 info_out_entry_ptr->bus_address =
2305 sep_shared_area_virt_to_bus(sep,
2306 dma_out_lli_table_ptr);
2308 info_out_entry_ptr->block_size =
2309 ((num_entries_out_table) << 24) |
2312 dev_dbg(&sep->pdev->dev,
2313 "[PID%d] output lli_table_in_ptr:%08lx %08x\n",
2315 (unsigned long)info_in_entry_ptr->bus_address,
2316 info_in_entry_ptr->block_size);
2318 dev_dbg(&sep->pdev->dev,
2319 "[PID%d] output lli_table_out_ptr:"
2322 (unsigned long)info_out_entry_ptr->bus_address,
2323 info_out_entry_ptr->block_size);
2326 /* Save the pointer to the info entry of the current tables */
2327 info_in_entry_ptr = in_lli_table_ptr +
2328 num_entries_in_table - 1;
2329 info_out_entry_ptr = out_lli_table_ptr +
2330 num_entries_out_table - 1;
2332 dev_dbg(&sep->pdev->dev,
2333 "[PID%d] output num_entries_out_table is %x\n",
2335 (u32)num_entries_out_table);
2336 dev_dbg(&sep->pdev->dev,
2337 "[PID%d] output info_in_entry_ptr is %lx\n",
2339 (unsigned long)info_in_entry_ptr);
2340 dev_dbg(&sep->pdev->dev,
2341 "[PID%d] output info_out_entry_ptr is %lx\n",
2343 (unsigned long)info_out_entry_ptr);
2346 /* Print input tables */
2347 if (!dmatables_region) {
2348 sep_debug_print_lli_tables(
2350 (struct sep_lli_entry *)
2351 sep_shared_area_bus_to_virt(sep, *lli_table_in_ptr),
2352 *in_num_entries_ptr,
2353 *table_data_size_ptr);
2356 /* Print output tables */
2357 if (!dmatables_region) {
2358 sep_debug_print_lli_tables(
2360 (struct sep_lli_entry *)
2361 sep_shared_area_bus_to_virt(sep, *lli_table_out_ptr),
2362 *out_num_entries_ptr,
2363 *table_data_size_ptr);
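
/*
 * Sketch only (hypothetical helper, not driver API): how each pass of
 * the loop above arrives at a common table_data_size. Both sides are
 * rounded down to a whole number of cipher blocks (except for the
 * last table) and the smaller value wins, so the input and output
 * MLLI tables always describe the same number of bytes.
 */
static inline u32 sep_example_common_table_size(u32 in_size, u32 out_size,
						u32 block_size, bool last)
{
	if (!last) {
		in_size = (in_size / block_size) * block_size;
		out_size = (out_size / block_size) * block_size;
	}

	return min(in_size, out_size);
}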
2370 * sep_prepare_input_output_dma_table - prepare DMA I/O table
2371  * @app_virt_in_addr: virtual address of the input buffer
2372  * @app_virt_out_addr: virtual address of the output buffer
2375  * @lli_table_in_ptr: returns the bus address of the first input table
2376  * @lli_table_out_ptr: returns the bus address of the first output table
2377  * @in_num_entries_ptr: returns the entry count of the first input table
2378  * @out_num_entries_ptr: returns the entry count of the first output table
2379  * @table_data_size_ptr: returns the byte count covered by the first tables
2380 * @is_kva: set for kernel data; used only for kernel crypto module
2382 * This function builds input and output DMA tables for synchronic
2383  * symmetric operations (AES, DES, HASH). It also ensures that each
2384  * table covers a whole multiple of the block size
2385 * Note that all bus addresses that are passed to the SEP
2386 * are in 32 bit format; the SEP is a 32 bit device
2388 static int sep_prepare_input_output_dma_table(struct sep_device *sep,
2389 unsigned long app_virt_in_addr,
2390 unsigned long app_virt_out_addr,
2393 dma_addr_t *lli_table_in_ptr,
2394 dma_addr_t *lli_table_out_ptr,
2395 u32 *in_num_entries_ptr,
2396 u32 *out_num_entries_ptr,
2397 u32 *table_data_size_ptr,
2399 void **dmatables_region,
2400 struct sep_dma_context *dma_ctx)
2404 	/* Array of lli entries describing the input pages */
2405 	struct sep_lli_entry *lli_in_array;
2406 	/* Array of lli entries describing the output pages */
2407 struct sep_lli_entry *lli_out_array;
2414 if (data_size == 0) {
2415 /* Prepare empty table for input and output */
2416 if (dmatables_region) {
2417 error = sep_allocate_dmatables_region(
2425 sep_prepare_empty_lli_table(sep, lli_table_in_ptr,
2426 in_num_entries_ptr, table_data_size_ptr,
2427 dmatables_region, dma_ctx);
2429 sep_prepare_empty_lli_table(sep, lli_table_out_ptr,
2430 out_num_entries_ptr, table_data_size_ptr,
2431 dmatables_region, dma_ctx);
2433 goto update_dcb_counter;
2436 /* Initialize the pages pointers */
2437 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
2438 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;
2440 	/* Lock the pages of the buffer and translate them into lli entries */
2442 dev_dbg(&sep->pdev->dev, "[PID%d] Locking kernel input pages\n",
2444 error = sep_lock_kernel_pages(sep, app_virt_in_addr,
2445 data_size, &lli_in_array, SEP_DRIVER_IN_FLAG,
2448 dev_warn(&sep->pdev->dev,
2449 "[PID%d] sep_lock_kernel_pages for input "
2450 "virtual buffer failed\n", current->pid);
2455 dev_dbg(&sep->pdev->dev, "[PID%d] Locking kernel output pages\n",
2457 error = sep_lock_kernel_pages(sep, app_virt_out_addr,
2458 data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG,
2462 dev_warn(&sep->pdev->dev,
2463 "[PID%d] sep_lock_kernel_pages for output "
2464 "virtual buffer failed\n", current->pid);
2466 goto end_function_free_lli_in;
2472 dev_dbg(&sep->pdev->dev, "[PID%d] Locking user input pages\n",
2474 error = sep_lock_user_pages(sep, app_virt_in_addr,
2475 data_size, &lli_in_array, SEP_DRIVER_IN_FLAG,
2478 dev_warn(&sep->pdev->dev,
2479 "[PID%d] sep_lock_user_pages for input "
2480 "virtual buffer failed\n", current->pid);
2485 if (dma_ctx->secure_dma) {
2486 		/* secure_dma requires use of non-accessible memory */
2487 dev_dbg(&sep->pdev->dev, "[PID%d] in secure_dma\n",
2489 error = sep_lli_table_secure_dma(sep,
2490 app_virt_out_addr, data_size, &lli_out_array,
2491 SEP_DRIVER_OUT_FLAG, dma_ctx);
2493 dev_warn(&sep->pdev->dev,
2494 "[PID%d] secure dma table setup "
2495 " for output virtual buffer failed\n",
2498 goto end_function_free_lli_in;
2501 /* For normal, non-secure dma */
2502 dev_dbg(&sep->pdev->dev, "[PID%d] not in secure_dma\n",
2505 dev_dbg(&sep->pdev->dev,
2506 "[PID%d] Locking user output pages\n",
2509 error = sep_lock_user_pages(sep, app_virt_out_addr,
2510 data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG,
2514 dev_warn(&sep->pdev->dev,
2515 "[PID%d] sep_lock_user_pages"
2516 " for output virtual buffer failed\n",
2519 goto end_function_free_lli_in;
2524 dev_dbg(&sep->pdev->dev,
2525 "[PID%d] After lock; prep input output dma table sep_in_num_pages is (hex) %x\n",
2527 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages);
2529 dev_dbg(&sep->pdev->dev, "[PID%d] sep_out_num_pages is (hex) %x\n",
2531 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages);
2533 dev_dbg(&sep->pdev->dev,
2534 "[PID%d] SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is (hex) %x\n",
2535 current->pid, SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
2537 /* Call the function that creates table from the lli arrays */
2538 dev_dbg(&sep->pdev->dev, "[PID%d] calling create table from lli\n",
2540 error = sep_construct_dma_tables_from_lli(
2542 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
2545 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
2547 block_size, lli_table_in_ptr, lli_table_out_ptr,
2548 in_num_entries_ptr, out_num_entries_ptr,
2549 table_data_size_ptr, dmatables_region, dma_ctx);
2552 dev_warn(&sep->pdev->dev,
2553 "[PID%d] sep_construct_dma_tables_from_lli failed\n",
2555 goto end_function_with_error;
2558 kfree(lli_out_array);
2559 kfree(lli_in_array);
2562 /* Update DCB counter */
2563 dma_ctx->nr_dcb_creat++;
2567 end_function_with_error:
2568 kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array);
2569 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array = NULL;
2570 kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array);
2571 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;
2572 kfree(lli_out_array);
2575 end_function_free_lli_in:
2576 kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array);
2577 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array = NULL;
2578 kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array);
2579 dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
2580 kfree(lli_in_array);
2589 * sep_prepare_input_output_dma_table_in_dcb - prepare control blocks
2590 * @app_in_address: unsigned long; for data buffer in (user space)
2591 * @app_out_address: unsigned long; for data buffer out (user space)
2592 * @data_in_size: u32; for size of data
2593 * @block_size: u32; for block size
2594 * @tail_block_size: u32; for size of tail block
2595 * @isapplet: bool; to indicate external app
2596 * @is_kva: bool; kernel buffer; only used for kernel crypto module
2597  * @secure_dma: indicates whether this is secure_dma using IMR
2599 * This function prepares the linked DMA tables and puts the
2600  * address of the linked list of tables into a DCB (data control
2601  * block), the address of which is known by the SEP hardware
2602 * Note that all bus addresses that are passed to the SEP
2603 * are in 32 bit format; the SEP is a 32 bit device
2605 int sep_prepare_input_output_dma_table_in_dcb(struct sep_device *sep,
2606 unsigned long app_in_address,
2607 unsigned long app_out_address,
2610 u32 tail_block_size,
2614 struct sep_dcblock *dcb_region,
2615 void **dmatables_region,
2616 struct sep_dma_context **dma_ctx,
2617 struct scatterlist *src_sg,
2618 struct scatterlist *dst_sg)
2623 /* Address of the created DCB table */
2624 struct sep_dcblock *dcb_table_ptr = NULL;
2625 /* The physical address of the first input DMA table */
2626 dma_addr_t in_first_mlli_address = 0;
2627 /* Number of entries in the first input DMA table */
2628 u32 in_first_num_entries = 0;
2629 /* The physical address of the first output DMA table */
2630 dma_addr_t out_first_mlli_address = 0;
2631 /* Number of entries in the first output DMA table */
2632 u32 out_first_num_entries = 0;
2633 /* Data in the first input/output table */
2634 u32 first_data_size = 0;
2636 dev_dbg(&sep->pdev->dev, "[PID%d] app_in_address %lx\n",
2637 current->pid, app_in_address);
2639 dev_dbg(&sep->pdev->dev, "[PID%d] app_out_address %lx\n",
2640 current->pid, app_out_address);
2642 dev_dbg(&sep->pdev->dev, "[PID%d] data_in_size %x\n",
2643 current->pid, data_in_size);
2645 dev_dbg(&sep->pdev->dev, "[PID%d] block_size %x\n",
2646 current->pid, block_size);
2648 dev_dbg(&sep->pdev->dev, "[PID%d] tail_block_size %x\n",
2649 current->pid, tail_block_size);
2651 dev_dbg(&sep->pdev->dev, "[PID%d] isapplet %x\n",
2652 current->pid, isapplet);
2654 dev_dbg(&sep->pdev->dev, "[PID%d] is_kva %x\n",
2655 current->pid, is_kva);
2657 dev_dbg(&sep->pdev->dev, "[PID%d] src_sg %p\n",
2658 current->pid, src_sg);
2660 dev_dbg(&sep->pdev->dev, "[PID%d] dst_sg %p\n",
2661 current->pid, dst_sg);
2664 dev_warn(&sep->pdev->dev, "[PID%d] no DMA context pointer\n",
2671 /* In case there are multiple DCBs for this transaction */
2672 dev_dbg(&sep->pdev->dev, "[PID%d] DMA context already set\n",
2675 *dma_ctx = kzalloc(sizeof(**dma_ctx), GFP_KERNEL);
2677 dev_dbg(&sep->pdev->dev,
2678 "[PID%d] Not enough memory for DMA context\n",
2683 dev_dbg(&sep->pdev->dev,
2684 "[PID%d] Created DMA context addr at 0x%p\n",
2685 current->pid, *dma_ctx);
2688 (*dma_ctx)->secure_dma = secure_dma;
2690 /* these are for kernel crypto only */
2691 (*dma_ctx)->src_sg = src_sg;
2692 (*dma_ctx)->dst_sg = dst_sg;
2694 if ((*dma_ctx)->nr_dcb_creat == SEP_MAX_NUM_SYNC_DMA_OPS) {
2695 /* No more DCBs to allocate */
2696 dev_dbg(&sep->pdev->dev, "[PID%d] no more DCBs available\n",
2699 goto end_function_error;
2702 /* Allocate new DCB */
2704 dcb_table_ptr = dcb_region;
2706 dcb_table_ptr = (struct sep_dcblock *)(sep->shared_addr +
2707 SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES +
2708 ((*dma_ctx)->nr_dcb_creat *
2709 sizeof(struct sep_dcblock)));
2712 /* Set the default values in the DCB */
2713 dcb_table_ptr->input_mlli_address = 0;
2714 dcb_table_ptr->input_mlli_num_entries = 0;
2715 dcb_table_ptr->input_mlli_data_size = 0;
2716 dcb_table_ptr->output_mlli_address = 0;
2717 dcb_table_ptr->output_mlli_num_entries = 0;
2718 dcb_table_ptr->output_mlli_data_size = 0;
2719 dcb_table_ptr->tail_data_size = 0;
2720 dcb_table_ptr->out_vr_tail_pt = 0;
2724 /* Check if there is enough data for DMA operation */
2725 if (data_in_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE) {
2728 goto end_function_error;
2730 if (copy_from_user(dcb_table_ptr->tail_data,
2731 (void __user *)app_in_address,
2734 goto end_function_error;
2738 dcb_table_ptr->tail_data_size = data_in_size;
2740 /* Set the output user-space address for mem2mem op */
2741 if (app_out_address)
2742 dcb_table_ptr->out_vr_tail_pt =
2743 (aligned_u64)app_out_address;
2746 * Update both data length parameters in order to avoid
2747 * second data copy and allow building of empty mlli
2754 if (!app_out_address) {
2755 tail_size = data_in_size % block_size;
2757 if (tail_block_size == block_size)
2758 tail_size = block_size;
2765 if (tail_size > sizeof(dcb_table_ptr->tail_data))
2769 goto end_function_error;
2771 /* We have tail data - copy it to DCB */
2772 if (copy_from_user(dcb_table_ptr->tail_data,
2773 (void __user *)(app_in_address +
2774 data_in_size - tail_size), tail_size)) {
2776 goto end_function_error;
2779 if (app_out_address)
2781 * Calculate the output address
2782 * according to tail data size
2784 dcb_table_ptr->out_vr_tail_pt =
2785 (aligned_u64)app_out_address +
2786 data_in_size - tail_size;
2788 /* Save the real tail data size */
2789 dcb_table_ptr->tail_data_size = tail_size;
2791 		 * Update the data size to exclude the tail data,
2792 		 * i.e. the portion actually handled by DMA
2794 data_in_size = (data_in_size - tail_size);
2797 /* Check if we need to build only input table or input/output */
2798 if (app_out_address) {
2799 /* Prepare input/output tables */
2800 error = sep_prepare_input_output_dma_table(sep,
2805 &in_first_mlli_address,
2806 &out_first_mlli_address,
2807 &in_first_num_entries,
2808 &out_first_num_entries,
2814 /* Prepare input tables */
2815 error = sep_prepare_input_dma_table(sep,
2819 &in_first_mlli_address,
2820 &in_first_num_entries,
2828 dev_warn(&sep->pdev->dev,
2829 "prepare DMA table call failed "
2830 "from prepare DCB call\n");
2831 goto end_function_error;
2834 /* Set the DCB values */
2835 dcb_table_ptr->input_mlli_address = in_first_mlli_address;
2836 dcb_table_ptr->input_mlli_num_entries = in_first_num_entries;
2837 dcb_table_ptr->input_mlli_data_size = first_data_size;
2838 dcb_table_ptr->output_mlli_address = out_first_mlli_address;
2839 dcb_table_ptr->output_mlli_num_entries = out_first_num_entries;
2840 dcb_table_ptr->output_mlli_data_size = first_data_size;
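
/*
 * Sketch only (hypothetical helper): the tail-size rule applied by
 * sep_prepare_input_output_dma_table_in_dcb() above for the
 * no-output-address case. Bytes that do not fill a whole cipher block
 * go to the DCB tail buffer; the guard on a zero remainder is an
 * editorial assumption, since the original elides that line.
 */
static inline u32 sep_example_tail_size(u32 data_in_size, u32 block_size,
					u32 tail_block_size)
{
	u32 tail = data_in_size % block_size;

	/* A full final block may still be routed via the tail buffer */
	if (!tail && tail_block_size == block_size)
		tail = block_size;

	return tail;
}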
2855 * sep_free_dma_tables_and_dcb - free DMA tables and DCBs
2856 * @sep: pointer to struct sep_device
2857 * @isapplet: indicates external application (used for kernel access)
2858 * @is_kva: indicates kernel addresses (only used for kernel crypto)
2860 * This function frees the DMA tables and DCB
2862 static int sep_free_dma_tables_and_dcb(struct sep_device *sep, bool isapplet,
2863 bool is_kva, struct sep_dma_context **dma_ctx)
2865 struct sep_dcblock *dcb_table_ptr;
2866 unsigned long pt_hold;
2873 dev_dbg(&sep->pdev->dev, "[PID%d] sep_free_dma_tables_and_dcb\n",
2875 if (!dma_ctx || !*dma_ctx) /* nothing to be done here*/
2878 if (!(*dma_ctx)->secure_dma && isapplet) {
2879 dev_dbg(&sep->pdev->dev, "[PID%d] handling applet\n",
2882 		/* Tail handling applies only to non-secure_dma */
2883 /* Set pointer to first DCB table */
2884 dcb_table_ptr = (struct sep_dcblock *)
2886 SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES);
2889 * Go over each DCB and see if
2890 * tail pointer must be updated
2892 for (i = 0; i < (*dma_ctx)->nr_dcb_creat; i++, dcb_table_ptr++) {
2893 if (dcb_table_ptr->out_vr_tail_pt) {
2894 pt_hold = (unsigned long)dcb_table_ptr->
2896 tail_pt = (void *)pt_hold;
2901 error_temp = copy_to_user(
2902 (void __user *)tail_pt,
2903 dcb_table_ptr->tail_data,
2904 dcb_table_ptr->tail_data_size);
2907 /* Release the DMA resource */
2915 /* Free the output pages, if any */
2916 sep_free_dma_table_data_handler(sep, dma_ctx);
2918 dev_dbg(&sep->pdev->dev, "[PID%d] sep_free_dma_tables_and_dcb end\n",
2925 * sep_prepare_dcb_handler - prepare a control block
2926 * @sep: pointer to struct sep_device
2927 * @arg: pointer to user parameters
2928 * @secure_dma: indicate whether we are using secure_dma on IMR
2930 * This function will retrieve the RAR buffer physical addresses, type
2931 * & size corresponding to the RAR handles provided in the buffers vector.
2933 static int sep_prepare_dcb_handler(struct sep_device *sep, unsigned long arg,
2935 struct sep_dma_context **dma_ctx)
2938 /* Command arguments */
2939 	struct build_dcb_struct command_args; /* on stack: handler may run concurrently */
2941 /* Get the command arguments */
2942 if (copy_from_user(&command_args, (void __user *)arg,
2943 sizeof(struct build_dcb_struct))) {
2948 dev_dbg(&sep->pdev->dev,
2949 "[PID%d] prep dcb handler app_in_address is %08llx\n",
2950 current->pid, command_args.app_in_address);
2951 dev_dbg(&sep->pdev->dev,
2952 "[PID%d] app_out_address is %08llx\n",
2953 current->pid, command_args.app_out_address);
2954 dev_dbg(&sep->pdev->dev,
2955 "[PID%d] data_size is %x\n",
2956 current->pid, command_args.data_in_size);
2957 dev_dbg(&sep->pdev->dev,
2958 "[PID%d] block_size is %x\n",
2959 current->pid, command_args.block_size);
2960 dev_dbg(&sep->pdev->dev,
2961 "[PID%d] tail block_size is %x\n",
2962 current->pid, command_args.tail_block_size);
2963 dev_dbg(&sep->pdev->dev,
2964 "[PID%d] is_applet is %x\n",
2965 current->pid, command_args.is_applet);
2967 if (!command_args.app_in_address) {
2968 dev_warn(&sep->pdev->dev,
2969 "[PID%d] null app_in_address\n", current->pid);
2974 error = sep_prepare_input_output_dma_table_in_dcb(sep,
2975 (unsigned long)command_args.app_in_address,
2976 (unsigned long)command_args.app_out_address,
2977 command_args.data_in_size, command_args.block_size,
2978 command_args.tail_block_size,
2979 command_args.is_applet, false,
2980 secure_dma, NULL, NULL, dma_ctx, NULL, NULL);
2988 * sep_free_dcb_handler - free control block resources
2989 * @sep: pointer to struct sep_device
2991 * This function frees the DCB resources and updates the needed
2992 * user-space buffers.
2994 static int sep_free_dcb_handler(struct sep_device *sep,
2995 struct sep_dma_context **dma_ctx)
2997 if (!dma_ctx || !(*dma_ctx)) {
2998 dev_dbg(&sep->pdev->dev,
2999 "[PID%d] no dma context defined, nothing to free\n",
3004 dev_dbg(&sep->pdev->dev, "[PID%d] free dcbs num of DCBs %x\n",
3006 (*dma_ctx)->nr_dcb_creat);
3008 return sep_free_dma_tables_and_dcb(sep, false, false, dma_ctx);
3012 * sep_ioctl - ioctl handler for sep device
3013 * @filp: pointer to struct file
3014  * @cmd: ioctl command
3015 * @arg: pointer to argument structure
3017 * Implement the ioctl methods available on the SEP device.
3019 static long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
3021 struct sep_private_data * const private_data = filp->private_data;
3022 struct sep_call_status *call_status = &private_data->call_status;
3023 struct sep_device *sep = private_data->device;
3024 struct sep_dma_context **dma_ctx = &private_data->dma_ctx;
3025 struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
3028 dev_dbg(&sep->pdev->dev, "[PID%d] ioctl cmd 0x%x\n",
3030 dev_dbg(&sep->pdev->dev, "[PID%d] dma context addr 0x%p\n",
3031 current->pid, *dma_ctx);
3033 /* Make sure we own this device */
3034 error = sep_check_transaction_owner(sep);
3036 dev_dbg(&sep->pdev->dev, "[PID%d] ioctl pid is not owner\n",
3041 /* Check that sep_mmap has been called before */
3042 if (0 == test_bit(SEP_LEGACY_MMAP_DONE_OFFSET,
3043 &call_status->status)) {
3044 dev_dbg(&sep->pdev->dev,
3045 "[PID%d] mmap not called\n", current->pid);
3050 	/* Check that the command is for the SEP device */
3051 if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER) {
3057 case SEP_IOCSENDSEPCOMMAND:
3058 dev_dbg(&sep->pdev->dev,
3059 "[PID%d] SEP_IOCSENDSEPCOMMAND start\n",
3061 if (1 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
3062 &call_status->status)) {
3063 dev_warn(&sep->pdev->dev,
3064 "[PID%d] send msg already done\n",
3069 /* Send command to SEP */
3070 error = sep_send_command_handler(sep);
3072 set_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
3073 &call_status->status);
3074 dev_dbg(&sep->pdev->dev,
3075 "[PID%d] SEP_IOCSENDSEPCOMMAND end\n",
3078 case SEP_IOCENDTRANSACTION:
3079 dev_dbg(&sep->pdev->dev,
3080 "[PID%d] SEP_IOCENDTRANSACTION start\n",
3082 error = sep_end_transaction_handler(sep, dma_ctx, call_status,
3084 dev_dbg(&sep->pdev->dev,
3085 "[PID%d] SEP_IOCENDTRANSACTION end\n",
3088 case SEP_IOCPREPAREDCB:
3089 dev_dbg(&sep->pdev->dev,
3090 "[PID%d] SEP_IOCPREPAREDCB start\n",
3092 case SEP_IOCPREPAREDCB_SECURE_DMA:
3093 dev_dbg(&sep->pdev->dev,
3094 "[PID%d] SEP_IOCPREPAREDCB_SECURE_DMA start\n",
3096 if (1 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
3097 &call_status->status)) {
3098 dev_dbg(&sep->pdev->dev,
3099 "[PID%d] dcb prep needed before send msg\n",
3106 dev_dbg(&sep->pdev->dev,
3107 "[PID%d] dcb null arg\n", current->pid);
3112 if (cmd == SEP_IOCPREPAREDCB) {
3114 dev_dbg(&sep->pdev->dev,
3115 "[PID%d] SEP_IOCPREPAREDCB (no secure_dma)\n",
3118 error = sep_prepare_dcb_handler(sep, arg, false,
3122 dev_dbg(&sep->pdev->dev,
3123 "[PID%d] SEP_IOC_POC (with secure_dma)\n",
3126 error = sep_prepare_dcb_handler(sep, arg, true,
3129 dev_dbg(&sep->pdev->dev, "[PID%d] dcb's end\n",
3132 case SEP_IOCFREEDCB:
3133 dev_dbg(&sep->pdev->dev, "[PID%d] SEP_IOCFREEDCB start\n",
3135 case SEP_IOCFREEDCB_SECURE_DMA:
3136 dev_dbg(&sep->pdev->dev,
3137 "[PID%d] SEP_IOCFREEDCB_SECURE_DMA start\n",
3139 error = sep_free_dcb_handler(sep, dma_ctx);
3140 dev_dbg(&sep->pdev->dev, "[PID%d] SEP_IOCFREEDCB end\n",
3145 dev_dbg(&sep->pdev->dev, "[PID%d] default end\n",
3151 dev_dbg(&sep->pdev->dev, "[PID%d] ioctl end\n", current->pid);
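
/*
 * Hedged user-space sketch of the legacy ioctl flow handled above;
 * not a tested sequence. The "/dev/sep" node name, mmap length and
 * buffer setup are assumptions; the SEP_IOC* commands and struct
 * build_dcb_struct fields come from sep_driver_api.h.
 */
#if 0	/* illustration only, never compiled */
	int fd = open("/dev/sep", O_RDWR);
	/* sep_mmap() must be called before any ioctl */
	void *shared = mmap(NULL, shared_len, PROT_READ | PROT_WRITE,
			    MAP_SHARED, fd, 0);
	struct build_dcb_struct dcb = {
		.app_in_address = (unsigned long)in_buf,
		.app_out_address = (unsigned long)out_buf,
		.data_in_size = len,
		.block_size = 16,	/* e.g. an AES block */
		.tail_block_size = 16,
		.is_applet = 0,
	};
	ioctl(fd, SEP_IOCPREPAREDCB, &dcb);	/* build DMA tables + DCB */
	ioctl(fd, SEP_IOCSENDSEPCOMMAND);	/* kick the SEP */
	/* ... wait, then read results out of the shared area ... */
	ioctl(fd, SEP_IOCFREEDCB);		/* free DCB resources */
	ioctl(fd, SEP_IOCENDTRANSACTION);	/* release ownership */
#endif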
3157 * sep_inthandler - interrupt handler for sep device
3158  * @irq: interrupt number
3159 * @dev_id: device id
3161 static irqreturn_t sep_inthandler(int irq, void *dev_id)
3163 unsigned long lock_irq_flag;
3164 u32 reg_val, reg_val2 = 0;
3165 struct sep_device *sep = dev_id;
3166 irqreturn_t int_error = IRQ_HANDLED;
3168 /* Are we in power save? */
3169 #if defined(CONFIG_PM_RUNTIME) && defined(SEP_ENABLE_RUNTIME_PM)
3170 if (sep->pdev->dev.power.runtime_status != RPM_ACTIVE) {
3171 dev_dbg(&sep->pdev->dev, "interrupt during pwr save\n");
3176 if (test_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags) == 0) {
3177 dev_dbg(&sep->pdev->dev, "interrupt while nobody using sep\n");
3181 	/* Read the IRR register to check if this is a SEP interrupt */
3182 reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
3184 dev_dbg(&sep->pdev->dev, "sep int: IRR REG val: %x\n", reg_val);
3186 if (reg_val & (0x1 << 13)) {
3188 /* Lock and update the counter of reply messages */
3189 spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);
3191 spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
3193 dev_dbg(&sep->pdev->dev, "sep int: send_ct %lx reply_ct %lx\n",
3194 sep->send_ct, sep->reply_ct);
3196 		/* Is this a kernel client request? */
3197 if (sep->in_kernel) {
3198 tasklet_schedule(&sep->finish_tasklet);
3199 goto finished_interrupt;
3202 /* Is this printf or daemon request? */
3203 reg_val2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
3204 dev_dbg(&sep->pdev->dev,
3205 "SEP Interrupt - GPR2 is %08x\n", reg_val2);
3207 clear_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags);
3209 if ((reg_val2 >> 30) & 0x1) {
3210 dev_dbg(&sep->pdev->dev, "int: printf request\n");
3211 } else if (reg_val2 >> 31) {
3212 dev_dbg(&sep->pdev->dev, "int: daemon request\n");
3214 dev_dbg(&sep->pdev->dev, "int: SEP reply\n");
3215 wake_up(&sep->event_interrupt);
3218 dev_dbg(&sep->pdev->dev, "int: not SEP interrupt\n");
3219 int_error = IRQ_NONE;
3224 if (int_error == IRQ_HANDLED)
3225 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val);
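
/*
 * Sketch only (hypothetical helper): the GPR2 decode performed by
 * sep_inthandler() above. Bit 30 flags a SEP printf request, bit 31 a
 * daemon request; anything else is treated as a normal SEP reply.
 */
static inline const char *sep_example_gpr2_kind(u32 gpr2)
{
	if ((gpr2 >> 30) & 0x1)
		return "printf request";
	if (gpr2 >> 31)
		return "daemon request";
	return "SEP reply";
}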
3231 * sep_reconfig_shared_area - reconfigure shared area
3232 * @sep: pointer to struct sep_device
3234 * Reconfig the shared area between HOST and SEP - needed in case
3235 * the DX_CC_Init function was called before OS loading.
3237 static int sep_reconfig_shared_area(struct sep_device *sep)
3241 	/* used to limit waiting for SEP */
3242 unsigned long end_time;
3244 /* Send the new SHARED MESSAGE AREA to the SEP */
3245 dev_dbg(&sep->pdev->dev, "reconfig shared; sending %08llx to sep\n",
3246 (unsigned long long)sep->shared_bus);
3248 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_bus);
3250 /* Poll for SEP response */
3251 ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
3253 end_time = jiffies + (WAIT_TIME * HZ);
3255 while ((time_before(jiffies, end_time)) && (ret_val != 0xffffffff) &&
3256 (ret_val != sep->shared_bus))
3257 ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
3259 /* Check the return value (register) */
3260 if (ret_val != sep->shared_bus) {
3261 dev_warn(&sep->pdev->dev, "could not reconfig shared area\n");
3262 dev_warn(&sep->pdev->dev, "result was %x\n", ret_val);
3267 dev_dbg(&sep->pdev->dev, "reconfig shared area end\n");
3273  * sep_activate_dcb_dmatables_context - Takes DCB & DMA tables into use
3274  * @sep: pointer to struct sep_device
3276 * @dcb_region: DCB region copy
3277 * @dmatables_region: MLLI/DMA tables copy
3278 * @dma_ctx: DMA context for current transaction
3280 ssize_t sep_activate_dcb_dmatables_context(struct sep_device *sep,
3281 struct sep_dcblock **dcb_region,
3282 void **dmatables_region,
3283 struct sep_dma_context *dma_ctx)
3285 void *dmaregion_free_start = NULL;
3286 void *dmaregion_free_end = NULL;
3287 void *dcbregion_free_start = NULL;
3288 void *dcbregion_free_end = NULL;
3291 dev_dbg(&sep->pdev->dev, "[PID%d] activating dcb/dma region\n",
3294 if (1 > dma_ctx->nr_dcb_creat) {
3295 dev_warn(&sep->pdev->dev,
3296 "[PID%d] invalid number of dcbs to activate 0x%08X\n",
3297 current->pid, dma_ctx->nr_dcb_creat);
3302 dmaregion_free_start = sep->shared_addr
3303 + SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES;
3304 dmaregion_free_end = dmaregion_free_start
3305 + SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES - 1;
3307 if (dmaregion_free_start
3308 + dma_ctx->dmatables_len > dmaregion_free_end) {
3312 memcpy(dmaregion_free_start,
3314 dma_ctx->dmatables_len);
3315 /* Free MLLI table copy */
3316 kfree(*dmatables_region);
3317 *dmatables_region = NULL;
3319 /* Copy thread's DCB table copy to DCB table region */
3320 dcbregion_free_start = sep->shared_addr +
3321 SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES;
3322 dcbregion_free_end = dcbregion_free_start +
3323 (SEP_MAX_NUM_SYNC_DMA_OPS *
3324 sizeof(struct sep_dcblock)) - 1;
3326 if (dcbregion_free_start
3327 + (dma_ctx->nr_dcb_creat * sizeof(struct sep_dcblock))
3328 > dcbregion_free_end) {
3333 memcpy(dcbregion_free_start,
3335 dma_ctx->nr_dcb_creat * sizeof(struct sep_dcblock));
3337 /* Print the tables */
3338 dev_dbg(&sep->pdev->dev, "activate: input table\n");
3339 sep_debug_print_lli_tables(sep,
3340 (struct sep_lli_entry *)sep_shared_area_bus_to_virt(sep,
3341 (*dcb_region)->input_mlli_address),
3342 (*dcb_region)->input_mlli_num_entries,
3343 (*dcb_region)->input_mlli_data_size);
3345 dev_dbg(&sep->pdev->dev, "activate: output table\n");
3346 sep_debug_print_lli_tables(sep,
3347 (struct sep_lli_entry *)sep_shared_area_bus_to_virt(sep,
3348 (*dcb_region)->output_mlli_address),
3349 (*dcb_region)->output_mlli_num_entries,
3350 (*dcb_region)->output_mlli_data_size);
3352 dev_dbg(&sep->pdev->dev,
3353 "[PID%d] printing activated tables\n", current->pid);
3356 kfree(*dmatables_region);
3357 *dmatables_region = NULL;
3366 * sep_create_dcb_dmatables_context - Creates DCB & MLLI/DMA table context
3368 * @dcb_region: DCB region buf to create for current transaction
3369 * @dmatables_region: MLLI/DMA tables buf to create for current transaction
3370 * @dma_ctx: DMA context buf to create for current transaction
3371 * @user_dcb_args: User arguments for DCB/MLLI creation
3372 * @num_dcbs: Number of DCBs to create
3373 * @secure_dma: Indicate use of IMR restricted memory secure dma
3375 static ssize_t sep_create_dcb_dmatables_context(struct sep_device *sep,
3376 struct sep_dcblock **dcb_region,
3377 void **dmatables_region,
3378 struct sep_dma_context **dma_ctx,
3379 const struct build_dcb_struct __user *user_dcb_args,
3380 const u32 num_dcbs, bool secure_dma)
3384 struct build_dcb_struct *dcb_args = NULL;
3386 dev_dbg(&sep->pdev->dev, "[PID%d] creating dcb/dma region\n",
3389 if (!dcb_region || !dma_ctx || !dmatables_region || !user_dcb_args) {
3394 if (SEP_MAX_NUM_SYNC_DMA_OPS < num_dcbs) {
3395 dev_warn(&sep->pdev->dev,
3396 "[PID%d] invalid number of dcbs 0x%08X\n",
3397 current->pid, num_dcbs);
3402 dcb_args = kcalloc(num_dcbs, sizeof(struct build_dcb_struct),
3409 if (copy_from_user(dcb_args,
3411 num_dcbs * sizeof(struct build_dcb_struct))) {
3416 /* Allocate thread-specific memory for DCB */
3417 *dcb_region = kzalloc(num_dcbs * sizeof(struct sep_dcblock),
3419 if (!(*dcb_region)) {
3424 /* Prepare DCB and MLLI table into the allocated regions */
3425 for (i = 0; i < num_dcbs; i++) {
3426 error = sep_prepare_input_output_dma_table_in_dcb(sep,
3427 (unsigned long)dcb_args[i].app_in_address,
3428 (unsigned long)dcb_args[i].app_out_address,
3429 dcb_args[i].data_in_size,
3430 dcb_args[i].block_size,
3431 dcb_args[i].tail_block_size,
3432 dcb_args[i].is_applet,
3434 *dcb_region, dmatables_region,
3439 dev_warn(&sep->pdev->dev,
3440 "[PID%d] dma table creation failed\n",
3445 if (dcb_args[i].app_in_address != 0)
3446 (*dma_ctx)->input_data_len += dcb_args[i].data_in_size;
3456 * sep_create_dcb_dmatables_context_kernel - Creates DCB & MLLI/DMA table context
3459 * @dcb_region: DCB region buf to create for current transaction
3460 * @dmatables_region: MLLI/DMA tables buf to create for current transaction
3461 * @dma_ctx: DMA context buf to create for current transaction
3462  * @dcb_data: kernel-side arguments for DCB/MLLI creation
3463 * @num_dcbs: Number of DCBs to create
3464  * This does the same thing as sep_create_dcb_dmatables_context
3465 * except that it is used only for the kernel crypto operation. It is
3466 * separate because there is no user data involved; the dcb data structure
3467 * is specific for kernel crypto (build_dcb_struct_kernel)
3469 int sep_create_dcb_dmatables_context_kernel(struct sep_device *sep,
3470 struct sep_dcblock **dcb_region,
3471 void **dmatables_region,
3472 struct sep_dma_context **dma_ctx,
3473 const struct build_dcb_struct_kernel *dcb_data,
3479 dev_dbg(&sep->pdev->dev, "[PID%d] creating dcb/dma region\n",
3482 if (!dcb_region || !dma_ctx || !dmatables_region || !dcb_data) {
3487 if (SEP_MAX_NUM_SYNC_DMA_OPS < num_dcbs) {
3488 dev_warn(&sep->pdev->dev,
3489 "[PID%d] invalid number of dcbs 0x%08X\n",
3490 current->pid, num_dcbs);
3495 dev_dbg(&sep->pdev->dev, "[PID%d] num_dcbs is %d\n",
3496 current->pid, num_dcbs);
3498 /* Allocate thread-specific memory for DCB */
3499 *dcb_region = kzalloc(num_dcbs * sizeof(struct sep_dcblock),
3501 if (!(*dcb_region)) {
3506 /* Prepare DCB and MLLI table into the allocated regions */
3507 for (i = 0; i < num_dcbs; i++) {
3508 error = sep_prepare_input_output_dma_table_in_dcb(sep,
3509 (unsigned long)dcb_data->app_in_address,
3510 (unsigned long)dcb_data->app_out_address,
3511 dcb_data->data_in_size,
3512 dcb_data->block_size,
3513 dcb_data->tail_block_size,
3514 dcb_data->is_applet,
3517 *dcb_region, dmatables_region,
3522 dev_warn(&sep->pdev->dev,
3523 "[PID%d] dma table creation failed\n",
3535 * sep_activate_msgarea_context - Takes the message area context into use
3537 * @msg_region: Message area context buf
3538 * @msg_len: Message area context buffer size
3540 static ssize_t sep_activate_msgarea_context(struct sep_device *sep,
3542 const size_t msg_len)
3544 dev_dbg(&sep->pdev->dev, "[PID%d] activating msg region\n",
3547 if (!msg_region || !(*msg_region) ||
3548 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES < msg_len) {
3549 dev_warn(&sep->pdev->dev,
3550 "[PID%d] invalid act msgarea len 0x%08zX\n",
3551 current->pid, msg_len);
3555 memcpy(sep->shared_addr, *msg_region, msg_len);
3561 * sep_create_msgarea_context - Creates message area context
3563 * @msg_region: Msg area region buf to create for current transaction
3564 * @msg_user: Content for msg area region from user
3565 * @msg_len: Message area size
3567 static ssize_t sep_create_msgarea_context(struct sep_device *sep,
3569 const void __user *msg_user,
3570 const size_t msg_len)
3574 dev_dbg(&sep->pdev->dev, "[PID%d] creating msg region\n",
3579 SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES < msg_len ||
3580 SEP_DRIVER_MIN_MESSAGE_SIZE_IN_BYTES > msg_len) {
3581 dev_warn(&sep->pdev->dev,
3582 "[PID%d] invalid creat msgarea len 0x%08zX\n",
3583 current->pid, msg_len);
3588 /* Allocate thread-specific memory for message buffer */
3589 *msg_region = kzalloc(msg_len, GFP_KERNEL);
3590 if (!(*msg_region)) {
3595 /* Copy input data to write() to allocated message buffer */
3596 if (copy_from_user(*msg_region, msg_user, msg_len)) {
3602 if (error && msg_region) {
3612 * sep_read - Returns results of an operation for fastcall interface
3613 * @filp: File pointer
3614 * @buf_user: User buffer for storing results
3615 * @count_user: User buffer size
3616 * @offset: File offset, not supported
3618 * The implementation does not support reading in chunks, all data must be
3619 * consumed during a single read system call.
3621 static ssize_t sep_read(struct file *filp,
3622 char __user *buf_user, size_t count_user,
3625 struct sep_private_data * const private_data = filp->private_data;
3626 struct sep_call_status *call_status = &private_data->call_status;
3627 struct sep_device *sep = private_data->device;
3628 struct sep_dma_context **dma_ctx = &private_data->dma_ctx;
3629 struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
3630 ssize_t error = 0, error_tmp = 0;
3632 /* Am I the process that owns the transaction? */
3633 error = sep_check_transaction_owner(sep);
3635 dev_dbg(&sep->pdev->dev, "[PID%d] read pid is not owner\n",
3640 	/* Check that the user has called the necessary APIs */
3641 if (0 == test_bit(SEP_FASTCALL_WRITE_DONE_OFFSET,
3642 &call_status->status)) {
3643 dev_warn(&sep->pdev->dev,
3644 "[PID%d] fastcall write not called\n",
3647 goto end_function_error;
3651 dev_warn(&sep->pdev->dev,
3652 "[PID%d] null user buffer\n",
3655 goto end_function_error;
3659 /* Wait for SEP to finish */
3660 wait_event(sep->event_interrupt,
3661 test_bit(SEP_WORKING_LOCK_BIT,
3662 &sep->in_use_flags) == 0);
3664 sep_dump_message(sep);
3666 dev_dbg(&sep->pdev->dev, "[PID%d] count_user = 0x%08zX\n",
3667 current->pid, count_user);
3669 	/* In case the user has allocated a bigger buffer */
3670 if (count_user > SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES)
3671 count_user = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES;
3673 if (copy_to_user(buf_user, sep->shared_addr, count_user)) {
3675 goto end_function_error;
3678 dev_dbg(&sep->pdev->dev, "[PID%d] read succeeded\n", current->pid);
3682 /* Copy possible tail data to user and free DCB and MLLIs */
3683 error_tmp = sep_free_dcb_handler(sep, dma_ctx);
3685 dev_warn(&sep->pdev->dev, "[PID%d] dcb free failed\n",
3688 /* End the transaction, wakeup pending ones */
3689 error_tmp = sep_end_transaction_handler(sep, dma_ctx, call_status,
3692 dev_warn(&sep->pdev->dev,
3693 "[PID%d] ending transaction failed\n",
3701 * sep_fastcall_args_get - Gets fastcall params from user
3703 * @args: Parameters buffer
3704 * @buf_user: User buffer for operation parameters
3705 * @count_user: User buffer size
3707 static inline ssize_t sep_fastcall_args_get(struct sep_device *sep,
3708 struct sep_fastcall_hdr *args,
3709 const char __user *buf_user,
3710 const size_t count_user)
3713 size_t actual_count = 0;
3716 dev_warn(&sep->pdev->dev,
3717 "[PID%d] null user buffer\n",
3723 if (count_user < sizeof(struct sep_fastcall_hdr)) {
3724 dev_warn(&sep->pdev->dev,
3725 "[PID%d] too small message size 0x%08zX\n",
3726 current->pid, count_user);
3732 if (copy_from_user(args, buf_user, sizeof(struct sep_fastcall_hdr))) {
3737 if (SEP_FC_MAGIC != args->magic) {
3738 dev_warn(&sep->pdev->dev,
3739 "[PID%d] invalid fastcall magic 0x%08X\n",
3740 current->pid, args->magic);
3745 dev_dbg(&sep->pdev->dev, "[PID%d] fastcall hdr num of DCBs 0x%08X\n",
3746 current->pid, args->num_dcbs);
3747 dev_dbg(&sep->pdev->dev, "[PID%d] fastcall hdr msg len 0x%08X\n",
3748 current->pid, args->msg_len);
3750 if (SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES < args->msg_len ||
3751 SEP_DRIVER_MIN_MESSAGE_SIZE_IN_BYTES > args->msg_len) {
3752 dev_warn(&sep->pdev->dev,
3753 "[PID%d] invalid message length\n",
3759 actual_count = sizeof(struct sep_fastcall_hdr)
3761 + (args->num_dcbs * sizeof(struct build_dcb_struct));
3763 if (actual_count != count_user) {
3764 dev_warn(&sep->pdev->dev,
3765 "[PID%d] inconsistent message "
3766 "sizes 0x%08zX vs 0x%08zX\n",
3767 current->pid, actual_count, count_user);
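
/*
 * Hedged sketch of the wire format sep_fastcall_args_get() above
 * expects from a single write(): a sep_fastcall_hdr, then num_dcbs
 * build_dcb_struct entries, then the message itself. Field names
 * follow their use in this file; buffer handling is illustrative.
 */
#if 0	/* user-space illustration, never compiled */
	struct sep_fastcall_hdr hdr = {
		.magic = SEP_FC_MAGIC,
		.num_dcbs = 1,
		.msg_len = msg_len,	/* within the min/max limits */
		.secure_dma = 0,
	};
	memcpy(buf, &hdr, sizeof(hdr));
	memcpy(buf + sizeof(hdr), &dcb, sizeof(dcb));
	memcpy(buf + sizeof(hdr) + sizeof(dcb), msg, msg_len);
	write(fd, buf, sizeof(hdr) + sizeof(dcb) + msg_len);
#endif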
3777 * sep_write - Starts an operation for fastcall interface
3778 * @filp: File pointer
3779 * @buf_user: User buffer for operation parameters
3780 * @count_user: User buffer size
3781 * @offset: File offset, not supported
3783 * The implementation does not support writing in chunks,
3784 * all data must be given during a single write system call.
3786 static ssize_t sep_write(struct file *filp,
3787 const char __user *buf_user, size_t count_user,
3790 struct sep_private_data * const private_data = filp->private_data;
3791 struct sep_call_status *call_status = &private_data->call_status;
3792 struct sep_device *sep = private_data->device;
3793 struct sep_dma_context *dma_ctx = NULL;
3794 struct sep_fastcall_hdr call_hdr = {0};
3795 void *msg_region = NULL;
3796 void *dmatables_region = NULL;
3797 struct sep_dcblock *dcb_region = NULL;
3799 struct sep_queue_info *my_queue_elem = NULL;
3800 bool my_secure_dma; /* are we using secure_dma (IMR)? */
3802 dev_dbg(&sep->pdev->dev, "[PID%d] sep dev is 0x%p\n",
3804 dev_dbg(&sep->pdev->dev, "[PID%d] private_data is 0x%p\n",
3805 current->pid, private_data);
3807 error = sep_fastcall_args_get(sep, &call_hdr, buf_user, count_user);
3811 buf_user += sizeof(struct sep_fastcall_hdr);
3813 if (call_hdr.secure_dma == 0)
3814 my_secure_dma = false;
3816 my_secure_dma = true;
3819 	 * Control driver memory usage by limiting the number of
3820 	 * buffers created; only SEP_DOUBLEBUF_USERS_LIMIT threads
3821 	 * can progress further at a time
3823 dev_dbg(&sep->pdev->dev,
3824 "[PID%d] waiting for double buffering region access\n",
3826 error = down_interruptible(&sep->sep_doublebuf);
3827 dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region start\n",
3830 /* Signal received */
3831 goto end_function_error;
3836 * Prepare contents of the shared area regions for
3837 * the operation into temporary buffers
3839 if (0 < call_hdr.num_dcbs) {
3840 error = sep_create_dcb_dmatables_context(sep,
3844 (const struct build_dcb_struct __user *)
3846 call_hdr.num_dcbs, my_secure_dma);
3848 goto end_function_error_doublebuf;
3850 buf_user += call_hdr.num_dcbs * sizeof(struct build_dcb_struct);
3853 error = sep_create_msgarea_context(sep,
3858 goto end_function_error_doublebuf;
3860 dev_dbg(&sep->pdev->dev, "[PID%d] updating queue status\n",
3862 my_queue_elem = sep_queue_status_add(sep,
3863 ((struct sep_msgarea_hdr *)msg_region)->opcode,
3864 (dma_ctx) ? dma_ctx->input_data_len : 0,
3866 current->comm, sizeof(current->comm));
3868 if (!my_queue_elem) {
3869 dev_dbg(&sep->pdev->dev,
3870 "[PID%d] updating queue status error\n", current->pid);
3872 goto end_function_error_doublebuf;
3875 /* Wait until current process gets the transaction */
3876 error = sep_wait_transaction(sep);
3879 /* Interrupted by signal, don't clear transaction */
3880 dev_dbg(&sep->pdev->dev, "[PID%d] interrupted by signal\n",
3882 sep_queue_status_remove(sep, &my_queue_elem);
3883 goto end_function_error_doublebuf;
3886 dev_dbg(&sep->pdev->dev, "[PID%d] saving queue element\n",
3888 private_data->my_queue_elem = my_queue_elem;
3890 /* Activate shared area regions for the transaction */
3891 error = sep_activate_msgarea_context(sep, &msg_region,
3894 goto end_function_error_clear_transact;
3896 sep_dump_message(sep);
3898 if (0 < call_hdr.num_dcbs) {
3899 error = sep_activate_dcb_dmatables_context(sep,
3904 goto end_function_error_clear_transact;
3907 /* Send command to SEP */
3908 error = sep_send_command_handler(sep);
3910 goto end_function_error_clear_transact;
3912 /* Store DMA context for the transaction */
3913 private_data->dma_ctx = dma_ctx;
3914 /* Update call status */
3915 set_bit(SEP_FASTCALL_WRITE_DONE_OFFSET, &call_status->status);
3918 up(&sep->sep_doublebuf);
3919 dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region end\n",
3924 end_function_error_clear_transact:
3925 sep_end_transaction_handler(sep, &dma_ctx, call_status,
3926 &private_data->my_queue_elem);
3928 end_function_error_doublebuf:
3929 up(&sep->sep_doublebuf);
3930 dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region end\n",
3935 sep_free_dma_table_data_handler(sep, &dma_ctx);
3939 kfree(dmatables_region);
3945 * sep_seek - Handler for seek system call
3946 * @filp: File pointer
3947 * @offset: File offset
3948 * @origin: Options for offset
3950 * Fastcall interface does not support seeking, all reads
3951 * and writes are from/to offset zero
3953 static loff_t sep_seek(struct file *filp, loff_t offset, int origin)
3961 * sep_file_operations - file operation on sep device
3962 * @sep_ioctl: ioctl handler from user space call
3963 * @sep_poll: poll handler
3964 * @sep_open: handles sep device open request
3965  * @sep_release: handles sep device release request
3966 * @sep_mmap: handles memory mapping requests
3967 * @sep_read: handles read request on sep device
3968 * @sep_write: handles write request on sep device
3969 * @sep_seek: handles seek request on sep device
3971 static const struct file_operations sep_file_operations = {
3972 .owner = THIS_MODULE,
3973 .unlocked_ioctl = sep_ioctl,
3976 .release = sep_release,
3984  * sep_sysfs_read - read sysfs entry per given arguments
3985 * @filp: file pointer
3986 * @kobj: kobject pointer
3987 * @attr: binary file attributes
3988 * @buf: read to this buffer
3989 * @pos: offset to read
3990 * @count: amount of data to read
3992  * This function reads sysfs entries for the sep driver per the given arguments.
3995 sep_sysfs_read(struct file *filp, struct kobject *kobj,
3996 struct bin_attribute *attr,
3997 char *buf, loff_t pos, size_t count)
3999 unsigned long lck_flags;
4000 size_t nleft = count;
4001 struct sep_device *sep = sep_dev;
4002 struct sep_queue_info *queue_elem = NULL;
4006 spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);
4008 queue_num = sep->sep_queue_num;
4009 if (queue_num > SEP_DOUBLEBUF_USERS_LIMIT)
4010 queue_num = SEP_DOUBLEBUF_USERS_LIMIT;
4013 if (count < sizeof(queue_num)
4014 + (queue_num * sizeof(struct sep_queue_data))) {
4015 spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);
4019 memcpy(buf, &queue_num, sizeof(queue_num));
4020 buf += sizeof(queue_num);
4021 nleft -= sizeof(queue_num);
4023 list_for_each_entry(queue_elem, &sep->sep_queue_status, list) {
4024 		if (i++ >= queue_num)	/* copy at most queue_num entries */
4027 memcpy(buf, &queue_elem->data, sizeof(queue_elem->data));
4028 nleft -= sizeof(queue_elem->data);
4029 buf += sizeof(queue_elem->data);
4031 spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);
4033 return count - nleft;
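
/*
 * Hedged user-space sketch of parsing the queue_status blob produced
 * above: a queue count followed by that many sep_queue_data records.
 * The u32 count type and record layout are assumptions based on the
 * memcpy() calls in sep_sysfs_read().
 */
#if 0	/* illustration only, never compiled */
	u32 queue_num;
	char blob[sizeof(u32) +
		  SEP_DOUBLEBUF_USERS_LIMIT * sizeof(struct sep_queue_data)];
	struct sep_queue_data entries[SEP_DOUBLEBUF_USERS_LIMIT];

	read(fd, blob, sizeof(blob));	/* fd: the queue_status attribute */
	memcpy(&queue_num, blob, sizeof(queue_num));
	memcpy(entries, blob + sizeof(queue_num),
	       queue_num * sizeof(struct sep_queue_data));
#endif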
4037 * bin_attributes - defines attributes for queue_status
4038 * @attr: attributes (name & permissions)
4039 * @read: function pointer to read this file
4040  * @size: maximum size of binary attribute
4042 static const struct bin_attribute queue_status = {
4043 .attr = {.name = "queue_status", .mode = 0444},
4044 .read = sep_sysfs_read,
4046 + (SEP_DOUBLEBUF_USERS_LIMIT * sizeof(struct sep_queue_data)),
4050 * sep_register_driver_with_fs - register misc devices
4051 * @sep: pointer to struct sep_device
4053 * This function registers the driver with the file system
4055 static int sep_register_driver_with_fs(struct sep_device *sep)
4059 sep->miscdev_sep.minor = MISC_DYNAMIC_MINOR;
4060 sep->miscdev_sep.name = SEP_DEV_NAME;
4061 sep->miscdev_sep.fops = &sep_file_operations;
4063 ret_val = misc_register(&sep->miscdev_sep);
4065 dev_warn(&sep->pdev->dev, "misc reg fails for SEP %x\n",
4070 ret_val = device_create_bin_file(sep->miscdev_sep.this_device,
4073 		dev_warn(&sep->pdev->dev, "sysfs attribute fails for SEP %x\n",
4075 misc_deregister(&sep->miscdev_sep);
4084  * sep_probe - probe a matching PCI device
4085  * @pdev: pci device
4086  * @ent: pci_device_id
4088  * Attempt to set up and configure a SEP device that has been
4089  * discovered by the PCI layer. Allocates all required resources.
4091 static int sep_probe(struct pci_dev *pdev,
4092 const struct pci_device_id *ent)
4095 struct sep_device *sep = NULL;
4097 if (sep_dev != NULL) {
4098 dev_dbg(&pdev->dev, "only one SEP supported.\n");
4102 /* Enable the device */
4103 error = pci_enable_device(pdev);
4105 dev_warn(&pdev->dev, "error enabling pci device\n");
4109 /* Allocate the sep_device structure for this device */
4110 sep_dev = kzalloc(sizeof(struct sep_device), GFP_ATOMIC);
4111 if (sep_dev == NULL) {
4113 goto end_function_disable_device;
4117 * We're going to use another variable for actually
4118 * working with the device; this way, if we have
4119 * multiple devices in the future, it would be easier
4120 * to make appropriate changes
4124 sep->pdev = pci_dev_get(pdev);
4126 init_waitqueue_head(&sep->event_transactions);
4127 init_waitqueue_head(&sep->event_interrupt);
4128 spin_lock_init(&sep->snd_rply_lck);
4129 spin_lock_init(&sep->sep_queue_lock);
4130 sema_init(&sep->sep_doublebuf, SEP_DOUBLEBUF_USERS_LIMIT);
4132 INIT_LIST_HEAD(&sep->sep_queue_status);
4134 dev_dbg(&sep->pdev->dev,
4135 "sep probe: PCI obtained, device being prepared\n");
4137 /* Set up our register area */
4138 sep->reg_physical_addr = pci_resource_start(sep->pdev, 0);
4139 if (!sep->reg_physical_addr) {
4140 dev_warn(&sep->pdev->dev, "Error getting register start\n");
4142 goto end_function_free_sep_dev;
4145 sep->reg_physical_end = pci_resource_end(sep->pdev, 0);
4146 if (!sep->reg_physical_end) {
4147 dev_warn(&sep->pdev->dev, "Error getting register end\n");
4149 goto end_function_free_sep_dev;
4152 sep->reg_addr = ioremap_nocache(sep->reg_physical_addr,
4153 (size_t)(sep->reg_physical_end - sep->reg_physical_addr + 1));
4154 if (!sep->reg_addr) {
4155 dev_warn(&sep->pdev->dev, "Error getting register virtual\n");
4157 goto end_function_free_sep_dev;
4160 dev_dbg(&sep->pdev->dev,
4161 "Register area start %llx end %llx virtual %p\n",
4162 (unsigned long long)sep->reg_physical_addr,
4163 (unsigned long long)sep->reg_physical_end,
4166 /* Allocate the shared area */
4167 sep->shared_size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
4168 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES +
4169 SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES +
4170 SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES +
4171 SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
4173 if (sep_map_and_alloc_shared_area(sep)) {
4175 /* Allocation failed */
4176 goto end_function_error;
4179 /* Clear ICR register */
4180 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
4182 /* Set the IMR register - open only GPR 2 */
4183 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
4185 /* Read send/receive counters from SEP */
4186 sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
4187 sep->reply_ct &= 0x3FFFFFFF;
4188 sep->send_ct = sep->reply_ct;
4190 /* Get the interrupt line */
4191 error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED,
4195 goto end_function_deallocate_sep_shared_area;
4197 /* The new chip requires a shared area reconfigure */
4198 error = sep_reconfig_shared_area(sep);
4200 goto end_function_free_irq;
4204 /* Finally magic up the device nodes */
4205 /* Register driver with the fs */
4206 error = sep_register_driver_with_fs(sep);
4209 dev_err(&sep->pdev->dev, "error registering dev file\n");
4210 goto end_function_free_irq;
4213 	sep->in_use = 0; /* done touching the device */
4214 #ifdef SEP_ENABLE_RUNTIME_PM
4215 pm_runtime_put_noidle(&sep->pdev->dev);
4216 pm_runtime_allow(&sep->pdev->dev);
4217 pm_runtime_set_autosuspend_delay(&sep->pdev->dev,
4219 pm_runtime_use_autosuspend(&sep->pdev->dev);
4220 pm_runtime_mark_last_busy(&sep->pdev->dev);
4221 sep->power_save_setup = 1;
4223 /* register kernel crypto driver */
4224 #if defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE)
4225 error = sep_crypto_setup();
4227 dev_err(&sep->pdev->dev, "crypto setup failed\n");
4228 goto end_function_free_irq;
4233 end_function_free_irq:
4234 free_irq(pdev->irq, sep);
4236 end_function_deallocate_sep_shared_area:
4237 /* De-allocate shared area */
4238 sep_unmap_and_free_shared_area(sep);
4241 iounmap(sep->reg_addr);
4243 end_function_free_sep_dev:
4244 pci_dev_put(sep_dev->pdev);
4248 end_function_disable_device:
4249 pci_disable_device(pdev);
4256 * sep_remove - handles removing device from pci subsystem
4257 * @pdev: pointer to pci device
4259 * This function will handle removing our sep device from pci subsystem on exit
4260  * or unloading this module. It should free up all used resources and unmap
4261  * any mapped memory regions.
4263 static void sep_remove(struct pci_dev *pdev)
4265 struct sep_device *sep = sep_dev;
4267 /* Unregister from fs */
4268 misc_deregister(&sep->miscdev_sep);
4270 /* Unregister from kernel crypto */
4271 #if defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE)
4272 sep_crypto_takedown();
4275 free_irq(sep->pdev->irq, sep);
4277 /* Free the shared area */
4278 sep_unmap_and_free_shared_area(sep_dev);
4279 iounmap(sep_dev->reg_addr);
4281 #ifdef SEP_ENABLE_RUNTIME_PM
4284 pm_runtime_forbid(&sep->pdev->dev);
4285 pm_runtime_get_noresume(&sep->pdev->dev);
4288 pci_dev_put(sep_dev->pdev);
4293 /* Initialize struct pci_device_id for our driver */
4294 static const struct pci_device_id sep_pci_id_tbl[] = {
4295 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0826)},
4296 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08e9)},
4300 /* Export our pci_device_id structure to user space */
4301 MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);
4303 #ifdef SEP_ENABLE_RUNTIME_PM
4306  * sep_pci_resume - resume routine while waking up from S3 state
4307  * @dev: pointer to sep device
4309  * This function is used to wake up the sep driver while the system awakes
4310  * from S3 state, i.e. suspend to RAM. The RAM is intact.
4311 * Notes - revisit with more understanding of pm, ICR/IMR & counters.
4313 static int sep_pci_resume(struct device *dev)
4315 struct sep_device *sep = sep_dev;
4317 dev_dbg(&sep->pdev->dev, "pci resume called\n");
4319 if (sep->power_state == SEP_DRIVER_POWERON)
4322 /* Clear ICR register */
4323 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
4325 /* Set the IMR register - open only GPR 2 */
4326 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
4328 /* Read send/receive counters from SEP */
4329 sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
4330 sep->reply_ct &= 0x3FFFFFFF;
4331 sep->send_ct = sep->reply_ct;
4333 sep->power_state = SEP_DRIVER_POWERON;
4339  * sep_pci_suspend - suspend routine while going to S3 state
4340  * @dev: pointer to sep device
4342  * This function is used to suspend the sep driver while the system goes to
4343  * S3 state, i.e. suspend to RAM. The RAM is intact and ON during this suspend.
4344 * Notes - revisit with more understanding of pm, ICR/IMR
4346 static int sep_pci_suspend(struct device *dev)
4348 struct sep_device *sep = sep_dev;
4350 dev_dbg(&sep->pdev->dev, "pci suspend called\n");
4351 if (sep->in_use == 1)
4354 sep->power_state = SEP_DRIVER_POWEROFF;
4356 /* Clear ICR register */
4357 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
4359 /* Set the IMR to block all */
4360 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0xFFFFFFFF);
4366 * sep_pm_runtime_resume - runtime resume routine
4367 * @dev: pointer to sep device
4369 * Notes - revisit with more understanding of pm, ICR/IMR & counters
4371 static int sep_pm_runtime_resume(struct device *dev)
4376 struct sep_device *sep = sep_dev;
4378 dev_dbg(&sep->pdev->dev, "pm runtime resume called\n");
4381 * Wait until the SCU boot is ready
4382 * This is done by iterating SCU_DELAY_ITERATION (10
4383 * microseconds each) up to SCU_DELAY_MAX (50) times.
4384 	 * The bit can be set at any time within the first
4385 	 * 500 microseconds after each power resume
4389 while ((!retval2) && (delay_count < SCU_DELAY_MAX)) {
4390 retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
4391 retval2 &= 0x00000008;
4393 udelay(SCU_DELAY_ITERATION);
4399 dev_warn(&sep->pdev->dev, "scu boot bit not set at resume\n");
4403 /* Clear ICR register */
4404 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
4406 /* Set the IMR register - open only GPR 2 */
4407 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
4409 /* Read send/receive counters from SEP */
4410 sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
4411 sep->reply_ct &= 0x3FFFFFFF;
4412 sep->send_ct = sep->reply_ct;
4418 * sep_pm_runtime_suspend - runtime suspend routine
4419 * @dev: pointer to sep device
4421 * Notes - revisit with more understanding of pm
4423 static int sep_pm_runtime_suspend(struct device *dev)
4425 struct sep_device *sep = sep_dev;
4427 dev_dbg(&sep->pdev->dev, "pm runtime suspend called\n");
4429 /* Clear ICR register */
4430 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
4435 * sep_pm - power management for sep driver
4436  * @sep_pm_runtime_resume: resume - no communication with cpu & main memory
4437  * @sep_pm_runtime_suspend: suspend - no communication with cpu & main memory
4438 * @sep_pci_suspend: suspend - main memory is still ON
4439 * @sep_pci_resume: resume - main memory is still ON
4441 static const struct dev_pm_ops sep_pm = {
4442 .runtime_resume = sep_pm_runtime_resume,
4443 .runtime_suspend = sep_pm_runtime_suspend,
4444 .resume = sep_pci_resume,
4445 .suspend = sep_pci_suspend,
4447 #endif /* SEP_ENABLE_RUNTIME_PM */
4450 * sep_pci_driver - registers this device with pci subsystem
4451 * @name: name identifier for this driver
4452 * @sep_pci_id_tbl: pointer to struct pci_device_id table
4453 * @sep_probe: pointer to probe function in PCI driver
4454 * @sep_remove: pointer to remove function in PCI driver
4456 static struct pci_driver sep_pci_driver = {
4457 #ifdef SEP_ENABLE_RUNTIME_PM
4462 .name = "sep_sec_driver",
4463 .id_table = sep_pci_id_tbl,
4465 .remove = sep_remove
4468 module_pci_driver(sep_pci_driver);
4469 MODULE_LICENSE("GPL");