linux.git: drivers/tee/optee/core.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2021, Linaro Limited
 * Copyright (c) 2016, EPAM Systems
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/crash_dump.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tee_drv.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include "optee_private.h"

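/*
 * Allocate zeroed, physically contiguous pages for @shm and, when a
 * @shm_register callback is supplied, pass the resulting page list to it
 * so the memory can be registered with the secure world. The requested
 * @align is ignored since the allocation is already page aligned.
 * Returns 0 on success or a negative errno, freeing the pages again if
 * registration fails.
 */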
int optee_pool_op_alloc_helper(struct tee_shm_pool *pool, struct tee_shm *shm,
                               size_t size, size_t align,
                               int (*shm_register)(struct tee_context *ctx,
                                                   struct tee_shm *shm,
                                                   struct page **pages,
                                                   size_t num_pages,
                                                   unsigned long start))
{
        unsigned int order = get_order(size);
        struct page *page;
        int rc = 0;

        /*
         * Ignore alignment since this is already going to be page aligned
         * and there's no need for any larger alignment.
         */
        page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
        if (!page)
                return -ENOMEM;

        shm->kaddr = page_address(page);
        shm->paddr = page_to_phys(page);
        shm->size = PAGE_SIZE << order;

        if (shm_register) {
                unsigned int nr_pages = 1 << order, i;
                struct page **pages;

                pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
                if (!pages) {
                        rc = -ENOMEM;
                        goto err;
                }

                for (i = 0; i < nr_pages; i++)
                        pages[i] = page + i;

                rc = shm_register(shm->ctx, shm, pages, nr_pages,
                                  (unsigned long)shm->kaddr);
                kfree(pages);
                if (rc)
                        goto err;
        }

        return 0;

err:
        free_pages((unsigned long)shm->kaddr, order);
        return rc;
}

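/*
 * Counterpart to optee_pool_op_alloc_helper(): unregister the shared
 * memory via the optional @shm_unregister callback and release the pages
 * backing @shm.
 */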
void optee_pool_op_free_helper(struct tee_shm_pool *pool, struct tee_shm *shm,
                               int (*shm_unregister)(struct tee_context *ctx,
                                                     struct tee_shm *shm))
{
        if (shm_unregister)
                shm_unregister(shm->ctx, shm);
        free_pages((unsigned long)shm->kaddr, get_order(shm->size));
        shm->kaddr = NULL;
}

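/*
 * Workqueue callback enumerating the TEE bus devices that need
 * tee-supplicant to be available; queued from optee_open() below once the
 * supplicant has connected.
 */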
static void optee_bus_scan(struct work_struct *work)
{
        WARN_ON(optee_enumerate_devices(PTA_CMD_GET_DEVICES_SUPP));
}

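/*
 * Open a driver context. For the supplicant device this additionally
 * claims the single supplicant slot (-EBUSY if it is already taken) and,
 * on the first supplicant connection, kicks off the deferred bus scan
 * above so supplicant-dependent devices get enumerated.
 */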
int optee_open(struct tee_context *ctx, bool cap_memref_null)
{
        struct optee_context_data *ctxdata;
        struct tee_device *teedev = ctx->teedev;
        struct optee *optee = tee_get_drvdata(teedev);

        ctxdata = kzalloc(sizeof(*ctxdata), GFP_KERNEL);
        if (!ctxdata)
                return -ENOMEM;

        if (teedev == optee->supp_teedev) {
                bool busy = true;

                mutex_lock(&optee->supp.mutex);
                if (!optee->supp.ctx) {
                        busy = false;
                        optee->supp.ctx = ctx;
                }
                mutex_unlock(&optee->supp.mutex);
                if (busy) {
                        kfree(ctxdata);
                        return -EBUSY;
                }

                if (!optee->scan_bus_done) {
                        INIT_WORK(&optee->scan_bus_work, optee_bus_scan);
                        optee->scan_bus_wq = create_workqueue("optee_bus_scan");
                        if (!optee->scan_bus_wq) {
                                kfree(ctxdata);
                                return -EFAULT;
                        }
                        queue_work(optee->scan_bus_wq, &optee->scan_bus_work);
                        optee->scan_bus_done = true;
                }
        }
        mutex_init(&ctxdata->mutex);
        INIT_LIST_HEAD(&ctxdata->sess_list);

        ctx->cap_memref_null = cap_memref_null;
        ctx->data = ctxdata;
        return 0;
}

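/*
 * Common context teardown: close every session still tracked in the
 * context using the ABI specific @close_session callback, then free the
 * per context data.
 */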
static void optee_release_helper(struct tee_context *ctx,
                                 int (*close_session)(struct tee_context *ctx,
                                                      u32 session))
{
        struct optee_context_data *ctxdata = ctx->data;
        struct optee_session *sess;
        struct optee_session *sess_tmp;

        if (!ctxdata)
                return;

        list_for_each_entry_safe(sess, sess_tmp, &ctxdata->sess_list,
                                 list_node) {
                list_del(&sess->list_node);
                close_session(ctx, sess->session_id);
                kfree(sess);
        }
        kfree(ctxdata);
        ctx->data = NULL;
}

void optee_release(struct tee_context *ctx)
{
        optee_release_helper(ctx, optee_close_session_helper);
}

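/*
 * Supplicant flavour of optee_release(): also tears down the bus scan
 * workqueue set up in optee_open() and lets optee_supp_release() clean up
 * the supplicant state.
 */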
void optee_release_supp(struct tee_context *ctx)
{
        struct optee *optee = tee_get_drvdata(ctx->teedev);

        optee_release_helper(ctx, optee_close_session_helper);
        if (optee->scan_bus_wq) {
                destroy_workqueue(optee->scan_bus_wq);
                optee->scan_bus_wq = NULL;
        }
        optee_supp_release(&optee->supp);
}

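/*
 * Teardown shared by the ABI backends: unregister the client devices and
 * both TEE devices before the remaining resources (shared memory pool,
 * supplicant state, call queue) are freed.
 */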
void optee_remove_common(struct optee *optee)
{
        /* Unregister OP-TEE specific client devices on TEE bus */
        optee_unregister_devices();

        optee_notif_uninit(optee);
        optee_shm_arg_cache_uninit(optee);
        teedev_close_context(optee->ctx);
        /*
         * The two devices have to be unregistered before we can free the
         * other resources.
         */
        tee_device_unregister(optee->supp_teedev);
        tee_device_unregister(optee->teedev);

        tee_shm_pool_free(optee->pool);
        optee_supp_uninit(&optee->supp);
        mutex_destroy(&optee->call_queue.mutex);
}

static int smc_abi_rc;
static int ffa_abi_rc;

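/*
 * Module init: try to register both the SMC and the FF-A based ABIs and
 * keep their return codes so that only the ABIs that actually registered
 * are unregistered again in optee_core_exit(). Registration is skipped in
 * a kdump kernel for the reason explained in the comment below.
 */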
static int optee_core_init(void)
{
        /*
         * The kernel may have crashed at the same time that all available
         * secure world threads were suspended and we cannot reschedule the
         * suspended threads without access to the crashed kernel's wait_queue.
         * Therefore, we cannot reliably initialize the OP-TEE driver in the
         * kdump kernel.
         */
        if (is_kdump_kernel())
                return -ENODEV;

        smc_abi_rc = optee_smc_abi_register();
        ffa_abi_rc = optee_ffa_abi_register();

        /* If both failed there's no point with this module */
        if (smc_abi_rc && ffa_abi_rc)
                return smc_abi_rc;
        return 0;
}
module_init(optee_core_init);

static void optee_core_exit(void)
{
        if (!smc_abi_rc)
                optee_smc_abi_unregister();
        if (!ffa_abi_rc)
                optee_ffa_abi_unregister();
}
module_exit(optee_core_exit);

MODULE_AUTHOR("Linaro");
MODULE_DESCRIPTION("OP-TEE driver");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:optee");