linux.git: drivers/tee/optee/supp.c
/*
 * Copyright (c) 2015, Linaro Limited
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include "optee_private.h"

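/**
 * struct optee_supp_req - request sent to the supplicant
 * @link:	links the request into &struct optee_supp->reqs while queued
 * @busy:	true while the request has been retrieved by the supplicant
 * @func:	supplicant function requested
 * @ret:	return value of the request, filled in when it completes
 * @num_params:	number of elements in @param
 * @param:	parameters for the requested function
 * @c:		completed when the request has been processed or aborted
 */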
struct optee_supp_req {
	struct list_head link;

	bool busy;
	u32 func;
	u32 ret;
	size_t num_params;
	struct tee_param *param;

	struct completion c;
};

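/**
 * optee_supp_init() - initialize supplicant state
 * @supp:	supplicant state to initialize
 *
 * Sets up the request queue, the IDR used to track requests handed over to
 * the supplicant and the synchronization primitives. @supp->req_id == -1
 * means that no synchronous request is currently being processed.
 */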
void optee_supp_init(struct optee_supp *supp)
{
	memset(supp, 0, sizeof(*supp));
	mutex_init(&supp->mutex);
	init_completion(&supp->reqs_c);
	idr_init(&supp->idr);
	INIT_LIST_HEAD(&supp->reqs);
	supp->req_id = -1;
}

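/**
 * optee_supp_uninit() - release resources held by the supplicant state
 * @supp:	supplicant state to clean up
 */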
void optee_supp_uninit(struct optee_supp *supp)
{
	mutex_destroy(&supp->mutex);
	idr_destroy(&supp->idr);
}

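/**
 * optee_supp_release() - abort all outstanding supplicant requests
 * @supp:	supplicant state
 *
 * Both requests already retrieved by the supplicant and requests still
 * waiting in the queue are completed with TEEC_ERROR_COMMUNICATION so that
 * the threads waiting on them can return to secure world.
 */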
void optee_supp_release(struct optee_supp *supp)
{
	int id;
	struct optee_supp_req *req;
	struct optee_supp_req *req_tmp;

	mutex_lock(&supp->mutex);

	/* Abort all requests retrieved by the supplicant */
	idr_for_each_entry(&supp->idr, req, id) {
		req->busy = false;
		idr_remove(&supp->idr, id);
		req->ret = TEEC_ERROR_COMMUNICATION;
		complete(&req->c);
	}

	/* Abort all queued requests */
	list_for_each_entry_safe(req, req_tmp, &supp->reqs, link) {
		list_del(&req->link);
		req->ret = TEEC_ERROR_COMMUNICATION;
		complete(&req->c);
	}

	supp->ctx = NULL;
	supp->req_id = -1;

	mutex_unlock(&supp->mutex);
}

/**
 * optee_supp_thrd_req() - request service from supplicant
 * @ctx:        context doing the request
 * @func:       function requested
 * @num_params: number of elements in @param array
 * @param:      parameters for function
 *
 * Returns result of operation to be passed to secure world
 */
u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
			struct tee_param *param)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct optee_supp *supp = &optee->supp;
	struct optee_supp_req *req = kzalloc(sizeof(*req), GFP_KERNEL);
	bool interruptable;
	u32 ret;

	if (!req)
		return TEEC_ERROR_OUT_OF_MEMORY;

	init_completion(&req->c);
	req->func = func;
	req->num_params = num_params;
	req->param = param;

	/* Insert the request in the request list */
	mutex_lock(&supp->mutex);
	list_add_tail(&req->link, &supp->reqs);
	mutex_unlock(&supp->mutex);

	/* Tell any waiting supplicant thread there's a new request */
	complete(&supp->reqs_c);

	/*
	 * Wait for the supplicant to process the request and return the
	 * result. Once we've returned from wait_for_completion(&req->c)
	 * successfully we have exclusive access to the request again.
	 */
	while (wait_for_completion_interruptible(&req->c)) {
		mutex_lock(&supp->mutex);
		interruptable = !supp->ctx;
		if (interruptable) {
			/*
			 * There's no supplicant available and since
			 * supp->mutex currently is held none can become
			 * available until the mutex is released again.
			 *
			 * Interrupting an RPC to the supplicant is only
			 * allowed as a way of slightly improving the user
			 * experience in case the supplicant hasn't been
			 * started yet. During normal operation the supplicant
			 * will serve all requests in a timely manner and
			 * interrupting them wouldn't make sense.
			 */
			interruptable = !req->busy;
			if (!req->busy)
				list_del(&req->link);
		}
		mutex_unlock(&supp->mutex);

		if (interruptable) {
			req->ret = TEEC_ERROR_COMMUNICATION;
			break;
		}
	}

	ret = req->ret;
	kfree(req);

	return ret;
}

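/*
 * Pops the oldest queued request, moves it into the IDR so the result sent
 * back later can be matched against it, and returns it. Returns NULL if the
 * queue is empty and an ERR_PTR() on invalid usage or allocation failure.
 * Called with &supp->mutex held.
 */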
static struct optee_supp_req *supp_pop_entry(struct optee_supp *supp,
					     int num_params, int *id)
{
	struct optee_supp_req *req;

	if (supp->req_id != -1) {
		/*
		 * Supplicant should not mix synchronous and asynchronous
		 * requests.
		 */
		return ERR_PTR(-EINVAL);
	}

	if (list_empty(&supp->reqs))
		return NULL;

	req = list_first_entry(&supp->reqs, struct optee_supp_req, link);

	if (num_params < req->num_params) {
		/* Not enough room for parameters */
		return ERR_PTR(-EINVAL);
	}

	*id = idr_alloc(&supp->idr, req, 1, 0, GFP_KERNEL);
	if (*id < 0)
		return ERR_PTR(-ENOMEM);

	list_del(&req->link);
	req->busy = true;

	return req;
}

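/*
 * Sanity checks the parameters passed to optee_supp_recv() and drops the
 * shared memory references taken when the parameters were set up. On
 * success *num_meta tells whether the first parameter is a meta parameter.
 */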
static int supp_check_recv_params(size_t num_params, struct tee_param *params,
				  size_t *num_meta)
{
	size_t n;

	if (!num_params)
		return -EINVAL;

	/*
	 * If there are memrefs we need to decrease their reference counts
	 * as they were increased earlier, and we'll even refuse to accept
	 * any below.
	 */
	for (n = 0; n < num_params; n++)
		if (tee_param_is_memref(params + n) && params[n].u.memref.shm)
			tee_shm_put(params[n].u.memref.shm);

	/*
	 * We only expect parameters as TEE_IOCTL_PARAM_ATTR_TYPE_NONE with
	 * or without the TEE_IOCTL_PARAM_ATTR_META bit set.
	 */
	for (n = 0; n < num_params; n++)
		if (params[n].attr &&
		    params[n].attr != TEE_IOCTL_PARAM_ATTR_META)
			return -EINVAL;

	/* At most we'll need one meta parameter so no need to check for more */
	if (params->attr == TEE_IOCTL_PARAM_ATTR_META)
		*num_meta = 1;
	else
		*num_meta = 0;

	return 0;
}

/**
 * optee_supp_recv() - receive request for supplicant
 * @ctx:        context receiving the request
 * @func:       requested function in supplicant
 * @num_params: number of elements allocated in @param, updated with the
 *              number of used elements
 * @param:      space for parameters for @func
 *
 * Returns 0 on success or <0 on failure
 */
int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params,
		    struct tee_param *param)
{
	struct tee_device *teedev = ctx->teedev;
	struct optee *optee = tee_get_drvdata(teedev);
	struct optee_supp *supp = &optee->supp;
	struct optee_supp_req *req = NULL;
	int id;
	size_t num_meta;
	int rc;

	rc = supp_check_recv_params(*num_params, param, &num_meta);
	if (rc)
		return rc;

	while (true) {
		mutex_lock(&supp->mutex);
		req = supp_pop_entry(supp, *num_params - num_meta, &id);
		mutex_unlock(&supp->mutex);

		if (req) {
			if (IS_ERR(req))
				return PTR_ERR(req);
			break;
		}

		/*
		 * If we didn't get a request we'll block in
		 * wait_for_completion() to avoid needless spinning.
		 *
		 * This is where the supplicant will be hanging most of
		 * the time, let's make this interruptible so we can
		 * easily restart the supplicant if needed.
		 */
		if (wait_for_completion_interruptible(&supp->reqs_c))
			return -ERESTARTSYS;
	}

	if (num_meta) {
		/*
		 * tee-supplicant supports meta parameters -> requests can be
		 * processed asynchronously.
		 */
		param->attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT |
			      TEE_IOCTL_PARAM_ATTR_META;
		param->u.value.a = id;
		param->u.value.b = 0;
		param->u.value.c = 0;
	} else {
		mutex_lock(&supp->mutex);
		supp->req_id = id;
		mutex_unlock(&supp->mutex);
	}

	*func = req->func;
	*num_params = req->num_params + num_meta;
	memcpy(param + num_meta, req->param,
	       sizeof(struct tee_param) * req->num_params);

	return 0;
}

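/*
 * Looks up the request matching the result sent by the supplicant, either
 * via the meta parameter ID (asynchronous) or via supp->req_id
 * (synchronous), removes it from the IDR and returns it. Called with
 * &supp->mutex held.
 */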
static struct optee_supp_req *supp_pop_req(struct optee_supp *supp,
					   size_t num_params,
					   struct tee_param *param,
					   size_t *num_meta)
{
	struct optee_supp_req *req;
	int id;
	size_t nm;
	const u32 attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT |
			 TEE_IOCTL_PARAM_ATTR_META;

	if (!num_params)
		return ERR_PTR(-EINVAL);

	if (supp->req_id == -1) {
		if (param->attr != attr)
			return ERR_PTR(-EINVAL);
		id = param->u.value.a;
		nm = 1;
	} else {
		id = supp->req_id;
		nm = 0;
	}

	req = idr_find(&supp->idr, id);
	if (!req)
		return ERR_PTR(-ENOENT);

	if ((num_params - nm) != req->num_params)
		return ERR_PTR(-EINVAL);

	req->busy = false;
	idr_remove(&supp->idr, id);
	supp->req_id = -1;
	*num_meta = nm;

	return req;
}

/**
 * optee_supp_send() - send result of request from supplicant
 * @ctx:        context sending result
 * @ret:        return value of request
 * @num_params: number of parameters returned
 * @param:      returned parameters
 *
 * Returns 0 on success or <0 on failure.
 */
int optee_supp_send(struct tee_context *ctx, u32 ret, u32 num_params,
		    struct tee_param *param)
{
	struct tee_device *teedev = ctx->teedev;
	struct optee *optee = tee_get_drvdata(teedev);
	struct optee_supp *supp = &optee->supp;
	struct optee_supp_req *req;
	size_t n;
	size_t num_meta;

	mutex_lock(&supp->mutex);
	req = supp_pop_req(supp, num_params, param, &num_meta);
	mutex_unlock(&supp->mutex);

	if (IS_ERR(req)) {
		/* Something is wrong, let the supplicant restart. */
		return PTR_ERR(req);
	}

	/* Update out and in/out parameters */
	for (n = 0; n < req->num_params; n++) {
		struct tee_param *p = req->param + n;

		switch (p->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
			p->u.value.a = param[n + num_meta].u.value.a;
			p->u.value.b = param[n + num_meta].u.value.b;
			p->u.value.c = param[n + num_meta].u.value.c;
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
			p->u.memref.size = param[n + num_meta].u.memref.size;
			break;
		default:
			break;
		}
	}
	req->ret = ret;

	/* Let the requesting thread continue */
	complete(&req->c);

	return 0;
}