// SPDX-License-Identifier: GPL-2.0-only
/*
 * Memory allocator for buffers shared with the TrustZone.
 *
 * Copyright (C) 2023-2024 Linaro Ltd.
 */

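/*
 * Overview:
 *
 * Buffers passed to the TrustZone must come from memory the secure world
 * is allowed to access. This allocator manages pools of such memory: each
 * pool is backed by one or more DMA-coherent areas allocated against
 * qcom_tzmem_dev and handed to a genpool suballocator, while every
 * outstanding allocation is tracked in a global radix tree keyed by its
 * virtual address so that qcom_tzmem_free() and qcom_tzmem_to_phys() can
 * find its metadata.
 *
 * Exactly one backend is compiled in: a generic mode that needs no
 * per-area setup, or an SHM bridge mode that registers each area with the
 * secure world via SCM calls.
 */
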
#include <linux/bug.h>
#include <linux/cleanup.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/firmware/qcom/qcom_tzmem.h>
#include <linux/genalloc.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/radix-tree.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#include "qcom_tzmem.h"

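/**
 * struct qcom_tzmem_area - A single DMA-coherent region backing a pool.
 * @list: Node in the owning pool's list of areas.
 * @vaddr: Kernel virtual address of the region.
 * @paddr: DMA address of the region.
 * @size: Page-aligned size of the region in bytes.
 * @priv: Backend-specific data (the SHM bridge handle in SHM bridge mode).
 */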
struct qcom_tzmem_area {
	struct list_head list;
	void *vaddr;
	dma_addr_t paddr;
	size_t size;
	void *priv;
};

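/**
 * struct qcom_tzmem_pool - A pool of memory shareable with the TrustZone.
 * @genpool: Suballocator managing the pool's address space.
 * @areas: List of DMA-coherent areas backing the pool.
 * @policy: Growth policy of the pool.
 * @increment: Growth multiplier for QCOM_TZMEM_POLICY_MULTIPLIER pools.
 * @max_size: Maximum total size of the pool in bytes (0 means no limit).
 * @lock: Protects @areas.
 */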
struct qcom_tzmem_pool {
	struct gen_pool *genpool;
	struct list_head areas;
	enum qcom_tzmem_policy policy;
	size_t increment;
	size_t max_size;
	spinlock_t lock;
};

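/**
 * struct qcom_tzmem_chunk - Bookkeeping for a single allocation.
 * @paddr: Physical address of the allocated buffer.
 * @size: Page-aligned size of the allocation in bytes.
 * @owner: Pool from which the buffer was allocated.
 *
 * Chunks live in the global qcom_tzmem_chunks radix tree, keyed by the
 * buffer's virtual address.
 */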
struct qcom_tzmem_chunk {
	phys_addr_t paddr;
	size_t size;
	struct qcom_tzmem_pool *owner;
};

static struct device *qcom_tzmem_dev;
static RADIX_TREE(qcom_tzmem_chunks, GFP_ATOMIC);
static DEFINE_SPINLOCK(qcom_tzmem_chunks_lock);

#if IS_ENABLED(CONFIG_QCOM_TZMEM_MODE_GENERIC)

static int qcom_tzmem_init(void)
{
	return 0;
}

static int qcom_tzmem_init_area(struct qcom_tzmem_area *area)
{
	return 0;
}

static void qcom_tzmem_cleanup_area(struct qcom_tzmem_area *area)
{

}

#elif IS_ENABLED(CONFIG_QCOM_TZMEM_MODE_SHMBRIDGE)

#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/of.h>

#define QCOM_SHM_BRIDGE_NUM_VM_SHIFT 9

static bool qcom_tzmem_using_shm_bridge;

/* List of machines known not to support SHM bridge correctly. */
static const char *const qcom_tzmem_blacklist[] = {
	"qcom,sc8180x",
	"qcom,sdm845", /* reset in rmtfs memory assignment */
	"qcom,sm8150", /* reset in rmtfs memory assignment */
	NULL
};

static int qcom_tzmem_init(void)
{
	const char *const *platform;
	int ret;

	for (platform = qcom_tzmem_blacklist; *platform; platform++) {
		if (of_machine_is_compatible(*platform))
			goto notsupp;
	}

	ret = qcom_scm_shm_bridge_enable();
	if (ret == -EOPNOTSUPP)
		goto notsupp;

	if (!ret)
		qcom_tzmem_using_shm_bridge = true;

	return ret;

notsupp:
	dev_info(qcom_tzmem_dev, "SHM Bridge not supported\n");
	return 0;
}

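/*
 * Register a new area with the TrustZone as an SHM bridge. The SCM call
 * takes packed u64 arguments: the physical address combined with the
 * RW permissions for the non-secure and secure views of the region, and
 * the area size with the number of VMs (here one, HLOS) encoded above
 * QCOM_SHM_BRIDGE_NUM_VM_SHIFT. The returned bridge handle is stashed in
 * area->priv so the bridge can be torn down when the area is freed.
 */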
static int qcom_tzmem_init_area(struct qcom_tzmem_area *area)
{
	u64 pfn_and_ns_perm, ipfn_and_s_perm, size_and_flags;
	int ret;

	if (!qcom_tzmem_using_shm_bridge)
		return 0;

	pfn_and_ns_perm = (u64)area->paddr | QCOM_SCM_PERM_RW;
	ipfn_and_s_perm = (u64)area->paddr | QCOM_SCM_PERM_RW;
	size_and_flags = area->size | (1 << QCOM_SHM_BRIDGE_NUM_VM_SHIFT);

	u64 *handle __free(kfree) = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle)
		return -ENOMEM;

	ret = qcom_scm_shm_bridge_create(qcom_tzmem_dev, pfn_and_ns_perm,
					 ipfn_and_s_perm, size_and_flags,
					 QCOM_SCM_VMID_HLOS, handle);
	if (ret)
		return ret;

	area->priv = no_free_ptr(handle);

	return 0;
}

static void qcom_tzmem_cleanup_area(struct qcom_tzmem_area *area)
{
	u64 *handle = area->priv;

	if (!qcom_tzmem_using_shm_bridge)
		return;

	qcom_scm_shm_bridge_delete(qcom_tzmem_dev, *handle);
	kfree(handle);
}

#endif /* CONFIG_QCOM_TZMEM_MODE_SHMBRIDGE */

static int qcom_tzmem_pool_add_memory(struct qcom_tzmem_pool *pool,
				      size_t size, gfp_t gfp)
{
	int ret;

	struct qcom_tzmem_area *area __free(kfree) = kzalloc(sizeof(*area),
							     gfp);
	if (!area)
		return -ENOMEM;

	area->size = PAGE_ALIGN(size);

	area->vaddr = dma_alloc_coherent(qcom_tzmem_dev, area->size,
					 &area->paddr, gfp);
	if (!area->vaddr)
		return -ENOMEM;

	ret = qcom_tzmem_init_area(area);
	if (ret) {
		dma_free_coherent(qcom_tzmem_dev, area->size,
				  area->vaddr, area->paddr);
		return ret;
	}

	ret = gen_pool_add_virt(pool->genpool, (unsigned long)area->vaddr,
				(phys_addr_t)area->paddr, size, -1);
	if (ret) {
		dma_free_coherent(qcom_tzmem_dev, area->size,
				  area->vaddr, area->paddr);
		return ret;
	}

	scoped_guard(spinlock_irqsave, &pool->lock)
		list_add_tail(&area->list, &pool->areas);

	/* Ownership passed to the pool - disarm the __free() cleanup. */
	area = NULL;
	return 0;
}

/**
 * qcom_tzmem_pool_new() - Create a new TZ memory pool.
 * @config: Pool configuration.
 *
 * Create a new pool of memory suitable for sharing with the TrustZone.
 *
 * Must not be used in atomic context.
 *
 * Return: New memory pool address or ERR_PTR() on error.
 */
struct qcom_tzmem_pool *
qcom_tzmem_pool_new(const struct qcom_tzmem_pool_config *config)
{
	int ret = -ENOMEM;

	might_sleep();

	switch (config->policy) {
	case QCOM_TZMEM_POLICY_STATIC:
		if (!config->initial_size)
			return ERR_PTR(-EINVAL);
		break;
	case QCOM_TZMEM_POLICY_MULTIPLIER:
		if (!config->increment)
			return ERR_PTR(-EINVAL);
		break;
	case QCOM_TZMEM_POLICY_ON_DEMAND:
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	struct qcom_tzmem_pool *pool __free(kfree) = kzalloc(sizeof(*pool),
							     GFP_KERNEL);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	pool->genpool = gen_pool_create(PAGE_SHIFT, -1);
	if (!pool->genpool)
		return ERR_PTR(-ENOMEM);

	gen_pool_set_algo(pool->genpool, gen_pool_best_fit, NULL);

	pool->policy = config->policy;
	pool->increment = config->increment;
	pool->max_size = config->max_size;
	INIT_LIST_HEAD(&pool->areas);
	spin_lock_init(&pool->lock);

	if (config->initial_size) {
		ret = qcom_tzmem_pool_add_memory(pool, config->initial_size,
						 GFP_KERNEL);
		if (ret) {
			gen_pool_destroy(pool->genpool);
			return ERR_PTR(ret);
		}
	}

	return_ptr(pool);
}
EXPORT_SYMBOL_GPL(qcom_tzmem_pool_new);
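
/*
 * Example (illustrative only): a driver might create a static 4K pool
 * like this, using the fields of struct qcom_tzmem_pool_config that this
 * function reads above:
 *
 *	struct qcom_tzmem_pool_config config = {
 *		.policy = QCOM_TZMEM_POLICY_STATIC,
 *		.initial_size = SZ_4K,
 *	};
 *	struct qcom_tzmem_pool *pool;
 *
 *	pool = qcom_tzmem_pool_new(&config);
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 */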

/**
 * qcom_tzmem_pool_free() - Destroy a TZ memory pool and free all resources.
 * @pool: Memory pool to free.
 *
 * Must not be called before all chunks allocated from the pool have been
 * freed. Must not be used in atomic context.
 */
void qcom_tzmem_pool_free(struct qcom_tzmem_pool *pool)
{
	struct qcom_tzmem_area *area, *next;
	struct qcom_tzmem_chunk *chunk;
	struct radix_tree_iter iter;
	bool non_empty = false;
	void __rcu **slot;

	might_sleep();

	if (!pool)
		return;

	scoped_guard(spinlock_irqsave, &qcom_tzmem_chunks_lock) {
		radix_tree_for_each_slot(slot, &qcom_tzmem_chunks, &iter, 0) {
			chunk = radix_tree_deref_slot_protected(slot,
						&qcom_tzmem_chunks_lock);

			if (chunk->owner == pool)
				non_empty = true;
		}
	}

	WARN(non_empty, "Freeing TZ memory pool with memory still allocated");

	list_for_each_entry_safe(area, next, &pool->areas, list) {
		list_del(&area->list);
		qcom_tzmem_cleanup_area(area);
		dma_free_coherent(qcom_tzmem_dev, area->size,
				  area->vaddr, area->paddr);
		kfree(area);
	}

	gen_pool_destroy(pool->genpool);
	kfree(pool);
}
EXPORT_SYMBOL_GPL(qcom_tzmem_pool_free);

static void devm_qcom_tzmem_pool_free(void *data)
{
	struct qcom_tzmem_pool *pool = data;

	qcom_tzmem_pool_free(pool);
}

/**
 * devm_qcom_tzmem_pool_new() - Managed variant of qcom_tzmem_pool_new().
 * @dev: Device managing this resource.
 * @config: Pool configuration.
 *
 * Must not be used in atomic context.
 *
 * Return: Address of the managed pool or ERR_PTR() on failure.
 */
struct qcom_tzmem_pool *
devm_qcom_tzmem_pool_new(struct device *dev,
			 const struct qcom_tzmem_pool_config *config)
{
	struct qcom_tzmem_pool *pool;
	int ret;

	pool = qcom_tzmem_pool_new(config);
	if (IS_ERR(pool))
		return pool;

	ret = devm_add_action_or_reset(dev, devm_qcom_tzmem_pool_free, pool);
	if (ret)
		return ERR_PTR(ret);

	return pool;
}
EXPORT_SYMBOL_GPL(devm_qcom_tzmem_pool_new);

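/*
 * Grow the pool according to its policy: static pools never grow,
 * multiplier pools grow by current_size * pool->increment, and on-demand
 * pools grow by exactly the requested amount. Growth is refused if
 * satisfying the request would push the pool past its max_size (if one
 * is set). Returns true if memory was added and the allocation is worth
 * retrying.
 */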
static bool qcom_tzmem_try_grow_pool(struct qcom_tzmem_pool *pool,
				     size_t requested, gfp_t gfp)
{
	size_t current_size = gen_pool_size(pool->genpool);

	if (pool->max_size && (current_size + requested) > pool->max_size)
		return false;

	switch (pool->policy) {
	case QCOM_TZMEM_POLICY_STATIC:
		return false;
	case QCOM_TZMEM_POLICY_MULTIPLIER:
		requested = current_size * pool->increment;
		break;
	case QCOM_TZMEM_POLICY_ON_DEMAND:
		break;
	}

	return !qcom_tzmem_pool_add_memory(pool, requested, gfp);
}

/**
 * qcom_tzmem_alloc() - Allocate a memory chunk suitable for sharing with TZ.
 * @pool: TZ memory pool from which to allocate memory.
 * @size: Number of bytes to allocate.
 * @gfp: GFP flags.
 *
 * Can be used in any context.
 *
 * Return:
 * Address of the allocated buffer or NULL if no more memory can be allocated.
 * The buffer must be released using qcom_tzmem_free().
 */
void *qcom_tzmem_alloc(struct qcom_tzmem_pool *pool, size_t size, gfp_t gfp)
{
	unsigned long vaddr;
	int ret;

	if (!size)
		return NULL;

	size = PAGE_ALIGN(size);

	struct qcom_tzmem_chunk *chunk __free(kfree) = kzalloc(sizeof(*chunk),
							       gfp);
	if (!chunk)
		return NULL;

again:
	vaddr = gen_pool_alloc(pool->genpool, size);
	if (!vaddr) {
		if (qcom_tzmem_try_grow_pool(pool, size, gfp))
			goto again;

		return NULL;
	}

	chunk->paddr = gen_pool_virt_to_phys(pool->genpool, vaddr);
	chunk->size = size;
	chunk->owner = pool;

	scoped_guard(spinlock_irqsave, &qcom_tzmem_chunks_lock) {
		ret = radix_tree_insert(&qcom_tzmem_chunks, vaddr, chunk);
		if (ret) {
			gen_pool_free(pool->genpool, vaddr, size);
			return NULL;
		}

		/* Ownership passed to the tree - disarm the __free() cleanup. */
		chunk = NULL;
	}

	return (void *)vaddr;
}
EXPORT_SYMBOL_GPL(qcom_tzmem_alloc);
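
/*
 * A typical call sequence (illustrative only - make_scm_call() is a
 * hypothetical stand-in for whatever SCM call the driver issues with the
 * shared buffer):
 *
 *	void *buf;
 *	int ret;
 *
 *	buf = qcom_tzmem_alloc(pool, len, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *
 *	ret = make_scm_call(qcom_tzmem_to_phys(buf), len);
 *
 *	qcom_tzmem_free(buf);
 *	return ret;
 */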

/**
 * qcom_tzmem_free() - Release a buffer allocated from a TZ memory pool.
 * @vaddr: Virtual address of the buffer.
 *
 * Can be used in any context.
 */
void qcom_tzmem_free(void *vaddr)
{
	struct qcom_tzmem_chunk *chunk;

	scoped_guard(spinlock_irqsave, &qcom_tzmem_chunks_lock)
		chunk = radix_tree_delete_item(&qcom_tzmem_chunks,
					       (unsigned long)vaddr, NULL);

	if (!chunk) {
		WARN(1, "Virtual address %p not owned by TZ memory allocator",
		     vaddr);
		return;
	}

	scoped_guard(spinlock_irqsave, &chunk->owner->lock)
		gen_pool_free(chunk->owner->genpool, (unsigned long)vaddr,
			      chunk->size);
	kfree(chunk);
}
EXPORT_SYMBOL_GPL(qcom_tzmem_free);

/**
 * qcom_tzmem_to_phys() - Map the virtual address of a TZ buffer to physical.
 * @vaddr: Virtual address of the buffer allocated from a TZ memory pool.
 *
 * Can be used in any context. The address must have been returned by a call
 * to qcom_tzmem_alloc().
 *
 * Return: Physical address of the buffer.
 */
phys_addr_t qcom_tzmem_to_phys(void *vaddr)
{
	struct qcom_tzmem_chunk *chunk;

	guard(spinlock_irqsave)(&qcom_tzmem_chunks_lock);

	chunk = radix_tree_lookup(&qcom_tzmem_chunks, (unsigned long)vaddr);
	if (!chunk)
		return 0;

	return chunk->paddr;
}
EXPORT_SYMBOL_GPL(qcom_tzmem_to_phys);

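/**
 * qcom_tzmem_enable() - Activate the TZ memory allocator.
 * @dev: Device whose DMA settings will back all pool allocations.
 *
 * Should be called once, before any pools are created. In SHM bridge mode
 * this also enables the bridge with the secure world, falling back to
 * generic behavior on platforms that don't support it.
 *
 * Return: 0 on success, negative error number otherwise.
 */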
int qcom_tzmem_enable(struct device *dev)
{
	if (qcom_tzmem_dev)
		return -EBUSY;

	qcom_tzmem_dev = dev;

	return qcom_tzmem_init();
}
EXPORT_SYMBOL_GPL(qcom_tzmem_enable);

MODULE_DESCRIPTION("TrustZone memory allocator for Qualcomm firmware drivers");
MODULE_AUTHOR("Bartosz Golaszewski <[email protected]>");
MODULE_LICENSE("GPL");