// SPDX-License-Identifier: GPL-2.0-only
/*
 * Memory allocator for buffers shared with the TrustZone.
 *
 * Copyright (C) 2023-2024 Linaro Ltd.
 */

#include <linux/bug.h>
#include <linux/cleanup.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/firmware/qcom/qcom_tzmem.h>
#include <linux/genalloc.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/radix-tree.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#include "qcom_tzmem.h"

struct qcom_tzmem_area {
	struct list_head list;
	void *vaddr;
	dma_addr_t paddr;
	size_t size;
	void *priv;
};

struct qcom_tzmem_pool {
	struct gen_pool *genpool;
	struct list_head areas;
	enum qcom_tzmem_policy policy;
	size_t increment;
	size_t max_size;
	spinlock_t lock;
};

struct qcom_tzmem_chunk {
	size_t size;
	struct qcom_tzmem_pool *owner;
};

static struct device *qcom_tzmem_dev;
static RADIX_TREE(qcom_tzmem_chunks, GFP_ATOMIC);
static DEFINE_SPINLOCK(qcom_tzmem_chunks_lock);

#if IS_ENABLED(CONFIG_QCOM_TZMEM_MODE_GENERIC)

static int qcom_tzmem_init(void)
{
	return 0;
}

static int qcom_tzmem_init_area(struct qcom_tzmem_area *area)
{
	return 0;
}

static void qcom_tzmem_cleanup_area(struct qcom_tzmem_area *area)
{

}

#elif IS_ENABLED(CONFIG_QCOM_TZMEM_MODE_SHMBRIDGE)

#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/of.h>

#define QCOM_SHM_BRIDGE_NUM_VM_SHIFT 9

static bool qcom_tzmem_using_shm_bridge;

/* List of machines that are known to not support SHM bridge correctly. */
static const char *const qcom_tzmem_blacklist[] = {
	"qcom,sdm670", /* failure in GPU firmware loading */
	"qcom,sdm845", /* reset in rmtfs memory assignment */
	"qcom,sm8150", /* reset in rmtfs memory assignment */
	NULL
};

static int qcom_tzmem_init(void)
{
	const char *const *platform;
	int ret;

	for (platform = qcom_tzmem_blacklist; *platform; platform++) {
		if (of_machine_is_compatible(*platform))
			goto notsupp;
	}

	ret = qcom_scm_shm_bridge_enable();
	if (ret == -EOPNOTSUPP)
		goto notsupp;

	if (!ret)
		qcom_tzmem_using_shm_bridge = true;

	return ret;

notsupp:
	dev_info(qcom_tzmem_dev, "SHM Bridge not supported\n");
	return 0;
}

static int qcom_tzmem_init_area(struct qcom_tzmem_area *area)
{
	u64 pfn_and_ns_perm, ipfn_and_s_perm, size_and_flags;
	int ret;

	if (!qcom_tzmem_using_shm_bridge)
		return 0;

	pfn_and_ns_perm = (u64)area->paddr | QCOM_SCM_PERM_RW;
	ipfn_and_s_perm = (u64)area->paddr | QCOM_SCM_PERM_RW;
	size_and_flags = area->size | (1 << QCOM_SHM_BRIDGE_NUM_VM_SHIFT);

	u64 *handle __free(kfree) = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle)
		return -ENOMEM;

	ret = qcom_scm_shm_bridge_create(qcom_tzmem_dev, pfn_and_ns_perm,
					 ipfn_and_s_perm, size_and_flags,
					 QCOM_SCM_VMID_HLOS, handle);
	if (ret)
		return ret;

	/* The area takes ownership of the handle, disarm the __free() cleanup. */
	area->priv = no_free_ptr(handle);

	return 0;
}

static void qcom_tzmem_cleanup_area(struct qcom_tzmem_area *area)
{
	u64 *handle = area->priv;

	if (!qcom_tzmem_using_shm_bridge)
		return;

	qcom_scm_shm_bridge_delete(qcom_tzmem_dev, *handle);
	kfree(handle);
}

#endif /* CONFIG_QCOM_TZMEM_MODE_SHMBRIDGE */

static int qcom_tzmem_pool_add_memory(struct qcom_tzmem_pool *pool,
				      size_t size, gfp_t gfp)
{
	int ret;

	struct qcom_tzmem_area *area __free(kfree) = kzalloc(sizeof(*area),
							     gfp);
	if (!area)
		return -ENOMEM;

	area->size = PAGE_ALIGN(size);

	area->vaddr = dma_alloc_coherent(qcom_tzmem_dev, area->size,
					 &area->paddr, gfp);
	if (!area->vaddr)
		return -ENOMEM;

	ret = qcom_tzmem_init_area(area);
	if (ret) {
		dma_free_coherent(qcom_tzmem_dev, area->size,
				  area->vaddr, area->paddr);
		return ret;
	}

	ret = gen_pool_add_virt(pool->genpool, (unsigned long)area->vaddr,
				(phys_addr_t)area->paddr, size, -1);
	if (ret) {
		dma_free_coherent(qcom_tzmem_dev, area->size,
				  area->vaddr, area->paddr);
		return ret;
	}

	scoped_guard(spinlock_irqsave, &pool->lock)
		list_add_tail(&area->list, &pool->areas);

	/* The pool now owns the area, disarm the __free() cleanup. */
	area = NULL;

	return 0;
}

/**
 * qcom_tzmem_pool_new() - Create a new TZ memory pool.
 * @config: Pool configuration.
 *
 * Create a new pool of memory suitable for sharing with the TrustZone.
 *
 * Must not be used in atomic context.
 *
 * Return: New memory pool address or ERR_PTR() on error.
 */
struct qcom_tzmem_pool *
qcom_tzmem_pool_new(const struct qcom_tzmem_pool_config *config)
{
	int ret;

	might_sleep();

	switch (config->policy) {
	case QCOM_TZMEM_POLICY_STATIC:
		if (!config->initial_size)
			return ERR_PTR(-EINVAL);
		break;
	case QCOM_TZMEM_POLICY_MULTIPLIER:
		if (!config->increment)
			return ERR_PTR(-EINVAL);
		break;
	case QCOM_TZMEM_POLICY_ON_DEMAND:
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	struct qcom_tzmem_pool *pool __free(kfree) = kzalloc(sizeof(*pool),
							     GFP_KERNEL);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	pool->genpool = gen_pool_create(PAGE_SHIFT, -1);
	if (!pool->genpool)
		return ERR_PTR(-ENOMEM);

	gen_pool_set_algo(pool->genpool, gen_pool_best_fit, NULL);

	pool->policy = config->policy;
	pool->increment = config->increment;
	pool->max_size = config->max_size;
	INIT_LIST_HEAD(&pool->areas);
	spin_lock_init(&pool->lock);

	if (config->initial_size) {
		ret = qcom_tzmem_pool_add_memory(pool, config->initial_size,
						 GFP_KERNEL);
		if (ret) {
			gen_pool_destroy(pool->genpool);
			return ERR_PTR(ret);
		}
	}

	return_ptr(pool);
}
EXPORT_SYMBOL_GPL(qcom_tzmem_pool_new);

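/*
 * Illustrative usage sketch, not part of the exported kernel-doc: a client
 * driver could create a pool that grows on demand up to a hypothetical
 * 256K limit (the config values here are assumptions, not requirements):
 *
 *	struct qcom_tzmem_pool_config config = {
 *		.policy = QCOM_TZMEM_POLICY_ON_DEMAND,
 *		.max_size = SZ_256K,
 *	};
 *	struct qcom_tzmem_pool *pool;
 *
 *	pool = qcom_tzmem_pool_new(&config);
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 */
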
/**
 * qcom_tzmem_pool_free() - Destroy a TZ memory pool and free all resources.
 * @pool: Memory pool to free.
 *
 * Must not be called if any of the allocated chunks has not been freed.
 * Must not be used in atomic context.
 */
void qcom_tzmem_pool_free(struct qcom_tzmem_pool *pool)
{
	struct qcom_tzmem_area *area, *next;
	struct qcom_tzmem_chunk *chunk;
	struct radix_tree_iter iter;
	bool non_empty = false;
	void __rcu **slot;

	might_sleep();

	if (!pool)
		return;

	scoped_guard(spinlock_irqsave, &qcom_tzmem_chunks_lock) {
		radix_tree_for_each_slot(slot, &qcom_tzmem_chunks, &iter, 0) {
			chunk = radix_tree_deref_slot_protected(slot,
						&qcom_tzmem_chunks_lock);

			if (chunk->owner == pool)
				non_empty = true;
		}
	}

	WARN(non_empty, "Freeing TZ memory pool with memory still allocated");

	list_for_each_entry_safe(area, next, &pool->areas, list) {
		list_del(&area->list);
		qcom_tzmem_cleanup_area(area);
		dma_free_coherent(qcom_tzmem_dev, area->size,
				  area->vaddr, area->paddr);
		kfree(area);
	}

	gen_pool_destroy(pool->genpool);
	kfree(pool);
}
EXPORT_SYMBOL_GPL(qcom_tzmem_pool_free);

static void devm_qcom_tzmem_pool_free(void *data)
{
	struct qcom_tzmem_pool *pool = data;

	qcom_tzmem_pool_free(pool);
}

/**
 * devm_qcom_tzmem_pool_new() - Managed variant of qcom_tzmem_pool_new().
 * @dev: Device managing this resource.
 * @config: Pool configuration.
 *
 * Must not be used in atomic context.
 *
 * Return: Address of the managed pool or ERR_PTR() on failure.
 */
struct qcom_tzmem_pool *
devm_qcom_tzmem_pool_new(struct device *dev,
			 const struct qcom_tzmem_pool_config *config)
{
	struct qcom_tzmem_pool *pool;
	int ret;

	pool = qcom_tzmem_pool_new(config);
	if (IS_ERR(pool))
		return pool;

	ret = devm_add_action_or_reset(dev, devm_qcom_tzmem_pool_free, pool);
	if (ret)
		return ERR_PTR(ret);

	return pool;
}
EXPORT_SYMBOL_GPL(devm_qcom_tzmem_pool_new);

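/*
 * Illustrative sketch, not upstream documentation: in a driver's probe()
 * path the managed variant ties the pool's lifetime to the device, so no
 * explicit qcom_tzmem_pool_free() is needed on teardown ("pdev" and
 * "config" are assumed to exist in the caller):
 *
 *	pool = devm_qcom_tzmem_pool_new(&pdev->dev, &config);
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 */
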
static bool qcom_tzmem_try_grow_pool(struct qcom_tzmem_pool *pool,
				     size_t requested, gfp_t gfp)
{
	size_t current_size = gen_pool_size(pool->genpool);

	if (pool->max_size && (current_size + requested) > pool->max_size)
		return false;

	switch (pool->policy) {
	case QCOM_TZMEM_POLICY_STATIC:
		/* Static pools never grow. */
		return false;
	case QCOM_TZMEM_POLICY_MULTIPLIER:
		/* Grow by a multiple of the current size. */
		requested = current_size * pool->increment;
		break;
	case QCOM_TZMEM_POLICY_ON_DEMAND:
		/* Grow by exactly the requested amount. */
		break;
	}

	return !qcom_tzmem_pool_add_memory(pool, requested, gfp);
}

/**
 * qcom_tzmem_alloc() - Allocate a memory chunk suitable for sharing with TZ.
 * @pool: TZ memory pool from which to allocate memory.
 * @size: Number of bytes to allocate.
 * @gfp: GFP flags.
 *
 * Can be used in any context.
 *
 * Return:
 * Address of the allocated buffer or NULL if no more memory can be allocated.
 * The buffer must be released using qcom_tzmem_free().
 */
void *qcom_tzmem_alloc(struct qcom_tzmem_pool *pool, size_t size, gfp_t gfp)
{
	unsigned long vaddr;
	int ret;

	if (!size)
		return NULL;

	size = PAGE_ALIGN(size);

	struct qcom_tzmem_chunk *chunk __free(kfree) = kzalloc(sizeof(*chunk),
							       gfp);
	if (!chunk)
		return NULL;

again:
	vaddr = gen_pool_alloc(pool->genpool, size);
	if (!vaddr) {
		/* Try to grow the pool as per its policy, then retry. */
		if (qcom_tzmem_try_grow_pool(pool, size, gfp))
			goto again;

		return NULL;
	}

	chunk->size = size;
	chunk->owner = pool;

	scoped_guard(spinlock_irqsave, &qcom_tzmem_chunks_lock) {
		ret = radix_tree_insert(&qcom_tzmem_chunks, vaddr, chunk);
		if (ret) {
			gen_pool_free(pool->genpool, vaddr, size);
			return NULL;
		}

		/* The tree now owns the chunk, disarm the __free() cleanup. */
		chunk = NULL;
	}

	return (void *)vaddr;
}
EXPORT_SYMBOL_GPL(qcom_tzmem_alloc);

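/*
 * Illustrative sketch, not upstream documentation: a typical caller fills
 * the buffer, hands its physical address to the secure world and frees it
 * afterwards ("payload" and "payload_size" are assumed caller state):
 *
 *	void *buf = qcom_tzmem_alloc(pool, payload_size, GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	memcpy(buf, payload, payload_size);
 *	(pass qcom_tzmem_to_phys(buf) to the TZ call here)
 *	qcom_tzmem_free(buf);
 */
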
/**
 * qcom_tzmem_free() - Release a buffer allocated from a TZ memory pool.
 * @vaddr: Virtual address of the buffer.
 *
 * Can be used in any context.
 */
void qcom_tzmem_free(void *vaddr)
{
	struct qcom_tzmem_chunk *chunk;

	scoped_guard(spinlock_irqsave, &qcom_tzmem_chunks_lock)
		chunk = radix_tree_delete_item(&qcom_tzmem_chunks,
					       (unsigned long)vaddr, NULL);

	if (!chunk) {
		WARN(1, "Virtual address %p not owned by TZ memory allocator",
		     vaddr);
		return;
	}

	scoped_guard(spinlock_irqsave, &chunk->owner->lock)
		gen_pool_free(chunk->owner->genpool, (unsigned long)vaddr,
			      chunk->size);
	kfree(chunk);
}
EXPORT_SYMBOL_GPL(qcom_tzmem_free);

/**
 * qcom_tzmem_to_phys() - Map the virtual address of TZ memory to physical.
 * @vaddr: Virtual address of memory allocated from a TZ memory pool.
 *
 * Can be used in any context. The address must point to memory allocated
 * using qcom_tzmem_alloc().
 *
 * Return:
 * Physical address mapped from the virtual or 0 if the mapping failed.
 */
phys_addr_t qcom_tzmem_to_phys(void *vaddr)
{
	struct qcom_tzmem_chunk *chunk;
	struct radix_tree_iter iter;
	void __rcu **slot;
	phys_addr_t ret;

	guard(spinlock_irqsave)(&qcom_tzmem_chunks_lock);

	radix_tree_for_each_slot(slot, &qcom_tzmem_chunks, &iter, 0) {
		chunk = radix_tree_deref_slot_protected(slot,
						&qcom_tzmem_chunks_lock);

		ret = gen_pool_virt_to_phys(chunk->owner->genpool,
					    (unsigned long)vaddr);
		if (ret == -1)
			continue;

		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(qcom_tzmem_to_phys);

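/*
 * Illustrative sketch, not upstream documentation: SCM calls take physical
 * addresses, so callers translate the shared buffer before invoking the
 * firmware (the SCM call below is a hypothetical placeholder):
 *
 *	phys_addr_t paddr = qcom_tzmem_to_phys(buf);
 *
 *	if (!paddr)
 *		return -EINVAL;
 *	ret = some_qcom_scm_call(paddr, size);
 */
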
int qcom_tzmem_enable(struct device *dev)
{
	if (qcom_tzmem_dev)
		return -EBUSY;

	qcom_tzmem_dev = dev;

	return qcom_tzmem_init();
}
EXPORT_SYMBOL_GPL(qcom_tzmem_enable);

MODULE_DESCRIPTION("TrustZone memory allocator for Qualcomm firmware drivers");
MODULE_LICENSE("GPL");