// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, Linaro Limited
 */
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/genalloc.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include "tee_private.h"

static int pool_op_gen_alloc(struct tee_shm_pool_mgr *poolm,
			     struct tee_shm *shm, size_t size)
{
	unsigned long va;
	struct gen_pool *genpool = poolm->private_data;
	size_t s = roundup(size, 1 << genpool->min_alloc_order);

	va = gen_pool_alloc(genpool, s);
	if (!va)
		return -ENOMEM;

	/* Zero the buffer so no stale data is exposed to the other side */
	memset((void *)va, 0, s);
	shm->kaddr = (void *)va;
	shm->paddr = gen_pool_virt_to_phys(genpool, va);
	shm->size = s;

	return 0;
}

static void pool_op_gen_free(struct tee_shm_pool_mgr *poolm,
			     struct tee_shm *shm)
{
	gen_pool_free(poolm->private_data, (unsigned long)shm->kaddr,
		      shm->size);
	shm->kaddr = NULL;
}

static void pool_op_gen_destroy_poolmgr(struct tee_shm_pool_mgr *poolm)
{
	gen_pool_destroy(poolm->private_data);
	kfree(poolm);
}

static const struct tee_shm_pool_mgr_ops pool_ops_generic = {
	.alloc = pool_op_gen_alloc,
	.free = pool_op_gen_free,
	.destroy_poolmgr = pool_op_gen_destroy_poolmgr,
};

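/*
 * Note: tee_shm_pool_mgr_destroy(), called further down, is expected to
 * be a small inline helper in <linux/tee_drv.h> that simply dispatches
 * to the destroy_poolmgr hook above, roughly along these lines:
 *
 *	static inline void tee_shm_pool_mgr_destroy(struct tee_shm_pool_mgr *poolm)
 *	{
 *		poolm->ops->destroy_poolmgr(poolm);
 *	}
 */
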
/**
 * tee_shm_pool_alloc_res_mem() - Create a shared memory pool from reserved
 * memory range
 * @priv_info:	 Information for driver private shared memory pool
 * @dmabuf_info: Information for dma-buf shared memory pool
 *
 * Start and end of pools must be page aligned.
 *
 * Allocation with the flag TEE_SHM_DMA_BUF set will use the range supplied
 * in @dmabuf_info, others will use the range provided by @priv_info.
 *
 * @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure.
 */
struct tee_shm_pool *
tee_shm_pool_alloc_res_mem(struct tee_shm_pool_mem_info *priv_info,
			   struct tee_shm_pool_mem_info *dmabuf_info)
{
	struct tee_shm_pool_mgr *priv_mgr;
	struct tee_shm_pool_mgr *dmabuf_mgr;
	void *rc;

	/*
	 * Create the pool for driver private shared memory
	 */
	rc = tee_shm_pool_mgr_alloc_res_mem(priv_info->vaddr, priv_info->paddr,
					    priv_info->size,
					    3 /* 8 byte aligned */);
	if (IS_ERR(rc))
		return rc;
	priv_mgr = rc;

	/*
	 * Create the pool for dma_buf shared memory
	 */
	rc = tee_shm_pool_mgr_alloc_res_mem(dmabuf_info->vaddr,
					    dmabuf_info->paddr,
					    dmabuf_info->size, PAGE_SHIFT);
	if (IS_ERR(rc))
		goto err_free_priv_mgr;
	dmabuf_mgr = rc;

	rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
	if (IS_ERR(rc))
		goto err_free_dmabuf_mgr;

	return rc;

err_free_dmabuf_mgr:
	tee_shm_pool_mgr_destroy(dmabuf_mgr);
err_free_priv_mgr:
	tee_shm_pool_mgr_destroy(priv_mgr);

	return rc;
}
EXPORT_SYMBOL_GPL(tee_shm_pool_alloc_res_mem);

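/*
 * Illustrative sketch (hypothetical, not called from this file): feeding
 * an already-mapped, page-aligned reserved-memory carveout to
 * tee_shm_pool_alloc_res_mem(), split evenly between the private and
 * dma-buf pools. Assumes @size covers an even number of pages so both
 * halves stay page aligned; the function name is made up for the example.
 */
static struct tee_shm_pool * __maybe_unused
example_pool_from_carveout(unsigned long vaddr, phys_addr_t paddr,
			   size_t size)
{
	size_t half = size / 2;
	struct tee_shm_pool_mem_info priv_info = {
		.vaddr = vaddr,
		.paddr = paddr,
		.size = half,
	};
	struct tee_shm_pool_mem_info dmabuf_info = {
		.vaddr = vaddr + half,
		.paddr = paddr + half,
		.size = half,
	};

	return tee_shm_pool_alloc_res_mem(&priv_info, &dmabuf_info);
}
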
struct tee_shm_pool_mgr *tee_shm_pool_mgr_alloc_res_mem(unsigned long vaddr,
							phys_addr_t paddr,
							size_t size,
							int min_alloc_order)
{
	const size_t page_mask = PAGE_SIZE - 1;
	struct tee_shm_pool_mgr *mgr;
	int rc;

	/* Start and end must be page aligned */
	if (vaddr & page_mask || paddr & page_mask || size & page_mask)
		return ERR_PTR(-EINVAL);

	mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
	if (!mgr)
		return ERR_PTR(-ENOMEM);

	mgr->private_data = gen_pool_create(min_alloc_order, -1);
	if (!mgr->private_data) {
		rc = -ENOMEM;
		goto err;
	}

	/* Best-fit keeps fragmentation of the reserved range low */
	gen_pool_set_algo(mgr->private_data, gen_pool_best_fit, NULL);
	rc = gen_pool_add_virt(mgr->private_data, vaddr, paddr, size, -1);
	if (rc) {
		gen_pool_destroy(mgr->private_data);
		goto err;
	}

	mgr->ops = &pool_ops_generic;

	return mgr;
err:
	kfree(mgr);

	return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(tee_shm_pool_mgr_alloc_res_mem);

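/*
 * Illustrative note: min_alloc_order sets the gen_pool granularity, and
 * pool_op_gen_alloc() rounds each request up to that granularity. With
 * the order 3 used for the private pool above a request is rounded to a
 * multiple of 8 bytes, with PAGE_SHIFT to whole pages, e.g.:
 *
 *	roundup(100, 1 << 3)          == 104
 *	roundup(100, 1 << PAGE_SHIFT) == 4096	(with 4 KiB pages)
 */
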
static bool check_mgr_ops(struct tee_shm_pool_mgr *mgr)
{
	return mgr && mgr->ops && mgr->ops->alloc && mgr->ops->free &&
	       mgr->ops->destroy_poolmgr;
}

struct tee_shm_pool *tee_shm_pool_alloc(struct tee_shm_pool_mgr *priv_mgr,
					struct tee_shm_pool_mgr *dmabuf_mgr)
{
	struct tee_shm_pool *pool;

	if (!check_mgr_ops(priv_mgr) || !check_mgr_ops(dmabuf_mgr))
		return ERR_PTR(-EINVAL);

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	pool->private_mgr = priv_mgr;
	pool->dma_buf_mgr = dmabuf_mgr;

	return pool;
}
EXPORT_SYMBOL_GPL(tee_shm_pool_alloc);

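/*
 * Illustrative sketch (hypothetical): building a pool "by hand" with
 * tee_shm_pool_mgr_alloc_res_mem() and tee_shm_pool_alloc(), e.g. to
 * pick a different minimum allocation order for the private range. The
 * function name and the choice of order 6 are made up for the example.
 */
static struct tee_shm_pool * __maybe_unused
example_custom_pool(unsigned long va, phys_addr_t pa, size_t half)
{
	struct tee_shm_pool_mgr *priv_mgr;
	struct tee_shm_pool_mgr *dmabuf_mgr;
	struct tee_shm_pool *pool;

	/* 64-byte minimum allocations for driver private memory */
	priv_mgr = tee_shm_pool_mgr_alloc_res_mem(va, pa, half, 6);
	if (IS_ERR(priv_mgr))
		return ERR_CAST(priv_mgr);

	/* Page-sized minimum allocations for dma-buf memory */
	dmabuf_mgr = tee_shm_pool_mgr_alloc_res_mem(va + half, pa + half,
						    half, PAGE_SHIFT);
	if (IS_ERR(dmabuf_mgr)) {
		tee_shm_pool_mgr_destroy(priv_mgr);
		return ERR_CAST(dmabuf_mgr);
	}

	pool = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
	if (IS_ERR(pool)) {
		tee_shm_pool_mgr_destroy(dmabuf_mgr);
		tee_shm_pool_mgr_destroy(priv_mgr);
	}

	return pool;
}
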
/**
 * tee_shm_pool_free() - Free a shared memory pool
 * @pool:	The shared memory pool to free
 *
 * There must be no remaining shared memory allocated from this pool when
 * this function is called.
 */
void tee_shm_pool_free(struct tee_shm_pool *pool)
{
	if (pool->private_mgr)
		tee_shm_pool_mgr_destroy(pool->private_mgr);
	if (pool->dma_buf_mgr)
		tee_shm_pool_mgr_destroy(pool->dma_buf_mgr);
	kfree(pool);
}
EXPORT_SYMBOL_GPL(tee_shm_pool_free);
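
/*
 * Illustrative teardown order (hypothetical driver remove() path): all
 * tee_shm objects allocated from the pool must be gone before the pool
 * itself is freed, e.g.:
 *
 *	tee_device_unregister(teedev);
 *	tee_shm_pool_free(pool);
 */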