// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, NVIDIA Corporation.
 */

#include <linux/device.h>
#include <linux/kref.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/pid.h>
#include <linux/slab.h>

#include "context.h"
#include "dev.h"

static void host1x_memory_context_release(struct device *dev)
{
        /* context device is freed in host1x_memory_context_list_free() */
}

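/*
 * Context devices are placeholder devices, one per stream ID reserved in
 * the host1x node's "iommu-map". Mapping a job's buffers through one of
 * them and programming its stream ID into the engine keeps each client's
 * DMA isolated from the others.
 */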
int host1x_memory_context_list_init(struct host1x *host1x)
{
        struct host1x_memory_context_list *cdl = &host1x->context_list;
        struct device_node *node = host1x->dev->of_node;
        struct host1x_memory_context *ctx;
        unsigned int i;
        int err;

        cdl->devs = NULL;
        cdl->len = 0;
        mutex_init(&cdl->lock);

        err = of_property_count_u32_elems(node, "iommu-map");
        if (err < 0)
                return 0;

        cdl->devs = kcalloc(err, sizeof(*cdl->devs), GFP_KERNEL);
        if (!cdl->devs)
                return -ENOMEM;
        cdl->len = err / 4;

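        /*
         * Each "iommu-map" entry is four u32 cells (device ID base, IOMMU
         * phandle, IOMMU ID base, length), hence one context device per
         * four elements counted above.
         */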
        for (i = 0; i < cdl->len; i++) {
                ctx = &cdl->devs[i];

                ctx->host = host1x;

                device_initialize(&ctx->dev);

                /*
                 * Due to an issue with T194 NVENC, only 38 bits can be used.
                 * Anyway, 256GiB of IOVA ought to be enough for anyone.
                 */
                ctx->dma_mask = DMA_BIT_MASK(38);
                ctx->dev.dma_mask = &ctx->dma_mask;
                ctx->dev.coherent_dma_mask = ctx->dma_mask;
                dev_set_name(&ctx->dev, "host1x-ctx.%d", i);
                ctx->dev.bus = &host1x_context_device_bus_type;
                ctx->dev.parent = host1x->dev;
                ctx->dev.release = host1x_memory_context_release;

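                /*
                 * The DMA API defaults to a 64 KiB segment limit when none
                 * is set; lift it so scatterlists are not split needlessly.
                 */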
                dma_set_max_seg_size(&ctx->dev, UINT_MAX);

                err = device_add(&ctx->dev);
                if (err) {
                        dev_err(host1x->dev, "could not add context device %d: %d\n", i, err);
                        put_device(&ctx->dev);
                        goto unreg_devices;
                }

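                /*
                 * Translate the context's index through "iommu-map" so that
                 * each context device is assigned its own stream ID.
                 */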
                err = of_dma_configure_id(&ctx->dev, node, true, &i);
                if (err) {
                        dev_err(host1x->dev, "IOMMU configuration failed for context device %d: %d\n",
                                i, err);
                        device_unregister(&ctx->dev);
                        goto unreg_devices;
                }

                if (!tegra_dev_iommu_get_stream_id(&ctx->dev, &ctx->stream_id) ||
                    !device_iommu_mapped(&ctx->dev)) {
                        dev_err(host1x->dev, "Context device %d has no IOMMU!\n", i);
                        device_unregister(&ctx->dev);

                        /*
                         * If the IOMMU is disabled but context devices are
                         * described in the device tree, fail the probe.
                         */
                        err = -EINVAL;
                        goto unreg_devices;
                }
        }

        return 0;

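        /* Unwind: drop only the context devices registered so far. */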
unreg_devices:
        while (i--)
                device_unregister(&cdl->devs[i].dev);

        kfree(cdl->devs);
        cdl->devs = NULL;
        cdl->len = 0;

        return err;
}

void host1x_memory_context_list_free(struct host1x_memory_context_list *cdl)
{
        unsigned int i;

        for (i = 0; i < cdl->len; i++)
                device_unregister(&cdl->devs[i].dev);

        kfree(cdl->devs);
        cdl->len = 0;
}

struct host1x_memory_context *host1x_memory_context_alloc(struct host1x *host1x,
                                                           struct device *dev,
                                                           struct pid *pid)
{
        struct host1x_memory_context_list *cdl = &host1x->context_list;
        struct host1x_memory_context *free = NULL;
        int i;

        if (!cdl->len)
                return ERR_PTR(-EOPNOTSUPP);

        mutex_lock(&cdl->lock);

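        /*
         * Reuse a context already owned by this PID where possible;
         * otherwise remember the first unowned one as a fallback.
         */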
        for (i = 0; i < cdl->len; i++) {
                struct host1x_memory_context *cd = &cdl->devs[i];

                if (cd->dev.iommu->iommu_dev != dev->iommu->iommu_dev)
                        continue;

                if (cd->owner == pid) {
                        refcount_inc(&cd->ref);
                        mutex_unlock(&cdl->lock);
                        return cd;
                } else if (!cd->owner && !free) {
                        free = cd;
                }
        }

        if (!free) {
                mutex_unlock(&cdl->lock);
                return ERR_PTR(-EBUSY);
        }

        refcount_set(&free->ref, 1);
        free->owner = get_pid(pid);

        mutex_unlock(&cdl->lock);

        return free;
}
EXPORT_SYMBOL_GPL(host1x_memory_context_alloc);

void host1x_memory_context_get(struct host1x_memory_context *cd)
{
        refcount_inc(&cd->ref);
}
EXPORT_SYMBOL_GPL(host1x_memory_context_get);

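/*
 * Drop a reference. When the last reference goes away the context returns
 * to the free pool; the underlying context device itself stays registered
 * until host1x_memory_context_list_free().
 */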
void host1x_memory_context_put(struct host1x_memory_context *cd)
{
        struct host1x_memory_context_list *cdl = &cd->host->context_list;

        if (refcount_dec_and_mutex_lock(&cd->ref, &cdl->lock)) {
                put_pid(cd->owner);
                cd->owner = NULL;
                mutex_unlock(&cdl->lock);
        }
}
EXPORT_SYMBOL_GPL(host1x_memory_context_put);
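
/*
 * Illustrative caller pattern (a sketch, not this file's API contract;
 * "engine_dev" stands in for an engine's struct device, and the real
 * consumer lives in the Tegra DRM driver):
 *
 *      struct host1x_memory_context *ctx;
 *
 *      ctx = host1x_memory_context_alloc(host1x, engine_dev, task_pid(current));
 *      if (IS_ERR(ctx))
 *              return PTR_ERR(ctx);
 *
 *      ... map job buffers against &ctx->dev and program ctx->stream_id
 *      into the engine before submitting ...
 *
 *      host1x_memory_context_put(ctx);
 */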