// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2020 NVIDIA Corporation */

#include <linux/host1x.h>
#include <linux/iommu.h>
#include <linux/list.h>

#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_utils.h>

#include "drm.h"
#include "uapi.h"
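/*
 * Release callback for a mapping's kref: unpin the DMA mapping and drop the
 * GEM object reference before freeing the mapping itself.
 */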
static void tegra_drm_mapping_release(struct kref *ref)
{
	struct tegra_drm_mapping *mapping =
		container_of(ref, struct tegra_drm_mapping, ref);

	host1x_bo_unpin(mapping->map);
	host1x_bo_put(mapping->bo);

	kfree(mapping);
}
void tegra_drm_mapping_put(struct tegra_drm_mapping *mapping)
{
	kref_put(&mapping->ref, tegra_drm_mapping_release);
}
static void tegra_drm_channel_context_close(struct tegra_drm_context *context)
{
	struct tegra_drm_mapping *mapping;
	unsigned long id;

	if (context->memory_context)
		host1x_memory_context_put(context->memory_context);

	xa_for_each(&context->mappings, id, mapping)
		tegra_drm_mapping_put(mapping);

	xa_destroy(&context->mappings);

	host1x_channel_put(context->channel);

	kfree(context);
}
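/*
 * Called when a DRM file is closed: tear down all of its channel contexts
 * and release any client-managed syncpoints it still holds.
 */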
void tegra_drm_uapi_close_file(struct tegra_drm_file *file)
{
	struct tegra_drm_context *context;
	struct host1x_syncpt *sp;
	unsigned long id;

	xa_for_each(&file->contexts, id, context)
		tegra_drm_channel_context_close(context);

	xa_for_each(&file->syncpoints, id, sp)
		host1x_syncpt_put(sp);

	xa_destroy(&file->contexts);
	xa_destroy(&file->syncpoints);
}
static struct tegra_drm_client *tegra_drm_find_client(struct tegra_drm *tegra, u32 class)
{
	struct tegra_drm_client *client;

	list_for_each_entry(client, &tegra->clients, list)
		if (client->base.class == class)
			return client;

	return NULL;
}
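/*
 * CHANNEL_OPEN: look up the engine for the requested host1x class, take a
 * reference on its shared channel (or request a dedicated one), optionally
 * allocate a memory context for context isolation, and report the engine's
 * version and capabilities back to userspace.
 */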
int tegra_drm_ioctl_channel_open(struct drm_device *drm, void *data, struct drm_file *file)
{
	struct host1x *host = tegra_drm_to_host1x(drm->dev_private);
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct tegra_drm *tegra = drm->dev_private;
	struct drm_tegra_channel_open *args = data;
	struct tegra_drm_client *client = NULL;
	struct tegra_drm_context *context;
	int err;

	if (args->flags)
		return -EINVAL;

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	client = tegra_drm_find_client(tegra, args->host1x_class);
	if (!client) {
		err = -ENODEV;
		goto free;
	}

	if (client->shared_channel) {
		context->channel = host1x_channel_get(client->shared_channel);
	} else {
		context->channel = host1x_channel_request(&client->base);
		if (!context->channel) {
			err = -EBUSY;
			goto free;
		}
	}

	/* Only allocate context if the engine supports context isolation. */
	if (device_iommu_mapped(client->base.dev) && client->ops->can_use_memory_ctx) {
		bool supported;

		err = client->ops->can_use_memory_ctx(client, &supported);
		if (err)
			goto put_channel;

		if (supported)
			context->memory_context = host1x_memory_context_alloc(
				host, client->base.dev, get_task_pid(current, PIDTYPE_TGID));

		if (IS_ERR(context->memory_context)) {
			if (PTR_ERR(context->memory_context) != -EOPNOTSUPP) {
				err = PTR_ERR(context->memory_context);
				goto put_channel;
			} else {
				/*
				 * OK, HW does not support contexts or contexts
				 * are disabled.
				 */
				context->memory_context = NULL;
			}
		}
	}

	err = xa_alloc(&fpriv->contexts, &args->context, context, XA_LIMIT(1, U32_MAX),
		       GFP_KERNEL);
	if (err < 0)
		goto put_memctx;

	context->client = client;
	xa_init_flags(&context->mappings, XA_FLAGS_ALLOC1);

	args->version = client->version;
	args->capabilities = 0;

	if (device_get_dma_attr(client->base.dev) == DEV_DMA_COHERENT)
		args->capabilities |= DRM_TEGRA_CHANNEL_CAP_CACHE_COHERENT;

	return 0;

put_memctx:
	if (context->memory_context)
		host1x_memory_context_put(context->memory_context);
put_channel:
	host1x_channel_put(context->channel);
free:
	kfree(context);

	return err;
}
int tegra_drm_ioctl_channel_close(struct drm_device *drm, void *data, struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_channel_close *args = data;
	struct tegra_drm_context *context;

	mutex_lock(&fpriv->lock);

	context = xa_load(&fpriv->contexts, args->context);
	if (!context) {
		mutex_unlock(&fpriv->lock);
		return -EINVAL;
	}

	xa_erase(&fpriv->contexts, args->context);

	mutex_unlock(&fpriv->lock);

	tegra_drm_channel_context_close(context);

	return 0;
}
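/*
 * CHANNEL_MAP: pin a GEM object into the engine's (or its memory context's)
 * IOVA space, with the DMA direction derived from the READ/WRITE flags, and
 * return a mapping ID for use in job submissions.
 */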
int tegra_drm_ioctl_channel_map(struct drm_device *drm, void *data, struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_channel_map *args = data;
	struct tegra_drm_mapping *mapping;
	struct tegra_drm_context *context;
	enum dma_data_direction direction;
	struct device *mapping_dev;
	int err = 0;

	if (args->flags & ~DRM_TEGRA_CHANNEL_MAP_READ_WRITE)
		return -EINVAL;

	mutex_lock(&fpriv->lock);

	context = xa_load(&fpriv->contexts, args->context);
	if (!context) {
		mutex_unlock(&fpriv->lock);
		return -EINVAL;
	}

	mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping) {
		err = -ENOMEM;
		goto unlock;
	}

	kref_init(&mapping->ref);

	if (context->memory_context)
		mapping_dev = &context->memory_context->dev;
	else
		mapping_dev = context->client->base.dev;

	mapping->bo = tegra_gem_lookup(file, args->handle);
	if (!mapping->bo) {
		err = -EINVAL;
		goto free;
	}

	switch (args->flags & DRM_TEGRA_CHANNEL_MAP_READ_WRITE) {
	case DRM_TEGRA_CHANNEL_MAP_READ_WRITE:
		direction = DMA_BIDIRECTIONAL;
		break;

	case DRM_TEGRA_CHANNEL_MAP_WRITE:
		direction = DMA_FROM_DEVICE;
		break;

	case DRM_TEGRA_CHANNEL_MAP_READ:
		direction = DMA_TO_DEVICE;
		break;

	default:
		err = -EINVAL;
		goto put_gem;
	}

	mapping->map = host1x_bo_pin(mapping_dev, mapping->bo, direction, NULL);
	if (IS_ERR(mapping->map)) {
		err = PTR_ERR(mapping->map);
		goto put_gem;
	}

	mapping->iova = mapping->map->phys;
	mapping->iova_end = mapping->iova + host1x_to_tegra_bo(mapping->bo)->gem.size;

	err = xa_alloc(&context->mappings, &args->mapping, mapping, XA_LIMIT(1, U32_MAX),
		       GFP_KERNEL);
	if (err < 0)
		goto unpin;

	mutex_unlock(&fpriv->lock);

	return 0;

unpin:
	host1x_bo_unpin(mapping->map);
put_gem:
	host1x_bo_put(mapping->bo);
free:
	kfree(mapping);
unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}
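/*
 * CHANNEL_UNMAP: remove the mapping from the context and drop the reference
 * taken at map time; the pin is released when the last reference goes away.
 */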
int tegra_drm_ioctl_channel_unmap(struct drm_device *drm, void *data, struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_channel_unmap *args = data;
	struct tegra_drm_mapping *mapping;
	struct tegra_drm_context *context;

	mutex_lock(&fpriv->lock);

	context = xa_load(&fpriv->contexts, args->context);
	if (!context) {
		mutex_unlock(&fpriv->lock);
		return -EINVAL;
	}

	mapping = xa_erase(&context->mappings, args->mapping);

	mutex_unlock(&fpriv->lock);

	if (!mapping)
		return -EINVAL;

	tegra_drm_mapping_put(mapping);

	return 0;
}
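/*
 * SYNCPOINT_ALLOCATE: reserve a client-managed host1x syncpoint for this
 * file and return its ID to userspace.
 */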
int tegra_drm_ioctl_syncpoint_allocate(struct drm_device *drm, void *data, struct drm_file *file)
{
	struct host1x *host1x = tegra_drm_to_host1x(drm->dev_private);
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_syncpoint_allocate *args = data;
	struct host1x_syncpt *sp;
	int err;

	if (args->id)
		return -EINVAL;

	sp = host1x_syncpt_alloc(host1x, HOST1X_SYNCPT_CLIENT_MANAGED, current->comm);
	if (!sp)
		return -EBUSY;

	args->id = host1x_syncpt_id(sp);

	err = xa_insert(&fpriv->syncpoints, args->id, sp, GFP_KERNEL);
	if (err) {
		host1x_syncpt_put(sp);
		return err;
	}

	return 0;
}
int tegra_drm_ioctl_syncpoint_free(struct drm_device *drm, void *data, struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_syncpoint_allocate *args = data;
	struct host1x_syncpt *sp;

	mutex_lock(&fpriv->lock);
	sp = xa_erase(&fpriv->syncpoints, args->id);
	mutex_unlock(&fpriv->lock);

	if (!sp)
		return -EINVAL;

	host1x_syncpt_put(sp);

	return 0;
}
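/*
 * SYNCPOINT_WAIT: block until the syncpoint reaches the given threshold or
 * the absolute timeout expires, reporting the current value back either way.
 */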
int tegra_drm_ioctl_syncpoint_wait(struct drm_device *drm, void *data, struct drm_file *file)
{
	struct host1x *host1x = tegra_drm_to_host1x(drm->dev_private);
	struct drm_tegra_syncpoint_wait *args = data;
	signed long timeout_jiffies;
	struct host1x_syncpt *sp;

	if (args->padding != 0)
		return -EINVAL;

	sp = host1x_syncpt_get_by_id_noref(host1x, args->id);
	if (!sp)
		return -EINVAL;

	timeout_jiffies = drm_timeout_abs_to_jiffies(args->timeout_ns);

	return host1x_syncpt_wait(sp, args->threshold, timeout_jiffies, &args->value);
}