// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, NVIDIA Corporation.
 */

#include <linux/device.h>
#include <linux/kref.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/pid.h>
#include <linux/slab.h>

#include "context.h"
#include "dev.h"

int host1x_memory_context_list_init(struct host1x *host1x)
{
	struct host1x_memory_context_list *cdl = &host1x->context_list;
	struct device_node *node = host1x->dev->of_node;
	struct host1x_memory_context *ctx;
	unsigned int i;
	int err;

	cdl->devs = NULL;
	cdl->len = 0;
	mutex_init(&cdl->lock);

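	/*
	 * The number of context devices to create is derived from the
	 * "iommu-map" device tree property. If the property is absent,
	 * this host1x instance does not support memory contexts and the
	 * list is simply left empty; that is not an error.
	 */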
	err = of_property_count_u32_elems(node, "iommu-map");
	if (err < 0)
		return 0;

	cdl->devs = kcalloc(err, sizeof(*cdl->devs), GFP_KERNEL);
	if (!cdl->devs)
		return -ENOMEM;
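	/* Each "iommu-map" entry is four u32 cells, so one context device per entry. */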
	cdl->len = err / 4;

	for (i = 0; i < cdl->len; i++) {
		ctx = &cdl->devs[i];

		ctx->host = host1x;

		device_initialize(&ctx->dev);

		/*
		 * Due to an issue with T194 NVENC, only 38 bits can be used.
		 * Anyway, 256GiB of IOVA ought to be enough for anyone.
		 */
		ctx->dma_mask = DMA_BIT_MASK(38);
		ctx->dev.dma_mask = &ctx->dma_mask;
		ctx->dev.coherent_dma_mask = ctx->dma_mask;
		dev_set_name(&ctx->dev, "host1x-ctx.%d", i);
		ctx->dev.bus = &host1x_context_device_bus_type;
		ctx->dev.parent = host1x->dev;

		dma_set_max_seg_size(&ctx->dev, UINT_MAX);

		err = device_add(&ctx->dev);
		if (err) {
			dev_err(host1x->dev, "could not add context device %d: %d\n", i, err);
			goto del_devices;
		}

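		/*
		 * Passing the loop index as the ID makes of_dma_configure_id()
		 * translate it through the "iommu-map" property, so each
		 * context device ends up attached to its own IOMMU stream ID.
		 */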
		err = of_dma_configure_id(&ctx->dev, node, true, &i);
		if (err) {
			dev_err(host1x->dev, "IOMMU configuration failed for context device %d: %d\n",
				i, err);
			device_del(&ctx->dev);
			goto del_devices;
		}

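		/*
		 * A context device is only useful behind an IOMMU; cache its
		 * stream ID so jobs executing in this context can be set up
		 * to use it.
		 */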
		if (!tegra_dev_iommu_get_stream_id(&ctx->dev, &ctx->stream_id) ||
		    !device_iommu_mapped(&ctx->dev)) {
			dev_err(host1x->dev, "Context device %d has no IOMMU!\n", i);
			device_del(&ctx->dev);
			goto del_devices;
		}
	}

	return 0;

del_devices:
	while (i--)
		device_del(&cdl->devs[i].dev);

	kfree(cdl->devs);
	cdl->len = 0;

	return err;
}

void host1x_memory_context_list_free(struct host1x_memory_context_list *cdl)
{
	unsigned int i;

	for (i = 0; i < cdl->len; i++)
		device_del(&cdl->devs[i].dev);

	kfree(cdl->devs);
	cdl->len = 0;
}

struct host1x_memory_context *host1x_memory_context_alloc(struct host1x *host1x,
							   struct device *dev,
							   struct pid *pid)
{
	struct host1x_memory_context_list *cdl = &host1x->context_list;
	struct host1x_memory_context *free = NULL;
	int i;

	if (!cdl->len)
		return ERR_PTR(-EOPNOTSUPP);

	mutex_lock(&cdl->lock);

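	/*
	 * Reuse a context this PID already owns, considering only devices
	 * behind the same IOMMU instance as the client; otherwise remember
	 * the first unowned context as a fallback.
	 */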
	for (i = 0; i < cdl->len; i++) {
		struct host1x_memory_context *cd = &cdl->devs[i];

		if (cd->dev.iommu->iommu_dev != dev->iommu->iommu_dev)
			continue;

		if (cd->owner == pid) {
			refcount_inc(&cd->ref);
			mutex_unlock(&cdl->lock);
			return cd;
		} else if (!cd->owner && !free) {
			free = cd;
		}
	}

	if (!free) {
		mutex_unlock(&cdl->lock);
		return ERR_PTR(-EBUSY);
	}

	refcount_set(&free->ref, 1);
	free->owner = get_pid(pid);

	mutex_unlock(&cdl->lock);

	return free;
}
EXPORT_SYMBOL_GPL(host1x_memory_context_alloc);

void host1x_memory_context_get(struct host1x_memory_context *cd)
{
	refcount_inc(&cd->ref);
}
EXPORT_SYMBOL_GPL(host1x_memory_context_get);

void host1x_memory_context_put(struct host1x_memory_context *cd)
{
	struct host1x_memory_context_list *cdl = &cd->host->context_list;

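	/*
	 * Dropping the last reference only releases PID ownership; the
	 * context device itself stays registered so it can be handed out
	 * again by host1x_memory_context_alloc().
	 */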
	if (refcount_dec_and_mutex_lock(&cd->ref, &cdl->lock)) {
		put_pid(cd->owner);
		cd->owner = NULL;
		mutex_unlock(&cdl->lock);
	}
}
EXPORT_SYMBOL_GPL(host1x_memory_context_put);