/* drivers/gpu/drm/i915/intel_memory_region.c */
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2019 Intel Corporation
4  */
5
6 #include "intel_memory_region.h"
7 #include "i915_drv.h"
8
/*
 * Map from INTEL_REGION_* id to the (class, instance) pair used when
 * probing/looking up memory regions. Indexed by region id; every region
 * here uses instance 0 (only one instance of each class on current HW).
 */
static const struct {
	u16 class;
	u16 instance;
} intel_region_map[] = {
	[INTEL_REGION_SMEM] = {
		.class = INTEL_MEMORY_SYSTEM,
		.instance = 0,
	},
	[INTEL_REGION_LMEM] = {
		.class = INTEL_MEMORY_LOCAL,
		.instance = 0,
	},
	[INTEL_REGION_STOLEN_SMEM] = {
		.class = INTEL_MEMORY_STOLEN_SYSTEM,
		.instance = 0,
	},
	[INTEL_REGION_STOLEN_LMEM] = {
		.class = INTEL_MEMORY_STOLEN_LOCAL,
		.instance = 0,
	},
};
30
/*
 * Bookkeeping node for one reserved range within a region; linked on
 * mem->reserved (under mem->mm_lock) so the range can be returned to the
 * backend via mem->priv_ops->free() at unreserve time.
 */
struct intel_region_reserve {
	struct list_head link;
	struct ttm_resource *res;
};
35
36 struct intel_memory_region *
37 intel_memory_region_lookup(struct drm_i915_private *i915,
38                            u16 class, u16 instance)
39 {
40         struct intel_memory_region *mr;
41         int id;
42
43         /* XXX: consider maybe converting to an rb tree at some point */
44         for_each_memory_region(mr, i915, id) {
45                 if (mr->type == class && mr->instance == instance)
46                         return mr;
47         }
48
49         return NULL;
50 }
51
52 struct intel_memory_region *
53 intel_memory_region_by_type(struct drm_i915_private *i915,
54                             enum intel_memory_type mem_type)
55 {
56         struct intel_memory_region *mr;
57         int id;
58
59         for_each_memory_region(mr, i915, id)
60                 if (mr->type == mem_type)
61                         return mr;
62
63         return NULL;
64 }
65
66 /**
67  * intel_memory_region_unreserve - Unreserve all previously reserved
68  * ranges
69  * @mem: The region containing the reserved ranges.
70  */
71 void intel_memory_region_unreserve(struct intel_memory_region *mem)
72 {
73         struct intel_region_reserve *reserve, *next;
74
75         if (!mem->priv_ops || !mem->priv_ops->free)
76                 return;
77
78         mutex_lock(&mem->mm_lock);
79         list_for_each_entry_safe(reserve, next, &mem->reserved, link) {
80                 list_del(&reserve->link);
81                 mem->priv_ops->free(mem, reserve->res);
82                 kfree(reserve);
83         }
84         mutex_unlock(&mem->mm_lock);
85 }
86
87 /**
88  * intel_memory_region_reserve - Reserve a memory range
89  * @mem: The region for which we want to reserve a range.
90  * @offset: Start of the range to reserve.
91  * @size: The size of the range to reserve.
92  *
93  * Return: 0 on success, negative error code on failure.
94  */
95 int intel_memory_region_reserve(struct intel_memory_region *mem,
96                                 resource_size_t offset,
97                                 resource_size_t size)
98 {
99         int ret;
100         struct intel_region_reserve *reserve;
101
102         if (!mem->priv_ops || !mem->priv_ops->reserve)
103                 return -EINVAL;
104
105         reserve = kzalloc(sizeof(*reserve), GFP_KERNEL);
106         if (!reserve)
107                 return -ENOMEM;
108
109         reserve->res = mem->priv_ops->reserve(mem, offset, size);
110         if (IS_ERR(reserve->res)) {
111                 ret = PTR_ERR(reserve->res);
112                 kfree(reserve);
113                 return ret;
114         }
115
116         mutex_lock(&mem->mm_lock);
117         list_add_tail(&reserve->link, &mem->reserved);
118         mutex_unlock(&mem->mm_lock);
119
120         return 0;
121 }
122
123 struct intel_memory_region *
124 intel_memory_region_create(struct drm_i915_private *i915,
125                            resource_size_t start,
126                            resource_size_t size,
127                            resource_size_t min_page_size,
128                            resource_size_t io_start,
129                            u16 type,
130                            u16 instance,
131                            const struct intel_memory_region_ops *ops)
132 {
133         struct intel_memory_region *mem;
134         int err;
135
136         mem = kzalloc(sizeof(*mem), GFP_KERNEL);
137         if (!mem)
138                 return ERR_PTR(-ENOMEM);
139
140         mem->i915 = i915;
141         mem->region = (struct resource)DEFINE_RES_MEM(start, size);
142         mem->io_start = io_start;
143         mem->min_page_size = min_page_size;
144         mem->ops = ops;
145         mem->total = size;
146         mem->avail = mem->total;
147         mem->type = type;
148         mem->instance = instance;
149
150         mutex_init(&mem->objects.lock);
151         INIT_LIST_HEAD(&mem->objects.list);
152         INIT_LIST_HEAD(&mem->reserved);
153
154         mutex_init(&mem->mm_lock);
155
156         if (ops->init) {
157                 err = ops->init(mem);
158                 if (err)
159                         goto err_free;
160         }
161
162         kref_init(&mem->kref);
163         return mem;
164
165 err_free:
166         kfree(mem);
167         return ERR_PTR(err);
168 }
169
/*
 * Set the region's human-readable name (printf-style). Output is bounded
 * by sizeof(mem->name) and NUL-terminated by vsnprintf; longer names are
 * silently truncated.
 */
void intel_memory_region_set_name(struct intel_memory_region *mem,
				  const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vsnprintf(mem->name, sizeof(mem->name), fmt, ap);
	va_end(ap);
}
179
/*
 * Final-release callback for the region's kref (see
 * intel_memory_region_put()). Order matters: drop all reserved ranges
 * first, then let the backend release its state, then destroy the locks
 * and free the region itself.
 */
static void __intel_memory_region_destroy(struct kref *kref)
{
	struct intel_memory_region *mem =
		container_of(kref, typeof(*mem), kref);

	intel_memory_region_unreserve(mem);
	if (mem->ops->release)
		mem->ops->release(mem);

	mutex_destroy(&mem->mm_lock);
	mutex_destroy(&mem->objects.lock);
	kfree(mem);
}
193
/* Take an additional reference on @mem; returns @mem for call chaining. */
struct intel_memory_region *
intel_memory_region_get(struct intel_memory_region *mem)
{
	kref_get(&mem->kref);
	return mem;
}
200
/* Drop a reference; the last put destroys the region. */
void intel_memory_region_put(struct intel_memory_region *mem)
{
	kref_put(&mem->kref, __intel_memory_region_destroy);
}
205
206 /* Global memory region registration -- only slight layer inversions! */
207
/*
 * Probe and instantiate every memory region the platform advertises,
 * filling i915->mm.regions[]. On any setup failure, all regions created
 * so far are torn down and the error is returned.
 *
 * Return: 0 on success, negative error code on failure.
 */
int intel_memory_regions_hw_probe(struct drm_i915_private *i915)
{
	int err, i;

	for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
		struct intel_memory_region *mem = ERR_PTR(-ENODEV);
		u16 type, instance;

		/* Skip regions this platform does not advertise. */
		if (!HAS_REGION(i915, BIT(i)))
			continue;

		type = intel_region_map[i].class;
		instance = intel_region_map[i].instance;
		switch (type) {
		case INTEL_MEMORY_SYSTEM:
			mem = i915_gem_shmem_setup(i915, type, instance);
			break;
		case INTEL_MEMORY_STOLEN_LOCAL:
			/* Also cached as the single stolen region. */
			mem = i915_gem_stolen_lmem_setup(i915, type, instance);
			if (!IS_ERR(mem))
				i915->mm.stolen_region = mem;
			break;
		case INTEL_MEMORY_STOLEN_SYSTEM:
			mem = i915_gem_stolen_smem_setup(i915, type, instance);
			if (!IS_ERR(mem))
				i915->mm.stolen_region = mem;
			break;
		default:
			/*
			 * Other classes (e.g. INTEL_MEMORY_LOCAL) are set up
			 * elsewhere — presumably by the LMEM init path; left
			 * untouched here.
			 */
			continue;
		}

		if (IS_ERR(mem)) {
			err = PTR_ERR(mem);
			drm_err(&i915->drm,
				"Failed to setup region(%d) type=%d\n",
				err, type);
			goto out_cleanup;
		}

		mem->id = i;
		i915->mm.regions[i] = mem;
	}

	return 0;

out_cleanup:
	/* Drops every region created so far (regions[] entries are zeroed). */
	intel_memory_regions_driver_release(i915);
	return err;
}
257
258 void intel_memory_regions_driver_release(struct drm_i915_private *i915)
259 {
260         int i;
261
262         for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
263                 struct intel_memory_region *region =
264                         fetch_and_zero(&i915->mm.regions[i]);
265
266                 if (region)
267                         intel_memory_region_put(region);
268         }
269 }
270
271 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
272 #include "selftests/intel_memory_region.c"
273 #include "selftests/mock_region.c"
274 #endif
This page took 0.049107 seconds and 4 git commands to generate.