[linux.git] / drivers / gpu / drm / i915 / intel_memory_region.c
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "intel_memory_region.h"
#include "i915_drv.h"

/* XXX: Hysterical raisins. BIT(inst) needs to just be (inst) at some point. */
#define REGION_MAP(type, inst) \
        BIT((type) + INTEL_MEMORY_TYPE_SHIFT) | BIT(inst)

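/*
 * Map from region index (INTEL_REGION_*) to a packed identifier: the memory
 * type is stored as a bit above INTEL_MEMORY_TYPE_SHIFT and the instance as
 * a bit in the low half (see REGION_MAP above).
 */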
const u32 intel_region_map[] = {
        [INTEL_REGION_SMEM] = REGION_MAP(INTEL_MEMORY_SYSTEM, 0),
        [INTEL_REGION_LMEM] = REGION_MAP(INTEL_MEMORY_LOCAL, 0),
        [INTEL_REGION_STOLEN] = REGION_MAP(INTEL_MEMORY_STOLEN, 0),
};

static u64
intel_memory_region_free_pages(struct intel_memory_region *mem,
                               struct list_head *blocks)
{
        struct i915_buddy_block *block, *on;
        u64 size = 0;

        list_for_each_entry_safe(block, on, blocks, link) {
                size += i915_buddy_block_size(&mem->mm, block);
                i915_buddy_free(&mem->mm, block);
        }
        INIT_LIST_HEAD(blocks);

        return size;
}

void
__intel_memory_region_put_pages_buddy(struct intel_memory_region *mem,
                                      struct list_head *blocks)
{
        mutex_lock(&mem->mm_lock);
        intel_memory_region_free_pages(mem, blocks);
        mutex_unlock(&mem->mm_lock);
}

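/*
 * Free a single block by placing it on a temporary list and reusing the
 * list-based free path; the owning region is recovered from block->private.
 */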
void
__intel_memory_region_put_block_buddy(struct i915_buddy_block *block)
{
        struct list_head blocks;

        INIT_LIST_HEAD(&blocks);
        list_add(&block->link, &blocks);
        __intel_memory_region_put_pages_buddy(block->private, &blocks);
}

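/*
 * Satisfy a request by repeatedly taking the largest power-of-two buddy
 * block that still fits the remaining size, retrying at smaller orders on
 * failure but never dropping below min_order (which is raised below for
 * min-page-size and contiguous allocations). Allocated blocks are collected
 * on @blocks; on failure everything grabbed so far is handed back to the
 * allocator.
 */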
int
__intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
                                      resource_size_t size,
                                      unsigned int flags,
                                      struct list_head *blocks)
{
        unsigned int min_order = 0;
        unsigned long n_pages;

        GEM_BUG_ON(!IS_ALIGNED(size, mem->mm.chunk_size));
        GEM_BUG_ON(!list_empty(blocks));

        if (flags & I915_ALLOC_MIN_PAGE_SIZE) {
                min_order = ilog2(mem->min_page_size) -
                            ilog2(mem->mm.chunk_size);
        }

        if (flags & I915_ALLOC_CONTIGUOUS) {
                size = roundup_pow_of_two(size);
                min_order = ilog2(size) - ilog2(mem->mm.chunk_size);
        }

        n_pages = size >> ilog2(mem->mm.chunk_size);

        mutex_lock(&mem->mm_lock);

        do {
                struct i915_buddy_block *block;
                unsigned int order;

                order = fls(n_pages) - 1;
                GEM_BUG_ON(order > mem->mm.max_order);
                GEM_BUG_ON(order < min_order);

                do {
                        block = i915_buddy_alloc(&mem->mm, order);
                        if (!IS_ERR(block))
                                break;

                        if (order-- == min_order)
                                goto err_free_blocks;
                } while (1);

                n_pages -= BIT(order);

                block->private = mem;
                list_add(&block->link, blocks);

                if (!n_pages)
                        break;
        } while (1);

        mutex_unlock(&mem->mm_lock);
        return 0;

err_free_blocks:
        intel_memory_region_free_pages(mem, blocks);
        mutex_unlock(&mem->mm_lock);
        return -ENXIO;
}

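/*
 * Single-block convenience wrapper around the list-based interface:
 * allocate, detach the first block from the temporary list and return it
 * directly.
 */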
struct i915_buddy_block *
__intel_memory_region_get_block_buddy(struct intel_memory_region *mem,
                                      resource_size_t size,
                                      unsigned int flags)
{
        struct i915_buddy_block *block;
        LIST_HEAD(blocks);
        int ret;

        ret = __intel_memory_region_get_pages_buddy(mem, size, flags, &blocks);
        if (ret)
                return ERR_PTR(ret);

        block = list_first_entry(&blocks, typeof(*block), link);
        list_del_init(&block->link);
        return block;
}

int intel_memory_region_init_buddy(struct intel_memory_region *mem)
{
        return i915_buddy_init(&mem->mm, resource_size(&mem->region),
                               PAGE_SIZE);
}

void intel_memory_region_release_buddy(struct intel_memory_region *mem)
{
        i915_buddy_fini(&mem->mm);
}

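/*
 * Allocate and initialise a new region: record its placement and minimum
 * page size, set up the object list and buddy locks, and let the backend
 * run its own ops->init before the first reference is taken with kref_init.
 */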
struct intel_memory_region *
intel_memory_region_create(struct drm_i915_private *i915,
                           resource_size_t start,
                           resource_size_t size,
                           resource_size_t min_page_size,
                           resource_size_t io_start,
                           const struct intel_memory_region_ops *ops)
{
        struct intel_memory_region *mem;
        int err;

        mem = kzalloc(sizeof(*mem), GFP_KERNEL);
        if (!mem)
                return ERR_PTR(-ENOMEM);

        mem->i915 = i915;
        mem->region = (struct resource)DEFINE_RES_MEM(start, size);
        mem->io_start = io_start;
        mem->min_page_size = min_page_size;
        mem->ops = ops;

        mutex_init(&mem->objects.lock);
        INIT_LIST_HEAD(&mem->objects.list);
        INIT_LIST_HEAD(&mem->objects.purgeable);

        mutex_init(&mem->mm_lock);

        if (ops->init) {
                err = ops->init(mem);
                if (err)
                        goto err_free;
        }

        kref_init(&mem->kref);
        return mem;

err_free:
        kfree(mem);
        return ERR_PTR(err);
}

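/* Final kref release: let the backend clean up, then free the region. */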
static void __intel_memory_region_destroy(struct kref *kref)
{
        struct intel_memory_region *mem =
                container_of(kref, typeof(*mem), kref);

        if (mem->ops->release)
                mem->ops->release(mem);

        mutex_destroy(&mem->mm_lock);
        mutex_destroy(&mem->objects.lock);
        kfree(mem);
}

struct intel_memory_region *
intel_memory_region_get(struct intel_memory_region *mem)
{
        kref_get(&mem->kref);
        return mem;
}

void intel_memory_region_put(struct intel_memory_region *mem)
{
        kref_put(&mem->kref, __intel_memory_region_destroy);
}

/* Global memory region registration -- only slight layer inversions! */

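/*
 * Instantiate every region this platform actually has (HAS_REGION) via the
 * matching setup hook and record its packed id, type and instance. Any
 * failure unwinds the regions created so far.
 */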
int intel_memory_regions_hw_probe(struct drm_i915_private *i915)
{
        int err, i;

        for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
                struct intel_memory_region *mem = ERR_PTR(-ENODEV);
                u32 type;

                if (!HAS_REGION(i915, BIT(i)))
                        continue;

                type = MEMORY_TYPE_FROM_REGION(intel_region_map[i]);
                switch (type) {
                case INTEL_MEMORY_SYSTEM:
                        mem = i915_gem_shmem_setup(i915);
                        break;
                case INTEL_MEMORY_STOLEN:
                        mem = i915_gem_stolen_setup(i915);
                        break;
                case INTEL_MEMORY_LOCAL:
                        mem = intel_setup_fake_lmem(i915);
                        break;
                }

                if (IS_ERR(mem)) {
                        err = PTR_ERR(mem);
                        DRM_ERROR("Failed to setup region(%d) type=%d\n", err, type);
                        goto out_cleanup;
                }

                mem->id = intel_region_map[i];
                mem->type = type;
                mem->instance = MEMORY_INSTANCE_FROM_REGION(intel_region_map[i]);

                i915->mm.regions[i] = mem;
        }

        return 0;

out_cleanup:
        intel_memory_regions_driver_release(i915);
        return err;
}

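/*
 * Drop the driver's reference on each registered region (also used to
 * unwind a partially completed probe).
 */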
void intel_memory_regions_driver_release(struct drm_i915_private *i915)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
                struct intel_memory_region *region =
                        fetch_and_zero(&i915->mm.regions[i]);

                if (region)
                        intel_memory_region_put(region);
        }
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_memory_region.c"
#include "selftests/mock_region.c"
#endif