/*
 * Copyright 2011 Red Hat Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/* Algorithm:
 *
 * We store the last allocated bo in "hole"; we always try to allocate
 * after the last allocated bo. The principle is that in a linear GPU
 * ring progression, what comes after last is the oldest bo we allocated
 * and thus the first one that should no longer be in use by the GPU.
 *
 * If that is not the case, we skip over the bo after last to the closest
 * done bo, if one exists. If none exists and we are not asked to block,
 * we report failure to allocate.
 *
 * If we are asked to block, we wait on the oldest fence of each ring.
 * We just wait for any of those fences to complete.
 */

#include <drm/drmP.h>
#include "amdgpu.h"
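/*
 * Illustrative usage sketch (ours, not part of the original file): a
 * client such as an IB pool would typically drive the sub-allocator as
 *
 *	amdgpu_sa_bo_manager_init(adev, &mgr, pool_size, 256, domain);
 *	amdgpu_sa_bo_manager_start(adev, &mgr);
 *	r = amdgpu_sa_bo_new(adev, &mgr, &sa_bo, size, 256);
 *	...submit GPU work out of sa_bo, obtaining a fence...
 *	amdgpu_sa_bo_free(adev, &sa_bo, fence);
 *
 * pool_size, domain and the 256 byte alignment are placeholder values.
 */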
static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo);
static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager);
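/**
 * amdgpu_sa_bo_manager_init - initialize a sub-allocation manager
 *
 * @adev: amdgpu device pointer
 * @sa_manager: manager to initialize
 * @size: total size of the managed buffer in bytes
 * @align: maximum alignment callers may request
 * @domain: memory domain to place the backing bo in
 *
 * Sets up the bookkeeping lists and the wait queue, then allocates the
 * single backing bo that all sub-allocations are carved out of.
 */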
int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
                              struct amdgpu_sa_manager *sa_manager,
                              unsigned size, u32 align, u32 domain)
{
        int i, r;

        init_waitqueue_head(&sa_manager->wq);
        sa_manager->bo = NULL;
        sa_manager->size = size;
        sa_manager->domain = domain;
        sa_manager->align = align;
        sa_manager->hole = &sa_manager->olist;
        INIT_LIST_HEAD(&sa_manager->olist);
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                INIT_LIST_HEAD(&sa_manager->flist[i]);
        }

        r = amdgpu_bo_create(adev, size, align, true, domain,
                             0, NULL, NULL, &sa_manager->bo);
        if (r) {
                dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r);
                return r;
        }

        return 0;
}
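/**
 * amdgpu_sa_bo_manager_fini - tear down a sub-allocation manager
 *
 * @adev: amdgpu device pointer
 * @sa_manager: manager to tear down
 *
 * Frees any remaining sub-allocations (warning if some are still in
 * use) and drops the reference on the backing bo.
 */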
void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
                               struct amdgpu_sa_manager *sa_manager)
{
        struct amdgpu_sa_bo *sa_bo, *tmp;

        if (!list_empty(&sa_manager->olist)) {
                sa_manager->hole = &sa_manager->olist;
                amdgpu_sa_bo_try_free(sa_manager);
                if (!list_empty(&sa_manager->olist))
                        dev_err(adev->dev, "sa_manager is not empty, clearing anyway\n");
        }
        list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) {
                amdgpu_sa_bo_remove_locked(sa_bo);
        }
        amdgpu_bo_unref(&sa_manager->bo);
        sa_manager->size = 0;
}
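/**
 * amdgpu_sa_bo_manager_start - pin and map the backing bo
 *
 * @adev: amdgpu device pointer
 * @sa_manager: manager whose bo should be made ready for use
 *
 * Pins the backing bo into its domain and maps it for CPU access so
 * sub-allocations can be written to directly.
 */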
int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
                               struct amdgpu_sa_manager *sa_manager)
{
        int r;

        if (sa_manager->bo == NULL) {
                dev_err(adev->dev, "no bo for sa manager\n");
                return -EINVAL;
        }
        r = amdgpu_bo_reserve(sa_manager->bo, false);
        if (r) {
                dev_err(adev->dev, "(%d) failed to reserve manager bo\n", r);
                return r;
        }
        r = amdgpu_bo_pin(sa_manager->bo, sa_manager->domain, &sa_manager->gpu_addr);
        if (r) {
                amdgpu_bo_unreserve(sa_manager->bo);
                dev_err(adev->dev, "(%d) failed to pin manager bo\n", r);
                return r;
        }
        r = amdgpu_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
        amdgpu_bo_unreserve(sa_manager->bo);
        return r;
}
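/**
 * amdgpu_sa_bo_manager_suspend - unpin and unmap the backing bo
 *
 * @adev: amdgpu device pointer
 * @sa_manager: manager whose bo should be released
 */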
int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev,
                                 struct amdgpu_sa_manager *sa_manager)
{
        int r;

        if (sa_manager->bo == NULL) {
                dev_err(adev->dev, "no bo for sa manager\n");
                return -EINVAL;
        }
        r = amdgpu_bo_reserve(sa_manager->bo, false);
        if (!r) {
                amdgpu_bo_kunmap(sa_manager->bo);
                amdgpu_bo_unpin(sa_manager->bo);
                amdgpu_bo_unreserve(sa_manager->bo);
        }
        return r;
}
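/**
 * amdgpu_sa_get_ring_from_fence - look up the ring index behind a fence
 *
 * @f: fence to inspect
 *
 * Handles both scheduler fences and plain amdgpu fences.
 */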
static uint32_t amdgpu_sa_get_ring_from_fence(struct fence *f)
{
        struct amdgpu_fence *a_fence;
        struct amd_sched_fence *s_fence;

        s_fence = to_amd_sched_fence(f);
        if (s_fence) {
                struct amdgpu_ring *ring;

                ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
                return ring->idx;
        }
        a_fence = to_amdgpu_fence(f);
        if (a_fence)
                return a_fence->ring->idx;
        return 0;
}
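/**
 * amdgpu_sa_bo_remove_locked - unlink and free a sub-allocation
 *
 * @sa_bo: sub-allocation to remove
 *
 * Caller must hold the manager's wq.lock. Moves the hole pointer back
 * if it rested on this bo, drops the fence reference and frees the
 * entry.
 */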
static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo)
{
        struct amdgpu_sa_manager *sa_manager = sa_bo->manager;

        if (sa_manager->hole == &sa_bo->olist)
                sa_manager->hole = sa_bo->olist.prev;

        list_del_init(&sa_bo->olist);
        list_del_init(&sa_bo->flist);
        fence_put(sa_bo->fence);
        kfree(sa_bo);
}
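/**
 * amdgpu_sa_bo_try_free - reclaim finished allocations behind the hole
 *
 * @sa_manager: manager to clean up
 *
 * Walks forward from the hole and removes every bo whose fence has
 * already signaled, stopping at the first one still in flight.
 */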
static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager)
{
        struct amdgpu_sa_bo *sa_bo, *tmp;

        if (sa_manager->hole->next == &sa_manager->olist)
                return;

        sa_bo = list_entry(sa_manager->hole->next, struct amdgpu_sa_bo, olist);
        list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
                if (sa_bo->fence == NULL ||
                    !fence_is_signaled(sa_bo->fence))
                        return;

                amdgpu_sa_bo_remove_locked(sa_bo);
        }
}
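/**
 * amdgpu_sa_bo_hole_soffset - start offset of the current hole
 *
 * The hole starts where the bo the hole pointer rests on ends; if the
 * hole points at the list head, the free space starts at offset 0.
 */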
static inline unsigned amdgpu_sa_bo_hole_soffset(struct amdgpu_sa_manager *sa_manager)
{
        struct list_head *hole = sa_manager->hole;

        if (hole != &sa_manager->olist)
                return list_entry(hole, struct amdgpu_sa_bo, olist)->eoffset;

        return 0;
}
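/**
 * amdgpu_sa_bo_hole_eoffset - end offset of the current hole
 *
 * The hole ends where the next bo in list order starts, or at the end
 * of the buffer when the hole is the last element.
 */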
static inline unsigned amdgpu_sa_bo_hole_eoffset(struct amdgpu_sa_manager *sa_manager)
{
        struct list_head *hole = sa_manager->hole;

        if (hole->next != &sa_manager->olist)
                return list_entry(hole->next, struct amdgpu_sa_bo, olist)->soffset;

        return sa_manager->size;
}
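/**
 * amdgpu_sa_bo_try_alloc - try to carve @size bytes out of the hole
 *
 * @sa_manager: manager to allocate from
 * @sa_bo: entry to fill in on success
 * @size: number of bytes to allocate
 * @align: required start alignment
 *
 * Worked example (ours, for illustration): with soffset = 300 and
 * align = 256, wasted = (256 - 300 % 256) % 256 = 212, so the
 * allocation starts at the aligned offset 512 and the hole must hold
 * size + 212 bytes.
 */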
static bool amdgpu_sa_bo_try_alloc(struct amdgpu_sa_manager *sa_manager,
                                   struct amdgpu_sa_bo *sa_bo,
                                   unsigned size, unsigned align)
{
        unsigned soffset, eoffset, wasted;

        soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
        eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager);
        wasted = (align - (soffset % align)) % align;

        if ((eoffset - soffset) >= (size + wasted)) {
                soffset += wasted;

                sa_bo->manager = sa_manager;
                sa_bo->soffset = soffset;
                sa_bo->eoffset = soffset + size;
                list_add(&sa_bo->olist, sa_manager->hole);
                INIT_LIST_HEAD(&sa_bo->flist);
                sa_manager->hole = &sa_bo->olist;
                return true;
        }
        return false;
}
/**
 * amdgpu_sa_event - Check if we can stop waiting
 *
 * @sa_manager: pointer to the sa_manager
 * @size: number of bytes we want to allocate
 * @align: alignment we need to match
 *
 * Check if either there is a fence we can wait for or
 * enough free memory to satisfy the allocation directly.
 */
static bool amdgpu_sa_event(struct amdgpu_sa_manager *sa_manager,
                            unsigned size, unsigned align)
{
        unsigned soffset, eoffset, wasted;
        int i;

        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                if (!list_empty(&sa_manager->flist[i]))
                        return true;
        }

        soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
        eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager);
        wasted = (align - (soffset % align)) % align;

        return (eoffset - soffset) >= (size + wasted);
}
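/**
 * amdgpu_sa_bo_next_hole - advance the hole past finished allocations
 *
 * @sa_manager: manager to scan
 * @fences: per-ring array, filled with the fence to wait on when a
 *          ring's oldest allocation has not signaled yet
 * @tries: per-ring count of how often that ring was skipped to
 *
 * Returns true if the hole was moved (so allocation is worth
 * retrying), false if nothing more can be reclaimed without waiting.
 */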
static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
                                   struct fence **fences,
                                   unsigned *tries)
{
        struct amdgpu_sa_bo *best_bo = NULL;
        unsigned i, soffset, best, tmp;

        /* if hole points to the end of the buffer */
        if (sa_manager->hole->next == &sa_manager->olist) {
                /* try again with its beginning */
                sa_manager->hole = &sa_manager->olist;
                return true;
        }

        soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
        /* to handle wrap around we add sa_manager->size */
        best = sa_manager->size * 2;
        /* go over all fence lists and try to find the closest sa_bo
         * after the current last
         */
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                struct amdgpu_sa_bo *sa_bo;

                if (list_empty(&sa_manager->flist[i]))
                        continue;

                sa_bo = list_first_entry(&sa_manager->flist[i],
                                         struct amdgpu_sa_bo, flist);

                if (!fence_is_signaled(sa_bo->fence)) {
                        fences[i] = sa_bo->fence;
                        continue;
                }

                /* limit the number of tries each ring gets */
                if (tries[i] > 2)
                        continue;

                tmp = sa_bo->soffset;
                if (tmp < soffset) {
                        /* wrap around, pretend it's after */
                        tmp += sa_manager->size;
                }
                tmp -= soffset;
                if (tmp < best) {
                        /* this sa bo is the closest one */
                        best = tmp;
                        best_bo = sa_bo;
                }
        }

        if (best_bo) {
                uint32_t idx = amdgpu_sa_get_ring_from_fence(best_bo->fence);

                ++tries[idx];
                sa_manager->hole = best_bo->olist.prev;

                /* we know that this one is signaled,
                 * so it's safe to remove it
                 */
                amdgpu_sa_bo_remove_locked(best_bo);
                return true;
        }
        return false;
}
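/**
 * amdgpu_sa_bo_new - allocate a new sub-allocation
 *
 * @adev: amdgpu device pointer
 * @sa_manager: manager to allocate from
 * @sa_bo: filled with the new allocation on success
 * @size: number of bytes to allocate
 * @align: required alignment, must not exceed the manager's align
 *
 * Tries the fast path first, then reclaims finished allocations, and
 * finally blocks on the oldest fences until space becomes available.
 */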
int amdgpu_sa_bo_new(struct amdgpu_device *adev,
                     struct amdgpu_sa_manager *sa_manager,
                     struct amdgpu_sa_bo **sa_bo,
                     unsigned size, unsigned align)
{
        struct fence *fences[AMDGPU_MAX_RINGS];
        unsigned tries[AMDGPU_MAX_RINGS];
        unsigned i;
        signed long t;
        int r;

        BUG_ON(align > sa_manager->align);
        BUG_ON(size > sa_manager->size);

        *sa_bo = kmalloc(sizeof(struct amdgpu_sa_bo), GFP_KERNEL);
        if ((*sa_bo) == NULL)
                return -ENOMEM;

        (*sa_bo)->manager = sa_manager;
        (*sa_bo)->fence = NULL;
        INIT_LIST_HEAD(&(*sa_bo)->olist);
        INIT_LIST_HEAD(&(*sa_bo)->flist);

        spin_lock(&sa_manager->wq.lock);
        do {
                for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                        fences[i] = NULL;
                        tries[i] = 0;
                }

                do {
                        amdgpu_sa_bo_try_free(sa_manager);

                        if (amdgpu_sa_bo_try_alloc(sa_manager, *sa_bo,
                                                   size, align)) {
                                spin_unlock(&sa_manager->wq.lock);
                                return 0;
                        }

                        /* see if we can skip over some allocations */
                } while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries));

                spin_unlock(&sa_manager->wq.lock);
                t = amdgpu_fence_wait_any(adev, fences, AMDGPU_MAX_RINGS,
                                          false, MAX_SCHEDULE_TIMEOUT);
                r = (t > 0) ? 0 : t;
                spin_lock(&sa_manager->wq.lock);
                /* if we have nothing to wait for, block */
                if (r == -ENOENT) {
                        r = wait_event_interruptible_locked(
                                sa_manager->wq,
                                amdgpu_sa_event(sa_manager, size, align));
                }
        } while (!r);

        spin_unlock(&sa_manager->wq.lock);
        kfree(*sa_bo);
        *sa_bo = NULL;
        return r;
}
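/**
 * amdgpu_sa_bo_free - free a sub-allocation
 *
 * @adev: amdgpu device pointer
 * @sa_bo: allocation to free, set to NULL on return
 * @fence: fence protecting the allocation, or NULL if idle
 *
 * If the fence has not signaled yet, the allocation is parked on the
 * ring's fence list and reclaimed later; otherwise it is freed now.
 */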
void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
                       struct fence *fence)
{
        struct amdgpu_sa_manager *sa_manager;

        if (sa_bo == NULL || *sa_bo == NULL)
                return;

        sa_manager = (*sa_bo)->manager;
        spin_lock(&sa_manager->wq.lock);
        if (fence && !fence_is_signaled(fence)) {
                uint32_t idx;

                (*sa_bo)->fence = fence_get(fence);
                idx = amdgpu_sa_get_ring_from_fence(fence);
                list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[idx]);
        } else {
                amdgpu_sa_bo_remove_locked(*sa_bo);
        }
        wake_up_all_locked(&sa_manager->wq);
        spin_unlock(&sa_manager->wq.lock);
        *sa_bo = NULL;
}
#if defined(CONFIG_DEBUG_FS)

static void amdgpu_sa_bo_dump_fence(struct fence *fence, struct seq_file *m)
{
        struct amdgpu_fence *a_fence = to_amdgpu_fence(fence);
        struct amd_sched_fence *s_fence = to_amd_sched_fence(fence);

        if (a_fence)
                seq_printf(m, " protected by 0x%016llx on ring %d",
                           a_fence->seq, a_fence->ring->idx);

        if (s_fence) {
                struct amdgpu_ring *ring;

                ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
                seq_printf(m, " protected by 0x%016x on ring %d",
                           s_fence->base.seqno, ring->idx);
        }
}
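/**
 * amdgpu_sa_bo_dump_debug_info - print the allocation list to debugfs
 *
 * @sa_manager: manager to dump
 * @m: seq_file to print into
 *
 * Emits one line per allocation; the entry the hole pointer rests on
 * is marked with '>'.
 */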
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
                                  struct seq_file *m)
{
        struct amdgpu_sa_bo *i;

        spin_lock(&sa_manager->wq.lock);
        list_for_each_entry(i, &sa_manager->olist, olist) {
                uint64_t soffset = i->soffset + sa_manager->gpu_addr;
                uint64_t eoffset = i->eoffset + sa_manager->gpu_addr;

                if (&i->olist == sa_manager->hole)
                        seq_printf(m, ">");
                else
                        seq_printf(m, " ");

                seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
                           soffset, eoffset, eoffset - soffset);
                if (i->fence)
                        amdgpu_sa_bo_dump_fence(i->fence, m);
                seq_printf(m, "\n");
        }
        spin_unlock(&sa_manager->wq.lock);
}
#endif