/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
24 #ifndef _XE_RES_CURSOR_H_
25 #define _XE_RES_CURSOR_H_
27 #include <linux/scatterlist.h>
29 #include <drm/drm_mm.h>
30 #include <drm/ttm/ttm_placement.h>
31 #include <drm/ttm/ttm_range_manager.h>
32 #include <drm/ttm/ttm_resource.h>
33 #include <drm/ttm/ttm_tt.h>
36 #include "xe_device.h"
37 #include "xe_macros.h"
38 #include "xe_ttm_vram_mgr.h"
40 /* state back for walking over vram_mgr, stolen_mgr, and gtt_mgr allocations */
41 struct xe_res_cursor {
47 struct scatterlist *sgl;
51 static struct drm_buddy *xe_res_get_buddy(struct ttm_resource *res)
53 struct ttm_resource_manager *mgr;
55 mgr = ttm_manager_type(res->bo->bdev, res->mem_type);
56 return &to_xe_ttm_vram_mgr(mgr)->mm;
60 * xe_res_first - initialize a xe_res_cursor
62 * @res: TTM resource object to walk
63 * @start: Start of the range
64 * @size: Size of the range
65 * @cur: cursor object to initialize
67 * Start walking over the range of allocations between @start and @size.
69 static inline void xe_res_first(struct ttm_resource *res,
71 struct xe_res_cursor *cur)
77 XE_WARN_ON(start + size > res->size);
79 cur->mem_type = res->mem_type;
81 switch (cur->mem_type) {
85 struct drm_buddy_block *block;
86 struct list_head *head, *next;
87 struct drm_buddy *mm = xe_res_get_buddy(res);
89 head = &to_xe_ttm_vram_mgr_resource(res)->blocks;
91 block = list_first_entry_or_null(head,
92 struct drm_buddy_block,
97 while (start >= drm_buddy_block_size(mm, block)) {
98 start -= drm_buddy_block_size(mm, block);
100 next = block->link.next;
102 block = list_entry(next, struct drm_buddy_block,
107 cur->start = drm_buddy_block_offset(block) + start;
108 cur->size = min(drm_buddy_block_size(mm, block) - start,
110 cur->remaining = size;
123 cur->remaining = size;
125 cur->mem_type = XE_PL_TT;
126 XE_WARN_ON(res && start + size > res->size);
129 static inline void __xe_res_sg_next(struct xe_res_cursor *cur)
131 struct scatterlist *sgl = cur->sgl;
132 u64 start = cur->start;
134 while (start >= sg_dma_len(sgl)) {
135 start -= sg_dma_len(sgl);
141 cur->size = sg_dma_len(sgl) - start;
146 * xe_res_first_sg - initialize a xe_res_cursor with a scatter gather table
148 * @sg: scatter gather table to walk
149 * @start: Start of the range
150 * @size: Size of the range
151 * @cur: cursor object to initialize
153 * Start walking over the range of allocations between @start and @size.
155 static inline void xe_res_first_sg(const struct sg_table *sg,
157 struct xe_res_cursor *cur)
160 XE_WARN_ON(!IS_ALIGNED(start, PAGE_SIZE) ||
161 !IS_ALIGNED(size, PAGE_SIZE));
164 cur->remaining = size;
167 cur->mem_type = XE_PL_TT;
168 __xe_res_sg_next(cur);
172 * xe_res_next - advance the cursor
174 * @cur: the cursor to advance
175 * @size: number of bytes to move forward
177 * Move the cursor @size bytes forwrad, walking to the next node if necessary.
179 static inline void xe_res_next(struct xe_res_cursor *cur, u64 size)
181 struct drm_buddy_block *block;
182 struct list_head *next;
185 XE_WARN_ON(size > cur->remaining);
187 cur->remaining -= size;
191 if (cur->size > size) {
199 __xe_res_sg_next(cur);
203 switch (cur->mem_type) {
207 start = size - cur->size;
210 next = block->link.next;
211 block = list_entry(next, struct drm_buddy_block, link);
214 while (start >= drm_buddy_block_size(cur->mm, block)) {
215 start -= drm_buddy_block_size(cur->mm, block);
217 next = block->link.next;
218 block = list_entry(next, struct drm_buddy_block, link);
221 cur->start = drm_buddy_block_offset(block) + start;
222 cur->size = min(drm_buddy_block_size(cur->mm, block) - start,
232 * xe_res_dma - return dma address of cursor at current position
234 * @cur: the cursor to return the dma address from
236 static inline u64 xe_res_dma(const struct xe_res_cursor *cur)
238 return cur->sgl ? sg_dma_address(cur->sgl) + cur->start : cur->start;