2 * SPDX-License-Identifier: MIT
4 * Copyright © 2016 Intel Corporation
7 #include "i915_scatterlist.h"
8 #include "i915_ttm_buddy_manager.h"
10 #include <drm/drm_buddy.h>
11 #include <drm/drm_mm.h>
13 #include <linux/slab.h>
/*
 * i915_sg_trim - Rebuild an sg_table so it holds exactly the @nents entries
 * in use, releasing the slack left over from the original @orig_nents
 * allocation. On success the trimmed table replaces @orig_st in place.
 *
 * NOTE(review): interior lines (braces, return statements) are elided in
 * this view — presumably returns false on the early-out and on allocation
 * failure, true after the swap; confirm against the full file.
 */
15 bool i915_sg_trim(struct sg_table *orig_st)
17 struct sg_table new_st;
18 struct scatterlist *sg, *new_sg;

/* Nothing to trim if every allocated entry is in use. */
21 if (orig_st->nents == orig_st->orig_nents)

/* Allocate a replacement table sized exactly for the used entries. */
24 if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL | __GFP_NOWARN))

/* Copy each used entry: backing page, DMA address and DMA length. */
28 for_each_sg(orig_st->sgl, sg, orig_st->nents, i) {
29 sg_set_page(new_sg, sg_page(sg), sg->length, 0);
30 sg_dma_address(new_sg) = sg_dma_address(sg);
31 sg_dma_len(new_sg) = sg_dma_len(sg);

33 new_sg = sg_next(new_sg);

35 GEM_BUG_ON(new_sg); /* Should walk exactly nents and hit the end */

/* Drop the oversized original; the trimmed copy takes its place. */
37 sg_free_table(orig_st);
/*
 * kref release callback for a struct i915_refct_sgt: invoked when the last
 * reference is put, it tears down the embedded sg_table.
 * NOTE(review): the matching kfree() of @rsgt is outside this view — confirm
 * it follows sg_free_table() in the full file.
 */
43 static void i915_refct_sgt_release(struct kref *ref)
45 struct i915_refct_sgt *rsgt =
46 container_of(ref, typeof(*rsgt), kref);
48 sg_free_table(&rsgt->table);
/* Default ops for refcounted sg-tables: free via i915_refct_sgt_release. */
52 static const struct i915_refct_sgt_ops rsgt_ops = {
53 .release = i915_refct_sgt_release
57 * i915_refct_sgt_init - Initialize a struct i915_refct_sgt with default ops
58 * @rsgt: The struct i915_refct_sgt to initialize.
59 * @size: The size of the underlying memory buffer.
61 void i915_refct_sgt_init(struct i915_refct_sgt *rsgt, size_t size)
/* Thin wrapper: delegate to __i915_refct_sgt_init with the default ops. */
63 __i915_refct_sgt_init(rsgt, size, &rsgt_ops);
67 * i915_rsgt_from_mm_node - Create a refcounted sg_table from a struct
69 * @node: The drm_mm_node.
70 * @region_start: An offset to add to the dma addresses of the sg list.
71 * @page_alignment: Required page alignment for each sg entry. Power of two.
73 * Create a struct sg_table, initializing it from a struct drm_mm_node,
74 * taking a maximum segment length into account, splitting into segments
77 * Return: A pointer to a kmalloced struct i915_refct_sgt on success, negative
78 * error code cast to an error pointer on failure.
80 struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
/* Largest sg entry we may emit, rounded down to the required alignment. */
84 const u32 max_segment = round_down(UINT_MAX, page_alignment);
85 const u32 segment_pages = max_segment >> PAGE_SHIFT;
86 u64 block_size, offset, prev_end;
87 struct i915_refct_sgt *rsgt;
89 struct scatterlist *sg;

/* page_alignment must be a non-zero power of two for this to hold. */
91 GEM_BUG_ON(!max_segment);

93 rsgt = kmalloc(sizeof(*rsgt), GFP_KERNEL | __GFP_NOWARN);
95 return ERR_PTR(-ENOMEM);

/* node->size is in pages; the refct_sgt tracks bytes. */
97 i915_refct_sgt_init(rsgt, node->size << PAGE_SHIFT);

99 /* restricted by sg_alloc_table */
100 if (WARN_ON(overflows_type(DIV_ROUND_UP_ULL(node->size, segment_pages),
102 i915_refct_sgt_put(rsgt);
103 return ERR_PTR(-E2BIG);

/* Worst case: one entry per max_segment-sized chunk of the node. */
106 if (sg_alloc_table(st, DIV_ROUND_UP_ULL(node->size, segment_pages),
107 GFP_KERNEL | __GFP_NOWARN)) {
108 i915_refct_sgt_put(rsgt);
109 return ERR_PTR(-ENOMEM);

/* Sentinel end offset so the first iteration always opens a new entry. */
114 prev_end = (resource_size_t)-1;
/* node->size/start are page counts/indices; convert to byte units. */
115 block_size = node->size << PAGE_SHIFT;
116 offset = node->start << PAGE_SHIFT;

/* Start a new sg entry when non-contiguous or the current one is full. */
121 if (offset != prev_end || sg->length >= max_segment) {

/* DMA address is the region-relative offset shifted by @region_start. */
125 sg_dma_address(sg) = region_start + offset;
126 GEM_BUG_ON(!IS_ALIGNED(sg_dma_address(sg),

/* Grow the current entry without ever exceeding max_segment. */
133 len = min_t(u64, block_size, max_segment - sg->length);
135 sg_dma_len(sg) += len;
150 * i915_rsgt_from_buddy_resource - Create a refcounted sg_table from a struct
151 * drm_buddy_block list
152 * @res: The struct i915_ttm_buddy_resource.
153 * @region_start: An offset to add to the dma addresses of the sg list.
154 * @page_alignment: Required page alignment for each sg entry. Power of two.
156 * Create a struct sg_table, initializing it from a struct drm_buddy_block list,
157 * taking a maximum segment length into account, splitting into segments
160 * Return: A pointer to a kmalloced struct i915_refct_sgt on success, negative
161 * error code cast to an error pointer on failure.
163 struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res,
167 struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);
168 const u64 size = res->size;
/* Largest sg entry we may emit, rounded down to the required alignment. */
169 const u32 max_segment = round_down(UINT_MAX, page_alignment);
170 struct drm_buddy *mm = bman_res->mm;
171 struct list_head *blocks = &bman_res->blocks;
172 struct drm_buddy_block *block;
173 struct i915_refct_sgt *rsgt;
174 struct scatterlist *sg;
176 resource_size_t prev_end;

/* A buddy resource always carries at least one block. */
178 GEM_BUG_ON(list_empty(blocks));
/* page_alignment must be a non-zero power of two for this to hold. */
179 GEM_BUG_ON(!max_segment);

181 rsgt = kmalloc(sizeof(*rsgt), GFP_KERNEL | __GFP_NOWARN);
183 return ERR_PTR(-ENOMEM);

185 i915_refct_sgt_init(rsgt, size);

187 /* restricted by sg_alloc_table */
188 if (WARN_ON(overflows_type(PFN_UP(res->size), unsigned int))) {
189 i915_refct_sgt_put(rsgt);
190 return ERR_PTR(-E2BIG);

/* Worst case: one sg entry per page of the resource. */
193 if (sg_alloc_table(st, PFN_UP(res->size), GFP_KERNEL | __GFP_NOWARN)) {
194 i915_refct_sgt_put(rsgt);
195 return ERR_PTR(-ENOMEM);

/* Sentinel end offset so the first block always opens a new entry. */
200 prev_end = (resource_size_t)-1;

202 list_for_each_entry(block, blocks, link) {
203 u64 block_size, offset;

/* Clamp the final block so we never exceed the resource size. */
205 block_size = min_t(u64, size, drm_buddy_block_size(mm, block));
206 offset = drm_buddy_block_offset(block);

/* Start a new sg entry when non-contiguous or the current one is full. */
211 if (offset != prev_end || sg->length >= max_segment) {

/* DMA address is the block offset shifted by @region_start. */
215 sg_dma_address(sg) = region_start + offset;
216 GEM_BUG_ON(!IS_ALIGNED(sg_dma_address(sg),

/* Grow the current entry without ever exceeding max_segment. */
223 len = min_t(u64, block_size, max_segment - sg->length);
225 sg_dma_len(sg) += len;
240 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
241 #include "selftests/scatterlist.c"