/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */
#include <linux/highmem.h>
#include <linux/shmem_fs.h>
#include <linux/swap.h>

#include <drm/drm_cache.h>

#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_gem_object_frontbuffer.h"
#include "i915_gem_region.h"
#include "i915_gem_tiling.h"
#include "i915_scatterlist.h"

static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
        struct address_space *mapping = obj->base.filp->f_mapping;
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct scatterlist *sg;
        struct sg_table *st;
        dma_addr_t dma;
        void *vaddr;
        void *dst;
        int i;

        /* Contiguous chunk, with a single scatterlist element */
        if (overflows_type(obj->base.size, sg->length))
                return -E2BIG;

        if (GEM_WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
                return -EINVAL;

        /*
         * Always aligning to the object size allows a single allocation
         * to handle all possible callers, and given typical object sizes,
         * the alignment of the buddy allocation will naturally match.
         */
        vaddr = dma_alloc_coherent(obj->base.dev->dev,
                                   roundup_pow_of_two(obj->base.size),
                                   &dma, GFP_KERNEL);
        if (!vaddr)
                return -ENOMEM;

        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (!st)
                goto err_pci;

        if (sg_alloc_table(st, 1, GFP_KERNEL))
                goto err_st;

        sg = st->sgl;
        sg->offset = 0;
        sg->length = obj->base.size;

        sg_assign_page(sg, (struct page *)vaddr);
        sg_dma_address(sg) = dma;
        sg_dma_len(sg) = obj->base.size;

        dst = vaddr;
        for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
                struct page *page;
                void *src;

                page = shmem_read_mapping_page(mapping, i);
                if (IS_ERR(page))
                        goto err_st;

                src = kmap_atomic(page);
                memcpy(dst, src, PAGE_SIZE);
                drm_clflush_virt_range(dst, PAGE_SIZE);
                kunmap_atomic(src);

                put_page(page);
                dst += PAGE_SIZE;
        }

        intel_gt_chipset_flush(to_gt(i915));

        /* We're no longer struct page backed */
        obj->mem_flags &= ~I915_BO_FLAG_STRUCT_PAGE;
        __i915_gem_object_set_pages(obj, st);

        return 0;

err_st:
        kfree(st);
err_pci:
        dma_free_coherent(obj->base.dev->dev,
                          roundup_pow_of_two(obj->base.size),
                          vaddr, dma);
        return -ENOMEM;
}

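/*
 * Tear down the phys backing store: if the object is dirty, write the
 * contents back into the shmem pages, then free the sg_table and the
 * DMA-coherent allocation.
 */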
void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
                               struct sg_table *pages)
{
        dma_addr_t dma = sg_dma_address(pages->sgl);
        void *vaddr = sg_page(pages->sgl);

        __i915_gem_object_release_shmem(obj, pages, false);

        if (obj->mm.dirty) {
                struct address_space *mapping = obj->base.filp->f_mapping;
                void *src = vaddr;
                int i;

                for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
                        struct page *page;
                        char *dst;

                        page = shmem_read_mapping_page(mapping, i);
                        if (IS_ERR(page))
                                continue;

                        dst = kmap_atomic(page);
                        drm_clflush_virt_range(src, PAGE_SIZE);
                        memcpy(dst, src, PAGE_SIZE);
                        kunmap_atomic(dst);

                        set_page_dirty(page);
                        if (obj->mm.madv == I915_MADV_WILLNEED)
                                mark_page_accessed(page);
                        put_page(page);

                        src += PAGE_SIZE;
                }
                obj->mm.dirty = false;
        }

        sg_free_table(pages);
        kfree(pages);

        dma_free_coherent(obj->base.dev->dev,
                          roundup_pow_of_two(obj->base.size),
                          vaddr, dma);
}

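/*
 * pwrite for phys objects: copy from userspace directly into the contiguous
 * buffer, then clflush and flush the chipset while keeping frontbuffer
 * tracking up to date.
 */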
int i915_gem_object_pwrite_phys(struct drm_i915_gem_object *obj,
                                const struct drm_i915_gem_pwrite *args)
{
        void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
        char __user *user_data = u64_to_user_ptr(args->data_ptr);
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        int err;

        err = i915_gem_object_wait(obj,
                                   I915_WAIT_INTERRUPTIBLE |
                                   I915_WAIT_ALL,
                                   MAX_SCHEDULE_TIMEOUT);
        if (err)
                return err;

        /*
         * We manually control the domain here and pretend that it
         * remains coherent i.e. in the GTT domain, like shmem_pwrite.
         */
        i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);

        if (copy_from_user(vaddr, user_data, args->size))
                return -EFAULT;

        drm_clflush_virt_range(vaddr, args->size);
        intel_gt_chipset_flush(to_gt(i915));

        i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
        return 0;
}

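/* pread for phys objects: clflush the range and copy it out to userspace. */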
int i915_gem_object_pread_phys(struct drm_i915_gem_object *obj,
                               const struct drm_i915_gem_pread *args)
{
        void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
        char __user *user_data = u64_to_user_ptr(args->data_ptr);
        int err;

        err = i915_gem_object_wait(obj,
                                   I915_WAIT_INTERRUPTIBLE,
                                   MAX_SCHEDULE_TIMEOUT);
        if (err)
                return err;

        drm_clflush_virt_range(vaddr, args->size);
        if (copy_to_user(user_data, vaddr, args->size))
                return -EFAULT;

        return 0;
}

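/*
 * Migrate the object from its shmem page set to the phys page set; on
 * failure the original shmem pages are restored, on success they are
 * released and the object drops its memory region.
 */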
static int i915_gem_object_shmem_to_phys(struct drm_i915_gem_object *obj)
{
        struct sg_table *pages;
        int err;

        pages = __i915_gem_object_unset_pages(obj);

        err = i915_gem_object_get_pages_phys(obj);
        if (err)
                goto err_xfer;

        /* Perma-pin (until release) the physical set of pages */
        __i915_gem_object_pin_pages(obj);

        if (!IS_ERR_OR_NULL(pages))
                i915_gem_object_put_pages_shmem(obj, pages);

        i915_gem_object_release_memory_region(obj);
        return 0;

err_xfer:
        if (!IS_ERR_OR_NULL(pages))
                __i915_gem_object_set_pages(obj, pages);
        return err;
}

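/*
 * Attach a physically contiguous backing store to an idle, unpinned shmem
 * object, e.g. for hardware that consumes a single physical address. The
 * phys pages remain pinned until the object is released.
 */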
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
{
        int err;

        assert_object_held(obj);

        if (align > obj->base.size)
                return -EINVAL;

        if (!i915_gem_object_is_shmem(obj))
                return -EINVAL;

        if (!i915_gem_object_has_struct_page(obj))
                return 0;

        err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
        if (err)
                return err;

        if (obj->mm.madv != I915_MADV_WILLNEED)
                return -EFAULT;

        if (i915_gem_object_has_tiling_quirk(obj))
                return -EFAULT;

        if (obj->mm.mapping || i915_gem_object_has_pinned_pages(obj))
                return -EBUSY;

        if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
                drm_dbg(obj->base.dev,
                        "Attempting to obtain a purgeable object\n");
                return -EFAULT;
        }

        return i915_gem_object_shmem_to_phys(obj);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_phys.c"
#endif