/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#include <drm/drm_cache.h>

#include "i915_config.h"
#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_gem_object_frontbuffer.h"
#include "i915_sw_fence_work.h"
#include "i915_trace.h"
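
/*
 * An asynchronous clflush request: the dma_fence_work runs the flush once
 * the object's outstanding fences have signaled, while the object
 * reference and page pin keep the backing store alive until release.
 */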
struct clflush {
	struct dma_fence_work base;
	struct drm_i915_gem_object *obj;
};
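
/*
 * Flush the object's backing pages out of the CPU caches and notify
 * frontbuffer tracking of the CPU write.
 */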
static void __do_clflush(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	drm_clflush_sg(obj->mm.pages);

	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
}
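
/* Work callback, run once every fence awaited by the flush has signaled. */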
static void clflush_work(struct dma_fence_work *base)
{
	struct clflush *clflush = container_of(base, typeof(*clflush), base);

	__do_clflush(clflush->obj);
}
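
/* Undo clflush_work_create(): drop the page pin and object reference. */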
static void clflush_release(struct dma_fence_work *base)
{
	struct clflush *clflush = container_of(base, typeof(*clflush), base);

	i915_gem_object_unpin_pages(clflush->obj);
	i915_gem_object_put(clflush->obj);
}
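
/* Hooks driven by the dma_fence_work machinery. */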
static const struct dma_fence_work_ops clflush_ops = {
	.name = "clflush",
	.work = clflush_work,
	.release = clflush_release,
};
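
/*
 * Allocate the asynchronous flush work and pin the object's pages.
 * Returns NULL if allocation or page acquisition fails, in which case the
 * caller falls back to flushing synchronously.
 */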
static struct clflush *clflush_work_create(struct drm_i915_gem_object *obj)
{
	struct clflush *clflush;

	GEM_BUG_ON(!obj->cache_dirty);

	clflush = kmalloc(sizeof(*clflush), GFP_KERNEL);
	if (!clflush)
		return NULL;

	if (__i915_gem_object_get_pages(obj) < 0) {
		kfree(clflush);
		return NULL;
	}

	dma_fence_work_init(&clflush->base, &clflush_ops);
	clflush->obj = i915_gem_object_get(obj); /* obj <-> clflush cycle */

	return clflush;
}
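
/*
 * i915_gem_clflush_object - flush the object out of the CPU caches
 * @obj: the object to flush; the caller must hold the object lock
 * @flags: I915_CLFLUSH_SYNC forces the flush to happen immediately rather
 * than as queued fence work; I915_CLFLUSH_FORCE flushes even an object
 * whose contents are considered coherent for reads.
 *
 * Returns false if no flush was required, true otherwise.
 */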
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
			     unsigned int flags)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct clflush *clflush;

	assert_object_held(obj);

	/* Flushing is unnecessary on dgfx: objects are never CPU cached. */
	if (IS_DGFX(i915)) {
		WARN_ON_ONCE(obj->cache_dirty);
		return false;
	}

	/*
	 * Stolen memory is always coherent with the GPU as it is explicitly
	 * marked as wc by the system, or the system is cache-coherent.
	 * Similarly, we only access struct pages through the CPU cache, so
	 * anything not backed by physical memory we consider to be always
	 * coherent and in no need of clflushing.
	 */
	if (!i915_gem_object_has_struct_page(obj)) {
		obj->cache_dirty = false;
		return false;
	}

	/*
	 * If the GPU is snooping the contents of the CPU cache,
	 * we do not need to manually clear the CPU cache lines. However,
	 * the caches are only snooped when the render cache is
	 * flushed/invalidated. As we always have to emit invalidations
	 * and flushes when moving into and out of the RENDER domain, correct
	 * snooping behaviour occurs naturally as the result of our domain
	 * tracking.
	 */
	if (!(flags & I915_CLFLUSH_FORCE) &&
	    obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)
		return false;

	trace_i915_gem_object_clflush(obj);

	/*
	 * Prefer to queue the flush as fence work behind the object's
	 * outstanding fences, unless the caller demanded a synchronous
	 * flush or we cannot reserve a fence slot for the result.
	 */
	clflush = NULL;
	if (!(flags & I915_CLFLUSH_SYNC) &&
	    dma_resv_reserve_fences(obj->base.resv, 1) == 0)
		clflush = clflush_work_create(obj);
	if (clflush) {
		i915_sw_fence_await_reservation(&clflush->base.chain,
						obj->base.resv, true,
						i915_fence_timeout(i915),
						I915_FENCE_GFP);
		dma_resv_add_fence(obj->base.resv, &clflush->base.dma,
				   DMA_RESV_USAGE_KERNEL);
		dma_fence_work_commit(&clflush->base);
		/*
		 * We must have successfully populated the pages (since we
		 * are holding a pin on the pages as per the flush worker)
		 * to reach this point, which must mean we have already done
		 * the required flush-on-acquire, hence resetting cache_dirty
		 * here should be safe.
		 */
		obj->cache_dirty = false;
	} else if (obj->mm.pages) {
		__do_clflush(obj);
		obj->cache_dirty = false;
	} else {
		GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
	}

	return true;
}