// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014-2018 Intel Corporation
 */

#include "gem/i915_gem_object.h"

#include "i915_drv.h"
#include "intel_engine_pm.h"
#include "intel_gt_buffer_pool.h"

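/*
 * The buffer pool keeps a per-gt cache of recycled kernel-internal
 * objects, sorted into power-of-two size buckets. On allocation,
 * intel_gt_get_buffer_pool() reuses the first large-enough idle buffer
 * from a bucket, or creates a new one; an i915_active tracks the requests
 * using each node, and on retirement the buffer is returned to its
 * bucket. A delayed worker then reaps any buffer left idle for more
 * than a second.
 */
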
static struct intel_gt *to_gt(struct intel_gt_buffer_pool *pool)
{
	return container_of(pool, struct intel_gt, buffer_pool);
}

static struct list_head *
bucket_for_size(struct intel_gt_buffer_pool *pool, size_t sz)
{
	int n;

	/*
	 * Compute a power-of-two bucket, but throw everything greater than
	 * 16KiB into the same bucket: i.e. the buckets hold objects of
	 * (1 page, 2 pages, 4 pages, 8+ pages).
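	 *
	 * For example, with 4KiB pages a 4KiB request maps to bucket 0,
	 * 8KiB to bucket 1, 16KiB to bucket 2, and 32KiB or larger all
	 * share the final bucket.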
	 */
	n = fls(sz >> PAGE_SHIFT) - 1;
	if (n >= ARRAY_SIZE(pool->cache_list))
		n = ARRAY_SIZE(pool->cache_list) - 1;

	return &pool->cache_list[n];
}

static void node_free(struct intel_gt_buffer_pool_node *node)
{
	i915_gem_object_put(node->obj);
	i915_active_fini(&node->active);
	kfree(node);
}

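/*
 * pool_free_work() is the reaper: once a second it walks every bucket,
 * frees the nodes that have not been reused since the previous pass, and
 * reschedules itself for as long as any bucket remains populated.
 */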
static void pool_free_work(struct work_struct *wrk)
{
	struct intel_gt_buffer_pool *pool =
		container_of(wrk, typeof(*pool), work.work);
	struct intel_gt_buffer_pool_node *node, *next;
	unsigned long old = jiffies - HZ;
	bool active = false;
	LIST_HEAD(stale);
	int n;

	/* Free buffers that have not been used in the past second */
	spin_lock_irq(&pool->lock);
	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
		struct list_head *list = &pool->cache_list[n];

		/* Most recent at head; oldest at tail */
		list_for_each_entry_safe_reverse(node, next, list, link) {
			/* Stop at the first node used within the last second */
			if (!time_before(node->age, old))
				break;

			list_move(&node->link, &stale);
		}
		active |= !list_empty(list);
	}
	spin_unlock_irq(&pool->lock);

	/* Free the stale nodes outside the spinlock */
	list_for_each_entry_safe(node, next, &stale, link)
		node_free(node);

	if (active)
		schedule_delayed_work(&pool->work,
				      round_jiffies_up_relative(HZ));
}

static int pool_active(struct i915_active *ref)
{
	struct intel_gt_buffer_pool_node *node =
		container_of(ref, typeof(*node), active);
	struct dma_resv *resv = node->obj->base.resv;
	int err;

	/* Discard any stale fences left behind by the previous user */
	if (dma_resv_trylock(resv)) {
		dma_resv_add_excl_fence(resv, NULL);
		dma_resv_unlock(resv);
	}

	err = i915_gem_object_pin_pages(node->obj);
	if (err)
		return err;

	/* Hide this pinned object from the shrinker until retired */
	i915_gem_object_make_unshrinkable(node->obj);

	return 0;
}

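/*
 * pool_retire() runs once the last request using the node has completed:
 * the pages are unpinned and made purgeable again (so the shrinker may
 * reclaim them under memory pressure), and the node is timestamped and
 * returned to its size bucket for reuse.
 */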
static void pool_retire(struct i915_active *ref)
{
	struct intel_gt_buffer_pool_node *node =
		container_of(ref, typeof(*node), active);
	struct intel_gt_buffer_pool *pool = node->pool;
	struct list_head *list = bucket_for_size(pool, node->obj->base.size);
	unsigned long flags;

	i915_gem_object_unpin_pages(node->obj);

	/* Return this object to the shrinker pool */
	i915_gem_object_make_purgeable(node->obj);

	spin_lock_irqsave(&pool->lock, flags);
	node->age = jiffies;
	list_add(&node->link, list);
	spin_unlock_irqrestore(&pool->lock, flags);

	schedule_delayed_work(&pool->work,
			      round_jiffies_up_relative(HZ));
}

static struct intel_gt_buffer_pool_node *
node_create(struct intel_gt_buffer_pool *pool, size_t sz)
{
	struct intel_gt *gt = to_gt(pool);
	struct intel_gt_buffer_pool_node *node;
	struct drm_i915_gem_object *obj;

	node = kmalloc(sizeof(*node),
		       GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	if (!node)
		return ERR_PTR(-ENOMEM);

	node->pool = pool;
	i915_active_init(&node->active, pool_active, pool_retire);

	obj = i915_gem_object_create_internal(gt->i915, sz);
	if (IS_ERR(obj)) {
		i915_active_fini(&node->active);
		kfree(node);
		return ERR_CAST(obj);
	}

	i915_gem_object_set_readonly(obj);

	node->obj = obj;
	return node;
}

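/*
 * Typical caller pattern, as a sketch (intel_gt_buffer_pool_put() and
 * intel_gt_buffer_pool_mark_active() are the inline helpers from
 * intel_gt_buffer_pool.h):
 *
 *	node = intel_gt_get_buffer_pool(gt, size);
 *	if (IS_ERR(node))
 *		return PTR_ERR(node);
 *	... fill node->obj and build the request rq ...
 *	err = intel_gt_buffer_pool_mark_active(node, rq);
 *	intel_gt_buffer_pool_put(node);
 *
 * The i915_active then keeps the node alive until rq retires, at which
 * point pool_retire() recycles it.
 */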
struct intel_gt_buffer_pool_node *
intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size)
{
	struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
	struct intel_gt_buffer_pool_node *node;
	struct list_head *list;
	unsigned long flags;
	int ret;

	size = PAGE_ALIGN(size);
	list = bucket_for_size(pool, size);

	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(node, list, link) {
		if (node->obj->base.size < size)
			continue;
		list_del(&node->link);
		break;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	/* No reusable node found; the loop ran off the end of the list */
	if (&node->link == list) {
		node = node_create(pool, size);
		if (IS_ERR(node))
			return node;
	}

	ret = i915_active_acquire(&node->active);
	if (ret) {
		node_free(node);
		return ERR_PTR(ret);
	}

	return node;
}

void intel_gt_init_buffer_pool(struct intel_gt *gt)
{
	struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
	int n;

	spin_lock_init(&pool->lock);
	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
		INIT_LIST_HEAD(&pool->cache_list[n]);
	INIT_DELAYED_WORK(&pool->work, pool_free_work);
}

static void pool_free_imm(struct intel_gt_buffer_pool *pool)
{
	int n;

	spin_lock_irq(&pool->lock);
	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
		struct intel_gt_buffer_pool_node *node, *next;
		struct list_head *list = &pool->cache_list[n];

		list_for_each_entry_safe(node, next, list, link)
			node_free(node);
		INIT_LIST_HEAD(list);
	}
	spin_unlock_irq(&pool->lock);
}

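/*
 * Flushing must both empty the buckets and quiesce the reaper: a retire
 * racing with us may re-add a node and reschedule the worker, hence the
 * free/cancel loop below.
 */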
void intel_gt_flush_buffer_pool(struct intel_gt *gt)
{
	struct intel_gt_buffer_pool *pool = &gt->buffer_pool;

	do {
		pool_free_imm(pool);
	} while (cancel_delayed_work_sync(&pool->work));
}

void intel_gt_fini_buffer_pool(struct intel_gt *gt)
{
	struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
	int n;

	intel_gt_flush_buffer_pool(gt);

	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
		GEM_BUG_ON(!list_empty(&pool->cache_list[n]));
}