// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Red Hat
 */

#include <linux/vmalloc.h>
#include <linux/sched/mm.h>

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_gpu_trace.h"

/* Default disabled for now until it has some more testing on the different
 * iommu combinations that can be paired with the driver:
 */
static bool enable_eviction = false;
MODULE_PARM_DESC(enable_eviction, "Enable swappable GEM buffers");
module_param(enable_eviction, bool, 0600);
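
/* Helper predicates used below: can_swap() gates the eviction stages on the
 * module param and on swap space actually being available, and can_block()
 * reports whether this reclaim context is allowed to sleep waiting for the
 * GPU (kswapd, or a direct-reclaim allocation that permits blocking).
 */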
static bool can_swap(void)
{
	return enable_eviction && get_nr_swap_pages() > 0;
}

static bool can_block(struct shrink_control *sc)
{
	if (!(sc->gfp_mask & __GFP_DIRECT_RECLAIM))
		return false;
	return current_is_kswapd() || (sc->gfp_mask & __GFP_RECLAIM);
}
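
/* Shrinker "count" callback: report how many objects could be reclaimed.
 * willneed (evictable) objects are only counted when eviction to swap is
 * possible, since without swap they cannot actually be freed.
 */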
static unsigned long
msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct msm_drm_private *priv =
		container_of(shrinker, struct msm_drm_private, shrinker);
	unsigned count = priv->lru.dontneed.count;

	if (can_swap())
		count += priv->lru.willneed.count;

	return count;
}
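
/* purge() drops the backing pages of an idle buffer that userspace has
 * marked MADV_DONTNEED; evict() below instead unpins an idle, evictable
 * buffer so its pages can be swapped out rather than discarded.
 */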
static bool
purge(struct drm_gem_object *obj)
{
	if (!is_purgeable(to_msm_bo(obj)))
		return false;
	if (msm_gem_active(obj))
		return false;

	msm_gem_purge(obj);
	return true;
}

static bool
evict(struct drm_gem_object *obj)
{
	if (is_unevictable(to_msm_bo(obj)))
		return false;
	if (msm_gem_active(obj))
		return false;

	msm_gem_evict(obj);
	return true;
}
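
/* The active_* variants are used in the more expensive reclaim stages: they
 * wait (up to 1000 jiffies) for any pending GPU access to complete before
 * falling back to the normal purge/evict path above.
 */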
static bool
wait_for_idle(struct drm_gem_object *obj)
{
	enum dma_resv_usage usage = dma_resv_usage_rw(true);
	return dma_resv_wait_timeout(obj->resv, usage, false, 1000) > 0;
}

static bool
active_purge(struct drm_gem_object *obj)
{
	if (!wait_for_idle(obj))
		return false;
	return purge(obj);
}

static bool
active_evict(struct drm_gem_object *obj)
{
	if (!wait_for_idle(obj))
		return false;
	return evict(obj);
}
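
/* Shrinker "scan" callback: walk the reclaim stages below in order until
 * sc->nr_to_scan objects have been reclaimed, and report SHRINK_STOP if
 * nothing could be freed.
 */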
static unsigned long
msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct msm_drm_private *priv =
		container_of(shrinker, struct msm_drm_private, shrinker);
	struct {
		struct drm_gem_lru *lru;
		bool (*shrink)(struct drm_gem_object *obj);
		bool cond;
		unsigned long freed;
	} stages[] = {
		/* Stages of progressively more aggressive/expensive reclaim: */
		{ &priv->lru.dontneed, purge,        true },
		{ &priv->lru.willneed, evict,        can_swap() },
		{ &priv->lru.dontneed, active_purge, can_block(sc) },
		{ &priv->lru.willneed, active_evict, can_swap() && can_block(sc) },
	};
	long nr = sc->nr_to_scan;
	unsigned long freed = 0;

	for (unsigned i = 0; (nr > 0) && (i < ARRAY_SIZE(stages)); i++) {
		if (!stages[i].cond)
			continue;
		stages[i].freed =
			drm_gem_lru_scan(stages[i].lru, nr, stages[i].shrink);
		nr -= stages[i].freed;
		freed += stages[i].freed;
	}

	if (freed) {
		trace_msm_gem_shrink(sc->nr_to_scan, stages[0].freed,
				     stages[1].freed, stages[2].freed,
				     stages[3].freed);
	}

	return (freed > 0) ? freed : SHRINK_STOP;
}
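
/* Debugfs hook to invoke the shrinker by hand; fs_reclaim_acquire() /
 * fs_reclaim_release() make lockdep treat this as a real reclaim context.
 */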
#ifdef CONFIG_DEBUG_FS
unsigned long
msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_to_scan)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct shrink_control sc = {
		.nr_to_scan = nr_to_scan,
	};
	unsigned long ret;

	fs_reclaim_acquire(GFP_KERNEL);
	ret = msm_gem_shrinker_scan(&priv->shrinker, &sc);
	fs_reclaim_release(GFP_KERNEL);

	return ret;
}
#endif

/* Since we don't know any better, let's bail after a few
 * and if necessary the shrinker will be invoked again.
 * Seems better than unmapping *everything*.
 */
static const int vmap_shrink_limit = 15;
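
/* Drop the kernel-side vmap of a buffer, if it has one and it is unused. */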
static bool
vmap_shrink(struct drm_gem_object *obj)
{
	if (!is_vunmapable(to_msm_bo(obj)))
		return false;

	msm_gem_vunmap(obj);
	return true;
}

static int
msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct msm_drm_private *priv =
		container_of(nb, struct msm_drm_private, vmap_notifier);
	struct drm_gem_lru *lrus[] = {
		&priv->lru.dontneed, &priv->lru.willneed, &priv->lru.pinned, NULL,
	};
	unsigned idx, unmapped = 0;

	for (idx = 0; lrus[idx] && unmapped < vmap_shrink_limit; idx++) {
		unmapped += drm_gem_lru_scan(lrus[idx],
					     vmap_shrink_limit - unmapped,
					     vmap_shrink);
	}

	/* Report back how many objects we unmapped: */
	*(unsigned long *)ptr += unmapped;

	if (unmapped > 0)
		trace_msm_gem_purge_vmaps(unmapped);

	return NOTIFY_DONE;
}

/**
 * msm_gem_shrinker_init - Initialize msm shrinker
 * @dev: drm device
 *
 * This function registers and sets up the msm shrinker.
 */
void msm_gem_shrinker_init(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

	priv->shrinker.count_objects = msm_gem_shrinker_count;
	priv->shrinker.scan_objects = msm_gem_shrinker_scan;
	priv->shrinker.seeks = DEFAULT_SEEKS;
	WARN_ON(register_shrinker(&priv->shrinker, "drm-msm_gem"));

	priv->vmap_notifier.notifier_call = msm_gem_shrinker_vmap;
	WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier));
}

/**
 * msm_gem_shrinker_cleanup - Clean up msm shrinker
 * @dev: drm device
 *
 * This function unregisters the msm shrinker.
 */
void msm_gem_shrinker_cleanup(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
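
	/* nr_deferred is only allocated once register_shrinker() has
	 * succeeded, so use it to tell whether there is anything to tear down.
	 */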
	if (priv->shrinker.nr_deferred) {
		WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
		unregister_shrinker(&priv->shrinker);
	}
}