// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Red Hat
 * Author: Rob Clark <[email protected]>
 */

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_gpu_trace.h"

/* Disabled by default for now, until it has had more testing on the
 * different iommu combinations that can be paired with the driver.
 */
static bool enable_eviction = false;
MODULE_PARM_DESC(enable_eviction, "Enable swappable GEM buffers");
module_param(enable_eviction, bool, 0600);
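/*
 * Usage sketch (assuming the driver is built as msm.ko): since the param
 * is mode 0600, root can toggle eviction at runtime with
 *
 *   echo Y > /sys/module/msm/parameters/enable_eviction
 *
 * or enable it at boot with msm.enable_eviction=1 on the kernel cmdline.
 */
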
static bool can_swap(void)
{
        return enable_eviction && get_nr_swap_pages() > 0;
}

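/*
 * Tell the core shrinker how many pages could be reclaimed: everything
 * that is purgeable, plus (when swap is usable) everything that is
 * evictable.
 */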
static unsigned long
msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
        struct msm_drm_private *priv =
                container_of(shrinker, struct msm_drm_private, shrinker);
        unsigned count = priv->shrinkable_count;

        if (can_swap())
                count += priv->evictable_count;

        return count;
}

static bool
purge(struct msm_gem_object *msm_obj)
{
        if (!is_purgeable(msm_obj))
                return false;

        /*
         * This moves the obj off of scan()'s still_in_list and onto
         * the purged list:
         */
        msm_gem_purge(&msm_obj->base);

        return true;
}

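/*
 * Unlike purge(), this does not discard the object's contents; the backing
 * pages are unpinned so that they can be swapped out under memory pressure.
 */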
static bool
evict(struct msm_gem_object *msm_obj)
{
        if (is_unevictable(msm_obj))
                return false;

        msm_gem_evict(&msm_obj->base);

        return true;
}

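/*
 * Walk @list, applying @shrink to each object until nr_to_scan pages have
 * been freed or the list is exhausted.  Visited objects are parked on a
 * private list so the walk keeps its place while mm_lock is dropped, and
 * are spliced back when the scan is done.
 */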
static unsigned long
scan(struct msm_drm_private *priv, unsigned nr_to_scan, struct list_head *list,
                bool (*shrink)(struct msm_gem_object *msm_obj))
{
        unsigned freed = 0;
        struct list_head still_in_list;

        INIT_LIST_HEAD(&still_in_list);

        mutex_lock(&priv->mm_lock);

        while (freed < nr_to_scan) {
                struct msm_gem_object *msm_obj = list_first_entry_or_null(
                                list, typeof(*msm_obj), mm_list);

                if (!msm_obj)
                        break;

                list_move_tail(&msm_obj->mm_list, &still_in_list);

                /*
                 * If it is in the process of being freed, msm_gem_free_object
                 * can be blocked on mm_lock waiting to remove it.  So just
                 * skip it.
                 */
                if (!kref_get_unless_zero(&msm_obj->base.refcount))
                        continue;

                /*
                 * Now that we own a reference, we can drop mm_lock for the
                 * rest of the loop body, to reduce contention with the
                 * retire_submit path (which could make more objects purgeable)
                 */

                mutex_unlock(&priv->mm_lock);

                /*
                 * Note that this still needs to be a trylock, since we can
                 * hit the shrinker in response to trying to get backing pages
                 * for this obj (ie. while its lock is already held)
                 */
                if (!msm_gem_trylock(&msm_obj->base))
                        goto tail;

                if (shrink(msm_obj))
                        freed += msm_obj->base.size >> PAGE_SHIFT;

                msm_gem_unlock(&msm_obj->base);

tail:
                drm_gem_object_put(&msm_obj->base);
                mutex_lock(&priv->mm_lock);
        }

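        /*
         * Whatever survived the scan (skipped, contended, or shrunk in
         * place) goes back on the tail of the original list; purged objects
         * were already moved off of still_in_list by msm_gem_purge().
         */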
        list_splice_tail(&still_in_list, list);
        mutex_unlock(&priv->mm_lock);

        return freed;
}

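/*
 * Reclaim by purging first (the contents are discarded outright), and only
 * fall back to eviction (pages remain recoverable via swap) if purging did
 * not satisfy the request.
 */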
static unsigned long
msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
        struct msm_drm_private *priv =
                container_of(shrinker, struct msm_drm_private, shrinker);
        unsigned long freed;

        freed = scan(priv, sc->nr_to_scan, &priv->inactive_dontneed, purge);

        if (freed > 0)
                trace_msm_gem_purge(freed << PAGE_SHIFT);

        if (can_swap() && freed < sc->nr_to_scan) {
                unsigned long evicted = scan(priv, sc->nr_to_scan - freed,
                                &priv->inactive_willneed, evict);

                if (evicted > 0)
                        trace_msm_gem_evict(evicted << PAGE_SHIFT);

                freed += evicted;
        }

        return (freed > 0) ? freed : SHRINK_STOP;
}

#ifdef CONFIG_DEBUG_FS
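/*
 * Debugfs-only entry point to simulate memory pressure: wraps the scan
 * callback in the fs_reclaim annotations that lockdep expects from real
 * reclaim context.
 */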
unsigned long
msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_to_scan)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct shrink_control sc = {
                .nr_to_scan = nr_to_scan,
        };
        unsigned long ret;

        fs_reclaim_acquire(GFP_KERNEL);
        ret = msm_gem_shrinker_scan(&priv->shrinker, &sc);
        fs_reclaim_release(GFP_KERNEL);

        return ret;
}
#endif

/* Since we don't know any better, bail after unmapping a few objects;
 * if necessary the shrinker will be invoked again.  That seems better
 * than unmapping *everything*.
 */
static const int vmap_shrink_limit = 15;

static bool
vmap_shrink(struct msm_gem_object *msm_obj)
{
        if (!is_vunmapable(msm_obj))
                return false;

        msm_gem_vunmap(&msm_obj->base);

        return true;
}

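/*
 * Notifier callback invoked when the core runs low on vmalloc address
 * space: tear down kernel-side vmaps of GEM objects, scanning the inactive
 * lists first and the active list only as a last resort.
 */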
static int
msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
        struct msm_drm_private *priv =
                container_of(nb, struct msm_drm_private, vmap_notifier);
        struct list_head *mm_lists[] = {
                &priv->inactive_dontneed,
                &priv->inactive_willneed,
                priv->gpu ? &priv->gpu->active_list : NULL,
                NULL,
        };
        unsigned idx, unmapped = 0;

        for (idx = 0; mm_lists[idx] && unmapped < vmap_shrink_limit; idx++) {
                unmapped += scan(priv, vmap_shrink_limit - unmapped,
                                mm_lists[idx], vmap_shrink);
        }

        *(unsigned long *)ptr += unmapped;

        if (unmapped > 0)
                trace_msm_gem_purge_vmaps(unmapped);

        return NOTIFY_DONE;
}

/**
 * msm_gem_shrinker_init - Initialize msm shrinker
 * @dev: drm device
 *
 * This function registers and sets up the msm shrinker.
 */
void msm_gem_shrinker_init(struct drm_device *dev)
{
        struct msm_drm_private *priv = dev->dev_private;

        priv->shrinker.count_objects = msm_gem_shrinker_count;
        priv->shrinker.scan_objects = msm_gem_shrinker_scan;
        priv->shrinker.seeks = DEFAULT_SEEKS;
        WARN_ON(register_shrinker(&priv->shrinker));

        priv->vmap_notifier.notifier_call = msm_gem_shrinker_vmap;
        WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier));
}

/**
 * msm_gem_shrinker_cleanup - Clean up msm shrinker
 * @dev: drm device
 *
 * This function unregisters the msm shrinker.
 */
void msm_gem_shrinker_cleanup(struct drm_device *dev)
{
        struct msm_drm_private *priv = dev->dev_private;

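        /*
         * register_shrinker() only sets up shrinker.nr_deferred on success,
         * so it doubles here as a check that init actually registered us.
         */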
        if (priv->shrinker.nr_deferred) {
                WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
                unregister_shrinker(&priv->shrinker);
        }
}