[linux.git] drivers/gpu/drm/msm/msm_gem_shrinker.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Red Hat
 * Author: Rob Clark <[email protected]>
 */

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_gpu_trace.h"

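/*
 * Try to take struct_mutex for the shrinker path.  Returns false if the
 * lock is contended and the caller should back off; on success, *unlock
 * tells the caller whether it acquired the lock (and must drop it) or
 * whether struct_mutex was already held further up the call stack.
 */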
static bool msm_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
{
        /* NOTE: we are *closer* to being able to get rid of
         * mutex_trylock_recursive()..  the msm_gem code itself does
         * not need struct_mutex, although code-paths that can trigger
         * the shrinker are still called while holding the
         * struct_mutex.
         *
         * Also, msm_obj->madv is protected by struct_mutex.
         *
         * The next step is probably to split out a separate lock for
         * protecting inactive_list, so that the shrinker does not need
         * struct_mutex.
         */
        switch (mutex_trylock_recursive(&dev->struct_mutex)) {
        case MUTEX_TRYLOCK_FAILED:
                return false;

        case MUTEX_TRYLOCK_SUCCESS:
                *unlock = true;
                return true;

        case MUTEX_TRYLOCK_RECURSIVE:
                *unlock = false;
                return true;
        }

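        /* not reached: every mutex_trylock_recursive() result is handled above */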
        BUG();
}

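/*
 * Shrinker "count" callback: report how many pages could be reclaimed by
 * purging the purgeable objects on the inactive list.  Returning 0 (e.g.
 * when struct_mutex is contended) tells the core to skip the scan phase
 * for this shrinker.
 */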
static unsigned long
msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
        struct msm_drm_private *priv =
                container_of(shrinker, struct msm_drm_private, shrinker);
        struct drm_device *dev = priv->dev;
        struct msm_gem_object *msm_obj;
        unsigned long count = 0;
        bool unlock;

        if (!msm_gem_shrinker_lock(dev, &unlock))
                return 0;

        list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
                if (is_purgeable(msm_obj))
                        count += msm_obj->base.size >> PAGE_SHIFT;
        }

        if (unlock)
                mutex_unlock(&dev->struct_mutex);

        return count;
}

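/*
 * Shrinker "scan" callback: purge purgeable objects until at least
 * sc->nr_to_scan pages have been reclaimed, and report the number of
 * pages actually freed.  SHRINK_STOP tells the core not to keep calling
 * us when the lock is contended.
 */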
static unsigned long
msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
        struct msm_drm_private *priv =
                container_of(shrinker, struct msm_drm_private, shrinker);
        struct drm_device *dev = priv->dev;
        struct msm_gem_object *msm_obj;
        unsigned long freed = 0;
        bool unlock;

        if (!msm_gem_shrinker_lock(dev, &unlock))
                return SHRINK_STOP;

        list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
                if (freed >= sc->nr_to_scan)
                        break;
                if (is_purgeable(msm_obj)) {
                        msm_gem_purge(&msm_obj->base, OBJ_LOCK_SHRINKER);
                        freed += msm_obj->base.size >> PAGE_SHIFT;
                }
        }

        if (unlock)
                mutex_unlock(&dev->struct_mutex);

        if (freed > 0)
                trace_msm_gem_purge(freed << PAGE_SHIFT);

        return freed;
}

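/*
 * Notifier called from the vmalloc core when it is running out of vmap
 * address space: vunmap idle objects so their kernel mappings can be
 * reclaimed.  The number of objects unmapped is added to the counter
 * passed in via @ptr, which the caller uses to decide whether a retry
 * is worthwhile.
 */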
static int
msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
        struct msm_drm_private *priv =
                container_of(nb, struct msm_drm_private, vmap_notifier);
        struct drm_device *dev = priv->dev;
        struct msm_gem_object *msm_obj;
        unsigned unmapped = 0;
        bool unlock;

        if (!msm_gem_shrinker_lock(dev, &unlock))
                return NOTIFY_DONE;

        list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
                if (is_vunmapable(msm_obj)) {
                        msm_gem_vunmap(&msm_obj->base, OBJ_LOCK_SHRINKER);
                        /* since we don't know any better, let's bail after a few
                         * and if necessary the shrinker will be invoked again.
                         * Seems better than unmapping *everything*
                         */
                        if (++unmapped >= 15)
                                break;
                }
        }

        if (unlock)
                mutex_unlock(&dev->struct_mutex);

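        /* report back how many mappings were torn down on this pass */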
        *(unsigned long *)ptr += unmapped;

        if (unmapped > 0)
                trace_msm_gem_purge_vmaps(unmapped);

        return NOTIFY_DONE;
}

/**
 * msm_gem_shrinker_init - Initialize msm shrinker
 * @dev: drm device
 *
 * This function registers and sets up the msm shrinker.
 */
void msm_gem_shrinker_init(struct drm_device *dev)
{
        struct msm_drm_private *priv = dev->dev_private;

        priv->shrinker.count_objects = msm_gem_shrinker_count;
        priv->shrinker.scan_objects = msm_gem_shrinker_scan;
        priv->shrinker.seeks = DEFAULT_SEEKS;
        WARN_ON(register_shrinker(&priv->shrinker));

        priv->vmap_notifier.notifier_call = msm_gem_shrinker_vmap;
        WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier));
}

/**
 * msm_gem_shrinker_cleanup - Clean up msm shrinker
 * @dev: drm device
 *
 * This function unregisters the msm shrinker.
 */
void msm_gem_shrinker_cleanup(struct drm_device *dev)
{
        struct msm_drm_private *priv = dev->dev_private;

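        /* shrinker.nr_deferred is only allocated once register_shrinker()
         * succeeds, so use it to tell whether there is anything to
         * unregister.
         */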
        if (priv->shrinker.nr_deferred) {
                WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
                unregister_shrinker(&priv->shrinker);
        }
}