// SPDX-License-Identifier: GPL-2.0
/*
 * Manage cache of swap slots to be used for and returned from
 * swap.
 *
 * Copyright(c) 2016 Intel Corporation.
 *
 * Author: Tim Chen <[email protected]>
 *
 * We allocate the swap slots from the global pool and put
 * them into local per cpu caches.  This has the advantage
 * of not needing to acquire the swap_info lock every time
 * we need a new slot.
 *
 * There is also opportunity to simply return the slot
 * to local caches without needing to acquire swap_info
 * lock.  We do not reuse the returned slots directly but
 * move them back to the global pool in a batch.  This
 * allows the slots to coalesce and reduces fragmentation.
 *
 * The swap entry allocated is marked with SWAP_HAS_CACHE
 * flag in map_count that prevents it from being allocated
 * again from the global pool.
 *
 * The swap slots cache is protected by a mutex instead of
 * a spin lock as when we search for slots with scan_swap_map,
 * we can possibly sleep.
 */

#include <linux/swap_slots.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/mm.h>

static DEFINE_PER_CPU(struct swap_slots_cache, swp_slots);
static bool swap_slot_cache_active;
bool swap_slot_cache_enabled;
static bool swap_slot_cache_initialized;
static DEFINE_MUTEX(swap_slots_cache_mutex);
/* Serialize swap slots cache enable/disable operations */
static DEFINE_MUTEX(swap_slots_cache_enable_mutex);

static void __drain_swap_slots_cache(unsigned int type);
static void deactivate_swap_slots_cache(void);
static void reactivate_swap_slots_cache(void);

#define use_swap_slot_cache (swap_slot_cache_active && swap_slot_cache_enabled)
#define SLOTS_CACHE 0x1
#define SLOTS_CACHE_RET 0x2

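/*
 * Mark the per-cpu caches inactive and drain any slots they hold back to
 * the global pool.  reactivate_swap_slots_cache() only flips the flag back
 * on; refilling happens lazily on the next allocation.
 */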
static void deactivate_swap_slots_cache(void)
{
        mutex_lock(&swap_slots_cache_mutex);
        swap_slot_cache_active = false;
        __drain_swap_slots_cache(SLOTS_CACHE|SLOTS_CACHE_RET);
        mutex_unlock(&swap_slots_cache_mutex);
}

static void reactivate_swap_slots_cache(void)
{
        mutex_lock(&swap_slots_cache_mutex);
        swap_slot_cache_active = true;
        mutex_unlock(&swap_slots_cache_mutex);
}

/* Must not be called with cpu hot plug lock */
void disable_swap_slots_cache_lock(void)
{
        mutex_lock(&swap_slots_cache_enable_mutex);
        swap_slot_cache_enabled = false;
        if (swap_slot_cache_initialized) {
                /* serialize with cpu hotplug operations */
                get_online_cpus();
                __drain_swap_slots_cache(SLOTS_CACHE|SLOTS_CACHE_RET);
                put_online_cpus();
        }
}

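/* Enable the cache only while there is usable swap on the system. */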
static void __reenable_swap_slots_cache(void)
{
        swap_slot_cache_enabled = has_usable_swap();
}

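/*
 * Pairs with disable_swap_slots_cache_lock(): re-enable the cache and
 * release swap_slots_cache_enable_mutex taken there.
 */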
void reenable_swap_slots_cache_unlock(void)
{
        __reenable_swap_slots_cache();
        mutex_unlock(&swap_slots_cache_enable_mutex);
}

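/*
 * Activate or deactivate the cache based on how much swap space is left,
 * with separate activate/deactivate thresholds (scaled by the number of
 * online cpus) so the cache does not toggle at a single cutoff.
 */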
static bool check_cache_active(void)
{
        long pages;

        if (!swap_slot_cache_enabled)
                return false;

        pages = get_nr_swap_pages();
        if (!swap_slot_cache_active) {
                if (pages > num_online_cpus() *
                    THRESHOLD_ACTIVATE_SWAP_SLOTS_CACHE)
                        reactivate_swap_slots_cache();
                goto out;
        }

        /* if global pool of slot caches too low, deactivate cache */
        if (pages < num_online_cpus() * THRESHOLD_DEACTIVATE_SWAP_SLOTS_CACHE)
                deactivate_swap_slots_cache();
out:
        return swap_slot_cache_active;
}

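/*
 * CPU hotplug "online" callback (registered in enable_swap_slots_cache()):
 * allocate the per-cpu slots and slots_ret arrays for @cpu and publish
 * them under swap_slots_cache_mutex.
 */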
static int alloc_swap_slot_cache(unsigned int cpu)
{
        struct swap_slots_cache *cache;
        swp_entry_t *slots, *slots_ret;

        /*
         * Do allocation outside swap_slots_cache_mutex
         * as kvzalloc could trigger reclaim and get_swap_page,
         * which can lock swap_slots_cache_mutex.
         */
        slots = kvcalloc(SWAP_SLOTS_CACHE_SIZE, sizeof(swp_entry_t),
                         GFP_KERNEL);
        if (!slots)
                return -ENOMEM;

        slots_ret = kvcalloc(SWAP_SLOTS_CACHE_SIZE, sizeof(swp_entry_t),
                             GFP_KERNEL);
        if (!slots_ret) {
                kvfree(slots);
                return -ENOMEM;
        }

        mutex_lock(&swap_slots_cache_mutex);
        cache = &per_cpu(swp_slots, cpu);
        if (cache->slots || cache->slots_ret) {
                /* cache already allocated */
                mutex_unlock(&swap_slots_cache_mutex);

                kvfree(slots);
                kvfree(slots_ret);

                return 0;
        }

        if (!cache->lock_initialized) {
                mutex_init(&cache->alloc_lock);
                spin_lock_init(&cache->free_lock);
                cache->lock_initialized = true;
        }
        cache->nr = 0;
        cache->cur = 0;
        cache->n_ret = 0;
        /*
         * We initialized alloc_lock and free_lock earlier.  We use
         * !cache->slots or !cache->slots_ret to know if it is safe to acquire
         * the corresponding lock and use the cache.  Memory barrier below
         * ensures the assumption.
         */
        mb();
        cache->slots = slots;
        cache->slots_ret = slots_ret;
        mutex_unlock(&swap_slots_cache_mutex);
        return 0;
}

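/*
 * Flush the allocation and/or return cache of one cpu back to the global
 * pool, selected by @type; when @free_slots is true the backing arrays
 * are freed as well (cache teardown).
 */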
static void drain_slots_cache_cpu(unsigned int cpu, unsigned int type,
                                  bool free_slots)
{
        struct swap_slots_cache *cache;
        swp_entry_t *slots = NULL;

        cache = &per_cpu(swp_slots, cpu);
        if ((type & SLOTS_CACHE) && cache->slots) {
                mutex_lock(&cache->alloc_lock);
                swapcache_free_entries(cache->slots + cache->cur, cache->nr);
                cache->cur = 0;
                cache->nr = 0;
                if (free_slots && cache->slots) {
                        kvfree(cache->slots);
                        cache->slots = NULL;
                }
                mutex_unlock(&cache->alloc_lock);
        }
        if ((type & SLOTS_CACHE_RET) && cache->slots_ret) {
                spin_lock_irq(&cache->free_lock);
                swapcache_free_entries(cache->slots_ret, cache->n_ret);
                cache->n_ret = 0;
                if (free_slots && cache->slots_ret) {
                        slots = cache->slots_ret;
                        cache->slots_ret = NULL;
                }
                spin_unlock_irq(&cache->free_lock);
                if (slots)
                        kvfree(slots);
        }
}

static void __drain_swap_slots_cache(unsigned int type)
{
        unsigned int cpu;

        /*
         * This function is called during
         *      1) swapoff, when we have to make sure no
         *         left over slots are in cache when we remove
         *         a swap device;
         *      2) disabling of swap slot cache, when we run low
         *         on swap slots when allocating memory and need
         *         to return swap slots to global pool.
         *
         * We cannot acquire cpu hot plug lock here as
         * this function can be invoked in the cpu
         * hot plug path:
         * cpu_up -> lock cpu_hotplug -> cpu hotplug state callback
         *   -> memory allocation -> direct reclaim -> get_swap_page
         *   -> drain_swap_slots_cache
         *
         * Hence the loop over the online cpus below could miss a cpu that
         * is being brought online but not yet marked as online.
         * That is okay, as we do not schedule and run anything on a
         * cpu before it has been marked online.  Hence, we will not
         * fill any swap slots in the slots cache of such a cpu.
         * There are no slots on such a cpu that need to be drained.
         */
        for_each_online_cpu(cpu)
                drain_slots_cache_cpu(cpu, type, false);
}

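/* CPU hotplug teardown callback: drain and free both caches of @cpu. */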
static int free_slot_cache(unsigned int cpu)
{
        mutex_lock(&swap_slots_cache_mutex);
        drain_slots_cache_cpu(cpu, SLOTS_CACHE | SLOTS_CACHE_RET, true);
        mutex_unlock(&swap_slots_cache_mutex);
        return 0;
}

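/*
 * Register the cpu hotplug callbacks that allocate and free the per-cpu
 * caches (once), then enable the cache if usable swap is present.
 */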
void enable_swap_slots_cache(void)
{
        mutex_lock(&swap_slots_cache_enable_mutex);
        if (!swap_slot_cache_initialized) {
                int ret;

                ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "swap_slots_cache",
                                        alloc_swap_slot_cache, free_slot_cache);
                if (WARN_ONCE(ret < 0, "Cache allocation failed (%s), operating "
                                       "without swap slots cache.\n", __func__))
                        goto out_unlock;

                swap_slot_cache_initialized = true;
        }

        __reenable_swap_slots_cache();
out_unlock:
        mutex_unlock(&swap_slots_cache_enable_mutex);
}

/* called with swap slot cache's alloc lock held */
static int refill_swap_slots_cache(struct swap_slots_cache *cache)
{
        if (!use_swap_slot_cache || cache->nr)
                return 0;

        cache->cur = 0;
        if (swap_slot_cache_active)
                cache->nr = get_swap_pages(SWAP_SLOTS_CACHE_SIZE,
                                           cache->slots, 1);

        return cache->nr;
}

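/*
 * Return a swap entry to the current cpu's return cache; when the cache
 * is full or unavailable, hand the entry (or the whole batch) back to the
 * global pool via swapcache_free_entries().
 */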
int free_swap_slot(swp_entry_t entry)
{
        struct swap_slots_cache *cache;

        cache = raw_cpu_ptr(&swp_slots);
        if (likely(use_swap_slot_cache && cache->slots_ret)) {
                spin_lock_irq(&cache->free_lock);
                /* Swap slots cache may be deactivated before acquiring lock */
                if (!use_swap_slot_cache || !cache->slots_ret) {
                        spin_unlock_irq(&cache->free_lock);
                        goto direct_free;
                }
                if (cache->n_ret >= SWAP_SLOTS_CACHE_SIZE) {
                        /*
                         * Return slots to global pool.
                         * The current swap_map value is SWAP_HAS_CACHE.
                         * Set it to 0 to indicate it is available for
                         * allocation in global pool.
                         */
                        swapcache_free_entries(cache->slots_ret, cache->n_ret);
                        cache->n_ret = 0;
                }
                cache->slots_ret[cache->n_ret++] = entry;
                spin_unlock_irq(&cache->free_lock);
        } else {
direct_free:
                swapcache_free_entries(&entry, 1);
        }

        return 0;
}

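/*
 * Allocate a swap entry for @page.  THP pages and cache misses go straight
 * to get_swap_pages(); order-0 pages are served from the per-cpu slots
 * cache when it is active, refilling it in batches as needed.  The entry
 * is charged to the page's memcg and dropped if the charge fails.
 */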
swp_entry_t get_swap_page(struct page *page)
{
        swp_entry_t entry;
        struct swap_slots_cache *cache;

        entry.val = 0;

        if (PageTransHuge(page)) {
                if (IS_ENABLED(CONFIG_THP_SWAP))
                        get_swap_pages(1, &entry, HPAGE_PMD_NR);
                goto out;
        }

        /*
         * Preemption is allowed here, because we may sleep
         * in refill_swap_slots_cache().  But it is safe, because
         * accesses to the per-CPU data structure are protected by the
         * mutex cache->alloc_lock.
         *
         * The alloc path here does not touch cache->slots_ret
         * so cache->free_lock is not taken.
         */
        cache = raw_cpu_ptr(&swp_slots);

        if (likely(check_cache_active() && cache->slots)) {
                mutex_lock(&cache->alloc_lock);
                if (cache->slots) {
repeat:
                        if (cache->nr) {
                                entry = cache->slots[cache->cur];
                                cache->slots[cache->cur++].val = 0;
                                cache->nr--;
                        } else if (refill_swap_slots_cache(cache)) {
                                goto repeat;
                        }
                }
                mutex_unlock(&cache->alloc_lock);
                if (entry.val)
                        goto out;
        }

        get_swap_pages(1, &entry, 1);
out:
        if (mem_cgroup_try_charge_swap(page, entry)) {
                put_swap_page(page, entry);
                entry.val = 0;
        }
        return entry;
}