// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/memcontrol.h>
#include "slab.h"
#include "internal.h"

#ifdef CONFIG_MEMCG_KMEM
static LIST_HEAD(memcg_list_lrus);
static DEFINE_MUTEX(list_lrus_mutex);

static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
        return lru->memcg_aware;
}

static void list_lru_register(struct list_lru *lru)
{
        if (!list_lru_memcg_aware(lru))
                return;

        mutex_lock(&list_lrus_mutex);
        list_add(&lru->list, &memcg_list_lrus);
        mutex_unlock(&list_lrus_mutex);
}

static void list_lru_unregister(struct list_lru *lru)
{
        if (!list_lru_memcg_aware(lru))
                return;

        mutex_lock(&list_lrus_mutex);
        list_del(&lru->list);
        mutex_unlock(&list_lrus_mutex);
}

static int lru_shrinker_id(struct list_lru *lru)
{
        return lru->shrinker_id;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
{
        if (list_lru_memcg_aware(lru) && idx >= 0) {
                struct list_lru_memcg *mlru = xa_load(&lru->xa, idx);

                return mlru ? &mlru->node[nid] : NULL;
        }
        return &lru->node[nid].lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru *lru, int nid, void *ptr,
                   struct mem_cgroup **memcg_ptr)
{
        struct list_lru_node *nlru = &lru->node[nid];
        struct list_lru_one *l = &nlru->lru;
        struct mem_cgroup *memcg = NULL;

        if (!list_lru_memcg_aware(lru))
                goto out;

        memcg = mem_cgroup_from_slab_obj(ptr);
        if (!memcg)
                goto out;

        l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg));
out:
        if (memcg_ptr)
                *memcg_ptr = memcg;
        return l;
}
#else
static void list_lru_register(struct list_lru *lru)
{
}

static void list_lru_unregister(struct list_lru *lru)
{
}

static int lru_shrinker_id(struct list_lru *lru)
{
        return -1;
}

static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
        return false;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
{
        return &lru->node[nid].lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru *lru, int nid, void *ptr,
                   struct mem_cgroup **memcg_ptr)
{
        if (memcg_ptr)
                *memcg_ptr = NULL;
        return &lru->node[nid].lru;
}
#endif /* CONFIG_MEMCG_KMEM */

bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
        int nid = page_to_nid(virt_to_page(item));
        struct list_lru_node *nlru = &lru->node[nid];
        struct mem_cgroup *memcg;
        struct list_lru_one *l;

        spin_lock(&nlru->lock);
        if (list_empty(item)) {
                l = list_lru_from_kmem(lru, nid, item, &memcg);
                list_add_tail(item, &l->list);
                /* Set shrinker bit if the first element was added */
                if (!l->nr_items++)
                        set_shrinker_bit(memcg, nid,
                                         lru_shrinker_id(lru));
                nlru->nr_items++;
                spin_unlock(&nlru->lock);
                return true;
        }
        spin_unlock(&nlru->lock);
        return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);

bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
        int nid = page_to_nid(virt_to_page(item));
        struct list_lru_node *nlru = &lru->node[nid];
        struct list_lru_one *l;

        spin_lock(&nlru->lock);
        if (!list_empty(item)) {
                l = list_lru_from_kmem(lru, nid, item, NULL);
                list_del_init(item);
                l->nr_items--;
                nlru->nr_items--;
                spin_unlock(&nlru->lock);
                return true;
        }
        spin_unlock(&nlru->lock);
        return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);
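
#if 0
/*
 * Illustrative usage sketch (not part of this file, kept out of the
 * build): "struct demo_obj" and "demo_lru" are hypothetical names, and
 * the object is assumed to be slab-allocated so that the memcg lookup
 * in list_lru_from_kmem() works. The embedded list_head must start out
 * empty (INIT_LIST_HEAD), since list_lru_add() keys off list_empty();
 * both helpers return whether the list membership actually changed.
 */
struct demo_obj {
        struct list_head lru;           /* linked into demo_lru */
        unsigned long refcount;
};

static struct list_lru demo_lru;        /* initialised via __list_lru_init() */

static void demo_obj_make_reclaimable(struct demo_obj *obj)
{
        /* The NUMA node is derived from the object's page internally. */
        if (list_lru_add(&demo_lru, &obj->lru))
                pr_debug("demo_obj became reclaimable\n");
}

static void demo_obj_pin(struct demo_obj *obj)
{
        /* Returns false if the object was not on the lru to begin with. */
        list_lru_del(&demo_lru, &obj->lru);
}
#endif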

void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
{
        list_del_init(item);
        list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate);

void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
                           struct list_head *head)
{
        list_move(item, head);
        list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate_move);
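
#if 0
/*
 * Sketch of a typical list_lru_walk_cb built on the isolate helpers
 * above (hypothetical, reusing "struct demo_obj" from the previous
 * sketch). The callback runs under the per-node lru lock; a callback
 * that drops the lock must retake it and return LRU_RETRY or
 * LRU_REMOVED_RETRY so the walker restarts its now-invalid traversal.
 * A walk driving this callback is sketched after list_lru_walk_one().
 */
static enum lru_status demo_isolate(struct list_head *item,
                                    struct list_lru_one *list,
                                    spinlock_t *lock, void *cb_arg)
{
        struct list_head *freeable = cb_arg;
        struct demo_obj *obj = container_of(item, struct demo_obj, lru);

        if (obj->refcount)      /* stand-in for an "object is busy" test */
                return LRU_ROTATE;

        /* Move the item to a private list for freeing outside the lock. */
        list_lru_isolate_move(list, item, freeable);
        return LRU_REMOVED;
}
#endif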

unsigned long list_lru_count_one(struct list_lru *lru,
                                 int nid, struct mem_cgroup *memcg)
{
        struct list_lru_one *l;
        long count;

        rcu_read_lock();
        l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg));
        count = l ? READ_ONCE(l->nr_items) : 0;
        rcu_read_unlock();

        if (unlikely(count < 0))
                count = 0;

        return count;
}
EXPORT_SYMBOL_GPL(list_lru_count_one);

unsigned long list_lru_count_node(struct list_lru *lru, int nid)
{
        struct list_lru_node *nlru;

        nlru = &lru->node[nid];
        return nlru->nr_items;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);
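
#if 0
/*
 * Sketch of a shrinker's count_objects callback (hypothetical,
 * reusing "demo_lru"). list_lru_shrink_count() from <linux/list_lru.h>
 * resolves sc->nid and sc->memcg to list_lru_count_one() above.
 */
static unsigned long demo_shrink_count(struct shrinker *shrink,
                                       struct shrink_control *sc)
{
        return list_lru_shrink_count(&demo_lru, sc);
}
#endif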

static unsigned long
__list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx,
                    list_lru_walk_cb isolate, void *cb_arg,
                    unsigned long *nr_to_walk)
{
        struct list_lru_node *nlru = &lru->node[nid];
        struct list_lru_one *l;
        struct list_head *item, *n;
        unsigned long isolated = 0;

restart:
        l = list_lru_from_memcg_idx(lru, nid, memcg_idx);
        if (!l)
                goto out;

        list_for_each_safe(item, n, &l->list) {
                enum lru_status ret;

                /*
                 * decrement nr_to_walk first so that we don't livelock if we
                 * get stuck on large numbers of LRU_RETRY items
                 */
                if (!*nr_to_walk)
                        break;
                --*nr_to_walk;

                ret = isolate(item, l, &nlru->lock, cb_arg);
                switch (ret) {
                case LRU_REMOVED_RETRY:
                        assert_spin_locked(&nlru->lock);
                        fallthrough;
                case LRU_REMOVED:
                        isolated++;
                        nlru->nr_items--;
                        /*
                         * If the lru lock has been dropped, our list
                         * traversal is now invalid and so we have to
                         * restart from scratch.
                         */
                        if (ret == LRU_REMOVED_RETRY)
                                goto restart;
                        break;
                case LRU_ROTATE:
                        list_move_tail(item, &l->list);
                        break;
                case LRU_SKIP:
                        break;
                case LRU_RETRY:
                        /*
                         * The lru lock has been dropped, our list traversal is
                         * now invalid and so we have to restart from scratch.
                         */
                        assert_spin_locked(&nlru->lock);
                        goto restart;
                default:
                        BUG();
                }
        }
out:
        return isolated;
}

unsigned long
list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
                  list_lru_walk_cb isolate, void *cb_arg,
                  unsigned long *nr_to_walk)
{
        struct list_lru_node *nlru = &lru->node[nid];
        unsigned long ret;

        spin_lock(&nlru->lock);
        ret = __list_lru_walk_one(lru, nid, memcg_kmem_id(memcg), isolate,
                                  cb_arg, nr_to_walk);
        spin_unlock(&nlru->lock);
        return ret;
}
EXPORT_SYMBOL_GPL(list_lru_walk_one);
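
#if 0
/*
 * Sketch of a shrinker's scan_objects callback (hypothetical, reusing
 * "demo_lru", "demo_obj" and "demo_isolate" from the sketches above).
 * list_lru_shrink_walk() from <linux/list_lru.h> feeds sc->nid,
 * sc->memcg and sc->nr_to_scan to list_lru_walk_one(); the isolated
 * objects are then freed outside the per-node lru lock.
 */
static unsigned long demo_shrink_scan(struct shrinker *shrink,
                                      struct shrink_control *sc)
{
        struct demo_obj *obj, *next;
        unsigned long freed;
        LIST_HEAD(freeable);

        freed = list_lru_shrink_walk(&demo_lru, sc, demo_isolate, &freeable);

        list_for_each_entry_safe(obj, next, &freeable, lru)
                kfree(obj);

        return freed;
}
#endif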

unsigned long
list_lru_walk_one_irq(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
                      list_lru_walk_cb isolate, void *cb_arg,
                      unsigned long *nr_to_walk)
{
        struct list_lru_node *nlru = &lru->node[nid];
        unsigned long ret;

        spin_lock_irq(&nlru->lock);
        ret = __list_lru_walk_one(lru, nid, memcg_kmem_id(memcg), isolate,
                                  cb_arg, nr_to_walk);
        spin_unlock_irq(&nlru->lock);
        return ret;
}

unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
                                 list_lru_walk_cb isolate, void *cb_arg,
                                 unsigned long *nr_to_walk)
{
        long isolated = 0;

        isolated += list_lru_walk_one(lru, nid, NULL, isolate, cb_arg,
                                      nr_to_walk);

#ifdef CONFIG_MEMCG_KMEM
        if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
                struct list_lru_memcg *mlru;
                unsigned long index;

                xa_for_each(&lru->xa, index, mlru) {
                        struct list_lru_node *nlru = &lru->node[nid];

                        spin_lock(&nlru->lock);
                        isolated += __list_lru_walk_one(lru, nid, index,
                                                        isolate, cb_arg,
                                                        nr_to_walk);
                        spin_unlock(&nlru->lock);

                        if (*nr_to_walk <= 0)
                                break;
                }
        }
#endif

        return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);

static void init_one_lru(struct list_lru_one *l)
{
        INIT_LIST_HEAD(&l->list);
        l->nr_items = 0;
}

#ifdef CONFIG_MEMCG_KMEM
static struct list_lru_memcg *memcg_init_list_lru_one(gfp_t gfp)
{
        int nid;
        struct list_lru_memcg *mlru;

        mlru = kmalloc(struct_size(mlru, node, nr_node_ids), gfp);
        if (!mlru)
                return NULL;

        for_each_node(nid)
                init_one_lru(&mlru->node[nid]);

        return mlru;
}

static void memcg_list_lru_free(struct list_lru *lru, int src_idx)
{
        struct list_lru_memcg *mlru = xa_erase_irq(&lru->xa, src_idx);

        /*
         * __list_lru_walk_one() may still be walking this node's lists, so
         * the list_lru_memcg must be freed with kvfree_rcu(). The walk runs
         * under lru->node[nid]->lock, which serves as an RCU read-side
         * critical section.
         */
        if (mlru)
                kvfree_rcu(mlru, rcu);
}

static inline void memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
        if (memcg_aware)
                xa_init_flags(&lru->xa, XA_FLAGS_LOCK_IRQ);
        lru->memcg_aware = memcg_aware;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
        XA_STATE(xas, &lru->xa, 0);
        struct list_lru_memcg *mlru;

        if (!list_lru_memcg_aware(lru))
                return;

        xas_lock_irq(&xas);
        xas_for_each(&xas, mlru, ULONG_MAX) {
                kfree(mlru);
                xas_store(&xas, NULL);
        }
        xas_unlock_irq(&xas);
}

static void memcg_reparent_list_lru_node(struct list_lru *lru, int nid,
                                         int src_idx, struct mem_cgroup *dst_memcg)
{
        struct list_lru_node *nlru = &lru->node[nid];
        int dst_idx = dst_memcg->kmemcg_id;
        struct list_lru_one *src, *dst;

        /*
         * Since list_lru_{add,del} may be called under an IRQ-safe lock,
         * we have to use IRQ-safe primitives here to avoid deadlock.
         */
        spin_lock_irq(&nlru->lock);

        src = list_lru_from_memcg_idx(lru, nid, src_idx);
        if (!src)
                goto out;
        dst = list_lru_from_memcg_idx(lru, nid, dst_idx);

        list_splice_init(&src->list, &dst->list);

        if (src->nr_items) {
                dst->nr_items += src->nr_items;
                set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
                src->nr_items = 0;
        }
out:
        spin_unlock_irq(&nlru->lock);
}

static void memcg_reparent_list_lru(struct list_lru *lru,
                                    int src_idx, struct mem_cgroup *dst_memcg)
{
        int i;

        for_each_node(i)
                memcg_reparent_list_lru_node(lru, i, src_idx, dst_memcg);

        memcg_list_lru_free(lru, src_idx);
}

void memcg_reparent_list_lrus(struct mem_cgroup *memcg, struct mem_cgroup *parent)
{
        struct cgroup_subsys_state *css;
        struct list_lru *lru;
        int src_idx = memcg->kmemcg_id;

        /*
         * Change the kmemcg_id of this cgroup and all its descendants to the
         * parent's id, and then move all entries from this cgroup's list_lrus
         * to the parent's.
         *
         * After we have finished, all list_lrus corresponding to this cgroup
         * are guaranteed to remain empty. So we can safely free this cgroup's
         * list lrus in memcg_list_lru_free().
         *
         * Changing ->kmemcg_id to the parent's id prevents
         * memcg_list_lru_alloc() from allocating list lrus for this cgroup
         * after the memcg_list_lru_free() call.
         */
        rcu_read_lock();
        css_for_each_descendant_pre(css, &memcg->css) {
                struct mem_cgroup *child;

                child = mem_cgroup_from_css(css);
                WRITE_ONCE(child->kmemcg_id, parent->kmemcg_id);
        }
        rcu_read_unlock();

        mutex_lock(&list_lrus_mutex);
        list_for_each_entry(lru, &memcg_list_lrus, list)
                memcg_reparent_list_lru(lru, src_idx, parent);
        mutex_unlock(&list_lrus_mutex);
}

static inline bool memcg_list_lru_allocated(struct mem_cgroup *memcg,
                                            struct list_lru *lru)
{
        int idx = memcg->kmemcg_id;

        return idx < 0 || xa_load(&lru->xa, idx);
}

int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
                         gfp_t gfp)
{
        int i;
        unsigned long flags;
        struct list_lru_memcg_table {
                struct list_lru_memcg *mlru;
                struct mem_cgroup *memcg;
        } *table;
        XA_STATE(xas, &lru->xa, 0);

        if (!list_lru_memcg_aware(lru) || memcg_list_lru_allocated(memcg, lru))
                return 0;

        gfp &= GFP_RECLAIM_MASK;
        table = kmalloc_array(memcg->css.cgroup->level, sizeof(*table), gfp);
        if (!table)
                return -ENOMEM;

        /*
         * Because the list_lru can be reparented to the parent cgroup's
         * list_lru, we have to make sure that this cgroup and all its
         * ancestors have an allocated list_lru_memcg.
         */
        for (i = 0; memcg; memcg = parent_mem_cgroup(memcg), i++) {
                if (memcg_list_lru_allocated(memcg, lru))
                        break;

                table[i].memcg = memcg;
                table[i].mlru = memcg_init_list_lru_one(gfp);
                if (!table[i].mlru) {
                        while (i--)
                                kfree(table[i].mlru);
                        kfree(table);
                        return -ENOMEM;
                }
        }

        xas_lock_irqsave(&xas, flags);
        while (i--) {
                int index = READ_ONCE(table[i].memcg->kmemcg_id);
                struct list_lru_memcg *mlru = table[i].mlru;

                xas_set(&xas, index);
retry:
                if (unlikely(index < 0 || xas_error(&xas) || xas_load(&xas))) {
                        kfree(mlru);
                } else {
                        xas_store(&xas, mlru);
                        if (xas_error(&xas) == -ENOMEM) {
                                xas_unlock_irqrestore(&xas, flags);
                                if (xas_nomem(&xas, gfp))
                                        xas_set_err(&xas, 0);
                                xas_lock_irqsave(&xas, flags);
                                /*
                                 * The xas lock has been released, so this
                                 * memcg may have been reparented before us.
                                 * Reload the memcg id; see the comments in
                                 * memcg_reparent_list_lrus() for details.
                                 */
                                index = READ_ONCE(table[i].memcg->kmemcg_id);
                                if (index < 0)
                                        xas_set_err(&xas, 0);
                                else if (!xas_error(&xas) && index != xas.xa_index)
                                        xas_set(&xas, index);
                                goto retry;
                        }
                }
        }
        /* xas_nomem() is used here to free memory rather than to allocate it. */
        if (xas.xa_alloc)
                xas_nomem(&xas, gfp);
        xas_unlock_irqrestore(&xas, flags);
        kfree(table);

        return xas_error(&xas);
}
#else
static inline void memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG_KMEM */

int __list_lru_init(struct list_lru *lru, bool memcg_aware,
                    struct lock_class_key *key, struct shrinker *shrinker)
{
        int i;

#ifdef CONFIG_MEMCG_KMEM
        if (shrinker)
                lru->shrinker_id = shrinker->id;
        else
                lru->shrinker_id = -1;
#endif

        lru->node = kcalloc(nr_node_ids, sizeof(*lru->node), GFP_KERNEL);
        if (!lru->node)
                return -ENOMEM;

        for_each_node(i) {
                spin_lock_init(&lru->node[i].lock);
                if (key)
                        lockdep_set_class(&lru->node[i].lock, key);
                init_one_lru(&lru->node[i].lru);
        }

        memcg_init_list_lru(lru, memcg_aware);
        list_lru_register(lru);

        return 0;
}
EXPORT_SYMBOL_GPL(__list_lru_init);
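
#if 0
/*
 * Initialisation sketch (hypothetical, reusing "demo_lru"). The
 * list_lru_init*() macros in <linux/list_lru.h> wrap __list_lru_init():
 * list_lru_init() creates a plain per-node lru, while
 * list_lru_init_memcg() creates a memcg-aware one tied to a shrinker,
 * so the shrinker bits set in list_lru_add() map back to it. Every
 * successful init must be paired with list_lru_destroy() below.
 */
static int demo_lru_setup(struct shrinker *shrinker)
{
        return list_lru_init_memcg(&demo_lru, shrinker);
}
#endif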

void list_lru_destroy(struct list_lru *lru)
{
        /* Already destroyed or not yet initialized? */
        if (!lru->node)
                return;

        list_lru_unregister(lru);

        memcg_destroy_list_lru(lru);
        kfree(lru->node);
        lru->node = NULL;

#ifdef CONFIG_MEMCG_KMEM
        lru->shrinker_id = -1;
#endif
}
EXPORT_SYMBOL_GPL(list_lru_destroy);