Commit | Line | Data |
---|---|---|
c942fddf | 1 | // SPDX-License-Identifier: GPL-2.0-or-later |
8cdea7c0 BS |
2 | /* memcontrol.c - Memory Controller |
3 | * | |
4 | * Copyright IBM Corporation, 2007 | |
5 | * Author Balbir Singh <[email protected]> | |
6 | * | |
78fb7466 PE |
7 | * Copyright 2007 OpenVZ SWsoft Inc |
8 | * Author: Pavel Emelianov <[email protected]> | |
9 | * | |
2e72b634 KS |
10 | * Memory thresholds |
11 | * Copyright (C) 2009 Nokia Corporation | |
12 | * Author: Kirill A. Shutemov | |
13 | * | |
7ae1e1d0 GC |
14 | * Kernel Memory Controller |
15 | * Copyright (C) 2012 Parallels Inc. and Google Inc. | |
16 | * Authors: Glauber Costa and Suleiman Souhlal | |
17 | * | |
1575e68b JW |
18 | * Native page reclaim |
19 | * Charge lifetime sanitation | |
20 | * Lockless page tracking & accounting | |
21 | * Unified hierarchy configuration model | |
22 | * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner | |
6168d0da AS |
23 | * |
24 | * Per memcg lru locking | |
25 | * Copyright (C) 2020 Alibaba, Inc, Alex Shi | |
8cdea7c0 BS |
26 | */ |
27 | ||
3e32cb2e | 28 | #include <linux/page_counter.h> |
8cdea7c0 BS |
29 | #include <linux/memcontrol.h> |
30 | #include <linux/cgroup.h> | |
6e84f315 | 31 | #include <linux/sched/mm.h> |
3a4f8a0b | 32 | #include <linux/shmem_fs.h> |
4ffef5fe | 33 | #include <linux/hugetlb.h> |
d13d1443 | 34 | #include <linux/pagemap.h> |
4882c809 | 35 | #include <linux/pagevec.h> |
1ff9e6e1 | 36 | #include <linux/vm_event_item.h> |
d52aa412 | 37 | #include <linux/smp.h> |
8a9f3ccd | 38 | #include <linux/page-flags.h> |
66e1707b | 39 | #include <linux/backing-dev.h> |
8a9f3ccd BS |
40 | #include <linux/bit_spinlock.h> |
41 | #include <linux/rcupdate.h> | |
e222432b | 42 | #include <linux/limits.h> |
b9e15baf | 43 | #include <linux/export.h> |
8c7c6e34 | 44 | #include <linux/mutex.h> |
bb4cc1a8 | 45 | #include <linux/rbtree.h> |
b6ac57d5 | 46 | #include <linux/slab.h> |
02491447 | 47 | #include <linux/swapops.h> |
66e1707b BS |
48 | #include <linux/spinlock.h> |
49 | #include <linux/fs.h> | |
d2ceb9b7 | 50 | #include <linux/seq_file.h> |
68cd9050 | 51 | #include <linux/parser.h> |
70ddf637 | 52 | #include <linux/vmpressure.h> |
dc90f084 | 53 | #include <linux/memremap.h> |
b69408e8 | 54 | #include <linux/mm_inline.h> |
5d1ea48b | 55 | #include <linux/swap_cgroup.h> |
cdec2e42 | 56 | #include <linux/cpu.h> |
158e0a2d | 57 | #include <linux/oom.h> |
0056f4e6 | 58 | #include <linux/lockdep.h> |
03248add | 59 | #include <linux/resume_user_mode.h> |
0e4b01df | 60 | #include <linux/psi.h> |
c8713d0b | 61 | #include <linux/seq_buf.h> |
6a792697 | 62 | #include <linux/sched/isolation.h> |
6011be59 | 63 | #include <linux/kmemleak.h> |
08e552c6 | 64 | #include "internal.h" |
d1a4c0b3 | 65 | #include <net/sock.h> |
4bd2c1ee | 66 | #include <net/ip.h> |
f35c3a8e | 67 | #include "slab.h" |
d12f6d22 | 68 | #include "memcontrol-v1.h" |
8cdea7c0 | 69 | |
7c0f6ba6 | 70 | #include <linux/uaccess.h> |
8697d331 | 71 | |
cc8e970c KM |
72 | #include <trace/events/vmscan.h> |
73 | ||
073219e9 TH |
74 | struct cgroup_subsys memory_cgrp_subsys __read_mostly; |
75 | EXPORT_SYMBOL(memory_cgrp_subsys); | |
68ae564b | 76 | |
7d828602 JW |
77 | struct mem_cgroup *root_mem_cgroup __read_mostly; |
78 | ||
37d5985c RG |
79 | /* Active memory cgroup to use from an interrupt context */ |
80 | DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg); | |
c74d40e8 | 81 | EXPORT_PER_CPU_SYMBOL_GPL(int_active_memcg); |
37d5985c | 82 | |
f7e1cb6e | 83 | /* Socket memory accounting disabled? */ |
0f0cace3 | 84 | static bool cgroup_memory_nosocket __ro_after_init; |
f7e1cb6e | 85 | |
04823c83 | 86 | /* Kernel memory accounting disabled? */ |
17c17367 | 87 | static bool cgroup_memory_nokmem __ro_after_init; |
04823c83 | 88 | |
b6c1a8af YS |
89 | /* BPF memory accounting disabled? */ |
90 | static bool cgroup_memory_nobpf __ro_after_init; | |
91 | ||
97b27821 TH |
92 | #ifdef CONFIG_CGROUP_WRITEBACK |
93 | static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq); | |
94 | #endif | |
95 | ||
a0db00fc KS |
96 | #define THRESHOLDS_EVENTS_TARGET 128 |
97 | #define SOFTLIMIT_EVENTS_TARGET 1024 | |
e9f8974f | 98 | |
a4ebf1b6 | 99 | static inline bool task_is_dying(void) |
7775face TH |
100 | { |
101 | return tsk_is_oom_victim(current) || fatal_signal_pending(current) || | |
102 | (current->flags & PF_EXITING); | |
103 | } | |
104 | ||
70ddf637 AV |
105 | /* Some nice accessors for the vmpressure. */ |
106 | struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg) | |
107 | { | |
108 | if (!memcg) | |
109 | memcg = root_mem_cgroup; | |
110 | return &memcg->vmpressure; | |
111 | } | |
112 | ||
9647875b | 113 | struct mem_cgroup *vmpressure_to_memcg(struct vmpressure *vmpr) |
70ddf637 | 114 | { |
9647875b | 115 | return container_of(vmpr, struct mem_cgroup, vmpressure); |
70ddf637 AV |
116 | } |
117 | ||
1aacbd35 RG |
118 | #define CURRENT_OBJCG_UPDATE_BIT 0 |
119 | #define CURRENT_OBJCG_UPDATE_FLAG (1UL << CURRENT_OBJCG_UPDATE_BIT) | |
120 | ||
0764db9b | 121 | static DEFINE_SPINLOCK(objcg_lock); |
bf4f0599 | 122 | |
4d5c8aed RG |
123 | bool mem_cgroup_kmem_disabled(void) |
124 | { | |
125 | return cgroup_memory_nokmem; | |
126 | } | |
127 | ||
f1286fae MS |
128 | static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg, |
129 | unsigned int nr_pages); | |
c1a660de | 130 | |
bf4f0599 RG |
131 | static void obj_cgroup_release(struct percpu_ref *ref) |
132 | { | |
133 | struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt); | |
bf4f0599 RG |
134 | unsigned int nr_bytes; |
135 | unsigned int nr_pages; | |
136 | unsigned long flags; | |
137 | ||
138 | /* | |
139 | * At this point all allocated objects are freed, and | |
140 | * objcg->nr_charged_bytes can't have an arbitrary byte value. | |
141 | * However, it can be PAGE_SIZE or (x * PAGE_SIZE). | |
142 | * | |
143 | * The following sequence can lead to it: | |
144 | * 1) CPU0: objcg == stock->cached_objcg | |
145 | * 2) CPU1: we do a small allocation (e.g. 92 bytes), | |
146 | * PAGE_SIZE bytes are charged | |
147 | * 3) CPU1: a process from another memcg is allocating something, | |
148 | * the stock is flushed, |
149 | * objcg->nr_charged_bytes = PAGE_SIZE - 92 | |
150 | * 4) CPU0: we do release this object, |
151 | * 92 bytes are added to stock->nr_bytes | |
152 | * 5) CPU0: stock is flushed, |
153 | * 92 bytes are added to objcg->nr_charged_bytes | |
154 | * | |
155 | * As a result, nr_charged_bytes == PAGE_SIZE. |
156 | * This page will be uncharged in obj_cgroup_release(). | |
157 | */ | |
158 | nr_bytes = atomic_read(&objcg->nr_charged_bytes); | |
159 | WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1)); | |
160 | nr_pages = nr_bytes >> PAGE_SHIFT; | |
161 | ||
bf4f0599 | 162 | if (nr_pages) |
f1286fae | 163 | obj_cgroup_uncharge_pages(objcg, nr_pages); |
271dd6b1 | 164 | |
0764db9b | 165 | spin_lock_irqsave(&objcg_lock, flags); |
bf4f0599 | 166 | list_del(&objcg->list); |
0764db9b | 167 | spin_unlock_irqrestore(&objcg_lock, flags); |
bf4f0599 RG |
168 | |
169 | percpu_ref_exit(ref); | |
170 | kfree_rcu(objcg, rcu); | |
171 | } | |
172 | ||
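The remainder arithmetic described in the comment above is easier to follow with concrete numbers. A minimal standalone model (MODEL_PAGE_SIZE and the variable names are illustrative, not kernel API):

```c
/* Standalone model of the byte/page remainder sequence from the comment
 * in obj_cgroup_release(); an assumed 4 KiB page and the 92-byte
 * allocation from the comment serve as concrete numbers. */
#include <assert.h>
#include <stdio.h>

#define MODEL_PAGE_SIZE 4096u

int main(void)
{
	unsigned int stock_nr_bytes = 0, nr_charged_bytes = 0;

	/* CPU1: a 92-byte allocation charges a whole page; the stock
	 * flush leaves the unused remainder in nr_charged_bytes */
	nr_charged_bytes = MODEL_PAGE_SIZE - 92;
	/* CPU0: freeing the object returns 92 bytes to the stock */
	stock_nr_bytes += 92;
	/* CPU0: the stock flush adds them back to nr_charged_bytes */
	nr_charged_bytes += stock_nr_bytes;

	/* Net result: exactly one page worth of bytes remains charged */
	assert(nr_charged_bytes % MODEL_PAGE_SIZE == 0);
	printf("nr_charged_bytes = %u\n", nr_charged_bytes);	/* 4096 */
	return 0;
}
```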
173 | static struct obj_cgroup *obj_cgroup_alloc(void) | |
174 | { | |
175 | struct obj_cgroup *objcg; | |
176 | int ret; | |
177 | ||
178 | objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL); | |
179 | if (!objcg) | |
180 | return NULL; | |
181 | ||
182 | ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0, | |
183 | GFP_KERNEL); | |
184 | if (ret) { | |
185 | kfree(objcg); | |
186 | return NULL; | |
187 | } | |
188 | INIT_LIST_HEAD(&objcg->list); | |
189 | return objcg; | |
190 | } | |
191 | ||
192 | static void memcg_reparent_objcgs(struct mem_cgroup *memcg, | |
193 | struct mem_cgroup *parent) | |
194 | { | |
195 | struct obj_cgroup *objcg, *iter; | |
196 | ||
197 | objcg = rcu_replace_pointer(memcg->objcg, NULL, true); | |
198 | ||
0764db9b | 199 | spin_lock_irq(&objcg_lock); |
bf4f0599 | 200 | |
9838354e MS |
201 | /* 1) Ready to reparent active objcg. */ |
202 | list_add(&objcg->list, &memcg->objcg_list); | |
203 | /* 2) Reparent active objcg and already reparented objcgs to parent. */ | |
204 | list_for_each_entry(iter, &memcg->objcg_list, list) | |
205 | WRITE_ONCE(iter->memcg, parent); | |
206 | /* 3) Move already reparented objcgs to the parent's list */ | |
bf4f0599 RG |
207 | list_splice(&memcg->objcg_list, &parent->objcg_list); |
208 | ||
0764db9b | 209 | spin_unlock_irq(&objcg_lock); |
bf4f0599 RG |
210 | |
211 | percpu_ref_kill(&objcg->refcnt); | |
212 | } | |
213 | ||
d7f25f8a GC |
214 | /* |
215 | * A lot of the calls to the cache allocation functions are expected to be | |
9f9796b4 | 216 | * inlined by the compiler. Since the calls to memcg_slab_post_alloc_hook() are |
d7f25f8a GC |
217 | * conditional on this static branch, we have to allow modules that use |
218 | * kmem_cache_alloc and the like to see this symbol as well. |
219 | */ | |
f7a449f7 RG |
220 | DEFINE_STATIC_KEY_FALSE(memcg_kmem_online_key); |
221 | EXPORT_SYMBOL(memcg_kmem_online_key); | |
b6c1a8af YS |
222 | |
223 | DEFINE_STATIC_KEY_FALSE(memcg_bpf_enabled_key); | |
224 | EXPORT_SYMBOL(memcg_bpf_enabled_key); | |
17cc4dfe | 225 | |
ad7fa852 | 226 | /** |
75376c6f MWO |
227 | * mem_cgroup_css_from_folio - css of the memcg associated with a folio |
228 | * @folio: folio of interest | |
ad7fa852 TH |
229 | * |
230 | * If memcg is bound to the default hierarchy, css of the memcg associated | |
75376c6f | 231 | * with @folio is returned. The returned css remains associated with @folio |
ad7fa852 TH |
232 | * until it is released. |
233 | * | |
234 | * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup | |
235 | * is returned. | |
ad7fa852 | 236 | */ |
75376c6f | 237 | struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio) |
ad7fa852 | 238 | { |
75376c6f | 239 | struct mem_cgroup *memcg = folio_memcg(folio); |
ad7fa852 | 240 | |
9e10a130 | 241 | if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) |
ad7fa852 TH |
242 | memcg = root_mem_cgroup; |
243 | ||
ad7fa852 TH |
244 | return &memcg->css; |
245 | } | |
246 | ||
2fc04524 VD |
247 | /** |
248 | * page_cgroup_ino - return inode number of the memcg a page is charged to | |
249 | * @page: the page | |
250 | * | |
251 | * Look up the closest online ancestor of the memory cgroup @page is charged to | |
252 | * and return its inode number or 0 if @page is not charged to any cgroup. It | |
253 | * is safe to call this function without holding a reference to @page. | |
254 | * | |
255 | * Note, this function is inherently racy, because there is nothing to prevent | |
256 | * the cgroup inode from getting torn down and potentially reallocated a moment | |
257 | * after page_cgroup_ino() returns, so it only should be used by callers that | |
258 | * do not care (such as procfs interfaces). | |
259 | */ | |
260 | ino_t page_cgroup_ino(struct page *page) | |
261 | { | |
262 | struct mem_cgroup *memcg; | |
263 | unsigned long ino = 0; | |
264 | ||
265 | rcu_read_lock(); | |
ec342603 YA |
266 | /* page_folio() is racy here, but the entire function is racy anyway */ |
267 | memcg = folio_memcg_check(page_folio(page)); | |
286e04b8 | 268 | |
2fc04524 VD |
269 | while (memcg && !(memcg->css.flags & CSS_ONLINE)) |
270 | memcg = parent_mem_cgroup(memcg); | |
271 | if (memcg) | |
272 | ino = cgroup_ino(memcg->css.cgroup); | |
273 | rcu_read_unlock(); | |
274 | return ino; | |
275 | } | |
276 | ||
ff48c71c SB |
277 | /* Subset of node_stat_item for memcg stats */ |
278 | static const unsigned int memcg_node_stat_items[] = { | |
279 | NR_INACTIVE_ANON, | |
280 | NR_ACTIVE_ANON, | |
281 | NR_INACTIVE_FILE, | |
282 | NR_ACTIVE_FILE, | |
283 | NR_UNEVICTABLE, | |
284 | NR_SLAB_RECLAIMABLE_B, | |
285 | NR_SLAB_UNRECLAIMABLE_B, | |
286 | WORKINGSET_REFAULT_ANON, | |
287 | WORKINGSET_REFAULT_FILE, | |
288 | WORKINGSET_ACTIVATE_ANON, | |
289 | WORKINGSET_ACTIVATE_FILE, | |
290 | WORKINGSET_RESTORE_ANON, | |
291 | WORKINGSET_RESTORE_FILE, | |
292 | WORKINGSET_NODERECLAIM, | |
293 | NR_ANON_MAPPED, | |
294 | NR_FILE_MAPPED, | |
295 | NR_FILE_PAGES, | |
296 | NR_FILE_DIRTY, | |
297 | NR_WRITEBACK, | |
298 | NR_SHMEM, | |
299 | NR_SHMEM_THPS, | |
300 | NR_FILE_THPS, | |
301 | NR_ANON_THPS, | |
302 | NR_KERNEL_STACK_KB, | |
303 | NR_PAGETABLE, | |
304 | NR_SECONDARY_PAGETABLE, | |
305 | #ifdef CONFIG_SWAP | |
306 | NR_SWAPCACHE, | |
307 | #endif | |
308 | }; | |
309 | ||
310 | static const unsigned int memcg_stat_items[] = { | |
311 | MEMCG_SWAP, | |
312 | MEMCG_SOCK, | |
313 | MEMCG_PERCPU_B, | |
314 | MEMCG_VMALLOC, | |
315 | MEMCG_KMEM, | |
316 | MEMCG_ZSWAP_B, | |
317 | MEMCG_ZSWAPPED, | |
318 | }; | |
319 | ||
320 | #define NR_MEMCG_NODE_STAT_ITEMS ARRAY_SIZE(memcg_node_stat_items) | |
321 | #define MEMCG_VMSTAT_SIZE (NR_MEMCG_NODE_STAT_ITEMS + \ | |
322 | ARRAY_SIZE(memcg_stat_items)) | |
323 | static int8_t mem_cgroup_stats_index[MEMCG_NR_STAT] __read_mostly; | |
324 | ||
325 | static void init_memcg_stats(void) | |
326 | { | |
327 | int8_t i, j = 0; | |
328 | ||
329 | BUILD_BUG_ON(MEMCG_NR_STAT >= S8_MAX); | |
330 | ||
331 | for (i = 0; i < NR_MEMCG_NODE_STAT_ITEMS; ++i) | |
332 | mem_cgroup_stats_index[memcg_node_stat_items[i]] = ++j; | |
333 | ||
334 | for (i = 0; i < ARRAY_SIZE(memcg_stat_items); ++i) | |
335 | mem_cgroup_stats_index[memcg_stat_items[i]] = ++j; | |
336 | } | |
337 | ||
338 | static inline int memcg_stats_index(int idx) | |
339 | { | |
340 | return mem_cgroup_stats_index[idx] - 1; | |
341 | } | |
342 | ||
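The index+1 encoding above is worth spelling out: the lookup table is zero-initialized, so any item that is never registered decodes to -1 with no extra bookkeeping. A small illustrative model (the enum and all names here are hypothetical):

```c
#include <stdio.h>

enum { ITEM_A, ITEM_B, ITEM_C, ITEM_D, NR_ITEMS };

static const int tracked[] = { ITEM_B, ITEM_D };
static signed char index_of[NR_ITEMS];	/* zero-initialized */

int main(void)
{
	int i, j = 0;

	for (i = 0; i < (int)(sizeof(tracked) / sizeof(tracked[0])); i++)
		index_of[tracked[i]] = ++j;	/* store slot + 1 */

	for (i = 0; i < NR_ITEMS; i++)		/* decode with slot - 1 */
		printf("item %d -> slot %d\n", i, index_of[i] - 1);
	/* item 0 -> -1, item 1 -> 0, item 2 -> -1, item 3 -> 1 */
	return 0;
}
```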
70a64b79 SB |
343 | struct lruvec_stats_percpu { |
344 | /* Local (CPU and cgroup) state */ | |
ff48c71c | 345 | long state[NR_MEMCG_NODE_STAT_ITEMS]; |
70a64b79 SB |
346 | |
347 | /* Delta calculation for lockless upward propagation */ | |
ff48c71c | 348 | long state_prev[NR_MEMCG_NODE_STAT_ITEMS]; |
70a64b79 SB |
349 | }; |
350 | ||
351 | struct lruvec_stats { | |
352 | /* Aggregated (CPU and subtree) state */ | |
ff48c71c | 353 | long state[NR_MEMCG_NODE_STAT_ITEMS]; |
70a64b79 SB |
354 | |
355 | /* Non-hierarchical (CPU aggregated) state */ | |
ff48c71c | 356 | long state_local[NR_MEMCG_NODE_STAT_ITEMS]; |
70a64b79 SB |
357 | |
358 | /* Pending child counts during tree propagation */ | |
ff48c71c | 359 | long state_pending[NR_MEMCG_NODE_STAT_ITEMS]; |
70a64b79 SB |
360 | }; |
361 | ||
362 | unsigned long lruvec_page_state(struct lruvec *lruvec, enum node_stat_item idx) | |
363 | { | |
364 | struct mem_cgroup_per_node *pn; | |
365 | long x; | |
ff48c71c | 366 | int i; |
70a64b79 SB |
367 | |
368 | if (mem_cgroup_disabled()) | |
369 | return node_page_state(lruvec_pgdat(lruvec), idx); | |
370 | ||
ff48c71c | 371 | i = memcg_stats_index(idx); |
acb5fe2f | 372 | if (WARN_ONCE(i < 0, "%s: missing stat item %d\n", __func__, idx)) |
ff48c71c SB |
373 | return 0; |
374 | ||
70a64b79 | 375 | pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec); |
ff48c71c | 376 | x = READ_ONCE(pn->lruvec_stats->state[i]); |
70a64b79 SB |
377 | #ifdef CONFIG_SMP |
378 | if (x < 0) | |
379 | x = 0; | |
380 | #endif | |
381 | return x; | |
382 | } | |
383 | ||
384 | unsigned long lruvec_page_state_local(struct lruvec *lruvec, | |
385 | enum node_stat_item idx) | |
386 | { | |
387 | struct mem_cgroup_per_node *pn; | |
acb5fe2f | 388 | long x; |
ff48c71c | 389 | int i; |
70a64b79 SB |
390 | |
391 | if (mem_cgroup_disabled()) | |
392 | return node_page_state(lruvec_pgdat(lruvec), idx); | |
393 | ||
ff48c71c | 394 | i = memcg_stats_index(idx); |
acb5fe2f | 395 | if (WARN_ONCE(i < 0, "%s: missing stat item %d\n", __func__, idx)) |
ff48c71c SB |
396 | return 0; |
397 | ||
70a64b79 | 398 | pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec); |
ff48c71c | 399 | x = READ_ONCE(pn->lruvec_stats->state_local[i]); |
70a64b79 SB |
400 | #ifdef CONFIG_SMP |
401 | if (x < 0) | |
402 | x = 0; | |
403 | #endif | |
404 | return x; | |
405 | } | |
406 | ||
d396def5 SB |
407 | /* Subset of vm_event_item to report for memcg event stats */ |
408 | static const unsigned int memcg_vm_event_stat[] = { | |
8278f1c7 SB |
409 | PGPGIN, |
410 | PGPGOUT, | |
d396def5 SB |
411 | PGSCAN_KSWAPD, |
412 | PGSCAN_DIRECT, | |
57e9cc50 | 413 | PGSCAN_KHUGEPAGED, |
d396def5 SB |
414 | PGSTEAL_KSWAPD, |
415 | PGSTEAL_DIRECT, | |
57e9cc50 | 416 | PGSTEAL_KHUGEPAGED, |
d396def5 SB |
417 | PGFAULT, |
418 | PGMAJFAULT, | |
419 | PGREFILL, | |
420 | PGACTIVATE, | |
421 | PGDEACTIVATE, | |
422 | PGLAZYFREE, | |
423 | PGLAZYFREED, | |
3a3b7fec | 424 | #ifdef CONFIG_ZSWAP |
d396def5 SB |
425 | ZSWPIN, |
426 | ZSWPOUT, | |
e0bf1dc8 | 427 | ZSWPWB, |
d396def5 SB |
428 | #endif |
429 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | |
430 | THP_FAULT_ALLOC, | |
431 | THP_COLLAPSE_ALLOC, | |
811244a5 XH |
432 | THP_SWPOUT, |
433 | THP_SWPOUT_FALLBACK, | |
d396def5 SB |
434 | #endif |
435 | }; | |
436 | ||
8278f1c7 | 437 | #define NR_MEMCG_EVENTS ARRAY_SIZE(memcg_vm_event_stat) |
59142d87 | 438 | static int8_t mem_cgroup_events_index[NR_VM_EVENT_ITEMS] __read_mostly; |
8278f1c7 SB |
439 | |
440 | static void init_memcg_events(void) | |
441 | { | |
59142d87 SB |
442 | int8_t i; |
443 | ||
444 | BUILD_BUG_ON(NR_VM_EVENT_ITEMS >= S8_MAX); | |
8278f1c7 SB |
445 | |
446 | for (i = 0; i < NR_MEMCG_EVENTS; ++i) | |
447 | mem_cgroup_events_index[memcg_vm_event_stat[i]] = i + 1; | |
448 | } | |
449 | ||
450 | static inline int memcg_events_index(enum vm_event_item idx) | |
451 | { | |
452 | return mem_cgroup_events_index[idx] - 1; | |
453 | } | |
454 | ||
410f8e82 | 455 | struct memcg_vmstats_percpu { |
9cee7e8e YA |
456 | /* Stats updates since the last flush */ |
457 | unsigned int stats_updates; | |
458 | ||
459 | /* Cached pointers for fast iteration in memcg_rstat_updated() */ | |
460 | struct memcg_vmstats_percpu *parent; | |
461 | struct memcg_vmstats *vmstats; | |
462 | ||
463 | /* The above should fit in a single cacheline for memcg_rstat_updated() */ |
464 | ||
410f8e82 | 465 | /* Local (CPU and cgroup) page state & events */ |
ff48c71c | 466 | long state[MEMCG_VMSTAT_SIZE]; |
8278f1c7 | 467 | unsigned long events[NR_MEMCG_EVENTS]; |
410f8e82 SB |
468 | |
469 | /* Delta calculation for lockless upward propagation */ | |
ff48c71c | 470 | long state_prev[MEMCG_VMSTAT_SIZE]; |
8278f1c7 | 471 | unsigned long events_prev[NR_MEMCG_EVENTS]; |
410f8e82 SB |
472 | |
473 | /* Cgroup1: threshold notifications & softlimit tree updates */ | |
474 | unsigned long nr_page_events; | |
475 | unsigned long targets[MEM_CGROUP_NTARGETS]; | |
9cee7e8e | 476 | } ____cacheline_aligned; |
410f8e82 SB |
477 | |
478 | struct memcg_vmstats { | |
479 | /* Aggregated (CPU and subtree) page state & events */ | |
ff48c71c | 480 | long state[MEMCG_VMSTAT_SIZE]; |
8278f1c7 | 481 | unsigned long events[NR_MEMCG_EVENTS]; |
410f8e82 | 482 | |
f82e6bf9 | 483 | /* Non-hierarchical (CPU aggregated) page state & events */ |
ff48c71c | 484 | long state_local[MEMCG_VMSTAT_SIZE]; |
f82e6bf9 YA |
485 | unsigned long events_local[NR_MEMCG_EVENTS]; |
486 | ||
410f8e82 | 487 | /* Pending child counts during tree propagation */ |
ff48c71c | 488 | long state_pending[MEMCG_VMSTAT_SIZE]; |
8278f1c7 | 489 | unsigned long events_pending[NR_MEMCG_EVENTS]; |
8d59d221 YA |
490 | |
491 | /* Stats updates since the last flush */ | |
492 | atomic64_t stats_updates; | |
410f8e82 SB |
493 | }; |
494 | ||
11192d9c SB |
495 | /* |
496 | * memcg and lruvec stats flushing | |
497 | * | |
498 | * Many codepaths leading to stats update or read are performance sensitive and | |
499 | * adding stats flushing in such codepaths is not desirable. So, to optimize |
500 | * flushing, the kernel does the following: |
501 | * | |
502 | * 1) Periodically and asynchronously flush the stats every 2 seconds so that |
503 | * the rstat update tree does not grow unbounded. |
504 | * | |
505 | * 2) Flush the stats synchronously on reader side only when there are more than | |
506 | * (MEMCG_CHARGE_BATCH * nr_cpus) update events. This optimization lets the |
507 | * stats go out of sync by at most (MEMCG_CHARGE_BATCH * nr_cpus), but only |
508 | * for 2 seconds due to (1). |
509 | */ | |
510 | static void flush_memcg_stats_dwork(struct work_struct *w); | |
511 | static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork); | |
508bed88 | 512 | static u64 flush_last_time; |
9b301615 SB |
513 | |
514 | #define FLUSH_TIME (2UL*HZ) | |
11192d9c | 515 | |
be3e67b5 SAS |
516 | /* |
517 | * Accessors to ensure that preemption is disabled on PREEMPT_RT, because |
518 | * callers cannot rely on it being disabled as part of an acquired spinlock_t |
519 | * lock. These functions are never used in hardirq context on PREEMPT_RT and |
520 | * therefore disabling preemption is sufficient. |
521 | */ | |
522 | static void memcg_stats_lock(void) | |
523 | { | |
e575d401 TG |
524 | preempt_disable_nested(); |
525 | VM_WARN_ON_IRQS_ENABLED(); | |
be3e67b5 SAS |
526 | } |
527 | ||
528 | static void __memcg_stats_lock(void) | |
529 | { | |
e575d401 | 530 | preempt_disable_nested(); |
be3e67b5 SAS |
531 | } |
532 | ||
533 | static void memcg_stats_unlock(void) | |
534 | { | |
e575d401 | 535 | preempt_enable_nested(); |
be3e67b5 SAS |
536 | } |
537 | ||
8d59d221 | 538 | |
9cee7e8e | 539 | static bool memcg_vmstats_needs_flush(struct memcg_vmstats *vmstats) |
8d59d221 | 540 | { |
9cee7e8e | 541 | return atomic64_read(&vmstats->stats_updates) > |
8d59d221 YA |
542 | MEMCG_CHARGE_BATCH * num_online_cpus(); |
543 | } | |
544 | ||
5b3be698 | 545 | static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val) |
11192d9c | 546 | { |
9cee7e8e | 547 | struct memcg_vmstats_percpu *statc; |
8d59d221 | 548 | int cpu = smp_processor_id(); |
78ec6f9d | 549 | unsigned int stats_updates; |
5b3be698 | 550 | |
f9d911ca YA |
551 | if (!val) |
552 | return; | |
553 | ||
8d59d221 | 554 | cgroup_rstat_updated(memcg->css.cgroup, cpu); |
9cee7e8e YA |
555 | statc = this_cpu_ptr(memcg->vmstats_percpu); |
556 | for (; statc; statc = statc->parent) { | |
78ec6f9d BL |
557 | stats_updates = READ_ONCE(statc->stats_updates) + abs(val); |
558 | WRITE_ONCE(statc->stats_updates, stats_updates); | |
559 | if (stats_updates < MEMCG_CHARGE_BATCH) | |
8d59d221 | 560 | continue; |
5b3be698 | 561 | |
873f64b7 | 562 | /* |
8d59d221 YA |
563 | * If @memcg is already flush-able, increasing stats_updates is |
564 | * redundant. Avoid the overhead of the atomic update. | |
873f64b7 | 565 | */ |
9cee7e8e | 566 | if (!memcg_vmstats_needs_flush(statc->vmstats)) |
78ec6f9d | 567 | atomic64_add(stats_updates, |
9cee7e8e | 568 | &statc->vmstats->stats_updates); |
78ec6f9d | 569 | WRITE_ONCE(statc->stats_updates, 0); |
5b3be698 | 570 | } |
11192d9c SB |
571 | } |
572 | ||
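The hot path above stays cheap because per-CPU deltas only touch the shared atomic once a batch threshold is crossed. A simplified, single-level model of that batching (BATCH and the types are illustrative, not the kernel's):

```c
#include <stdatomic.h>
#include <stdlib.h>

#define BATCH 64u

struct pcpu_updates {
	unsigned int pending;		/* CPU-local, no atomics needed */
};

static atomic_ulong total_updates;	/* shared, read by the flusher */

static void record_update(struct pcpu_updates *pc, int val)
{
	pc->pending += (unsigned int)abs(val);
	if (pc->pending < BATCH)
		return;			/* stay on the cheap local path */
	atomic_fetch_add(&total_updates, pc->pending);
	pc->pending = 0;
}
```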
7d7ef0a4 | 573 | static void do_flush_stats(struct mem_cgroup *memcg) |
11192d9c | 574 | { |
7d7ef0a4 YA |
575 | if (mem_cgroup_is_root(memcg)) |
576 | WRITE_ONCE(flush_last_time, jiffies_64); | |
9fad9aee | 577 | |
7d7ef0a4 | 578 | cgroup_rstat_flush(memcg->css.cgroup); |
11192d9c SB |
579 | } |
580 | ||
7d7ef0a4 YA |
581 | /* |
582 | * mem_cgroup_flush_stats - flush the stats of a memory cgroup subtree | |
583 | * @memcg: root of the subtree to flush | |
584 | * | |
585 | * Flushing is serialized by the underlying global rstat lock. There is also a | |
586 | * minimum amount of work to be done even if there are no stat updates to flush. | |
587 | * Hence, we only flush the stats if the updates delta exceeds a threshold. This | |
588 | * avoids unnecessary work and contention on the underlying lock. | |
589 | */ | |
590 | void mem_cgroup_flush_stats(struct mem_cgroup *memcg) | |
11192d9c | 591 | { |
7d7ef0a4 YA |
592 | if (mem_cgroup_disabled()) |
593 | return; | |
594 | ||
595 | if (!memcg) | |
596 | memcg = root_mem_cgroup; | |
597 | ||
9cee7e8e | 598 | if (memcg_vmstats_needs_flush(memcg->vmstats)) |
7d7ef0a4 | 599 | do_flush_stats(memcg); |
9fad9aee YA |
600 | } |
601 | ||
7d7ef0a4 | 602 | void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg) |
9b301615 | 603 | { |
508bed88 YA |
604 | /* Only flush if the periodic flusher is one full cycle late */ |
605 | if (time_after64(jiffies_64, READ_ONCE(flush_last_time) + 2*FLUSH_TIME)) | |
7d7ef0a4 | 606 | mem_cgroup_flush_stats(memcg); |
9b301615 SB |
607 | } |
608 | ||
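Taken together, the two readers above implement the policy from the earlier comment: flush only past an update threshold, and in the ratelimited variant only when the periodic flusher is a full cycle late. A minimal model under assumed constants (all names illustrative):

```c
#include <stdbool.h>
#include <stdint.h>

#define MODEL_BATCH	64u
#define MODEL_NCPUS	8u
#define MODEL_PERIOD	2000u		/* the "2 seconds", in model ticks */

static uint64_t now, last_flush;
static uint64_t pending_updates;

static bool needs_flush(void)
{
	return pending_updates > (uint64_t)MODEL_BATCH * MODEL_NCPUS;
}

static void flush_stats(void)		/* models mem_cgroup_flush_stats() */
{
	if (!needs_flush())
		return;
	last_flush = now;
	pending_updates = 0;
}

static void flush_stats_ratelimited(void)
{
	/* only bother if the periodic flusher is one full cycle late */
	if (now > last_flush + 2 * MODEL_PERIOD)
		flush_stats();
}
```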
11192d9c SB |
609 | static void flush_memcg_stats_dwork(struct work_struct *w) |
610 | { | |
9fad9aee | 611 | /* |
9cee7e8e | 612 | * Deliberately ignore memcg_vmstats_needs_flush() here so that flushing |
8d59d221 | 613 | * in latency-sensitive paths is as cheap as possible. |
9fad9aee | 614 | */ |
7d7ef0a4 | 615 | do_flush_stats(root_mem_cgroup); |
9b301615 | 616 | queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME); |
11192d9c SB |
617 | } |
618 | ||
410f8e82 SB |
619 | unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx) |
620 | { | |
ff48c71c SB |
621 | long x; |
622 | int i = memcg_stats_index(idx); | |
623 | ||
acb5fe2f | 624 | if (WARN_ONCE(i < 0, "%s: missing stat item %d\n", __func__, idx)) |
ff48c71c SB |
625 | return 0; |
626 | ||
627 | x = READ_ONCE(memcg->vmstats->state[i]); | |
410f8e82 SB |
628 | #ifdef CONFIG_SMP |
629 | if (x < 0) | |
630 | x = 0; | |
631 | #endif | |
632 | return x; | |
633 | } | |
634 | ||
7bd5bc3c YA |
635 | static int memcg_page_state_unit(int item); |
636 | ||
637 | /* | |
638 | * Normalize the value passed into memcg_rstat_updated() to be in pages. Round | |
639 | * up non-zero sub-page updates to 1 page, since zero-valued updates are ignored. |
640 | */ | |
641 | static int memcg_state_val_in_pages(int idx, int val) | |
642 | { | |
643 | int unit = memcg_page_state_unit(idx); | |
644 | ||
645 | if (!val || unit == PAGE_SIZE) | |
646 | return val; | |
647 | else | |
648 | return max(val * unit / PAGE_SIZE, 1UL); | |
649 | } | |
650 | ||
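A worked example of this normalization with an assumed 4 KiB page: a 512-byte slab delta (unit 1) rounds up to one page, while a 16 KB kernel-stack delta (unit SZ_1K) scales to four pages. A simplified standalone mirror:

```c
#include <stdio.h>

#define MODEL_PAGE_SIZE 4096L

/* simplified mirror of memcg_state_val_in_pages(), positive deltas only */
static long val_in_pages(long unit, long val)
{
	long pages;

	if (!val || unit == MODEL_PAGE_SIZE)
		return val;
	pages = val * unit / MODEL_PAGE_SIZE;
	return pages ? pages : 1;	/* round sub-page updates up to 1 */
}

int main(void)
{
	printf("%ld\n", val_in_pages(1, 512));	 /* 512 B delta  -> 1 page  */
	printf("%ld\n", val_in_pages(1024, 16)); /* 16 KB stacks -> 4 pages */
	return 0;
}
```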
db9adbcb JW |
651 | /** |
652 | * __mod_memcg_state - update cgroup memory statistics | |
653 | * @memcg: the memory cgroup | |
654 | * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item | |
655 | * @val: delta to add to the counter, can be negative | |
656 | */ | |
a94032b3 SB |
657 | void __mod_memcg_state(struct mem_cgroup *memcg, enum memcg_stat_item idx, |
658 | int val) | |
db9adbcb | 659 | { |
ff48c71c SB |
660 | int i = memcg_stats_index(idx); |
661 | ||
acb5fe2f SB |
662 | if (mem_cgroup_disabled()) |
663 | return; | |
664 | ||
665 | if (WARN_ONCE(i < 0, "%s: missing stat item %d\n", __func__, idx)) | |
db9adbcb JW |
666 | return; |
667 | ||
ff48c71c | 668 | __this_cpu_add(memcg->vmstats_percpu->state[i], val); |
7bd5bc3c | 669 | memcg_rstat_updated(memcg, memcg_state_val_in_pages(idx, val)); |
db9adbcb JW |
670 | } |
671 | ||
2d146aa3 | 672 | /* idx can be of type enum memcg_stat_item or node_stat_item. */ |
ea1e8796 | 673 | unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx) |
a18e6e6e | 674 | { |
ff48c71c SB |
675 | long x; |
676 | int i = memcg_stats_index(idx); | |
677 | ||
acb5fe2f | 678 | if (WARN_ONCE(i < 0, "%s: missing stat item %d\n", __func__, idx)) |
ff48c71c | 679 | return 0; |
a18e6e6e | 680 | |
ff48c71c | 681 | x = READ_ONCE(memcg->vmstats->state_local[i]); |
a18e6e6e JW |
682 | #ifdef CONFIG_SMP |
683 | if (x < 0) | |
684 | x = 0; | |
685 | #endif | |
686 | return x; | |
687 | } | |
688 | ||
91882c16 SB |
689 | static void __mod_memcg_lruvec_state(struct lruvec *lruvec, |
690 | enum node_stat_item idx, | |
691 | int val) | |
db9adbcb JW |
692 | { |
693 | struct mem_cgroup_per_node *pn; | |
42a30035 | 694 | struct mem_cgroup *memcg; |
ff48c71c SB |
695 | int i = memcg_stats_index(idx); |
696 | ||
acb5fe2f | 697 | if (WARN_ONCE(i < 0, "%s: missing stat item %d\n", __func__, idx)) |
ff48c71c | 698 | return; |
db9adbcb | 699 | |
db9adbcb | 700 | pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec); |
42a30035 | 701 | memcg = pn->memcg; |
db9adbcb | 702 | |
be3e67b5 | 703 | /* |
be16dd76 | 704 | * Callers from rmap rely on disabled preemption because they never |
be3e67b5 SAS |
705 | * update their counters from interrupt context. For these two |
706 | * counters we check that the update is never performed from an |
707 | * interrupt context, while other callers need to have interrupts disabled. |
708 | */ | |
709 | __memcg_stats_lock(); | |
e575d401 | 710 | if (IS_ENABLED(CONFIG_DEBUG_VM)) { |
be3e67b5 SAS |
711 | switch (idx) { |
712 | case NR_ANON_MAPPED: | |
713 | case NR_FILE_MAPPED: | |
714 | case NR_ANON_THPS: | |
be3e67b5 SAS |
715 | WARN_ON_ONCE(!in_task()); |
716 | break; | |
717 | default: | |
e575d401 | 718 | VM_WARN_ON_IRQS_ENABLED(); |
be3e67b5 SAS |
719 | } |
720 | } | |
721 | ||
db9adbcb | 722 | /* Update memcg */ |
ff48c71c | 723 | __this_cpu_add(memcg->vmstats_percpu->state[i], val); |
db9adbcb | 724 | |
b4c46484 | 725 | /* Update lruvec */ |
ff48c71c | 726 | __this_cpu_add(pn->lruvec_stats_percpu->state[i], val); |
11192d9c | 727 | |
7bd5bc3c | 728 | memcg_rstat_updated(memcg, memcg_state_val_in_pages(idx, val)); |
be3e67b5 | 729 | memcg_stats_unlock(); |
db9adbcb JW |
730 | } |
731 | ||
eedc4e5a RG |
732 | /** |
733 | * __mod_lruvec_state - update lruvec memory statistics | |
734 | * @lruvec: the lruvec | |
735 | * @idx: the stat item | |
736 | * @val: delta to add to the counter, can be negative | |
737 | * | |
738 | * The lruvec is the intersection of the NUMA node and a cgroup. This | |
739 | * function updates all three counters that are affected by a |
740 | * change of state at this level: per-node, per-cgroup, per-lruvec. | |
741 | */ | |
742 | void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, | |
743 | int val) | |
744 | { | |
745 | /* Update node */ | |
746 | __mod_node_page_state(lruvec_pgdat(lruvec), idx, val); | |
747 | ||
748 | /* Update memcg and lruvec */ | |
749 | if (!mem_cgroup_disabled()) | |
750 | __mod_memcg_lruvec_state(lruvec, idx, val); | |
751 | } | |
752 | ||
c701123b | 753 | void __lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx, |
c47d5032 SB |
754 | int val) |
755 | { | |
b4e0b68f | 756 | struct mem_cgroup *memcg; |
c701123b | 757 | pg_data_t *pgdat = folio_pgdat(folio); |
c47d5032 SB |
758 | struct lruvec *lruvec; |
759 | ||
b4e0b68f | 760 | rcu_read_lock(); |
c701123b | 761 | memcg = folio_memcg(folio); |
c47d5032 | 762 | /* Untracked pages have no memcg, no lruvec. Update only the node */ |
d635a69d | 763 | if (!memcg) { |
b4e0b68f | 764 | rcu_read_unlock(); |
c47d5032 SB |
765 | __mod_node_page_state(pgdat, idx, val); |
766 | return; | |
767 | } | |
768 | ||
d635a69d | 769 | lruvec = mem_cgroup_lruvec(memcg, pgdat); |
c47d5032 | 770 | __mod_lruvec_state(lruvec, idx, val); |
b4e0b68f | 771 | rcu_read_unlock(); |
c47d5032 | 772 | } |
c701123b | 773 | EXPORT_SYMBOL(__lruvec_stat_mod_folio); |
c47d5032 | 774 | |
da3ceeff | 775 | void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val) |
ec9f0238 | 776 | { |
4f103c63 | 777 | pg_data_t *pgdat = page_pgdat(virt_to_page(p)); |
ec9f0238 RG |
778 | struct mem_cgroup *memcg; |
779 | struct lruvec *lruvec; | |
780 | ||
781 | rcu_read_lock(); | |
fc4db90f | 782 | memcg = mem_cgroup_from_slab_obj(p); |
ec9f0238 | 783 | |
8faeb1ff MS |
784 | /* |
785 | * Untracked pages have no memcg, no lruvec. Update only the | |
786 | * node. If we reparent the slab objects to the root memcg, | |
787 | * when we free the slab object, we need to update the per-memcg | |
788 | * vmstats to keep it correct for the root memcg. | |
789 | */ | |
790 | if (!memcg) { | |
ec9f0238 RG |
791 | __mod_node_page_state(pgdat, idx, val); |
792 | } else { | |
867e5e1d | 793 | lruvec = mem_cgroup_lruvec(memcg, pgdat); |
ec9f0238 RG |
794 | __mod_lruvec_state(lruvec, idx, val); |
795 | } | |
796 | rcu_read_unlock(); | |
797 | } | |
798 | ||
db9adbcb JW |
799 | /** |
800 | * __count_memcg_events - account VM events in a cgroup | |
801 | * @memcg: the memory cgroup | |
802 | * @idx: the event item | |
f0953a1b | 803 | * @count: the number of events that occurred |
db9adbcb JW |
804 | */ |
805 | void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx, | |
806 | unsigned long count) | |
807 | { | |
acb5fe2f | 808 | int i = memcg_events_index(idx); |
8278f1c7 | 809 | |
acb5fe2f SB |
810 | if (mem_cgroup_disabled()) |
811 | return; | |
812 | ||
813 | if (WARN_ONCE(i < 0, "%s: missing stat item %d\n", __func__, idx)) | |
db9adbcb JW |
814 | return; |
815 | ||
be3e67b5 | 816 | memcg_stats_lock(); |
acb5fe2f | 817 | __this_cpu_add(memcg->vmstats_percpu->events[i], count); |
5b3be698 | 818 | memcg_rstat_updated(memcg, count); |
be3e67b5 | 819 | memcg_stats_unlock(); |
db9adbcb JW |
820 | } |
821 | ||
ea1e8796 | 822 | unsigned long memcg_events(struct mem_cgroup *memcg, int event) |
e9f8974f | 823 | { |
acb5fe2f | 824 | int i = memcg_events_index(event); |
8278f1c7 | 825 | |
acb5fe2f | 826 | if (WARN_ONCE(i < 0, "%s: missing stat item %d\n", __func__, event)) |
8278f1c7 | 827 | return 0; |
acb5fe2f SB |
828 | |
829 | return READ_ONCE(memcg->vmstats->events[i]); | |
e9f8974f JW |
830 | } |
831 | ||
ea1e8796 | 832 | unsigned long memcg_events_local(struct mem_cgroup *memcg, int event) |
42a30035 | 833 | { |
acb5fe2f | 834 | int i = memcg_events_index(event); |
8278f1c7 | 835 | |
acb5fe2f | 836 | if (WARN_ONCE(i < 0, "%s: missing stat item %d\n", __func__, event)) |
8278f1c7 | 837 | return 0; |
815744d7 | 838 | |
acb5fe2f | 839 | return READ_ONCE(memcg->vmstats->events_local[i]); |
42a30035 JW |
840 | } |
841 | ||
e548ad4a | 842 | void mem_cgroup_charge_statistics(struct mem_cgroup *memcg, int nr_pages) |
d52aa412 | 843 | { |
e401f176 KH |
844 | /* pagein of a big page is an event. So, ignore page size */ |
845 | if (nr_pages > 0) | |
c9019e9b | 846 | __count_memcg_events(memcg, PGPGIN, 1); |
3751d604 | 847 | else { |
c9019e9b | 848 | __count_memcg_events(memcg, PGPGOUT, 1); |
3751d604 KH |
849 | nr_pages = -nr_pages; /* for event */ |
850 | } | |
e401f176 | 851 | |
871789d4 | 852 | __this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages); |
6d12e2d8 KH |
853 | } |
854 | ||
66d60c42 RG |
855 | bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg, |
856 | enum mem_cgroup_events_target target) | |
7a159cc9 JW |
857 | { |
858 | unsigned long val, next; | |
859 | ||
871789d4 CD |
860 | val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events); |
861 | next = __this_cpu_read(memcg->vmstats_percpu->targets[target]); | |
7a159cc9 | 862 | /* from time_after() in jiffies.h */ |
6a1a8b80 | 863 | if ((long)(next - val) < 0) { |
f53d7ce3 JW |
864 | switch (target) { |
865 | case MEM_CGROUP_TARGET_THRESH: | |
866 | next = val + THRESHOLDS_EVENTS_TARGET; | |
867 | break; | |
bb4cc1a8 AM |
868 | case MEM_CGROUP_TARGET_SOFTLIMIT: |
869 | next = val + SOFTLIMIT_EVENTS_TARGET; | |
870 | break; | |
f53d7ce3 JW |
871 | default: |
872 | break; | |
873 | } | |
871789d4 | 874 | __this_cpu_write(memcg->vmstats_percpu->targets[target], next); |
f53d7ce3 | 875 | return true; |
7a159cc9 | 876 | } |
f53d7ce3 | 877 | return false; |
d2265e6f KH |
878 | } |
879 | ||
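The `(long)(next - val) < 0` test above is the standard wraparound-safe comparison from jiffies.h: the subtraction happens in unsigned arithmetic and the result is reinterpreted as signed, which keeps working across counter overflow. A small demonstration:

```c
#include <stdio.h>

int main(void)
{
	unsigned long val = (unsigned long)-5;	/* counter about to wrap */
	unsigned long next = val + 10;		/* target lies past the wrap */

	/* the naive comparison thinks the target is already in the past */
	printf("next > val:         %d\n", next > val);		  /* 0, wrong */
	/* the signed delta correctly says it is still 10 ticks ahead */
	printf("(long)(next - val): %ld\n", (long)(next - val)); /* 10 */
	return 0;
}
```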
cf475ad2 | 880 | struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p) |
78fb7466 | 881 | { |
31a78f23 BS |
882 | /* |
883 | * mm_update_next_owner() may clear mm->owner to NULL | |
884 | * if it races with swapoff, page migration, etc. | |
885 | * So this can be called with p == NULL. | |
886 | */ | |
887 | if (unlikely(!p)) | |
888 | return NULL; | |
889 | ||
073219e9 | 890 | return mem_cgroup_from_css(task_css(p, memory_cgrp_id)); |
78fb7466 | 891 | } |
33398cf2 | 892 | EXPORT_SYMBOL(mem_cgroup_from_task); |
78fb7466 | 893 | |
04f94e3f DS |
894 | static __always_inline struct mem_cgroup *active_memcg(void) |
895 | { | |
55a68c82 | 896 | if (!in_task()) |
04f94e3f DS |
897 | return this_cpu_read(int_active_memcg); |
898 | else | |
899 | return current->active_memcg; | |
900 | } | |
901 | ||
d46eb14b SB |
902 | /** |
903 | * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg. | |
904 | * @mm: mm from which memcg should be extracted. It can be NULL. | |
905 | * | |
04f94e3f DS |
906 | * Obtain a reference on mm->memcg and return it if successful. If mm |
907 | * is NULL, then the memcg is chosen as follows: | |
908 | * 1) The active memcg, if set. | |
909 | * 2) current->mm->memcg, if available | |
910 | * 3) root memcg | |
911 | * If mem_cgroup is disabled, NULL is returned. | |
d46eb14b SB |
912 | */ |
913 | struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm) | |
54595fe2 | 914 | { |
d46eb14b SB |
915 | struct mem_cgroup *memcg; |
916 | ||
917 | if (mem_cgroup_disabled()) | |
918 | return NULL; | |
0b7f569e | 919 | |
2884b6b7 MS |
920 | /* |
921 | * Page cache insertions can happen without an | |
922 | * actual mm context, e.g. during disk probing | |
923 | * on boot, loopback IO, acct() writes etc. | |
924 | * | |
925 | * No need to css_get on root memcg as the reference | |
926 | * counting is disabled on the root level in the | |
927 | * cgroup core. See CSS_NO_REF. | |
928 | */ | |
04f94e3f DS |
929 | if (unlikely(!mm)) { |
930 | memcg = active_memcg(); | |
931 | if (unlikely(memcg)) { | |
932 | /* remote memcg must hold a ref */ | |
933 | css_get(&memcg->css); | |
934 | return memcg; | |
935 | } | |
936 | mm = current->mm; | |
937 | if (unlikely(!mm)) | |
938 | return root_mem_cgroup; | |
939 | } | |
2884b6b7 | 940 | |
54595fe2 KH |
941 | rcu_read_lock(); |
942 | do { | |
2884b6b7 MS |
943 | memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); |
944 | if (unlikely(!memcg)) | |
df381975 | 945 | memcg = root_mem_cgroup; |
00d484f3 | 946 | } while (!css_tryget(&memcg->css)); |
54595fe2 | 947 | rcu_read_unlock(); |
c0ff4b85 | 948 | return memcg; |
54595fe2 | 949 | } |
d46eb14b SB |
950 | EXPORT_SYMBOL(get_mem_cgroup_from_mm); |
951 | ||
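A typical caller pattern for this helper looks as follows (an illustrative sketch, not code from this file): take the reference, use the memcg, then drop the css reference; css_put() on the root memcg is a no-op thanks to CSS_NO_REF.

```c
static void example_inspect_mm(struct mm_struct *mm)
{
	struct mem_cgroup *memcg = get_mem_cgroup_from_mm(mm);

	if (!memcg)		/* only when mem_cgroup_disabled() */
		return;
	/* ... charge to or inspect the state of this memcg ... */
	css_put(&memcg->css);
}
```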
4b569387 NP |
952 | /** |
953 | * get_mem_cgroup_from_current - Obtain a reference on current task's memcg. | |
954 | */ | |
955 | struct mem_cgroup *get_mem_cgroup_from_current(void) | |
956 | { | |
957 | struct mem_cgroup *memcg; | |
958 | ||
959 | if (mem_cgroup_disabled()) | |
960 | return NULL; | |
961 | ||
962 | again: | |
963 | rcu_read_lock(); | |
964 | memcg = mem_cgroup_from_task(current); | |
965 | if (!css_tryget(&memcg->css)) { | |
966 | rcu_read_unlock(); | |
967 | goto again; | |
968 | } | |
969 | rcu_read_unlock(); | |
970 | return memcg; | |
971 | } | |
972 | ||
5660048c JW |
973 | /** |
974 | * mem_cgroup_iter - iterate over memory cgroup hierarchy | |
975 | * @root: hierarchy root | |
976 | * @prev: previously returned memcg, NULL on first invocation | |
977 | * @reclaim: cookie for shared reclaim walks, NULL for full walks | |
978 | * | |
979 | * Returns references to children of the hierarchy below @root, or | |
980 | * @root itself, or %NULL after a full round-trip. | |
981 | * | |
982 | * Caller must pass the return value in @prev on subsequent | |
983 | * invocations for reference counting, or use mem_cgroup_iter_break() | |
984 | * to cancel a hierarchy walk before the round-trip is complete. | |
985 | * | |
05bdc520 ML |
986 | * Reclaimers can specify a node in @reclaim to divide up the memcgs |
987 | * in the hierarchy among all concurrent reclaimers operating on the | |
988 | * same node. | |
5660048c | 989 | */ |
694fbc0f | 990 | struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root, |
5660048c | 991 | struct mem_cgroup *prev, |
694fbc0f | 992 | struct mem_cgroup_reclaim_cookie *reclaim) |
14067bb3 | 993 | { |
3f649ab7 | 994 | struct mem_cgroup_reclaim_iter *iter; |
5ac8fb31 | 995 | struct cgroup_subsys_state *css = NULL; |
9f3a0d09 | 996 | struct mem_cgroup *memcg = NULL; |
5ac8fb31 | 997 | struct mem_cgroup *pos = NULL; |
711d3d2c | 998 | |
694fbc0f AM |
999 | if (mem_cgroup_disabled()) |
1000 | return NULL; | |
5660048c | 1001 | |
9f3a0d09 JW |
1002 | if (!root) |
1003 | root = root_mem_cgroup; | |
7d74b06f | 1004 | |
542f85f9 | 1005 | rcu_read_lock(); |
5f578161 | 1006 | |
5ac8fb31 | 1007 | if (reclaim) { |
ef8f2327 | 1008 | struct mem_cgroup_per_node *mz; |
5ac8fb31 | 1009 | |
a3747b53 | 1010 | mz = root->nodeinfo[reclaim->pgdat->node_id]; |
9da83f3f | 1011 | iter = &mz->iter; |
5ac8fb31 | 1012 | |
a9320aae WY |
1013 | /* |
1014 | * On start, join the current reclaim iteration cycle. | |
1015 | * Exit when a concurrent walker completes it. | |
1016 | */ | |
1017 | if (!prev) | |
1018 | reclaim->generation = iter->generation; | |
1019 | else if (reclaim->generation != iter->generation) | |
5ac8fb31 JW |
1020 | goto out_unlock; |
1021 | ||
6df38689 | 1022 | while (1) { |
4db0c3c2 | 1023 | pos = READ_ONCE(iter->position); |
6df38689 VD |
1024 | if (!pos || css_tryget(&pos->css)) |
1025 | break; | |
5ac8fb31 | 1026 | /* |
6df38689 VD |
1027 | * css reference reached zero, so iter->position will |
1028 | * be cleared by ->css_released. However, we should not | |
1029 | * rely on this happening soon, because ->css_released | |
1030 | * is called from a work queue, and by busy-waiting we | |
1031 | * might block it. So we clear iter->position right | |
1032 | * away. | |
5ac8fb31 | 1033 | */ |
6df38689 VD |
1034 | (void)cmpxchg(&iter->position, pos, NULL); |
1035 | } | |
89d8330c WY |
1036 | } else if (prev) { |
1037 | pos = prev; | |
5ac8fb31 JW |
1038 | } |
1039 | ||
1040 | if (pos) | |
1041 | css = &pos->css; | |
1042 | ||
1043 | for (;;) { | |
1044 | css = css_next_descendant_pre(css, &root->css); | |
1045 | if (!css) { | |
1046 | /* | |
1047 | * Reclaimers share the hierarchy walk, and a | |
1048 | * new one might jump in right at the end of | |
1049 | * the hierarchy - make sure they see at least | |
1050 | * one group and restart from the beginning. | |
1051 | */ | |
1052 | if (!prev) | |
1053 | continue; | |
1054 | break; | |
527a5ec9 | 1055 | } |
7d74b06f | 1056 | |
5ac8fb31 JW |
1057 | /* |
1058 | * Verify the css and acquire a reference. The root | |
1059 | * is provided by the caller, so we know it's alive | |
1060 | * and kicking, and don't take an extra reference. | |
1061 | */ | |
41555dad WY |
1062 | if (css == &root->css || css_tryget(css)) { |
1063 | memcg = mem_cgroup_from_css(css); | |
0b8f73e1 | 1064 | break; |
41555dad | 1065 | } |
9f3a0d09 | 1066 | } |
5ac8fb31 JW |
1067 | |
1068 | if (reclaim) { | |
5ac8fb31 | 1069 | /* |
6df38689 VD |
1070 | * The position could have already been updated by a competing |
1071 | * thread, so check that the value hasn't changed since we read | |
1072 | * it to avoid reclaiming from the same cgroup twice. | |
5ac8fb31 | 1073 | */ |
6df38689 VD |
1074 | (void)cmpxchg(&iter->position, pos, memcg); |
1075 | ||
5ac8fb31 JW |
1076 | if (pos) |
1077 | css_put(&pos->css); | |
1078 | ||
1079 | if (!memcg) | |
1080 | iter->generation++; | |
9f3a0d09 | 1081 | } |
5ac8fb31 | 1082 | |
542f85f9 MH |
1083 | out_unlock: |
1084 | rcu_read_unlock(); | |
c40046f3 MH |
1085 | if (prev && prev != root) |
1086 | css_put(&prev->css); | |
1087 | ||
9f3a0d09 | 1088 | return memcg; |
14067bb3 | 1089 | } |
7d74b06f | 1090 | |
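The canonical full walk with this iterator looks like the sketch below; mem_cgroup_iter() manages the css references itself, and mem_cgroup_iter_break() must drop the last one if the walk stops early (should_stop() is a hypothetical predicate):

```c
static bool should_stop(struct mem_cgroup *memcg);	/* hypothetical */

static void example_walk_hierarchy(struct mem_cgroup *root)
{
	struct mem_cgroup *iter;

	for (iter = mem_cgroup_iter(root, NULL, NULL);
	     iter != NULL;
	     iter = mem_cgroup_iter(root, iter, NULL)) {
		/* ... operate on iter ... */
		if (should_stop(iter)) {
			mem_cgroup_iter_break(root, iter);
			break;
		}
	}
}
```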
5660048c JW |
1091 | /** |
1092 | * mem_cgroup_iter_break - abort a hierarchy walk prematurely | |
1093 | * @root: hierarchy root | |
1094 | * @prev: last visited hierarchy member as returned by mem_cgroup_iter() | |
1095 | */ | |
1096 | void mem_cgroup_iter_break(struct mem_cgroup *root, | |
1097 | struct mem_cgroup *prev) | |
9f3a0d09 JW |
1098 | { |
1099 | if (!root) | |
1100 | root = root_mem_cgroup; | |
1101 | if (prev && prev != root) | |
1102 | css_put(&prev->css); | |
1103 | } | |
7d74b06f | 1104 | |
54a83d6b MC |
1105 | static void __invalidate_reclaim_iterators(struct mem_cgroup *from, |
1106 | struct mem_cgroup *dead_memcg) | |
6df38689 | 1107 | { |
6df38689 | 1108 | struct mem_cgroup_reclaim_iter *iter; |
ef8f2327 MG |
1109 | struct mem_cgroup_per_node *mz; |
1110 | int nid; | |
6df38689 | 1111 | |
54a83d6b | 1112 | for_each_node(nid) { |
a3747b53 | 1113 | mz = from->nodeinfo[nid]; |
9da83f3f YS |
1114 | iter = &mz->iter; |
1115 | cmpxchg(&iter->position, dead_memcg, NULL); | |
6df38689 VD |
1116 | } |
1117 | } | |
1118 | ||
54a83d6b MC |
1119 | static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg) |
1120 | { | |
1121 | struct mem_cgroup *memcg = dead_memcg; | |
1122 | struct mem_cgroup *last; | |
1123 | ||
1124 | do { | |
1125 | __invalidate_reclaim_iterators(memcg, dead_memcg); | |
1126 | last = memcg; | |
1127 | } while ((memcg = parent_mem_cgroup(memcg))); | |
1128 | ||
1129 | /* | |
b8dd3ee9 | 1130 | * When cgroup1 non-hierarchy mode is used, |
54a83d6b MC |
1131 | * parent_mem_cgroup() does not walk all the way up to the |
1132 | * cgroup root (root_mem_cgroup). So we have to handle | |
1133 | * dead_memcg from cgroup root separately. | |
1134 | */ | |
7848ed62 | 1135 | if (!mem_cgroup_is_root(last)) |
54a83d6b MC |
1136 | __invalidate_reclaim_iterators(root_mem_cgroup, |
1137 | dead_memcg); | |
1138 | } | |
1139 | ||
7c5f64f8 VD |
1140 | /** |
1141 | * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy | |
1142 | * @memcg: hierarchy root | |
1143 | * @fn: function to call for each task | |
1144 | * @arg: argument passed to @fn | |
1145 | * | |
1146 | * This function iterates over tasks attached to @memcg or to any of its | |
1147 | * descendants and calls @fn for each task. If @fn returns a non-zero | |
025b7799 Z |
1148 | * value, the function breaks the iteration loop. Otherwise, it will iterate |
1149 | * over all tasks and return 0. | |
7c5f64f8 VD |
1150 | * |
1151 | * This function must not be called for the root memory cgroup. | |
1152 | */ | |
025b7799 Z |
1153 | void mem_cgroup_scan_tasks(struct mem_cgroup *memcg, |
1154 | int (*fn)(struct task_struct *, void *), void *arg) | |
7c5f64f8 VD |
1155 | { |
1156 | struct mem_cgroup *iter; | |
1157 | int ret = 0; | |
1158 | ||
7848ed62 | 1159 | BUG_ON(mem_cgroup_is_root(memcg)); |
7c5f64f8 VD |
1160 | |
1161 | for_each_mem_cgroup_tree(iter, memcg) { | |
1162 | struct css_task_iter it; | |
1163 | struct task_struct *task; | |
1164 | ||
f168a9a5 | 1165 | css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it); |
7c5f64f8 VD |
1166 | while (!ret && (task = css_task_iter_next(&it))) |
1167 | ret = fn(task, arg); | |
1168 | css_task_iter_end(&it); | |
1169 | if (ret) { | |
1170 | mem_cgroup_iter_break(memcg, iter); | |
1171 | break; | |
1172 | } | |
1173 | } | |
7c5f64f8 VD |
1174 | } |
1175 | ||
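A minimal callback sketch for this iterator (both names below are illustrative): returning 0 keeps the scan going over every task, returning non-zero stops it early.

```c
static int example_count_task(struct task_struct *task, void *arg)
{
	unsigned int *count = arg;

	(*count)++;
	return 0;	/* keep iterating; non-zero would stop the scan */
}

/* usage:
 *	unsigned int count = 0;
 *	mem_cgroup_scan_tasks(memcg, example_count_task, &count);
 */
```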
6168d0da | 1176 | #ifdef CONFIG_DEBUG_VM |
e809c3fe | 1177 | void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio) |
6168d0da AS |
1178 | { |
1179 | struct mem_cgroup *memcg; | |
1180 | ||
1181 | if (mem_cgroup_disabled()) | |
1182 | return; | |
1183 | ||
e809c3fe | 1184 | memcg = folio_memcg(folio); |
6168d0da AS |
1185 | |
1186 | if (!memcg) | |
7848ed62 | 1187 | VM_BUG_ON_FOLIO(!mem_cgroup_is_root(lruvec_memcg(lruvec)), folio); |
6168d0da | 1188 | else |
e809c3fe | 1189 | VM_BUG_ON_FOLIO(lruvec_memcg(lruvec) != memcg, folio); |
6168d0da AS |
1190 | } |
1191 | #endif | |
1192 | ||
6168d0da | 1193 | /** |
e809c3fe MWO |
1194 | * folio_lruvec_lock - Lock the lruvec for a folio. |
1195 | * @folio: Pointer to the folio. | |
6168d0da | 1196 | * |
d7e3aba5 | 1197 | * These functions are safe to use under any of the following conditions: |
e809c3fe MWO |
1198 | * - folio locked |
1199 | * - folio_test_lru false | |
1200 | * - folio_memcg_lock() | |
1201 | * - folio frozen (refcount of 0) | |
1202 | * | |
1203 | * Return: The lruvec this folio is on with its lock held. | |
6168d0da | 1204 | */ |
e809c3fe | 1205 | struct lruvec *folio_lruvec_lock(struct folio *folio) |
6168d0da | 1206 | { |
e809c3fe | 1207 | struct lruvec *lruvec = folio_lruvec(folio); |
6168d0da | 1208 | |
6168d0da | 1209 | spin_lock(&lruvec->lru_lock); |
e809c3fe | 1210 | lruvec_memcg_debug(lruvec, folio); |
6168d0da AS |
1211 | |
1212 | return lruvec; | |
1213 | } | |
1214 | ||
e809c3fe MWO |
1215 | /** |
1216 | * folio_lruvec_lock_irq - Lock the lruvec for a folio. | |
1217 | * @folio: Pointer to the folio. | |
1218 | * | |
1219 | * These functions are safe to use under any of the following conditions: | |
1220 | * - folio locked | |
1221 | * - folio_test_lru false | |
1222 | * - folio_memcg_lock() | |
1223 | * - folio frozen (refcount of 0) | |
1224 | * | |
1225 | * Return: The lruvec this folio is on with its lock held and interrupts | |
1226 | * disabled. | |
1227 | */ | |
1228 | struct lruvec *folio_lruvec_lock_irq(struct folio *folio) | |
6168d0da | 1229 | { |
e809c3fe | 1230 | struct lruvec *lruvec = folio_lruvec(folio); |
6168d0da | 1231 | |
6168d0da | 1232 | spin_lock_irq(&lruvec->lru_lock); |
e809c3fe | 1233 | lruvec_memcg_debug(lruvec, folio); |
6168d0da AS |
1234 | |
1235 | return lruvec; | |
1236 | } | |
1237 | ||
e809c3fe MWO |
1238 | /** |
1239 | * folio_lruvec_lock_irqsave - Lock the lruvec for a folio. | |
1240 | * @folio: Pointer to the folio. | |
1241 | * @flags: Pointer to irqsave flags. | |
1242 | * | |
1243 | * These functions are safe to use under any of the following conditions: | |
1244 | * - folio locked | |
1245 | * - folio_test_lru false | |
1246 | * - folio_memcg_lock() | |
1247 | * - folio frozen (refcount of 0) | |
1248 | * | |
1249 | * Return: The lruvec this folio is on with its lock held and interrupts | |
1250 | * disabled. | |
1251 | */ | |
1252 | struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio, | |
1253 | unsigned long *flags) | |
6168d0da | 1254 | { |
e809c3fe | 1255 | struct lruvec *lruvec = folio_lruvec(folio); |
6168d0da | 1256 | |
6168d0da | 1257 | spin_lock_irqsave(&lruvec->lru_lock, *flags); |
e809c3fe | 1258 | lruvec_memcg_debug(lruvec, folio); |
6168d0da AS |
1259 | |
1260 | return lruvec; | |
1261 | } | |
1262 | ||
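Typical use of the irqsave variant (an illustrative sketch): the critical section runs with the lru_lock held and interrupts disabled, and the saved flags are restored on unlock.

```c
static void example_touch_lru(struct folio *folio)
{
	unsigned long flags;
	struct lruvec *lruvec = folio_lruvec_lock_irqsave(folio, &flags);

	/* ... move the folio between lruvec's LRU lists ... */
	spin_unlock_irqrestore(&lruvec->lru_lock, flags);
}
```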
925b7673 | 1263 | /** |
fa9add64 HD |
1264 | * mem_cgroup_update_lru_size - account for adding or removing an lru page |
1265 | * @lruvec: mem_cgroup per zone lru vector | |
1266 | * @lru: index of lru list the page is sitting on | |
b4536f0c | 1267 | * @zid: zone id of the accounted pages |
fa9add64 | 1268 | * @nr_pages: positive when adding or negative when removing |
925b7673 | 1269 | * |
ca707239 | 1270 | * This function must be called under lru_lock, just before a page is added |
07ca7606 | 1271 | * to or just after a page is removed from an lru list. |
3f58a829 | 1272 | */ |
fa9add64 | 1273 | void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, |
b4536f0c | 1274 | int zid, int nr_pages) |
3f58a829 | 1275 | { |
ef8f2327 | 1276 | struct mem_cgroup_per_node *mz; |
fa9add64 | 1277 | unsigned long *lru_size; |
ca707239 | 1278 | long size; |
3f58a829 MK |
1279 | |
1280 | if (mem_cgroup_disabled()) | |
1281 | return; | |
1282 | ||
ef8f2327 | 1283 | mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec); |
b4536f0c | 1284 | lru_size = &mz->lru_zone_size[zid][lru]; |
ca707239 HD |
1285 | |
1286 | if (nr_pages < 0) | |
1287 | *lru_size += nr_pages; | |
1288 | ||
1289 | size = *lru_size; | |
b4536f0c MH |
1290 | if (WARN_ONCE(size < 0, |
1291 | "%s(%p, %d, %d): lru_size %ld\n", | |
1292 | __func__, lruvec, lru, nr_pages, size)) { | |
ca707239 HD |
1293 | VM_BUG_ON(1); |
1294 | *lru_size = 0; | |
1295 | } | |
1296 | ||
1297 | if (nr_pages > 0) | |
1298 | *lru_size += nr_pages; | |
08e552c6 | 1299 | } |
544122e5 | 1300 | |
19942822 | 1301 | /** |
9d11ea9f | 1302 | * mem_cgroup_margin - calculate chargeable space of a memory cgroup |
dad7557e | 1303 | * @memcg: the memory cgroup |
19942822 | 1304 | * |
9d11ea9f | 1305 | * Returns the maximum amount of memory @memcg can be charged with, in |
7ec99d62 | 1306 | * pages. |
19942822 | 1307 | */ |
c0ff4b85 | 1308 | static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg) |
19942822 | 1309 | { |
3e32cb2e JW |
1310 | unsigned long margin = 0; |
1311 | unsigned long count; | |
1312 | unsigned long limit; | |
9d11ea9f | 1313 | |
3e32cb2e | 1314 | count = page_counter_read(&memcg->memory); |
bbec2e15 | 1315 | limit = READ_ONCE(memcg->memory.max); |
3e32cb2e JW |
1316 | if (count < limit) |
1317 | margin = limit - count; | |
1318 | ||
7941d214 | 1319 | if (do_memsw_account()) { |
3e32cb2e | 1320 | count = page_counter_read(&memcg->memsw); |
bbec2e15 | 1321 | limit = READ_ONCE(memcg->memsw.max); |
1c4448ed | 1322 | if (count < limit) |
3e32cb2e | 1323 | margin = min(margin, limit - count); |
cbedbac3 LR |
1324 | else |
1325 | margin = 0; | |
3e32cb2e JW |
1326 | } |
1327 | ||
1328 | return margin; | |
19942822 JW |
1329 | } |
1330 | ||
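In plain numbers (all values illustrative): with memory count = 700 and max = 1000 the margin is 300; if memsw accounting is on and the memsw count = 950 with max = 1000, the tighter limit wins and the margin drops to 50. A tiny standalone check:

```c
#include <stdio.h>

static unsigned long margin(unsigned long count, unsigned long limit)
{
	return count < limit ? limit - count : 0;
}

int main(void)
{
	unsigned long mem = margin(700, 1000);		/* 300 pages left */
	unsigned long memsw = margin(950, 1000);	/* 50 pages left */

	/* with memsw accounting, the tighter of the two limits wins */
	printf("margin = %lu\n", mem < memsw ? mem : memsw);	/* 50 */
	return 0;
}
```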
5f9a4f4a MS |
1331 | struct memory_stat { |
1332 | const char *name; | |
5f9a4f4a MS |
1333 | unsigned int idx; |
1334 | }; | |
1335 | ||
57b2847d | 1336 | static const struct memory_stat memory_stats[] = { |
fff66b79 MS |
1337 | { "anon", NR_ANON_MAPPED }, |
1338 | { "file", NR_FILE_PAGES }, | |
a8c49af3 | 1339 | { "kernel", MEMCG_KMEM }, |
fff66b79 MS |
1340 | { "kernel_stack", NR_KERNEL_STACK_KB }, |
1341 | { "pagetables", NR_PAGETABLE }, | |
ebc97a52 | 1342 | { "sec_pagetables", NR_SECONDARY_PAGETABLE }, |
fff66b79 MS |
1343 | { "percpu", MEMCG_PERCPU_B }, |
1344 | { "sock", MEMCG_SOCK }, | |
4e5aa1f4 | 1345 | { "vmalloc", MEMCG_VMALLOC }, |
fff66b79 | 1346 | { "shmem", NR_SHMEM }, |
3a3b7fec | 1347 | #ifdef CONFIG_ZSWAP |
f4840ccf JW |
1348 | { "zswap", MEMCG_ZSWAP_B }, |
1349 | { "zswapped", MEMCG_ZSWAPPED }, | |
1350 | #endif | |
fff66b79 MS |
1351 | { "file_mapped", NR_FILE_MAPPED }, |
1352 | { "file_dirty", NR_FILE_DIRTY }, | |
1353 | { "file_writeback", NR_WRITEBACK }, | |
b6038942 SB |
1354 | #ifdef CONFIG_SWAP |
1355 | { "swapcached", NR_SWAPCACHE }, | |
1356 | #endif | |
5f9a4f4a | 1357 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
fff66b79 MS |
1358 | { "anon_thp", NR_ANON_THPS }, |
1359 | { "file_thp", NR_FILE_THPS }, | |
1360 | { "shmem_thp", NR_SHMEM_THPS }, | |
5f9a4f4a | 1361 | #endif |
fff66b79 MS |
1362 | { "inactive_anon", NR_INACTIVE_ANON }, |
1363 | { "active_anon", NR_ACTIVE_ANON }, | |
1364 | { "inactive_file", NR_INACTIVE_FILE }, | |
1365 | { "active_file", NR_ACTIVE_FILE }, | |
1366 | { "unevictable", NR_UNEVICTABLE }, | |
1367 | { "slab_reclaimable", NR_SLAB_RECLAIMABLE_B }, | |
1368 | { "slab_unreclaimable", NR_SLAB_UNRECLAIMABLE_B }, | |
5f9a4f4a MS |
1369 | |
1370 | /* The memory events */ | |
fff66b79 MS |
1371 | { "workingset_refault_anon", WORKINGSET_REFAULT_ANON }, |
1372 | { "workingset_refault_file", WORKINGSET_REFAULT_FILE }, | |
1373 | { "workingset_activate_anon", WORKINGSET_ACTIVATE_ANON }, | |
1374 | { "workingset_activate_file", WORKINGSET_ACTIVATE_FILE }, | |
1375 | { "workingset_restore_anon", WORKINGSET_RESTORE_ANON }, | |
1376 | { "workingset_restore_file", WORKINGSET_RESTORE_FILE }, | |
1377 | { "workingset_nodereclaim", WORKINGSET_NODERECLAIM }, | |
5f9a4f4a MS |
1378 | }; |
1379 | ||
ff841a06 | 1380 | /* The actual unit of the state item, not the same as the output unit */ |
fff66b79 MS |
1381 | static int memcg_page_state_unit(int item) |
1382 | { | |
1383 | switch (item) { | |
1384 | case MEMCG_PERCPU_B: | |
f4840ccf | 1385 | case MEMCG_ZSWAP_B: |
fff66b79 MS |
1386 | case NR_SLAB_RECLAIMABLE_B: |
1387 | case NR_SLAB_UNRECLAIMABLE_B: | |
ff841a06 YA |
1388 | return 1; |
1389 | case NR_KERNEL_STACK_KB: | |
1390 | return SZ_1K; | |
1391 | default: | |
1392 | return PAGE_SIZE; | |
1393 | } | |
1394 | } | |
1395 | ||
1396 | /* Translate stat items to the correct unit for memory.stat output */ | |
1397 | static int memcg_page_state_output_unit(int item) | |
1398 | { | |
1399 | /* | |
1400 | * Workingset state is actually in pages, but we export it to userspace | |
1401 | * as a scalar count of events, so special case it here. | |
1402 | */ | |
1403 | switch (item) { | |
fff66b79 MS |
1404 | case WORKINGSET_REFAULT_ANON: |
1405 | case WORKINGSET_REFAULT_FILE: | |
1406 | case WORKINGSET_ACTIVATE_ANON: | |
1407 | case WORKINGSET_ACTIVATE_FILE: | |
1408 | case WORKINGSET_RESTORE_ANON: | |
1409 | case WORKINGSET_RESTORE_FILE: | |
1410 | case WORKINGSET_NODERECLAIM: | |
1411 | return 1; | |
fff66b79 | 1412 | default: |
ff841a06 | 1413 | return memcg_page_state_unit(item); |
fff66b79 MS |
1414 | } |
1415 | } | |
1416 | ||
ea1e8796 | 1417 | unsigned long memcg_page_state_output(struct mem_cgroup *memcg, int item) |
fff66b79 | 1418 | { |
ff841a06 YA |
1419 | return memcg_page_state(memcg, item) * |
1420 | memcg_page_state_output_unit(item); | |
1421 | } | |
1422 | ||
ea1e8796 | 1423 | unsigned long memcg_page_state_local_output(struct mem_cgroup *memcg, int item) |
ff841a06 YA |
1424 | { |
1425 | return memcg_page_state_local(memcg, item) * | |
1426 | memcg_page_state_output_unit(item); | |
fff66b79 MS |
1427 | } |

static void memcg_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
{
	int i;

	/*
	 * Provide statistics on the state of the memory subsystem as
	 * well as cumulative event counters that show past behavior.
	 *
	 * This list is ordered following a combination of these gradients:
	 * 1) generic big picture -> specifics and details
	 * 2) reflecting userspace activity -> reflecting kernel heuristics
	 *
	 * Current memory state:
	 */
	mem_cgroup_flush_stats(memcg);

	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
		u64 size;

		size = memcg_page_state_output(memcg, memory_stats[i].idx);
		seq_buf_printf(s, "%s %llu\n", memory_stats[i].name, size);

		if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) {
			size += memcg_page_state_output(memcg,
							NR_SLAB_RECLAIMABLE_B);
			seq_buf_printf(s, "slab %llu\n", size);
		}
	}

	/* Accumulated memory events */
	seq_buf_printf(s, "pgscan %lu\n",
		       memcg_events(memcg, PGSCAN_KSWAPD) +
		       memcg_events(memcg, PGSCAN_DIRECT) +
		       memcg_events(memcg, PGSCAN_KHUGEPAGED));
	seq_buf_printf(s, "pgsteal %lu\n",
		       memcg_events(memcg, PGSTEAL_KSWAPD) +
		       memcg_events(memcg, PGSTEAL_DIRECT) +
		       memcg_events(memcg, PGSTEAL_KHUGEPAGED));

	for (i = 0; i < ARRAY_SIZE(memcg_vm_event_stat); i++) {
		if (memcg_vm_event_stat[i] == PGPGIN ||
		    memcg_vm_event_stat[i] == PGPGOUT)
			continue;

		seq_buf_printf(s, "%s %lu\n",
			       vm_event_name(memcg_vm_event_stat[i]),
			       memcg_events(memcg, memcg_vm_event_stat[i]));
	}
}
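
/*
 * For illustration (sample values, not from the original source), the
 * loop above emits memory.stat lines such as:
 *
 *	slab_reclaimable 12582912
 *	slab_unreclaimable 4194304
 *	slab 16777216
 *
 * The combined "slab" line is synthesized right after slab_unreclaimable
 * by adding the reclaimable byte count back in.
 */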

static void memory_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
{
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		memcg_stat_format(memcg, s);
	else
		memcg1_stat_format(memcg, s);
	if (seq_buf_has_overflowed(s))
		pr_warn("%s: Warning, stat buffer overflow, please report\n", __func__);
}

/**
 * mem_cgroup_print_oom_context: Print OOM information relevant to
 * memory controller.
 * @memcg: The memory cgroup that went over limit
 * @p: Task that is going to be killed
 *
 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
 * enabled
 */
void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
	rcu_read_lock();

	if (memcg) {
		pr_cont(",oom_memcg=");
		pr_cont_cgroup_path(memcg->css.cgroup);
	} else
		pr_cont(",global_oom");
	if (p) {
		pr_cont(",task_memcg=");
		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
	}
	rcu_read_unlock();
}

/**
 * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
 * memory controller.
 * @memcg: The memory cgroup that went over limit
 */
void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
	/* Use a static buffer, as the caller is holding oom_lock. */
	static char buf[PAGE_SIZE];
	struct seq_buf s;

	lockdep_assert_held(&oom_lock);

	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->memory)),
		K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt);
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
			K((u64)page_counter_read(&memcg->swap)),
			K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt);
#ifdef CONFIG_MEMCG_V1
	else {
		pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
			K((u64)page_counter_read(&memcg->memsw)),
			K((u64)memcg->memsw.max), memcg->memsw.failcnt);
		pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
			K((u64)page_counter_read(&memcg->kmem)),
			K((u64)memcg->kmem.max), memcg->kmem.failcnt);
	}
#endif

	pr_info("Memory cgroup stats for ");
	pr_cont_cgroup_path(memcg->css.cgroup);
	pr_cont(":");
	seq_buf_init(&s, buf, sizeof(buf));
	memory_stat_format(memcg, &s);
	seq_buf_do_printk(&s, KERN_INFO);
}

/*
 * Return the memory (and swap, if configured) limit for a memcg.
 */
unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
	unsigned long max = READ_ONCE(memcg->memory.max);

	if (do_memsw_account()) {
		if (mem_cgroup_swappiness(memcg)) {
			/* Calculate swap excess capacity from memsw limit */
			unsigned long swap = READ_ONCE(memcg->memsw.max) - max;

			max += min(swap, (unsigned long)total_swap_pages);
		}
	} else {
		if (mem_cgroup_swappiness(memcg))
			max += min(READ_ONCE(memcg->swap.max),
				   (unsigned long)total_swap_pages);
	}
	return max;
}
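
/*
 * Worked example (illustrative numbers): with memory.max = 262144 pages
 * (1G), memsw.max = 393216 pages (1.5G), nonzero swappiness and ample
 * swap, the do_memsw_account() branch above returns
 * 262144 + min(393216 - 262144, total_swap_pages) = 393216 pages.
 */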

unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
{
	return page_counter_read(&memcg->memory);
}

static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
				     int order)
{
	struct oom_control oc = {
		.zonelist = NULL,
		.nodemask = NULL,
		.memcg = memcg,
		.gfp_mask = gfp_mask,
		.order = order,
	};
	bool ret = true;

	if (mutex_lock_killable(&oom_lock))
		return true;

	if (mem_cgroup_margin(memcg) >= (1 << order))
		goto unlock;

	/*
	 * A few threads which were not waiting at mutex_lock_killable() can
	 * fail to bail out. Therefore, check again after holding oom_lock.
	 */
	ret = task_is_dying() || out_of_memory(&oc);

unlock:
	mutex_unlock(&oom_lock);
	return ret;
}

/*
 * Returns true if successfully killed one or more processes. Though in some
 * corner cases it can return true even without killing any process.
 */
static bool mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
{
	bool locked, ret;

	if (order > PAGE_ALLOC_COSTLY_ORDER)
		return false;

	memcg_memory_event(memcg, MEMCG_OOM);

	if (!memcg1_oom_prepare(memcg, &locked))
		return false;

	ret = mem_cgroup_out_of_memory(memcg, mask, order);

	memcg1_oom_finish(memcg, locked);

	return ret;
}

/**
 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
 * @victim: task to be killed by the OOM killer
 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
 *
 * Returns a pointer to a memory cgroup, which has to be cleaned up
 * by killing all OOM-killable tasks belonging to it.
 *
 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
 */
struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
					    struct mem_cgroup *oom_domain)
{
	struct mem_cgroup *oom_group = NULL;
	struct mem_cgroup *memcg;

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return NULL;

	if (!oom_domain)
		oom_domain = root_mem_cgroup;

	rcu_read_lock();

	memcg = mem_cgroup_from_task(victim);
	if (mem_cgroup_is_root(memcg))
		goto out;

	/*
	 * If the victim task has been asynchronously moved to a different
	 * memory cgroup, we might end up killing tasks outside oom_domain.
	 * In this case it's better to ignore memory.group.oom.
	 */
	if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain)))
		goto out;

	/*
	 * Traverse the memory cgroup hierarchy from the victim task's
	 * cgroup up to the OOMing cgroup (or root) to find the
	 * highest-level memory cgroup with oom.group set.
	 */
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		if (READ_ONCE(memcg->oom_group))
			oom_group = memcg;

		if (memcg == oom_domain)
			break;
	}

	if (oom_group)
		css_get(&oom_group->css);
out:
	rcu_read_unlock();

	return oom_group;
}

void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
{
	pr_info("Tasks in ");
	pr_cont_cgroup_path(memcg->css.cgroup);
	pr_cont(" are going to be killed due to memory.oom.group set\n");
}

struct memcg_stock_pcp {
	local_lock_t stock_lock;
	struct mem_cgroup *cached; /* this is never the root cgroup */
	unsigned int nr_pages;

	struct obj_cgroup *cached_objcg;
	struct pglist_data *cached_pgdat;
	unsigned int nr_bytes;
	int nr_slab_reclaimable_b;
	int nr_slab_unreclaimable_b;

	struct work_struct work;
	unsigned long flags;
#define FLUSHING_CACHED_CHARGE	0
};
static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock) = {
	.stock_lock = INIT_LOCAL_LOCK(stock_lock),
};
static DEFINE_MUTEX(percpu_charge_mutex);
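
/*
 * Usage sketch (illustrative, not part of the original file) of how the
 * charge path uses this per-CPU stock:
 *
 *	if (consume_stock(memcg, nr_pages))
 *		return 0;			// hit: no page_counter atomics
 *	// miss: charge a whole MEMCG_CHARGE_BATCH via page_counter, then
 *	refill_stock(memcg, batch - nr_pages);	// cache the surplus locally
 */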

static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock);
static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
				     struct mem_cgroup *root_memcg);

/**
 * consume_stock: Try to consume stocked charge on this cpu.
 * @memcg: memcg to consume from.
 * @nr_pages: how many pages to charge.
 *
 * The charges will only happen if @memcg matches the current cpu's memcg
 * stock, and at least @nr_pages are available in that stock. Failure to
 * service an allocation will refill the stock.
 *
 * returns true if successful, false otherwise.
 */
static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	struct memcg_stock_pcp *stock;
	unsigned int stock_pages;
	unsigned long flags;
	bool ret = false;

	if (nr_pages > MEMCG_CHARGE_BATCH)
		return ret;

	local_lock_irqsave(&memcg_stock.stock_lock, flags);

	stock = this_cpu_ptr(&memcg_stock);
	stock_pages = READ_ONCE(stock->nr_pages);
	if (memcg == READ_ONCE(stock->cached) && stock_pages >= nr_pages) {
		WRITE_ONCE(stock->nr_pages, stock_pages - nr_pages);
		ret = true;
	}

	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);

	return ret;
}

/*
 * Return the cached stocks to the page counters and reset the cached
 * information.
 */
static void drain_stock(struct memcg_stock_pcp *stock)
{
	unsigned int stock_pages = READ_ONCE(stock->nr_pages);
	struct mem_cgroup *old = READ_ONCE(stock->cached);

	if (!old)
		return;

	if (stock_pages) {
		page_counter_uncharge(&old->memory, stock_pages);
		if (do_memsw_account())
			page_counter_uncharge(&old->memsw, stock_pages);

		WRITE_ONCE(stock->nr_pages, 0);
	}

	css_put(&old->css);
	WRITE_ONCE(stock->cached, NULL);
}

static void drain_local_stock(struct work_struct *dummy)
{
	struct memcg_stock_pcp *stock;
	struct obj_cgroup *old = NULL;
	unsigned long flags;

	/*
	 * The only protection from cpu hotplug (memcg_hotplug_cpu_dead) vs.
	 * drain_stock races is that we always operate on local CPU stock
	 * here with IRQ disabled
	 */
	local_lock_irqsave(&memcg_stock.stock_lock, flags);

	stock = this_cpu_ptr(&memcg_stock);
	old = drain_obj_stock(stock);
	drain_stock(stock);
	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);

	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
	obj_cgroup_put(old);
}

/*
 * Cache charges in the local per-cpu area.
 * They will be consumed by consume_stock() later.
 */
static void __refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	struct memcg_stock_pcp *stock;
	unsigned int stock_pages;

	stock = this_cpu_ptr(&memcg_stock);
	if (READ_ONCE(stock->cached) != memcg) { /* reset if necessary */
		drain_stock(stock);
		css_get(&memcg->css);
		WRITE_ONCE(stock->cached, memcg);
	}
	stock_pages = READ_ONCE(stock->nr_pages) + nr_pages;
	WRITE_ONCE(stock->nr_pages, stock_pages);

	if (stock_pages > MEMCG_CHARGE_BATCH)
		drain_stock(stock);
}

static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	unsigned long flags;

	local_lock_irqsave(&memcg_stock.stock_lock, flags);
	__refill_stock(memcg, nr_pages);
	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
}

/*
 * Drain all per-CPU charge caches for the given root_memcg and the
 * subtree of the hierarchy under it.
 */
void drain_all_stock(struct mem_cgroup *root_memcg)
{
	int cpu, curcpu;

	/* If someone's already draining, avoid adding more workers. */
	if (!mutex_trylock(&percpu_charge_mutex))
		return;
	/*
	 * Notify other cpus that a system-wide "drain" is running.
	 * We do not care about races with the cpu hotplug because cpu down
	 * as well as workers from this path always operate on the local
	 * per-cpu data. CPU up doesn't touch memcg_stock at all.
	 */
	migrate_disable();
	curcpu = smp_processor_id();
	for_each_online_cpu(cpu) {
		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
		struct mem_cgroup *memcg;
		bool flush = false;

		rcu_read_lock();
		memcg = READ_ONCE(stock->cached);
		if (memcg && READ_ONCE(stock->nr_pages) &&
		    mem_cgroup_is_descendant(memcg, root_memcg))
			flush = true;
		else if (obj_stock_flush_required(stock, root_memcg))
			flush = true;
		rcu_read_unlock();

		if (flush &&
		    !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
			if (cpu == curcpu)
				drain_local_stock(&stock->work);
			else if (!cpu_is_isolated(cpu))
				schedule_work_on(cpu, &stock->work);
		}
	}
	migrate_enable();
	mutex_unlock(&percpu_charge_mutex);
}

static int memcg_hotplug_cpu_dead(unsigned int cpu)
{
	struct memcg_stock_pcp *stock;

	stock = &per_cpu(memcg_stock, cpu);
	drain_stock(stock);

	return 0;
}

static unsigned long reclaim_high(struct mem_cgroup *memcg,
				  unsigned int nr_pages,
				  gfp_t gfp_mask)
{
	unsigned long nr_reclaimed = 0;

	do {
		unsigned long pflags;

		if (page_counter_read(&memcg->memory) <=
		    READ_ONCE(memcg->memory.high))
			continue;

		memcg_memory_event(memcg, MEMCG_HIGH);

		psi_memstall_enter(&pflags);
		nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages,
							     gfp_mask,
							     MEMCG_RECLAIM_MAY_SWAP,
							     NULL);
		psi_memstall_leave(&pflags);
	} while ((memcg = parent_mem_cgroup(memcg)) &&
		 !mem_cgroup_is_root(memcg));

	return nr_reclaimed;
}

static void high_work_func(struct work_struct *work)
{
	struct mem_cgroup *memcg;

	memcg = container_of(work, struct mem_cgroup, high_work);
	reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
}

/*
 * Clamp the maximum sleep time per allocation batch to 2 seconds. This is
 * enough to still cause a significant slowdown in most cases, while still
 * allowing diagnostics and tracing to proceed without becoming stuck.
 */
#define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ)

/*
 * When calculating the delay, we use these on either side of the
 * exponentiation to maintain precision and scale to a reasonable number of
 * jiffies (see the table below).
 *
 * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
 *   overage ratio to a delay.
 * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
 *   proposed penalty in order to reduce to a reasonable number of jiffies, and
 *   to produce a reasonable delay curve.
 *
 * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a
 * reasonable delay curve compared to precision-adjusted overage, not
 * penalising heavily at first, but still making sure that growth beyond the
 * limit penalises misbehaving cgroups by slowing them down exponentially. For
 * example, with a high of 100 megabytes:
 *
 *  +-------+------------------------+
 *  | usage | time to allocate in ms |
 *  +-------+------------------------+
 *  | 100M  |                      0 |
 *  | 101M  |                      6 |
 *  | 102M  |                     25 |
 *  | 103M  |                     57 |
 *  | 104M  |                    102 |
 *  | 105M  |                    159 |
 *  | 106M  |                    230 |
 *  | 107M  |                    313 |
 *  | 108M  |                    409 |
 *  | 109M  |                    518 |
 *  | 110M  |                    639 |
 *  | 111M  |                    774 |
 *  | 112M  |                    921 |
 *  | 113M  |                   1081 |
 *  | 114M  |                   1254 |
 *  | 115M  |                   1439 |
 *  | 116M  |                   1638 |
 *  | 117M  |                   1849 |
 *  | 118M  |                   2000 |
 *  | 119M  |                   2000 |
 *  | 120M  |                   2000 |
 *  +-------+------------------------+
 */
#define MEMCG_DELAY_PRECISION_SHIFT 20
#define MEMCG_DELAY_SCALING_SHIFT 14
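
/*
 * Worked example (illustrative): for high = 100M and usage = 104M,
 * calculate_overage() below yields (4M << 20) / 100M ~= 41943. Then
 * 41943^2 * HZ >> (20 + 14) ~= 102 jiffies at HZ=1000, matching the
 * 104M row in the table above.
 */
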
static u64 calculate_overage(unsigned long usage, unsigned long high)
{
	u64 overage;

	if (usage <= high)
		return 0;

	/*
	 * Prevent division by 0 in overage calculation by acting as if
	 * it was a threshold of 1 page
	 */
	high = max(high, 1UL);

	overage = usage - high;
	overage <<= MEMCG_DELAY_PRECISION_SHIFT;
	return div64_u64(overage, high);
}

static u64 mem_find_max_overage(struct mem_cgroup *memcg)
{
	u64 overage, max_overage = 0;

	do {
		overage = calculate_overage(page_counter_read(&memcg->memory),
					    READ_ONCE(memcg->memory.high));
		max_overage = max(overage, max_overage);
	} while ((memcg = parent_mem_cgroup(memcg)) &&
		 !mem_cgroup_is_root(memcg));

	return max_overage;
}

static u64 swap_find_max_overage(struct mem_cgroup *memcg)
{
	u64 overage, max_overage = 0;

	do {
		overage = calculate_overage(page_counter_read(&memcg->swap),
					    READ_ONCE(memcg->swap.high));
		if (overage)
			memcg_memory_event(memcg, MEMCG_SWAP_HIGH);
		max_overage = max(overage, max_overage);
	} while ((memcg = parent_mem_cgroup(memcg)) &&
		 !mem_cgroup_is_root(memcg));

	return max_overage;
}

/*
 * Get the number of jiffies that we should penalise a mischievous cgroup
 * for exceeding its memory.high, checking both it and its ancestors.
 */
static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
					  unsigned int nr_pages,
					  u64 max_overage)
{
	unsigned long penalty_jiffies;

	if (!max_overage)
		return 0;

	/*
	 * We use overage compared to memory.high to calculate the number of
	 * jiffies to sleep (penalty_jiffies). Ideally this value should be
	 * fairly lenient on small overages, and increasingly harsh when the
	 * memcg in question makes it clear that it has no intention of stopping
	 * its crazy behaviour, so we exponentially increase the delay based on
	 * overage amount.
	 */
	penalty_jiffies = max_overage * max_overage * HZ;
	penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT;
	penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT;

	/*
	 * Factor in the task's own contribution to the overage, such that four
	 * N-sized allocations are throttled approximately the same as one
	 * 4N-sized allocation.
	 *
	 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or
	 * larger the current charge batch is than that.
	 */
	return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH;
}
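
/*
 * Illustrative note: the final scaling by nr_pages / MEMCG_CHARGE_BATCH
 * means a task that charged only a quarter of the nominal batch sleeps
 * a quarter of the full penalty, so four N-page charges add up to about
 * the same delay as a single 4N-page charge.
 */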

/*
 * Reclaims memory over the high limit. Called directly from
 * try_charge() (context permitting), as well as from the userland
 * return path where reclaim is always able to block.
 */
void mem_cgroup_handle_over_high(gfp_t gfp_mask)
{
	unsigned long penalty_jiffies;
	unsigned long pflags;
	unsigned long nr_reclaimed;
	unsigned int nr_pages = current->memcg_nr_pages_over_high;
	int nr_retries = MAX_RECLAIM_RETRIES;
	struct mem_cgroup *memcg;
	bool in_retry = false;

	if (likely(!nr_pages))
		return;

	memcg = get_mem_cgroup_from_mm(current->mm);
	current->memcg_nr_pages_over_high = 0;

retry_reclaim:
	/*
	 * Bail if the task is already exiting. Unlike memory.max,
	 * memory.high enforcement isn't as strict, and there is no
	 * OOM killer involved, which means the excess could already
	 * be much bigger (and still growing) than it could for
	 * memory.max; the dying task could get stuck in fruitless
	 * reclaim for a long time, which isn't desirable.
	 */
	if (task_is_dying())
		goto out;

	/*
	 * The allocating task should reclaim at least the batch size, but for
	 * subsequent retries we only want to do what's necessary to prevent oom
	 * or breaching resource isolation.
	 *
	 * This is distinct from memory.max or page allocator behaviour because
	 * memory.high is currently batched, whereas memory.max and the page
	 * allocator run every time an allocation is made.
	 */
	nr_reclaimed = reclaim_high(memcg,
				    in_retry ? SWAP_CLUSTER_MAX : nr_pages,
				    gfp_mask);

	/*
	 * memory.high is breached and reclaim is unable to keep up. Throttle
	 * allocators proactively to slow down excessive growth.
	 */
	penalty_jiffies = calculate_high_delay(memcg, nr_pages,
					       mem_find_max_overage(memcg));

	penalty_jiffies += calculate_high_delay(memcg, nr_pages,
						swap_find_max_overage(memcg));

	/*
	 * Clamp the max delay per usermode return so as to still keep the
	 * application moving forwards and also permit diagnostics, albeit
	 * extremely slowly.
	 */
	penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);

	/*
	 * Don't sleep if the amount of jiffies this memcg owes us is so low
	 * that it's not even worth doing, in an attempt to be nice to those who
	 * go only a small amount over their memory.high value and maybe haven't
	 * been aggressively reclaimed enough yet.
	 */
	if (penalty_jiffies <= HZ / 100)
		goto out;

	/*
	 * If reclaim is making forward progress but we're still over
	 * memory.high, we want to encourage that rather than doing allocator
	 * throttling.
	 */
	if (nr_reclaimed || nr_retries--) {
		in_retry = true;
		goto retry_reclaim;
	}

	/*
	 * Reclaim didn't manage to push usage below the limit, slow
	 * this allocating task down.
	 *
	 * If we exit early, we're guaranteed to die (since
	 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't
	 * need to account for any ill-begotten jiffies to pay them off later.
	 */
	psi_memstall_enter(&pflags);
	schedule_timeout_killable(penalty_jiffies);
	psi_memstall_leave(&pflags);

out:
	css_put(&memcg->css);
}

int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
		     unsigned int nr_pages)
{
	unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
	int nr_retries = MAX_RECLAIM_RETRIES;
	struct mem_cgroup *mem_over_limit;
	struct page_counter *counter;
	unsigned long nr_reclaimed;
	bool passed_oom = false;
	unsigned int reclaim_options = MEMCG_RECLAIM_MAY_SWAP;
	bool drained = false;
	bool raised_max_event = false;
	unsigned long pflags;

retry:
	if (consume_stock(memcg, nr_pages))
		return 0;

	if (!do_memsw_account() ||
	    page_counter_try_charge(&memcg->memsw, batch, &counter)) {
		if (page_counter_try_charge(&memcg->memory, batch, &counter))
			goto done_restock;
		if (do_memsw_account())
			page_counter_uncharge(&memcg->memsw, batch);
		mem_over_limit = mem_cgroup_from_counter(counter, memory);
	} else {
		mem_over_limit = mem_cgroup_from_counter(counter, memsw);
		reclaim_options &= ~MEMCG_RECLAIM_MAY_SWAP;
	}

	if (batch > nr_pages) {
		batch = nr_pages;
		goto retry;
	}

	/*
	 * Prevent unbounded recursion when reclaim operations need to
	 * allocate memory. This might exceed the limits temporarily,
	 * but we prefer facilitating memory reclaim and getting back
	 * under the limit over triggering OOM kills in these cases.
	 */
	if (unlikely(current->flags & PF_MEMALLOC))
		goto force;

	if (unlikely(task_in_memcg_oom(current)))
		goto nomem;

	if (!gfpflags_allow_blocking(gfp_mask))
		goto nomem;

	memcg_memory_event(mem_over_limit, MEMCG_MAX);
	raised_max_event = true;

	psi_memstall_enter(&pflags);
	nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
						    gfp_mask, reclaim_options, NULL);
	psi_memstall_leave(&pflags);

	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
		goto retry;

	if (!drained) {
		drain_all_stock(mem_over_limit);
		drained = true;
		goto retry;
	}

	if (gfp_mask & __GFP_NORETRY)
		goto nomem;
	/*
	 * Even though the limit is exceeded at this point, reclaim
	 * may have been able to free some pages. Retry the charge
	 * before killing the task.
	 *
	 * Only for regular pages, though: huge pages are rather
	 * unlikely to succeed so close to the limit, and we fall back
	 * to regular pages anyway in case of failure.
	 */
	if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
		goto retry;
	/*
	 * During a task move, charges can be counted twice, so it's
	 * better to wait until the move has finished if one is in progress.
	 */
	if (memcg1_wait_acct_move(mem_over_limit))
		goto retry;

	if (nr_retries--)
		goto retry;

	if (gfp_mask & __GFP_RETRY_MAYFAIL)
		goto nomem;

	/* Avoid endless loop for tasks bypassed by the oom killer */
	if (passed_oom && task_is_dying())
		goto nomem;

	/*
	 * Keep retrying as long as the memcg oom killer is able to make
	 * forward progress, or bypass the charge if the oom killer
	 * couldn't make any progress.
	 */
	if (mem_cgroup_oom(mem_over_limit, gfp_mask,
			   get_order(nr_pages * PAGE_SIZE))) {
		passed_oom = true;
		nr_retries = MAX_RECLAIM_RETRIES;
		goto retry;
	}
nomem:
	/*
	 * Memcg doesn't have a dedicated reserve for atomic
	 * allocations. But like the global atomic pool, we need to
	 * put the burden of reclaim on regular allocation requests
	 * and let these go through as privileged allocations.
	 */
	if (!(gfp_mask & (__GFP_NOFAIL | __GFP_HIGH)))
		return -ENOMEM;
force:
	/*
	 * If the allocation has to be enforced, don't forget to raise
	 * a MEMCG_MAX event.
	 */
	if (!raised_max_event)
		memcg_memory_event(mem_over_limit, MEMCG_MAX);

	/*
	 * The allocation either can't fail or will lead to more memory
	 * being freed very soon. Allow memory usage to go over the limit
	 * temporarily by force charging it.
	 */
	page_counter_charge(&memcg->memory, nr_pages);
	if (do_memsw_account())
		page_counter_charge(&memcg->memsw, nr_pages);

	return 0;

done_restock:
	if (batch > nr_pages)
		refill_stock(memcg, batch - nr_pages);

	/*
	 * If the hierarchy is above the normal consumption range, schedule
	 * reclaim on returning to userland. We can perform reclaim here
	 * if __GFP_RECLAIM but let's always punt for simplicity and so that
	 * GFP_KERNEL can consistently be used during reclaim. @memcg is
	 * not recorded as it most likely matches current's and won't
	 * change in the meantime. As high limit is checked again before
	 * reclaim, the cost of mismatch is negligible.
	 */
	do {
		bool mem_high, swap_high;

		mem_high = page_counter_read(&memcg->memory) >
			READ_ONCE(memcg->memory.high);
		swap_high = page_counter_read(&memcg->swap) >
			READ_ONCE(memcg->swap.high);

		/* Don't bother a random interrupted task */
		if (!in_task()) {
			if (mem_high) {
				schedule_work(&memcg->high_work);
				break;
			}
			continue;
		}

		if (mem_high || swap_high) {
			/*
			 * The allocating tasks in this cgroup will need to do
			 * reclaim or be throttled to prevent further growth
			 * of the memory or swap footprints.
			 *
			 * Target some best-effort fairness between the tasks,
			 * and distribute reclaim work and delay penalties
			 * based on how much each task is actually allocating.
			 */
			current->memcg_nr_pages_over_high += batch;
			set_notify_resume(current);
			break;
		}
	} while ((memcg = parent_mem_cgroup(memcg)));

	/*
	 * Reclaim is set up above to be called from the userland
	 * return path. But also attempt synchronous reclaim to avoid
	 * excessive overrun while the task is still inside the
	 * kernel. If this is successful, the return path will see it
	 * when it rechecks the overage and simply bail out.
	 */
	if (current->memcg_nr_pages_over_high > MEMCG_CHARGE_BATCH &&
	    !(current->flags & PF_MEMALLOC) &&
	    gfpflags_allow_blocking(gfp_mask))
		mem_cgroup_handle_over_high(gfp_mask);
	return 0;
}
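
/*
 * For reference (summary, not part of the original file), the slow path
 * above escalates roughly in this order before giving up: direct
 * reclaim -> drain per-CPU stocks -> retry while margin allows -> memcg
 * OOM kill -> force the charge for __GFP_NOFAIL/__GFP_HIGH requests.
 */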

/**
 * mem_cgroup_cancel_charge() - cancel an uncommitted try_charge() call.
 * @memcg: memcg previously charged.
 * @nr_pages: number of pages previously charged.
 */
void mem_cgroup_cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	if (mem_cgroup_is_root(memcg))
		return;

	page_counter_uncharge(&memcg->memory, nr_pages);
	if (do_memsw_account())
		page_counter_uncharge(&memcg->memsw, nr_pages);
}

static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
{
	VM_BUG_ON_FOLIO(folio_memcg(folio), folio);
	/*
	 * Any of the following ensures page's memcg stability:
	 *
	 * - the page lock
	 * - LRU isolation
	 * - folio_memcg_lock()
	 * - exclusive reference
	 * - mem_cgroup_trylock_pages()
	 */
	folio->memcg_data = (unsigned long)memcg;
}

/**
 * mem_cgroup_commit_charge - commit a previously successful try_charge().
 * @folio: folio to commit the charge to.
 * @memcg: memcg previously charged.
 */
void mem_cgroup_commit_charge(struct folio *folio, struct mem_cgroup *memcg)
{
	css_get(&memcg->css);
	commit_charge(folio, memcg);

	local_irq_disable();
	mem_cgroup_charge_statistics(memcg, folio_nr_pages(folio));
	memcg1_check_events(memcg, folio_nid(folio));
	local_irq_enable();
}
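
/*
 * Pairing sketch (illustrative; insert_into_structure() is a made-up
 * placeholder): a caller that charges speculatively undoes the charge
 * with mem_cgroup_cancel_charge() if a later step fails:
 *
 *	if (try_charge_memcg(memcg, gfp, nr))
 *		return -ENOMEM;
 *	if (insert_into_structure(folio) < 0) {
 *		mem_cgroup_cancel_charge(memcg, nr);	// roll back counters
 *		return -EIO;
 *	}
 *	mem_cgroup_commit_charge(folio, memcg);		// bind folio to memcg
 */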

static inline void __mod_objcg_mlstate(struct obj_cgroup *objcg,
				       struct pglist_data *pgdat,
				       enum node_stat_item idx, int nr)
{
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	rcu_read_lock();
	memcg = obj_cgroup_memcg(objcg);
	lruvec = mem_cgroup_lruvec(memcg, pgdat);
	__mod_memcg_lruvec_state(lruvec, idx, nr);
	rcu_read_unlock();
}

static __always_inline
struct mem_cgroup *mem_cgroup_from_obj_folio(struct folio *folio, void *p)
{
	/*
	 * Slab objects are accounted individually, not per-page.
	 * Memcg membership data for each individual object is saved in
	 * slab->obj_exts.
	 */
	if (folio_test_slab(folio)) {
		struct slabobj_ext *obj_exts;
		struct slab *slab;
		unsigned int off;

		slab = folio_slab(folio);
		obj_exts = slab_obj_exts(slab);
		if (!obj_exts)
			return NULL;

		off = obj_to_index(slab->slab_cache, slab, p);
		if (obj_exts[off].objcg)
			return obj_cgroup_memcg(obj_exts[off].objcg);

		return NULL;
	}

	/*
	 * folio_memcg_check() is used here, because in theory we can encounter
	 * a folio where the slab flag has been cleared already, but
	 * slab->obj_exts has not been freed yet. folio_memcg_check() will
	 * guarantee that a proper memory cgroup pointer or NULL will be
	 * returned.
	 */
	return folio_memcg_check(folio);
}

/*
 * Returns a pointer to the memory cgroup to which the kernel object is charged.
 *
 * A passed kernel object can be a slab object, vmalloc object or a generic
 * kernel page, so different mechanisms for getting the memory cgroup pointer
 * should be used.
 *
 * In certain cases (e.g. kernel stacks or large kmallocs with SLUB) the caller
 * cannot know for sure how the kernel object is implemented.
 * mem_cgroup_from_obj() can be safely used in such cases.
 *
 * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
 * cgroup_mutex, etc.
 */
struct mem_cgroup *mem_cgroup_from_obj(void *p)
{
	struct folio *folio;

	if (mem_cgroup_disabled())
		return NULL;

	if (unlikely(is_vmalloc_addr(p)))
		folio = page_folio(vmalloc_to_page(p));
	else
		folio = virt_to_folio(p);

	return mem_cgroup_from_obj_folio(folio, p);
}

/*
 * Returns a pointer to the memory cgroup to which the kernel object is charged.
 * Similar to mem_cgroup_from_obj(), but faster and not suitable for objects
 * allocated using vmalloc().
 *
 * A passed kernel object must be a slab object or a generic kernel page.
 *
 * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
 * cgroup_mutex, etc.
 */
struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
{
	if (mem_cgroup_disabled())
		return NULL;

	return mem_cgroup_from_obj_folio(virt_to_folio(p), p);
}
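
/*
 * Usage sketch (illustrative): both lookups return a borrowed pointer,
 * so callers typically pin the memcg lifetime with RCU:
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_slab_obj(p);
 *	if (memcg)
 *		...			// e.g. test descendancy, read stats
 *	rcu_read_unlock();
 */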

static struct obj_cgroup *__get_obj_cgroup_from_memcg(struct mem_cgroup *memcg)
{
	struct obj_cgroup *objcg = NULL;

	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
		objcg = rcu_dereference(memcg->objcg);
		if (likely(objcg && obj_cgroup_tryget(objcg)))
			break;
		objcg = NULL;
	}
	return objcg;
}

static struct obj_cgroup *current_objcg_update(void)
{
	struct mem_cgroup *memcg;
	struct obj_cgroup *old, *objcg = NULL;

	do {
		/* Atomically drop the update bit. */
		old = xchg(&current->objcg, NULL);
		if (old) {
			old = (struct obj_cgroup *)
				((unsigned long)old & ~CURRENT_OBJCG_UPDATE_FLAG);
			obj_cgroup_put(old);

			old = NULL;
		}

		/* If new objcg is NULL, no reason for the second atomic update. */
		if (!current->mm || (current->flags & PF_KTHREAD))
			return NULL;

		/*
		 * Release the objcg pointer from the previous iteration,
		 * if try_cmpxchg() below fails.
		 */
		if (unlikely(objcg)) {
			obj_cgroup_put(objcg);
			objcg = NULL;
		}

		/*
		 * Obtain the new objcg pointer. The current task can be
		 * asynchronously moved to another memcg and the previous
		 * memcg can be offlined. So let's get the memcg pointer
		 * and try to get a reference to objcg under an rcu read lock.
		 */

		rcu_read_lock();
		memcg = mem_cgroup_from_task(current);
		objcg = __get_obj_cgroup_from_memcg(memcg);
		rcu_read_unlock();

		/*
		 * Try to set up a new objcg pointer atomically. If it
		 * fails, it means the update flag was set concurrently, so
		 * the whole procedure should be repeated.
		 */
	} while (!try_cmpxchg(&current->objcg, &old, objcg));

	return objcg;
}

__always_inline struct obj_cgroup *current_obj_cgroup(void)
{
	struct mem_cgroup *memcg;
	struct obj_cgroup *objcg;

	if (in_task()) {
		memcg = current->active_memcg;
		if (unlikely(memcg))
			goto from_memcg;

		objcg = READ_ONCE(current->objcg);
		if (unlikely((unsigned long)objcg & CURRENT_OBJCG_UPDATE_FLAG))
			objcg = current_objcg_update();
		/*
		 * Objcg reference is kept by the task, so it's safe
		 * to use the objcg by the current task.
		 */
		return objcg;
	}

	memcg = this_cpu_read(int_active_memcg);
	if (unlikely(memcg))
		goto from_memcg;

	return NULL;

from_memcg:
	objcg = NULL;
	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
		/*
		 * Memcg pointer is protected by scope (see set_active_memcg())
		 * and is pinning the corresponding objcg, so objcg can't go
		 * away and can be used within the scope without any additional
		 * protection.
		 */
		objcg = rcu_dereference_check(memcg->objcg, 1);
		if (likely(objcg))
			break;
	}

	return objcg;
}
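
/*
 * Note (illustrative): current_obj_cgroup() returns a borrowed pointer
 * that is only stable within the current scope. Callers that publish it
 * elsewhere, such as __memcg_kmem_charge_page() below, take their own
 * reference first:
 *
 *	objcg = current_obj_cgroup();
 *	if (objcg)
 *		obj_cgroup_get(objcg);	// before stashing the pointer
 */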

struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio)
{
	struct obj_cgroup *objcg;

	if (!memcg_kmem_online())
		return NULL;

	if (folio_memcg_kmem(folio)) {
		objcg = __folio_objcg(folio);
		obj_cgroup_get(objcg);
	} else {
		struct mem_cgroup *memcg;

		rcu_read_lock();
		memcg = __folio_memcg(folio);
		if (memcg)
			objcg = __get_obj_cgroup_from_memcg(memcg);
		else
			objcg = NULL;
		rcu_read_unlock();
	}
	return objcg;
}

/*
 * obj_cgroup_uncharge_pages: uncharge a number of kernel pages from an objcg
 * @objcg: object cgroup to uncharge
 * @nr_pages: number of pages to uncharge
 */
static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
				      unsigned int nr_pages)
{
	struct mem_cgroup *memcg;

	memcg = get_mem_cgroup_from_objcg(objcg);

	mod_memcg_state(memcg, MEMCG_KMEM, -nr_pages);
	memcg1_account_kmem(memcg, -nr_pages);
	refill_stock(memcg, nr_pages);

	css_put(&memcg->css);
}

/*
 * obj_cgroup_charge_pages: charge a number of kernel pages to an objcg
 * @objcg: object cgroup to charge
 * @gfp: reclaim mode
 * @nr_pages: number of pages to charge
 *
 * Returns 0 on success, an error code on failure.
 */
static int obj_cgroup_charge_pages(struct obj_cgroup *objcg, gfp_t gfp,
				   unsigned int nr_pages)
{
	struct mem_cgroup *memcg;
	int ret;

	memcg = get_mem_cgroup_from_objcg(objcg);

	ret = try_charge_memcg(memcg, gfp, nr_pages);
	if (ret)
		goto out;

	mod_memcg_state(memcg, MEMCG_KMEM, nr_pages);
	memcg1_account_kmem(memcg, nr_pages);
out:
	css_put(&memcg->css);

	return ret;
}

/**
 * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
 * @page: page to charge
 * @gfp: reclaim mode
 * @order: allocation order
 *
 * Returns 0 on success, an error code on failure.
 */
int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
{
	struct obj_cgroup *objcg;
	int ret = 0;

	objcg = current_obj_cgroup();
	if (objcg) {
		ret = obj_cgroup_charge_pages(objcg, gfp, 1 << order);
		if (!ret) {
			obj_cgroup_get(objcg);
			page->memcg_data = (unsigned long)objcg |
				MEMCG_DATA_KMEM;
			return 0;
		}
	}
	return ret;
}
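
/*
 * Pairing sketch (illustrative): these entry points back accounted page
 * allocations, e.g.:
 *
 *	page = alloc_pages(GFP_KERNEL | __GFP_ACCOUNT, order);
 *		// -> __memcg_kmem_charge_page(page, gfp, order)
 *	__free_pages(page, order);
 *		// -> __memcg_kmem_uncharge_page(page, order)
 */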
49a18eae | 2697 | |
45264778 | 2698 | /** |
f4b00eab | 2699 | * __memcg_kmem_uncharge_page: uncharge a kmem page |
45264778 VD |
2700 | * @page: page to uncharge |
2701 | * @order: allocation order | |
2702 | */ | |
f4b00eab | 2703 | void __memcg_kmem_uncharge_page(struct page *page, int order) |
7ae1e1d0 | 2704 | { |
1b7e4464 | 2705 | struct folio *folio = page_folio(page); |
b4e0b68f | 2706 | struct obj_cgroup *objcg; |
f3ccb2c4 | 2707 | unsigned int nr_pages = 1 << order; |
7ae1e1d0 | 2708 | |
1b7e4464 | 2709 | if (!folio_memcg_kmem(folio)) |
7ae1e1d0 GC |
2710 | return; |
2711 | ||
1b7e4464 | 2712 | objcg = __folio_objcg(folio); |
b4e0b68f | 2713 | obj_cgroup_uncharge_pages(objcg, nr_pages); |
1b7e4464 | 2714 | folio->memcg_data = 0; |
b4e0b68f | 2715 | obj_cgroup_put(objcg); |
60d3fd32 | 2716 | } |
bf4f0599 | 2717 | |
91882c16 | 2718 | static void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat, |
68ac5b3c WL |
2719 | enum node_stat_item idx, int nr) |
2720 | { | |
fead2b86 | 2721 | struct memcg_stock_pcp *stock; |
56751146 | 2722 | struct obj_cgroup *old = NULL; |
68ac5b3c WL |
2723 | unsigned long flags; |
2724 | int *bytes; | |
2725 | ||
56751146 | 2726 | local_lock_irqsave(&memcg_stock.stock_lock, flags); |
fead2b86 MH |
2727 | stock = this_cpu_ptr(&memcg_stock); |
2728 | ||
68ac5b3c WL |
2729 | /* |
2730 | * Save vmstat data in stock and skip vmstat array update unless | |
2731 | * accumulating over a page of vmstat data or when pgdat or idx | |
2732 | * changes. | |
2733 | */ | |
3b8abb32 | 2734 | if (READ_ONCE(stock->cached_objcg) != objcg) { |
56751146 | 2735 | old = drain_obj_stock(stock); |
68ac5b3c WL |
2736 | obj_cgroup_get(objcg); |
2737 | stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes) | |
2738 | ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0; | |
3b8abb32 | 2739 | WRITE_ONCE(stock->cached_objcg, objcg); |
68ac5b3c WL |
2740 | stock->cached_pgdat = pgdat; |
2741 | } else if (stock->cached_pgdat != pgdat) { | |
2742 | /* Flush the existing cached vmstat data */ | |
7fa0dacb WL |
2743 | struct pglist_data *oldpg = stock->cached_pgdat; |
2744 | ||
68ac5b3c | 2745 | if (stock->nr_slab_reclaimable_b) { |
91882c16 | 2746 | __mod_objcg_mlstate(objcg, oldpg, NR_SLAB_RECLAIMABLE_B, |
68ac5b3c WL |
2747 | stock->nr_slab_reclaimable_b); |
2748 | stock->nr_slab_reclaimable_b = 0; | |
2749 | } | |
2750 | if (stock->nr_slab_unreclaimable_b) { | |
91882c16 | 2751 | __mod_objcg_mlstate(objcg, oldpg, NR_SLAB_UNRECLAIMABLE_B, |
68ac5b3c WL |
2752 | stock->nr_slab_unreclaimable_b); |
2753 | stock->nr_slab_unreclaimable_b = 0; | |
2754 | } | |
2755 | stock->cached_pgdat = pgdat; | |
2756 | } | |
2757 | ||
2758 | bytes = (idx == NR_SLAB_RECLAIMABLE_B) ? &stock->nr_slab_reclaimable_b | |
2759 | : &stock->nr_slab_unreclaimable_b; | |
2760 | /* | |
2761 | * Even for large object >= PAGE_SIZE, the vmstat data will still be | |
2762 | * cached locally at least once before pushing it out. | |
2763 | */ | |
2764 | if (!*bytes) { | |
2765 | *bytes = nr; | |
2766 | nr = 0; | |
2767 | } else { | |
2768 | *bytes += nr; | |
2769 | if (abs(*bytes) > PAGE_SIZE) { | |
2770 | nr = *bytes; | |
2771 | *bytes = 0; | |
2772 | } else { | |
2773 | nr = 0; | |
2774 | } | |
2775 | } | |
2776 | if (nr) | |
91882c16 | 2777 | __mod_objcg_mlstate(objcg, pgdat, idx, nr); |
68ac5b3c | 2778 | |
56751146 | 2779 | local_unlock_irqrestore(&memcg_stock.stock_lock, flags); |
91b71e78 | 2780 | obj_cgroup_put(old); |
68ac5b3c WL |
2781 | } |
2782 | ||
bf4f0599 RG |
2783 | static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes) |
2784 | { | |
fead2b86 | 2785 | struct memcg_stock_pcp *stock; |
bf4f0599 RG |
2786 | unsigned long flags; |
2787 | bool ret = false; | |
2788 | ||
56751146 | 2789 | local_lock_irqsave(&memcg_stock.stock_lock, flags); |
fead2b86 MH |
2790 | |
2791 | stock = this_cpu_ptr(&memcg_stock); | |
3b8abb32 | 2792 | if (objcg == READ_ONCE(stock->cached_objcg) && stock->nr_bytes >= nr_bytes) { |
bf4f0599 RG |
2793 | stock->nr_bytes -= nr_bytes; |
2794 | ret = true; | |
2795 | } | |
2796 | ||
56751146 | 2797 | local_unlock_irqrestore(&memcg_stock.stock_lock, flags); |
bf4f0599 RG |
2798 | |
2799 | return ret; | |
2800 | } | |

static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock)
{
	struct obj_cgroup *old = READ_ONCE(stock->cached_objcg);

	if (!old)
		return NULL;

	if (stock->nr_bytes) {
		unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT;
		unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);

		if (nr_pages) {
			struct mem_cgroup *memcg;

			memcg = get_mem_cgroup_from_objcg(old);

			mod_memcg_state(memcg, MEMCG_KMEM, -nr_pages);
			memcg1_account_kmem(memcg, -nr_pages);
			__refill_stock(memcg, nr_pages);

			css_put(&memcg->css);
		}

		/*
		 * The leftover is flushed to the centralized per-memcg value.
		 * On the next attempt to refill the obj stock it will be moved
		 * to a per-cpu stock (probably on another CPU), see
		 * refill_obj_stock().
		 *
		 * How often it's flushed is a trade-off between the memory
		 * limit enforcement accuracy and potential CPU contention,
		 * so it might be changed in the future.
		 */
		atomic_add(nr_bytes, &old->nr_charged_bytes);
		stock->nr_bytes = 0;
	}

	/*
	 * Flush the vmstat data in the current stock.
	 */
	if (stock->nr_slab_reclaimable_b || stock->nr_slab_unreclaimable_b) {
		if (stock->nr_slab_reclaimable_b) {
			__mod_objcg_mlstate(old, stock->cached_pgdat,
					    NR_SLAB_RECLAIMABLE_B,
					    stock->nr_slab_reclaimable_b);
			stock->nr_slab_reclaimable_b = 0;
		}
		if (stock->nr_slab_unreclaimable_b) {
			__mod_objcg_mlstate(old, stock->cached_pgdat,
					    NR_SLAB_UNRECLAIMABLE_B,
					    stock->nr_slab_unreclaimable_b);
			stock->nr_slab_unreclaimable_b = 0;
		}
		stock->cached_pgdat = NULL;
	}

	WRITE_ONCE(stock->cached_objcg, NULL);
	/*
	 * The 'old' objcg needs to be released by the caller via
	 * obj_cgroup_put() outside of memcg_stock_pcp::stock_lock.
	 */
	return old;
}

static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
				     struct mem_cgroup *root_memcg)
{
	struct obj_cgroup *objcg = READ_ONCE(stock->cached_objcg);
	struct mem_cgroup *memcg;

	if (objcg) {
		memcg = obj_cgroup_memcg(objcg);
		if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
			return true;
	}

	return false;
}

static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
			     bool allow_uncharge)
{
	struct memcg_stock_pcp *stock;
	struct obj_cgroup *old = NULL;
	unsigned long flags;
	unsigned int nr_pages = 0;

	local_lock_irqsave(&memcg_stock.stock_lock, flags);

	stock = this_cpu_ptr(&memcg_stock);
	if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */
		old = drain_obj_stock(stock);
		obj_cgroup_get(objcg);
		WRITE_ONCE(stock->cached_objcg, objcg);
		stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
				? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
		allow_uncharge = true;	/* Allow uncharge when objcg changes */
	}
	stock->nr_bytes += nr_bytes;

	if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) {
		nr_pages = stock->nr_bytes >> PAGE_SHIFT;
		stock->nr_bytes &= (PAGE_SIZE - 1);
	}

	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
	obj_cgroup_put(old);

	if (nr_pages)
		obj_cgroup_uncharge_pages(objcg, nr_pages);
}
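
/*
 * Worked example (illustrative only, assuming 4K pages): if the stock
 * already holds nr_bytes == 3000 for this objcg and refill_obj_stock()
 * adds 2000 more with allow_uncharge == true, the stock reaches 5000
 * bytes. That exceeds PAGE_SIZE, so nr_pages = 5000 >> PAGE_SHIFT == 1
 * and the stock keeps 5000 & (PAGE_SIZE - 1) == 904 bytes; the full
 * page is then returned via obj_cgroup_uncharge_pages() after the
 * local lock is dropped.
 */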

int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
{
	unsigned int nr_pages, nr_bytes;
	int ret;

	if (consume_obj_stock(objcg, size))
		return 0;

	/*
	 * In theory, objcg->nr_charged_bytes can have enough
	 * pre-charged bytes to satisfy the allocation. However,
	 * flushing objcg->nr_charged_bytes requires two atomic
	 * operations, and objcg->nr_charged_bytes can't be big.
	 * The shared objcg->nr_charged_bytes can also become a
	 * performance bottleneck if all tasks of the same memcg are
	 * trying to update it. So it's better to ignore it and try
	 * to grab some new pages. The stock's nr_bytes will be flushed
	 * to objcg->nr_charged_bytes later on when objcg changes.
	 *
	 * The stock's nr_bytes may contain enough pre-charged bytes
	 * to allow one less page from being charged, but we can't rely
	 * on the pre-charged bytes not being changed outside of
	 * consume_obj_stock() or refill_obj_stock(). So ignore those
	 * pre-charged bytes as well when charging pages. To avoid a
	 * page uncharge right after a page charge, we set the
	 * allow_uncharge flag to false when calling refill_obj_stock()
	 * to temporarily allow the pre-charged bytes to exceed the page
	 * size limit. The maximum reachable value of the pre-charged
	 * bytes is (sizeof(object) + PAGE_SIZE - 2) if there is no data
	 * race.
	 */
	nr_pages = size >> PAGE_SHIFT;
	nr_bytes = size & (PAGE_SIZE - 1);

	if (nr_bytes)
		nr_pages += 1;

	ret = obj_cgroup_charge_pages(objcg, gfp, nr_pages);
	if (!ret && nr_bytes)
		refill_obj_stock(objcg, PAGE_SIZE - nr_bytes, false);

	return ret;
}
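
/*
 * Worked example (illustrative only, assuming 4K pages): charging a
 * 700-byte object yields nr_pages == 0 and nr_bytes == 700, so one
 * full page is charged (nr_pages += 1) and the unused remainder,
 * PAGE_SIZE - 700 == 3396 bytes, is handed to the per-cpu stock via
 * refill_obj_stock(..., false); passing false avoids an immediate
 * page uncharge right after the page charge.
 */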

void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
{
	refill_obj_stock(objcg, size, true);
}

static inline size_t obj_full_size(struct kmem_cache *s)
{
	/*
	 * For each accounted object there is an extra space which is used
	 * to store obj_cgroup membership. Charge it too.
	 */
	return s->size + sizeof(struct obj_cgroup *);
}
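
/*
 * Worked example (illustrative only): on a 64-bit kernel, a cache with
 * s->size == 64 is accounted as 64 + sizeof(struct obj_cgroup *) == 72
 * bytes per object, so the membership pointer is charged to the cgroup
 * along with the object itself.
 */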

bool __memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
				  gfp_t flags, size_t size, void **p)
{
	struct obj_cgroup *objcg;
	struct slab *slab;
	unsigned long off;
	size_t i;

	/*
	 * The obtained objcg pointer is safe to use within the current scope,
	 * defined by the current task or a set_active_memcg() pair.
	 * obj_cgroup_get() is used to get a permanent reference.
	 */
	objcg = current_obj_cgroup();
	if (!objcg)
		return true;

	/*
	 * slab_alloc_node() avoids the NULL check, so we might be called with
	 * a single NULL object. kmem_cache_alloc_bulk() aborts if it can't
	 * fill the whole requested size. Return success as there's nothing
	 * to free back.
	 */
	if (unlikely(*p == NULL))
		return true;

	flags &= gfp_allowed_mask;

	if (lru) {
		int ret;
		struct mem_cgroup *memcg;

		memcg = get_mem_cgroup_from_objcg(objcg);
		ret = memcg_list_lru_alloc(memcg, lru, flags);
		css_put(&memcg->css);

		if (ret)
			return false;
	}

	if (obj_cgroup_charge(objcg, flags, size * obj_full_size(s)))
		return false;

	for (i = 0; i < size; i++) {
		slab = virt_to_slab(p[i]);

		if (!slab_obj_exts(slab) &&
		    alloc_slab_obj_exts(slab, s, flags, false)) {
			obj_cgroup_uncharge(objcg, obj_full_size(s));
			continue;
		}

		off = obj_to_index(s, slab, p[i]);
		obj_cgroup_get(objcg);
		slab_obj_exts(slab)[off].objcg = objcg;
		mod_objcg_state(objcg, slab_pgdat(slab),
				cache_vmstat_idx(s), obj_full_size(s));
	}

	return true;
}

void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
			    void **p, int objects, struct slabobj_ext *obj_exts)
{
	for (int i = 0; i < objects; i++) {
		struct obj_cgroup *objcg;
		unsigned int off;

		off = obj_to_index(s, slab, p[i]);
		objcg = obj_exts[off].objcg;
		if (!objcg)
			continue;

		obj_exts[off].objcg = NULL;
		obj_cgroup_uncharge(objcg, obj_full_size(s));
		mod_objcg_state(objcg, slab_pgdat(slab), cache_vmstat_idx(s),
				-obj_full_size(s));
		obj_cgroup_put(objcg);
	}
}

/*
 * Because folio_memcg(head) is not set on tails, set it now.
 */
void split_page_memcg(struct page *head, int old_order, int new_order)
{
	struct folio *folio = page_folio(head);
	struct mem_cgroup *memcg = folio_memcg(folio);
	int i;
	unsigned int old_nr = 1 << old_order;
	unsigned int new_nr = 1 << new_order;

	if (mem_cgroup_disabled() || !memcg)
		return;

	for (i = new_nr; i < old_nr; i += new_nr)
		folio_page(folio, i)->memcg_data = folio->memcg_data;

	if (folio_memcg_kmem(folio))
		obj_cgroup_get_many(__folio_objcg(folio), old_nr / new_nr - 1);
	else
		css_get_many(&memcg->css, old_nr / new_nr - 1);
}
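
/*
 * Worked example (illustrative only): splitting an order-9 THP into
 * base pages (old_order == 9, new_order == 0) copies folio->memcg_data
 * to tails 1..511 and takes old_nr / new_nr - 1 == 511 extra css (or
 * objcg) references, one for each new folio beyond the head.
 */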

unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
{
	unsigned long val;

	if (mem_cgroup_is_root(memcg)) {
		/*
		 * Approximate root's usage from global state. This isn't
		 * perfect, but the root usage was always an approximation.
		 */
		val = global_node_page_state(NR_FILE_PAGES) +
			global_node_page_state(NR_ANON_MAPPED);
		if (swap)
			val += total_swap_pages - get_nr_swap_pages();
	} else {
		if (!swap)
			val = page_counter_read(&memcg->memory);
		else
			val = page_counter_read(&memcg->memsw);
	}
	return val;
}

static int memcg_online_kmem(struct mem_cgroup *memcg)
{
	struct obj_cgroup *objcg;

	if (mem_cgroup_kmem_disabled())
		return 0;

	if (unlikely(mem_cgroup_is_root(memcg)))
		return 0;

	objcg = obj_cgroup_alloc();
	if (!objcg)
		return -ENOMEM;

	objcg->memcg = memcg;
	rcu_assign_pointer(memcg->objcg, objcg);
	obj_cgroup_get(objcg);
	memcg->orig_objcg = objcg;

	static_branch_enable(&memcg_kmem_online_key);

	memcg->kmemcg_id = memcg->id.id;

	return 0;
}

static void memcg_offline_kmem(struct mem_cgroup *memcg)
{
	struct mem_cgroup *parent;

	if (mem_cgroup_kmem_disabled())
		return;

	if (unlikely(mem_cgroup_is_root(memcg)))
		return;

	parent = parent_mem_cgroup(memcg);
	if (!parent)
		parent = root_mem_cgroup;

	memcg_reparent_objcgs(memcg, parent);

	/*
	 * After we have finished memcg_reparent_objcgs(), all list_lrus
	 * corresponding to this cgroup are guaranteed to remain empty.
	 * The ordering is imposed by list_lru_node->lock taken by
	 * memcg_reparent_list_lrus().
	 */
	memcg_reparent_list_lrus(memcg, parent);
}

#ifdef CONFIG_CGROUP_WRITEBACK

#include <trace/events/writeback.h>

static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
{
	return wb_domain_init(&memcg->cgwb_domain, gfp);
}

static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
{
	wb_domain_exit(&memcg->cgwb_domain);
}

static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
{
	wb_domain_size_changed(&memcg->cgwb_domain);
}

struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);

	if (!memcg->css.parent)
		return NULL;

	return &memcg->cgwb_domain;
}

/**
 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
 * @wb: bdi_writeback in question
 * @pfilepages: out parameter for number of file pages
 * @pheadroom: out parameter for number of allocatable pages according to memcg
 * @pdirty: out parameter for number of dirty pages
 * @pwriteback: out parameter for number of pages under writeback
 *
 * Determine the numbers of file, headroom, dirty, and writeback pages in
 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom
 * is a bit more involved.
 *
 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the
 * headroom is calculated as the lowest headroom of itself and the
 * ancestors. Note that this doesn't consider the actual amount of
 * available memory in the system. The caller should further cap
 * *@pheadroom accordingly.
 */
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
			 unsigned long *pheadroom, unsigned long *pdirty,
			 unsigned long *pwriteback)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
	struct mem_cgroup *parent;

	mem_cgroup_flush_stats_ratelimited(memcg);

	*pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
	*pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
	*pfilepages = memcg_page_state(memcg, NR_INACTIVE_FILE) +
		      memcg_page_state(memcg, NR_ACTIVE_FILE);

	*pheadroom = PAGE_COUNTER_MAX;
	while ((parent = parent_mem_cgroup(memcg))) {
		unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
					    READ_ONCE(memcg->memory.high));
		unsigned long used = page_counter_read(&memcg->memory);

		*pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
		memcg = parent;
	}
}
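
/*
 * Worked example (illustrative only, hypothetical hierarchy): for a wb
 * in cgroup B where B (high == 512M, usage == 200M) is a child of A
 * (max == 1G, usage == 600M), the loop computes min(512M - 200M,
 * 1G - 600M) == 312M for *pheadroom, before the caller further caps it
 * against globally available memory.
 */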

/*
 * Foreign dirty flushing
 *
 * There's an inherent mismatch between memcg and writeback. The former
 * tracks ownership per-page while the latter per-inode. This was a
 * deliberate design decision because honoring per-page ownership in the
 * writeback path is complicated, may lead to higher CPU and IO overheads
 * and deemed unnecessary given that write-sharing an inode across
 * different cgroups isn't a common use-case.
 *
 * Combined with inode majority-writer ownership switching, this works well
 * enough in most cases but there are some pathological cases. For
 * example, let's say there are two cgroups A and B which keep writing to
 * different but confined parts of the same inode. B owns the inode and
 * A's memory is limited far below B's. A's dirty ratio can rise enough to
 * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid
 * triggering background writeback. A will be slowed down without a way to
 * make writeback of the dirty pages happen.
 *
 * Conditions like the above can lead to a cgroup getting repeatedly and
 * severely throttled after making some progress after each
 * dirty_expire_interval while the underlying IO device is almost
 * completely idle.
 *
 * Solving this problem completely requires matching the ownership tracking
 * granularities between memcg and writeback in either direction. However,
 * the more egregious behaviors can be avoided by simply remembering the
 * most recent foreign dirtying events and initiating remote flushes on
 * them when local writeback isn't enough to keep the memory clean enough.
 *
 * The following two functions implement such a mechanism. When a foreign
 * page - a page whose memcg and writeback ownerships don't match - is
 * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning
 * bdi_writeback on the page owning memcg. When balance_dirty_pages()
 * decides that the memcg needs to sleep due to high dirty ratio, it calls
 * mem_cgroup_flush_foreign() which queues writeback on the recorded
 * foreign bdi_writebacks which haven't expired. Both the numbers of
 * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
 * limited to MEMCG_CGWB_FRN_CNT.
 *
 * The mechanism only remembers IDs and doesn't hold any object references.
 * As being wrong occasionally doesn't matter, updates and accesses to the
 * records are lockless and racy.
 */
void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
					     struct bdi_writeback *wb)
{
	struct mem_cgroup *memcg = folio_memcg(folio);
	struct memcg_cgwb_frn *frn;
	u64 now = get_jiffies_64();
	u64 oldest_at = now;
	int oldest = -1;
	int i;

	trace_track_foreign_dirty(folio, wb);

	/*
	 * Pick the slot to use. If there is already a slot for @wb, keep
	 * using it. If not, replace the oldest one which isn't being
	 * written out.
	 */
	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
		frn = &memcg->cgwb_frn[i];
		if (frn->bdi_id == wb->bdi->id &&
		    frn->memcg_id == wb->memcg_css->id)
			break;
		if (time_before64(frn->at, oldest_at) &&
		    atomic_read(&frn->done.cnt) == 1) {
			oldest = i;
			oldest_at = frn->at;
		}
	}

	if (i < MEMCG_CGWB_FRN_CNT) {
		/*
		 * Re-using an existing one. Update the timestamp lazily to
		 * avoid making the cacheline hot. We want them to be
		 * reasonably up-to-date and significantly shorter than
		 * dirty_expire_interval as that's what expires the record.
		 * Use the shorter of 1s and dirty_expire_interval / 8.
		 */
		unsigned long update_intv =
			min_t(unsigned long, HZ,
			      msecs_to_jiffies(dirty_expire_interval * 10) / 8);

		if (time_before64(frn->at, now - update_intv))
			frn->at = now;
	} else if (oldest >= 0) {
		/* replace the oldest free one */
		frn = &memcg->cgwb_frn[oldest];
		frn->bdi_id = wb->bdi->id;
		frn->memcg_id = wb->memcg_css->id;
		frn->at = now;
	}
}

/* issue foreign writeback flushes for recorded foreign dirtying events */
void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
	unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10);
	u64 now = jiffies_64;
	int i;

	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
		struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i];

		/*
		 * If the record is older than dirty_expire_interval,
		 * writeback on it has already started. No need to kick it
		 * off again. Also, don't start a new one if there's
		 * already one in flight.
		 */
		if (time_after64(frn->at, now - intv) &&
		    atomic_read(&frn->done.cnt) == 1) {
			frn->at = 0;
			trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id);
			cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id,
					       WB_REASON_FOREIGN_FLUSH,
					       &frn->done);
		}
	}
}

#else	/* CONFIG_CGROUP_WRITEBACK */

static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
{
	return 0;
}

static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
{
}

static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
{
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

/*
 * Private memory cgroup IDR
 *
 * Swap-out records and page cache shadow entries need to store memcg
 * references in constrained space, so we maintain an ID space that is
 * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of
 * memory-controlled cgroups to 64k.
 *
 * However, there usually are many references to the offline CSS after
 * the cgroup has been destroyed, such as page cache or reclaimable
 * slab objects, that don't need to hang on to the ID. We want to keep
 * those dead CSS from occupying IDs, or we might quickly exhaust the
 * relatively small ID space and prevent the creation of new cgroups
 * even when there are much fewer than 64k cgroups - possibly none.
 *
 * Maintain a private 16-bit ID space for memcg, and allow the ID to
 * be freed and recycled when it's no longer needed, which is usually
 * when the CSS is offlined.
 *
 * The only exception to that are records of swapped out tmpfs/shmem
 * pages that need to be attributed to live ancestors on swapin. But
 * those references are manageable from userspace.
 */

#define MEM_CGROUP_ID_MAX	((1UL << MEM_CGROUP_ID_SHIFT) - 1)
static DEFINE_IDR(mem_cgroup_idr);

static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
{
	if (memcg->id.id > 0) {
		idr_remove(&mem_cgroup_idr, memcg->id.id);
		memcg->id.id = 0;
	}
}

void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg,
					   unsigned int n)
{
	refcount_add(n, &memcg->id.ref);
}

void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
{
	if (refcount_sub_and_test(n, &memcg->id.ref)) {
		mem_cgroup_id_remove(memcg);

		/* Memcg ID pins CSS */
		css_put(&memcg->css);
	}
}

static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
{
	mem_cgroup_id_put_many(memcg, 1);
}

/**
 * mem_cgroup_from_id - look up a memcg from a memcg id
 * @id: the memcg id to look up
 *
 * Caller must hold rcu_read_lock().
 */
struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return idr_find(&mem_cgroup_idr, id);
}
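
/*
 * Minimal usage sketch (hypothetical caller, not from this file):
 * resolving an ID stored in a swap or shadow entry back to a memcg
 * must happen under RCU, and pinning it requires a tryget because the
 * CSS may be mid-destruction:
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_id(id);
 *	if (memcg && !css_tryget_online(&memcg->css))
 *		memcg = NULL;
 *	rcu_read_unlock();
 */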

#ifdef CONFIG_SHRINKER_DEBUG
struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino)
{
	struct cgroup *cgrp;
	struct cgroup_subsys_state *css;
	struct mem_cgroup *memcg;

	cgrp = cgroup_get_from_id(ino);
	if (IS_ERR(cgrp))
		return ERR_CAST(cgrp);

	css = cgroup_get_e_css(cgrp, &memory_cgrp_subsys);
	if (css)
		memcg = container_of(css, struct mem_cgroup, css);
	else
		memcg = ERR_PTR(-ENOENT);

	cgroup_put(cgrp);

	return memcg;
}
#endif

static bool alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
{
	struct mem_cgroup_per_node *pn;

	pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, node);
	if (!pn)
		return false;

	pn->lruvec_stats = kzalloc_node(sizeof(struct lruvec_stats),
					GFP_KERNEL_ACCOUNT, node);
	if (!pn->lruvec_stats)
		goto fail;

	pn->lruvec_stats_percpu = alloc_percpu_gfp(struct lruvec_stats_percpu,
						   GFP_KERNEL_ACCOUNT);
	if (!pn->lruvec_stats_percpu)
		goto fail;

	lruvec_init(&pn->lruvec);
	pn->memcg = memcg;

	memcg->nodeinfo[node] = pn;
	return true;
fail:
	kfree(pn->lruvec_stats);
	kfree(pn);
	return false;
}

static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
{
	struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];

	if (!pn)
		return;

	free_percpu(pn->lruvec_stats_percpu);
	kfree(pn->lruvec_stats);
	kfree(pn);
}

static void __mem_cgroup_free(struct mem_cgroup *memcg)
{
	int node;

	obj_cgroup_put(memcg->orig_objcg);

	for_each_node(node)
		free_mem_cgroup_per_node_info(memcg, node);
	kfree(memcg->vmstats);
	free_percpu(memcg->vmstats_percpu);
	kfree(memcg);
}

static void mem_cgroup_free(struct mem_cgroup *memcg)
{
	lru_gen_exit_memcg(memcg);
	memcg_wb_domain_exit(memcg);
	__mem_cgroup_free(memcg);
}

static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
{
	struct memcg_vmstats_percpu *statc, *pstatc;
	struct mem_cgroup *memcg;
	int node, cpu;
	int __maybe_unused i;
	long error = -ENOMEM;

	memcg = kzalloc(struct_size(memcg, nodeinfo, nr_node_ids), GFP_KERNEL);
	if (!memcg)
		return ERR_PTR(error);

	memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
				 1, MEM_CGROUP_ID_MAX + 1, GFP_KERNEL);
	if (memcg->id.id < 0) {
		error = memcg->id.id;
		goto fail;
	}

	memcg->vmstats = kzalloc(sizeof(struct memcg_vmstats),
				 GFP_KERNEL_ACCOUNT);
	if (!memcg->vmstats)
		goto fail;

	memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu,
						 GFP_KERNEL_ACCOUNT);
	if (!memcg->vmstats_percpu)
		goto fail;

	for_each_possible_cpu(cpu) {
		if (parent)
			pstatc = per_cpu_ptr(parent->vmstats_percpu, cpu);
		statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
		statc->parent = parent ? pstatc : NULL;
		statc->vmstats = memcg->vmstats;
	}

	for_each_node(node)
		if (!alloc_mem_cgroup_per_node_info(memcg, node))
			goto fail;

	if (memcg_wb_domain_init(memcg, GFP_KERNEL))
		goto fail;

	INIT_WORK(&memcg->high_work, high_work_func);
	vmpressure_init(&memcg->vmpressure);
	memcg->socket_pressure = jiffies;
	memcg1_memcg_init(memcg);
	memcg->kmemcg_id = -1;
	INIT_LIST_HEAD(&memcg->objcg_list);
#ifdef CONFIG_CGROUP_WRITEBACK
	INIT_LIST_HEAD(&memcg->cgwb_list);
	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
		memcg->cgwb_frn[i].done =
			__WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq);
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
	INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
	memcg->deferred_split_queue.split_queue_len = 0;
#endif
	lru_gen_init_memcg(memcg);
	return memcg;
fail:
	mem_cgroup_id_remove(memcg);
	__mem_cgroup_free(memcg);
	return ERR_PTR(error);
}

static struct cgroup_subsys_state * __ref
mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
	struct mem_cgroup *memcg, *old_memcg;

	old_memcg = set_active_memcg(parent);
	memcg = mem_cgroup_alloc(parent);
	set_active_memcg(old_memcg);
	if (IS_ERR(memcg))
		return ERR_CAST(memcg);

	page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
	memcg1_soft_limit_reset(memcg);
#ifdef CONFIG_ZSWAP
	memcg->zswap_max = PAGE_COUNTER_MAX;
	WRITE_ONCE(memcg->zswap_writeback,
		   !parent || READ_ONCE(parent->zswap_writeback));
#endif
	page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
	if (parent) {
		WRITE_ONCE(memcg->swappiness, mem_cgroup_swappiness(parent));

		page_counter_init(&memcg->memory, &parent->memory);
		page_counter_init(&memcg->swap, &parent->swap);
#ifdef CONFIG_MEMCG_V1
		WRITE_ONCE(memcg->oom_kill_disable, READ_ONCE(parent->oom_kill_disable));
		page_counter_init(&memcg->kmem, &parent->kmem);
		page_counter_init(&memcg->tcpmem, &parent->tcpmem);
#endif
	} else {
		init_memcg_stats();
		init_memcg_events();
		page_counter_init(&memcg->memory, NULL);
		page_counter_init(&memcg->swap, NULL);
#ifdef CONFIG_MEMCG_V1
		page_counter_init(&memcg->kmem, NULL);
		page_counter_init(&memcg->tcpmem, NULL);
#endif
		root_mem_cgroup = memcg;
		return &memcg->css;
	}

	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
		static_branch_inc(&memcg_sockets_enabled_key);

	if (!cgroup_memory_nobpf)
		static_branch_inc(&memcg_bpf_enabled_key);

	return &memcg->css;
}

static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	if (memcg_online_kmem(memcg))
		goto remove_id;

	/*
	 * A memcg must be visible for expand_shrinker_info()
	 * by the time the maps are allocated. So, we allocate maps
	 * here, when for_each_mem_cgroup() can't skip it.
	 */
	if (alloc_shrinker_info(memcg))
		goto offline_kmem;

	if (unlikely(mem_cgroup_is_root(memcg)) && !mem_cgroup_disabled())
		queue_delayed_work(system_unbound_wq, &stats_flush_dwork,
				   FLUSH_TIME);
	lru_gen_online_memcg(memcg);

	/* Online state pins memcg ID, memcg ID pins CSS */
	refcount_set(&memcg->id.ref, 1);
	css_get(css);

	/*
	 * Ensure mem_cgroup_from_id() works once we're fully online.
	 *
	 * We could do this earlier and require callers to filter with
	 * css_tryget_online(). But right now there are no users that
	 * need earlier access, and the workingset code relies on the
	 * cgroup tree linkage (mem_cgroup_get_nr_swap_pages()). So
	 * publish it here at the end of onlining. This matches the
	 * regular ID destruction during offlining.
	 */
	idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);

	return 0;
offline_kmem:
	memcg_offline_kmem(memcg);
remove_id:
	mem_cgroup_id_remove(memcg);
	return -ENOMEM;
}

static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	memcg1_css_offline(memcg);

	page_counter_set_min(&memcg->memory, 0);
	page_counter_set_low(&memcg->memory, 0);

	zswap_memcg_offline_cleanup(memcg);

	memcg_offline_kmem(memcg);
	reparent_shrinker_deferred(memcg);
	wb_memcg_offline(memcg);
	lru_gen_offline_memcg(memcg);

	drain_all_stock(memcg);

	mem_cgroup_id_put(memcg);
}

static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	invalidate_reclaim_iterators(memcg);
	lru_gen_release_memcg(memcg);
}

static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	int __maybe_unused i;

#ifdef CONFIG_CGROUP_WRITEBACK
	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
		wb_wait_for_completion(&memcg->cgwb_frn[i].done);
#endif
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
		static_branch_dec(&memcg_sockets_enabled_key);

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg1_tcpmem_active(memcg))
		static_branch_dec(&memcg_sockets_enabled_key);

	if (!cgroup_memory_nobpf)
		static_branch_dec(&memcg_bpf_enabled_key);

	vmpressure_cleanup(&memcg->vmpressure);
	cancel_work_sync(&memcg->high_work);
	memcg1_remove_from_trees(memcg);
	free_shrinker_info(memcg);
	mem_cgroup_free(memcg);
}

/**
 * mem_cgroup_css_reset - reset the states of a mem_cgroup
 * @css: the target css
 *
 * Reset the states of the mem_cgroup associated with @css. This is
 * invoked when the userland requests disabling on the default hierarchy
 * but the memcg is pinned through dependency. The memcg should stop
 * applying policies and should revert to the vanilla state as it may be
 * made visible again.
 *
 * The current implementation only resets the essential configurations.
 * This needs to be expanded to cover all the visible parts.
 */
static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
	page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
#ifdef CONFIG_MEMCG_V1
	page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
	page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
#endif
	page_counter_set_min(&memcg->memory, 0);
	page_counter_set_low(&memcg->memory, 0);
	page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
	memcg1_soft_limit_reset(memcg);
	page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
	memcg_wb_domain_size_changed(memcg);
}

static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	struct mem_cgroup *parent = parent_mem_cgroup(memcg);
	struct memcg_vmstats_percpu *statc;
	long delta, delta_cpu, v;
	int i, nid;

	statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);

	for (i = 0; i < MEMCG_VMSTAT_SIZE; i++) {
		/*
		 * Collect the aggregated propagation counts of groups
		 * below us. We're in a per-cpu loop here and this is
		 * a global counter, so the first cycle will get them.
		 */
		delta = memcg->vmstats->state_pending[i];
		if (delta)
			memcg->vmstats->state_pending[i] = 0;

		/* Add CPU changes on this level since the last flush */
		delta_cpu = 0;
		v = READ_ONCE(statc->state[i]);
		if (v != statc->state_prev[i]) {
			delta_cpu = v - statc->state_prev[i];
			delta += delta_cpu;
			statc->state_prev[i] = v;
		}

		/* Aggregate counts on this level and propagate upwards */
		if (delta_cpu)
			memcg->vmstats->state_local[i] += delta_cpu;

		if (delta) {
			memcg->vmstats->state[i] += delta;
			if (parent)
				parent->vmstats->state_pending[i] += delta;
		}
	}

	for (i = 0; i < NR_MEMCG_EVENTS; i++) {
		delta = memcg->vmstats->events_pending[i];
		if (delta)
			memcg->vmstats->events_pending[i] = 0;

		delta_cpu = 0;
		v = READ_ONCE(statc->events[i]);
		if (v != statc->events_prev[i]) {
			delta_cpu = v - statc->events_prev[i];
			delta += delta_cpu;
			statc->events_prev[i] = v;
		}

		if (delta_cpu)
			memcg->vmstats->events_local[i] += delta_cpu;

		if (delta) {
			memcg->vmstats->events[i] += delta;
			if (parent)
				parent->vmstats->events_pending[i] += delta;
		}
	}

	for_each_node_state(nid, N_MEMORY) {
		struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid];
		struct lruvec_stats *lstats = pn->lruvec_stats;
		struct lruvec_stats *plstats = NULL;
		struct lruvec_stats_percpu *lstatc;

		if (parent)
			plstats = parent->nodeinfo[nid]->lruvec_stats;

		lstatc = per_cpu_ptr(pn->lruvec_stats_percpu, cpu);

		for (i = 0; i < NR_MEMCG_NODE_STAT_ITEMS; i++) {
			delta = lstats->state_pending[i];
			if (delta)
				lstats->state_pending[i] = 0;

			delta_cpu = 0;
			v = READ_ONCE(lstatc->state[i]);
			if (v != lstatc->state_prev[i]) {
				delta_cpu = v - lstatc->state_prev[i];
				delta += delta_cpu;
				lstatc->state_prev[i] = v;
			}

			if (delta_cpu)
				lstats->state_local[i] += delta_cpu;

			if (delta) {
				lstats->state[i] += delta;
				if (plstats)
					plstats->state_pending[i] += delta;
			}
		}
	}
	WRITE_ONCE(statc->stats_updates, 0);
	/* We are in a per-cpu loop here, only do the atomic write once */
	if (atomic64_read(&memcg->vmstats->stats_updates))
		atomic64_set(&memcg->vmstats->stats_updates, 0);
}
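
/*
 * Worked example (illustrative only): suppose a child already pushed
 * state_pending[i] == 5 up to this memcg and this CPU's counter moved
 * from state_prev[i] == 10 to v == 13. Then delta_cpu == 3 and
 * delta == 8: 3 is added to the CPU-local view (state_local), 8 to the
 * hierarchical total (state), and 8 is propagated onward to the
 * parent's state_pending.
 */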

static void mem_cgroup_fork(struct task_struct *task)
{
	/*
	 * Set the update flag to cause task->objcg to be initialized lazily
	 * on the first allocation. It can be done without any synchronization
	 * because it's always performed on the current task, and so is
	 * current_objcg_update().
	 */
	task->objcg = (struct obj_cgroup *)CURRENT_OBJCG_UPDATE_FLAG;
}

static void mem_cgroup_exit(struct task_struct *task)
{
	struct obj_cgroup *objcg = task->objcg;

	objcg = (struct obj_cgroup *)
		((unsigned long)objcg & ~CURRENT_OBJCG_UPDATE_FLAG);
	obj_cgroup_put(objcg);

	/*
	 * Some kernel allocations can happen after this point,
	 * but let's ignore them. It can be done without any synchronization
	 * because it's always performed on the current task, and so is
	 * current_objcg_update().
	 */
	task->objcg = NULL;
}

#ifdef CONFIG_LRU_GEN
static void mem_cgroup_lru_gen_attach(struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct cgroup_subsys_state *css;

	/* find the first leader if there is any */
	cgroup_taskset_for_each_leader(task, css, tset)
		break;

	if (!task)
		return;

	task_lock(task);
	if (task->mm && READ_ONCE(task->mm->owner) == task)
		lru_gen_migrate_mm(task->mm);
	task_unlock(task);
}
#else
static void mem_cgroup_lru_gen_attach(struct cgroup_taskset *tset) {}
#endif /* CONFIG_LRU_GEN */

static void mem_cgroup_kmem_attach(struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct cgroup_subsys_state *css;

	cgroup_taskset_for_each(task, css, tset) {
		/* atomically set the update bit */
		set_bit(CURRENT_OBJCG_UPDATE_BIT, (unsigned long *)&task->objcg);
	}
}

static void mem_cgroup_attach(struct cgroup_taskset *tset)
{
	mem_cgroup_lru_gen_attach(tset);
	mem_cgroup_kmem_attach(tset);
}

static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
{
	if (value == PAGE_COUNTER_MAX)
		seq_puts(m, "max\n");
	else
		seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE);

	return 0;
}

static u64 memory_current_read(struct cgroup_subsys_state *css,
			       struct cftype *cft)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
}

static u64 memory_peak_read(struct cgroup_subsys_state *css,
			    struct cftype *cft)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	return (u64)memcg->memory.watermark * PAGE_SIZE;
}

static int memory_min_show(struct seq_file *m, void *v)
{
	return seq_puts_memcg_tunable(m,
		READ_ONCE(mem_cgroup_from_seq(m)->memory.min));
}

static ssize_t memory_min_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long min;
	int err;

	buf = strstrip(buf);
	err = page_counter_memparse(buf, "max", &min);
	if (err)
		return err;

	page_counter_set_min(&memcg->memory, min);

	return nbytes;
}

static int memory_low_show(struct seq_file *m, void *v)
{
	return seq_puts_memcg_tunable(m,
		READ_ONCE(mem_cgroup_from_seq(m)->memory.low));
}

static ssize_t memory_low_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long low;
	int err;

	buf = strstrip(buf);
	err = page_counter_memparse(buf, "max", &low);
	if (err)
		return err;

	page_counter_set_low(&memcg->memory, low);

	return nbytes;
}

static int memory_high_show(struct seq_file *m, void *v)
{
	return seq_puts_memcg_tunable(m,
		READ_ONCE(mem_cgroup_from_seq(m)->memory.high));
}

static ssize_t memory_high_write(struct kernfs_open_file *of,
				 char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned int nr_retries = MAX_RECLAIM_RETRIES;
	bool drained = false;
	unsigned long high;
	int err;

	buf = strstrip(buf);
	err = page_counter_memparse(buf, "max", &high);
	if (err)
		return err;

	page_counter_set_high(&memcg->memory, high);

	for (;;) {
		unsigned long nr_pages = page_counter_read(&memcg->memory);
		unsigned long reclaimed;

		if (nr_pages <= high)
			break;

		if (signal_pending(current))
			break;

		if (!drained) {
			drain_all_stock(memcg);
			drained = true;
			continue;
		}

		reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
					GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP, NULL);

		if (!reclaimed && !nr_retries--)
			break;
	}

	memcg_wb_domain_size_changed(memcg);
	return nbytes;
}
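
/*
 * Interface sketch (cgroup v2 ABI): lowering memory.high reclaims the
 * excess synchronously before the write returns, e.g.
 *
 *	# echo 512M > memory.high
 *
 * It retries up to MAX_RECLAIM_RETRIES times and bails out early on a
 * pending signal; unlike memory.max it never invokes the OOM killer.
 */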

static int memory_max_show(struct seq_file *m, void *v)
{
	return seq_puts_memcg_tunable(m,
		READ_ONCE(mem_cgroup_from_seq(m)->memory.max));
}

static ssize_t memory_max_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned int nr_reclaims = MAX_RECLAIM_RETRIES;
	bool drained = false;
	unsigned long max;
	int err;

	buf = strstrip(buf);
	err = page_counter_memparse(buf, "max", &max);
	if (err)
		return err;

	xchg(&memcg->memory.max, max);

	for (;;) {
		unsigned long nr_pages = page_counter_read(&memcg->memory);

		if (nr_pages <= max)
			break;

		if (signal_pending(current))
			break;

		if (!drained) {
			drain_all_stock(memcg);
			drained = true;
			continue;
		}

		if (nr_reclaims) {
			if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
					GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP, NULL))
				nr_reclaims--;
			continue;
		}

		memcg_memory_event(memcg, MEMCG_OOM);
		if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
			break;
	}

	memcg_wb_domain_size_changed(memcg);
	return nbytes;
}
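
/*
 * Interface sketch (cgroup v2 ABI): memory.max also reclaims on the
 * way down but falls back to the OOM killer once the retries are
 * spent, raising the MEMCG_OOM memory event each time:
 *
 *	# echo 1G > memory.max
 *	# echo max > memory.max		(removes the limit)
 */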

/*
 * Note: don't forget to update the 'samples/cgroup/memcg_event_listener'
 * if any new events become available.
 */
static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
{
	seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW]));
	seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH]));
	seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX]));
	seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM]));
	seq_printf(m, "oom_kill %lu\n",
		   atomic_long_read(&events[MEMCG_OOM_KILL]));
	seq_printf(m, "oom_group_kill %lu\n",
		   atomic_long_read(&events[MEMCG_OOM_GROUP_KILL]));
}

static int memory_events_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);

	__memory_events_show(m, memcg->memory_events);
	return 0;
}

static int memory_events_local_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);

	__memory_events_show(m, memcg->memory_events_local);
	return 0;
}

int memory_stat_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
	char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	struct seq_buf s;

	if (!buf)
		return -ENOMEM;
	seq_buf_init(&s, buf, PAGE_SIZE);
	memory_stat_format(memcg, &s);
	seq_puts(m, buf);
	kfree(buf);
	return 0;
}

#ifdef CONFIG_NUMA
static inline unsigned long lruvec_page_state_output(struct lruvec *lruvec,
						     int item)
{
	return lruvec_page_state(lruvec, item) *
		memcg_page_state_output_unit(item);
}

static int memory_numa_stat_show(struct seq_file *m, void *v)
{
	int i;
	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);

	mem_cgroup_flush_stats(memcg);

	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
		int nid;

		if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS)
			continue;

		seq_printf(m, "%s", memory_stats[i].name);
		for_each_node_state(nid, N_MEMORY) {
			u64 size;
			struct lruvec *lruvec;

			lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
			size = lruvec_page_state_output(lruvec,
							memory_stats[i].idx);
			seq_printf(m, " N%d=%llu", nid, size);
		}
		seq_putc(m, '\n');
	}

	return 0;
}
#endif

static int memory_oom_group_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);

	seq_printf(m, "%d\n", READ_ONCE(memcg->oom_group));

	return 0;
}

static ssize_t memory_oom_group_write(struct kernfs_open_file *of,
				      char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	int ret, oom_group;

	buf = strstrip(buf);
	if (!buf)
		return -EINVAL;

	ret = kstrtoint(buf, 0, &oom_group);
	if (ret)
		return ret;

	if (oom_group != 0 && oom_group != 1)
		return -EINVAL;

	WRITE_ONCE(memcg->oom_group, oom_group);

	return nbytes;
}
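
/*
 * Interface sketch (cgroup v2 ABI): memory.oom.group accepts only 0 or
 * 1; enabling it makes the OOM killer take down the cgroup as a unit:
 *
 *	# echo 1 > memory.oom.group
 */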
4224 | ||
68cd9050 DS |
4225 | enum { |
4226 | MEMORY_RECLAIM_SWAPPINESS = 0, | |
4227 | MEMORY_RECLAIM_NULL, | |
4228 | }; | |
4229 | ||
4230 | static const match_table_t tokens = { | |
4231 | { MEMORY_RECLAIM_SWAPPINESS, "swappiness=%d"}, | |
4232 | { MEMORY_RECLAIM_NULL, NULL }, | |
4233 | }; | |
4234 | ||
94968384 SB |
4235 | static ssize_t memory_reclaim(struct kernfs_open_file *of, char *buf, |
4236 | size_t nbytes, loff_t off) | |
4237 | { | |
4238 | struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); | |
4239 | unsigned int nr_retries = MAX_RECLAIM_RETRIES; | |
4240 | unsigned long nr_to_reclaim, nr_reclaimed = 0; | |
68cd9050 | 4241 | int swappiness = -1; |
55ab834a | 4242 | unsigned int reclaim_options; |
68cd9050 DS |
4243 | char *old_buf, *start; |
4244 | substring_t args[MAX_OPT_ARGS]; | |
12a5d395 MA |
4245 | |
4246 | buf = strstrip(buf); | |
68cd9050 DS |
4247 | |
4248 | old_buf = buf; | |
4249 | nr_to_reclaim = memparse(buf, &buf) / PAGE_SIZE; | |
4250 | if (buf == old_buf) | |
4251 | return -EINVAL; | |
4252 | ||
4253 | buf = strstrip(buf); | |
4254 | ||
4255 | while ((start = strsep(&buf, " ")) != NULL) { | |
4256 | if (!strlen(start)) | |
4257 | continue; | |
4258 | switch (match_token(start, tokens, args)) { | |
4259 | case MEMORY_RECLAIM_SWAPPINESS: | |
4260 | if (match_int(&args[0], &swappiness)) | |
4261 | return -EINVAL; | |
4262 | if (swappiness < MIN_SWAPPINESS || swappiness > MAX_SWAPPINESS) | |
4263 | return -EINVAL; | |
4264 | break; | |
4265 | default: | |
4266 | return -EINVAL; | |
4267 | } | |
4268 | } | |
12a5d395 | 4269 | |
55ab834a | 4270 | reclaim_options = MEMCG_RECLAIM_MAY_SWAP | MEMCG_RECLAIM_PROACTIVE; |
94968384 | 4271 | while (nr_reclaimed < nr_to_reclaim) { |
287d5fed M |
4272 | /* Will converge on zero, but reclaim enforces a minimum */ |
4273 | unsigned long batch_size = (nr_to_reclaim - nr_reclaimed) / 4; | |
94968384 SB |
4274 | unsigned long reclaimed; |
4275 | ||
4276 | if (signal_pending(current)) | |
4277 | return -EINTR; | |
4278 | ||
4279 | /* | |
4280 | * This is the final attempt; drain the percpu LRU caches in the | |
4281 | * hope of introducing more evictable pages for | |
4282 | * try_to_free_mem_cgroup_pages(). | |
4283 | */ | |
4284 | if (!nr_retries) | |
4285 | lru_add_drain_all(); | |
4286 | ||
4287 | reclaimed = try_to_free_mem_cgroup_pages(memcg, | |
68cd9050 DS |
4288 | batch_size, GFP_KERNEL, |
4289 | reclaim_options, | |
4290 | swappiness == -1 ? NULL : &swappiness); | |
94968384 SB |
4291 | |
4292 | if (!reclaimed && !nr_retries--) | |
4293 | return -EAGAIN; | |
4294 | ||
4295 | nr_reclaimed += reclaimed; | |
4296 | } | |
4297 | ||
4298 | return nbytes; | |
4299 | } | |
4300 | ||
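/*
 * Illustrative userspace sketch (not part of this file): proactive
 * reclaim is requested by writing "<size> [swappiness=<n>]" to the
 * cgroup's memory.reclaim file, as parsed above. The cgroup path is
 * hypothetical; -EAGAIN from write() means the kernel could not
 * reclaim the requested amount within its retry budget.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/fs/cgroup/workload/memory.reclaim";
	const char *request = "512M swappiness=60";	/* bytes, then options */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, request, strlen(request)) < 0)
		perror("write");	/* e.g. EAGAIN: only partial reclaim */
	close(fd);
	return 0;
}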
241994ed JW |
4301 | static struct cftype memory_files[] = { |
4302 | { | |
4303 | .name = "current", | |
f5fc3c5d | 4304 | .flags = CFTYPE_NOT_ON_ROOT, |
241994ed JW |
4305 | .read_u64 = memory_current_read, |
4306 | }, | |
8e20d4b3 GR |
4307 | { |
4308 | .name = "peak", | |
4309 | .flags = CFTYPE_NOT_ON_ROOT, | |
4310 | .read_u64 = memory_peak_read, | |
4311 | }, | |
bf8d5d52 RG |
4312 | { |
4313 | .name = "min", | |
4314 | .flags = CFTYPE_NOT_ON_ROOT, | |
4315 | .seq_show = memory_min_show, | |
4316 | .write = memory_min_write, | |
4317 | }, | |
241994ed JW |
4318 | { |
4319 | .name = "low", | |
4320 | .flags = CFTYPE_NOT_ON_ROOT, | |
4321 | .seq_show = memory_low_show, | |
4322 | .write = memory_low_write, | |
4323 | }, | |
4324 | { | |
4325 | .name = "high", | |
4326 | .flags = CFTYPE_NOT_ON_ROOT, | |
4327 | .seq_show = memory_high_show, | |
4328 | .write = memory_high_write, | |
4329 | }, | |
4330 | { | |
4331 | .name = "max", | |
4332 | .flags = CFTYPE_NOT_ON_ROOT, | |
4333 | .seq_show = memory_max_show, | |
4334 | .write = memory_max_write, | |
4335 | }, | |
4336 | { | |
4337 | .name = "events", | |
4338 | .flags = CFTYPE_NOT_ON_ROOT, | |
472912a2 | 4339 | .file_offset = offsetof(struct mem_cgroup, events_file), |
241994ed JW |
4340 | .seq_show = memory_events_show, |
4341 | }, | |
1e577f97 SB |
4342 | { |
4343 | .name = "events.local", | |
4344 | .flags = CFTYPE_NOT_ON_ROOT, | |
4345 | .file_offset = offsetof(struct mem_cgroup, events_local_file), | |
4346 | .seq_show = memory_events_local_show, | |
4347 | }, | |
587d9f72 JW |
4348 | { |
4349 | .name = "stat", | |
587d9f72 JW |
4350 | .seq_show = memory_stat_show, |
4351 | }, | |
5f9a4f4a MS |
4352 | #ifdef CONFIG_NUMA |
4353 | { | |
4354 | .name = "numa_stat", | |
4355 | .seq_show = memory_numa_stat_show, | |
4356 | }, | |
4357 | #endif | |
3d8b38eb RG |
4358 | { |
4359 | .name = "oom.group", | |
4360 | .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE, | |
4361 | .seq_show = memory_oom_group_show, | |
4362 | .write = memory_oom_group_write, | |
4363 | }, | |
94968384 SB |
4364 | { |
4365 | .name = "reclaim", | |
4366 | .flags = CFTYPE_NS_DELEGATABLE, | |
4367 | .write = memory_reclaim, | |
4368 | }, | |
241994ed JW |
4369 | { } /* terminate */ |
4370 | }; | |
4371 | ||
073219e9 | 4372 | struct cgroup_subsys memory_cgrp_subsys = { |
92fb9748 | 4373 | .css_alloc = mem_cgroup_css_alloc, |
d142e3e6 | 4374 | .css_online = mem_cgroup_css_online, |
92fb9748 | 4375 | .css_offline = mem_cgroup_css_offline, |
6df38689 | 4376 | .css_released = mem_cgroup_css_released, |
92fb9748 | 4377 | .css_free = mem_cgroup_css_free, |
1ced953b | 4378 | .css_reset = mem_cgroup_css_reset, |
2d146aa3 | 4379 | .css_rstat_flush = mem_cgroup_css_rstat_flush, |
bd74fdae | 4380 | .attach = mem_cgroup_attach, |
1aacbd35 RG |
4381 | .fork = mem_cgroup_fork, |
4382 | .exit = mem_cgroup_exit, | |
241994ed | 4383 | .dfl_cftypes = memory_files, |
e93d4166 RG |
4384 | #ifdef CONFIG_MEMCG_V1 |
4385 | .can_attach = memcg1_can_attach, | |
4386 | .cancel_attach = memcg1_cancel_attach, | |
4387 | .post_attach = memcg1_move_task, | |
241994ed | 4388 | .legacy_cftypes = mem_cgroup_legacy_files, |
e93d4166 | 4389 | #endif |
6d12e2d8 | 4390 | .early_init = 0, |
8cdea7c0 | 4391 | }; |
c077719b | 4392 | |
241994ed | 4393 | /** |
05395718 | 4394 | * mem_cgroup_calculate_protection - check if memory consumption is in the normal range |
34c81057 | 4395 | * @root: the top ancestor of the sub-tree being checked |
241994ed JW |
4396 | * @memcg: the memory cgroup to check |
4397 | * | |
23067153 RG |
4398 | * WARNING: This function is not stateless! It can only be used as part |
4399 | * of a top-down tree iteration, not for isolated queries. | |
241994ed | 4400 | */ |
45c7f7e1 CD |
4401 | void mem_cgroup_calculate_protection(struct mem_cgroup *root, |
4402 | struct mem_cgroup *memcg) | |
241994ed | 4403 | { |
a8585ac6 ML |
4404 | bool recursive_protection = |
4405 | cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT; | |
23067153 | 4406 | |
241994ed | 4407 | if (mem_cgroup_disabled()) |
45c7f7e1 | 4408 | return; |
241994ed | 4409 | |
34c81057 SC |
4410 | if (!root) |
4411 | root = root_mem_cgroup; | |
22f7496f | 4412 | |
a8585ac6 | 4413 | page_counter_calculate_protection(&root->memory, &memcg->memory, recursive_protection); |
241994ed JW |
4414 | } |
4415 | ||
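/*
 * Illustrative sketch (an assumption, not the actual reclaim code): the
 * top-down walk required by the warning above, loosely modeled on
 * shrink_node_memcgs(). Protection must be computed for every memcg
 * visited, parents before children, before its LRU lists are scanned.
 */
static void __maybe_unused protection_walk_sketch(struct mem_cgroup *root)
{
	struct mem_cgroup *memcg = mem_cgroup_iter(root, NULL, NULL);

	do {
		mem_cgroup_calculate_protection(root, memcg);

		if (mem_cgroup_below_min(root, memcg))
			continue;	/* hard protection: skip this memcg */

		/* ... scan this memcg's LRU lists here ... */
	} while ((memcg = mem_cgroup_iter(root, memcg, NULL)));
}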
8f425e4e MWO |
4416 | static int charge_memcg(struct folio *folio, struct mem_cgroup *memcg, |
4417 | gfp_t gfp) | |
0add0c77 | 4418 | { |
0add0c77 SB |
4419 | int ret; |
4420 | ||
4b569387 | 4421 | ret = try_charge(memcg, gfp, folio_nr_pages(folio)); |
0add0c77 SB |
4422 | if (ret) |
4423 | goto out; | |
4424 | ||
4b569387 | 4425 | mem_cgroup_commit_charge(folio, memcg); |
0add0c77 SB |
4426 | out: |
4427 | return ret; | |
4428 | } | |
4429 | ||
8f425e4e | 4430 | int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp) |
00501b53 | 4431 | { |
0add0c77 SB |
4432 | struct mem_cgroup *memcg; |
4433 | int ret; | |
00501b53 | 4434 | |
0add0c77 | 4435 | memcg = get_mem_cgroup_from_mm(mm); |
8f425e4e | 4436 | ret = charge_memcg(folio, memcg, gfp); |
0add0c77 | 4437 | css_put(&memcg->css); |
2d1c4980 | 4438 | |
0add0c77 SB |
4439 | return ret; |
4440 | } | |
e993d905 | 4441 | |
8cba9576 NP |
4442 | /** |
4443 | * mem_cgroup_hugetlb_try_charge - try to charge the memcg for a hugetlb folio | |
4444 | * @memcg: memcg to charge. | |
4445 | * @gfp: reclaim mode. | |
4446 | * @nr_pages: number of pages to charge. | |
4447 | * | |
4448 | * This function is called when allocating a huge page folio to determine if | |
4449 | * the memcg has the capacity for it. It does not commit the charge yet, | |
4450 | * as the hugetlb folio itself has not been obtained from the hugetlb pool. | |
4451 | * | |
4452 | * Once we have obtained the hugetlb folio, we can call | |
4453 | * mem_cgroup_commit_charge() to commit the charge. If we fail to obtain the | |
4454 | * folio, we should instead call mem_cgroup_cancel_charge() to undo the effect | |
4455 | * of try_charge(). | |
4456 | * | |
4457 | * Returns 0 on success. Otherwise, an error code is returned. | |
4458 | */ | |
4459 | int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg, gfp_t gfp, | |
4460 | long nr_pages) | |
4461 | { | |
4462 | /* | |
4463 | * If hugetlb memcg charging is not enabled, do not fail hugetlb allocation, | |
4464 | * but do not attempt to commit the charge later (or cancel it on error) either. | |
4465 | */ | |
4466 | if (mem_cgroup_disabled() || !memcg || | |
4467 | !cgroup_subsys_on_dfl(memory_cgrp_subsys) || | |
4468 | !(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING)) | |
4469 | return -EOPNOTSUPP; | |
4470 | ||
4471 | if (try_charge(memcg, gfp, nr_pages)) | |
4472 | return -ENOMEM; | |
4473 | ||
4474 | return 0; | |
4475 | } | |
4476 | ||
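/*
 * Illustrative sketch of the protocol documented above, loosely
 * following the hugetlb allocation path; the pool helper below is a
 * hypothetical stand-in, not a real API.
 */
extern struct folio *example_take_from_hugetlb_pool(void);	/* hypothetical */

static struct folio * __maybe_unused hugetlb_charge_sketch(struct mem_cgroup *memcg,
							    long nr_pages)
{
	int ret = mem_cgroup_hugetlb_try_charge(memcg, GFP_KERNEL, nr_pages);
	struct folio *folio;

	if (ret == -ENOMEM)
		return NULL;		/* over the memcg limit */

	folio = example_take_from_hugetlb_pool();
	if (!folio) {
		/* -EOPNOTSUPP meant nothing was charged: do not cancel */
		if (!ret)
			mem_cgroup_cancel_charge(memcg, nr_pages);
		return NULL;
	}

	if (!ret)
		mem_cgroup_commit_charge(folio, memcg);
	return folio;
}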
0add0c77 | 4477 | /** |
65995918 MWO |
4478 | * mem_cgroup_swapin_charge_folio - Charge a newly allocated folio for swapin. |
4479 | * @folio: folio to charge. | |
0add0c77 SB |
4480 | * @mm: mm context of the victim |
4481 | * @gfp: reclaim mode | |
65995918 | 4482 | * @entry: swap entry for which the folio is allocated |
0add0c77 | 4483 | * |
65995918 MWO |
4484 | * This function charges a folio allocated for swapin. Please call this before |
4485 | * adding the folio to the swapcache. | |
0add0c77 SB |
4486 | * |
4487 | * Returns 0 on success. Otherwise, an error code is returned. | |
4488 | */ | |
65995918 | 4489 | int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm, |
0add0c77 SB |
4490 | gfp_t gfp, swp_entry_t entry) |
4491 | { | |
4492 | struct mem_cgroup *memcg; | |
4493 | unsigned short id; | |
4494 | int ret; | |
00501b53 | 4495 | |
0add0c77 SB |
4496 | if (mem_cgroup_disabled()) |
4497 | return 0; | |
00501b53 | 4498 | |
0add0c77 SB |
4499 | id = lookup_swap_cgroup_id(entry); |
4500 | rcu_read_lock(); | |
4501 | memcg = mem_cgroup_from_id(id); | |
4502 | if (!memcg || !css_tryget_online(&memcg->css)) | |
4503 | memcg = get_mem_cgroup_from_mm(mm); | |
4504 | rcu_read_unlock(); | |
00501b53 | 4505 | |
8f425e4e | 4506 | ret = charge_memcg(folio, memcg, gfp); |
6abb5a86 | 4507 | |
0add0c77 SB |
4508 | css_put(&memcg->css); |
4509 | return ret; | |
4510 | } | |
00501b53 | 4511 | |
0add0c77 SB |
4512 | /* |
4513 | * mem_cgroup_swapin_uncharge_swap - uncharge swap slot | |
4514 | * @entry: swap entry for which the page is charged | |
4515 | * | |
4516 | * Call this function after successfully adding the charged page to swapcache. | |
4517 | * | |
4518 | * Note: This function assumes the page for which the swap slot is being | |
4519 | * uncharged is an order-0 page. | |
4520 | */ | |
4521 | void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry) | |
4522 | { | |
cae3af62 MS |
4523 | /* |
4524 | * Cgroup1's unified memory+swap counter has been charged with the | |
4525 | * new swapcache page, finish the transfer by uncharging the swap | |
4526 | * slot. The swap slot would also get uncharged when it dies, but | |
4527 | * it can stick around indefinitely and we'd count the page twice | |
4528 | * the entire time. | |
4529 | * | |
4530 | * Cgroup2 has separate resource counters for memory and swap, | |
4531 | * so this is a non-issue here. Memory and swap charge lifetimes | |
4532 | * correspond 1:1 to page and swap slot lifetimes: we charge the | |
4533 | * page to memory here, and uncharge swap when the slot is freed. | |
4534 | */ | |
0add0c77 | 4535 | if (!mem_cgroup_disabled() && do_memsw_account()) { |
00501b53 JW |
4536 | /* |
4537 | * The swap entry might not get freed for a long time, | |
4538 | * let's not wait for it. The page already received a | |
4539 | * memory+swap charge, drop the swap entry duplicate. | |
4540 | */ | |
0add0c77 | 4541 | mem_cgroup_uncharge_swap(entry, 1); |
00501b53 | 4542 | } |
3fea5a49 JW |
4543 | } |
4544 | ||
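/*
 * Illustrative ordering sketch for the two helpers above, loosely based
 * on the swapin path; locking, swapcache insertion and most error
 * handling are elided.
 */
static struct folio * __maybe_unused swapin_charge_sketch(struct mm_struct *mm,
							   swp_entry_t entry, gfp_t gfp)
{
	struct folio *folio = folio_alloc(gfp, 0);	/* order-0, see note above */

	if (!folio)
		return NULL;

	/* 1) Charge before the folio becomes visible in the swapcache. */
	if (mem_cgroup_swapin_charge_folio(folio, mm, gfp, entry)) {
		folio_put(folio);
		return NULL;
	}

	/* 2) ... add the folio to the swapcache here ... */

	/* 3) Drop the cgroup1 memsw duplicate now that the page is charged. */
	mem_cgroup_swapin_uncharge_swap(entry);
	return folio;
}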
a9d5adee JG |
4545 | struct uncharge_gather { |
4546 | struct mem_cgroup *memcg; | |
b4e0b68f | 4547 | unsigned long nr_memory; |
a9d5adee | 4548 | unsigned long pgpgout; |
a9d5adee | 4549 | unsigned long nr_kmem; |
8e88bd2d | 4550 | int nid; |
a9d5adee JG |
4551 | }; |
4552 | ||
4553 | static inline void uncharge_gather_clear(struct uncharge_gather *ug) | |
747db954 | 4554 | { |
a9d5adee JG |
4555 | memset(ug, 0, sizeof(*ug)); |
4556 | } | |
4557 | ||
4558 | static void uncharge_batch(const struct uncharge_gather *ug) | |
4559 | { | |
747db954 JW |
4560 | unsigned long flags; |
4561 | ||
b4e0b68f MS |
4562 | if (ug->nr_memory) { |
4563 | page_counter_uncharge(&ug->memcg->memory, ug->nr_memory); | |
7941d214 | 4564 | if (do_memsw_account()) |
b4e0b68f | 4565 | page_counter_uncharge(&ug->memcg->memsw, ug->nr_memory); |
04fbe921 RG |
4566 | if (ug->nr_kmem) { |
4567 | mod_memcg_state(ug->memcg, MEMCG_KMEM, -ug->nr_kmem); | |
4568 | memcg1_account_kmem(ug->memcg, -ug->nr_kmem); | |
4569 | } | |
8d49b699 | 4570 | memcg1_oom_recover(ug->memcg); |
ce00a967 | 4571 | } |
747db954 JW |
4572 | |
4573 | local_irq_save(flags); | |
c9019e9b | 4574 | __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout); |
b4e0b68f | 4575 | __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_memory); |
cc7b8504 | 4576 | memcg1_check_events(ug->memcg, ug->nid); |
747db954 | 4577 | local_irq_restore(flags); |
f1796544 | 4578 | |
c4ed6ebf | 4579 | /* drop reference from uncharge_folio */ |
f1796544 | 4580 | css_put(&ug->memcg->css); |
a9d5adee JG |
4581 | } |
4582 | ||
c4ed6ebf | 4583 | static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug) |
a9d5adee | 4584 | { |
c4ed6ebf | 4585 | long nr_pages; |
b4e0b68f MS |
4586 | struct mem_cgroup *memcg; |
4587 | struct obj_cgroup *objcg; | |
9f762dbe | 4588 | |
c4ed6ebf | 4589 | VM_BUG_ON_FOLIO(folio_test_lru(folio), folio); |
b7b098cf MWO |
4590 | VM_BUG_ON_FOLIO(folio_order(folio) > 1 && |
4591 | !folio_test_hugetlb(folio) && | |
4592 | !list_empty(&folio->_deferred_list), folio); | |
a9d5adee | 4593 | |
a9d5adee JG |
4594 | /* |
4595 | * Nobody should be changing or seriously looking at | |
c4ed6ebf MWO |
4596 | * folio memcg or objcg at this point; we have fully | |
4597 | * exclusive access to the folio. | |
a9d5adee | 4598 | */ |
fead2b86 | 4599 | if (folio_memcg_kmem(folio)) { |
1b7e4464 | 4600 | objcg = __folio_objcg(folio); |
b4e0b68f MS |
4601 | /* |
4602 | * This get matches the put at the end of the function and | |
4603 | * kmem pages do not hold memcg references anymore. | |
4604 | */ | |
4605 | memcg = get_mem_cgroup_from_objcg(objcg); | |
4606 | } else { | |
1b7e4464 | 4607 | memcg = __folio_memcg(folio); |
b4e0b68f | 4608 | } |
a9d5adee | 4609 | |
b4e0b68f MS |
4610 | if (!memcg) |
4611 | return; | |
4612 | ||
4613 | if (ug->memcg != memcg) { | |
a9d5adee JG |
4614 | if (ug->memcg) { |
4615 | uncharge_batch(ug); | |
4616 | uncharge_gather_clear(ug); | |
4617 | } | |
b4e0b68f | 4618 | ug->memcg = memcg; |
c4ed6ebf | 4619 | ug->nid = folio_nid(folio); |
f1796544 MH |
4620 | |
4621 | /* pairs with css_put in uncharge_batch */ | |
b4e0b68f | 4622 | css_get(&memcg->css); |
a9d5adee JG |
4623 | } |
4624 | ||
c4ed6ebf | 4625 | nr_pages = folio_nr_pages(folio); |
a9d5adee | 4626 | |
fead2b86 | 4627 | if (folio_memcg_kmem(folio)) { |
b4e0b68f | 4628 | ug->nr_memory += nr_pages; |
9f762dbe | 4629 | ug->nr_kmem += nr_pages; |
b4e0b68f | 4630 | |
c4ed6ebf | 4631 | folio->memcg_data = 0; |
b4e0b68f MS |
4632 | obj_cgroup_put(objcg); |
4633 | } else { | |
4634 | /* LRU pages aren't accounted at the root level */ | |
4635 | if (!mem_cgroup_is_root(memcg)) | |
4636 | ug->nr_memory += nr_pages; | |
18b2db3b | 4637 | ug->pgpgout++; |
a9d5adee | 4638 | |
c4ed6ebf | 4639 | folio->memcg_data = 0; |
b4e0b68f MS |
4640 | } |
4641 | ||
4642 | css_put(&memcg->css); | |
747db954 JW |
4643 | } |
4644 | ||
bbc6b703 | 4645 | void __mem_cgroup_uncharge(struct folio *folio) |
0a31bc97 | 4646 | { |
a9d5adee JG |
4647 | struct uncharge_gather ug; |
4648 | ||
bbc6b703 MWO |
4649 | /* Don't touch folio->lru of any random page, pre-check: */ |
4650 | if (!folio_memcg(folio)) | |
0a31bc97 JW |
4651 | return; |
4652 | ||
a9d5adee | 4653 | uncharge_gather_clear(&ug); |
bbc6b703 | 4654 | uncharge_folio(folio, &ug); |
a9d5adee | 4655 | uncharge_batch(&ug); |
747db954 | 4656 | } |
0a31bc97 | 4657 | |
4882c809 MWO |
4658 | void __mem_cgroup_uncharge_folios(struct folio_batch *folios) |
4659 | { | |
4660 | struct uncharge_gather ug; | |
4661 | unsigned int i; | |
4662 | ||
4663 | uncharge_gather_clear(&ug); | |
4664 | for (i = 0; i < folios->nr; i++) | |
4665 | uncharge_folio(folios->folios[i], &ug); | |
4666 | if (ug.memcg) | |
4667 | uncharge_batch(&ug); | |
0a31bc97 JW |
4668 | } |
4669 | ||
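/*
 * Illustrative sketch: callers release folios in batches so that the
 * gather above can coalesce same-memcg folios into a single counter
 * update; loosely based on the folios_put() path, refcounting elided.
 */
static void __maybe_unused uncharge_folios_sketch(struct folio_batch *fbatch)
{
	/* The wrapper no-ops when memcg is disabled. */
	mem_cgroup_uncharge_folios(fbatch);
	folio_batch_reinit(fbatch);
}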
4670 | /** | |
85ce2c51 | 4671 | * mem_cgroup_replace_folio - Charge a folio's replacement. |
d21bba2b MWO |
4672 | * @old: Currently circulating folio. |
4673 | * @new: Replacement folio. | |
0a31bc97 | 4674 | * |
d21bba2b | 4675 | * Charge @new as a replacement folio for @old. @old will |
9094b4a1 | 4676 | * be uncharged upon free. |
0a31bc97 | 4677 | * |
d21bba2b | 4678 | * Both folios must be locked, @new->mapping must be set up. |
0a31bc97 | 4679 | */ |
85ce2c51 | 4680 | void mem_cgroup_replace_folio(struct folio *old, struct folio *new) |
0a31bc97 | 4681 | { |
29833315 | 4682 | struct mem_cgroup *memcg; |
d21bba2b | 4683 | long nr_pages = folio_nr_pages(new); |
d93c4130 | 4684 | unsigned long flags; |
0a31bc97 | 4685 | |
d21bba2b MWO |
4686 | VM_BUG_ON_FOLIO(!folio_test_locked(old), old); |
4687 | VM_BUG_ON_FOLIO(!folio_test_locked(new), new); | |
4688 | VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new); | |
4689 | VM_BUG_ON_FOLIO(folio_nr_pages(old) != nr_pages, new); | |
0a31bc97 JW |
4690 | |
4691 | if (mem_cgroup_disabled()) | |
4692 | return; | |
4693 | ||
d21bba2b MWO |
4694 | /* Page cache replacement: new folio already charged? */ |
4695 | if (folio_memcg(new)) | |
0a31bc97 JW |
4696 | return; |
4697 | ||
d21bba2b MWO |
4698 | memcg = folio_memcg(old); |
4699 | VM_WARN_ON_ONCE_FOLIO(!memcg, old); | |
29833315 | 4700 | if (!memcg) |
0a31bc97 JW |
4701 | return; |
4702 | ||
44b7a8d3 | 4703 | /* Force-charge the new page. The old one will be freed soon */ |
8dc87c7d MS |
4704 | if (!mem_cgroup_is_root(memcg)) { |
4705 | page_counter_charge(&memcg->memory, nr_pages); | |
4706 | if (do_memsw_account()) | |
4707 | page_counter_charge(&memcg->memsw, nr_pages); | |
4708 | } | |
0a31bc97 | 4709 | |
1a3e1f40 | 4710 | css_get(&memcg->css); |
d21bba2b | 4711 | commit_charge(new, memcg); |
44b7a8d3 | 4712 | |
d93c4130 | 4713 | local_irq_save(flags); |
6e0110c2 | 4714 | mem_cgroup_charge_statistics(memcg, nr_pages); |
cc7b8504 | 4715 | memcg1_check_events(memcg, folio_nid(new)); |
d93c4130 | 4716 | local_irq_restore(flags); |
0a31bc97 JW |
4717 | } |
4718 | ||
85ce2c51 NP |
4719 | /** |
4720 | * mem_cgroup_migrate - Transfer the memcg data from the old to the new folio. | |
4721 | * @old: Currently circulating folio. | |
4722 | * @new: Replacement folio. | |
4723 | * | |
4724 | * Transfer the memcg data from the old folio to the new folio for migration. | |
4725 | * The old folio's data info will be cleared. Note that the memory counters | |
4726 | * will remain unchanged throughout the process. | |
4727 | * | |
4728 | * Both folios must be locked, @new->mapping must be set up. | |
4729 | */ | |
4730 | void mem_cgroup_migrate(struct folio *old, struct folio *new) | |
4731 | { | |
4732 | struct mem_cgroup *memcg; | |
4733 | ||
4734 | VM_BUG_ON_FOLIO(!folio_test_locked(old), old); | |
4735 | VM_BUG_ON_FOLIO(!folio_test_locked(new), new); | |
4736 | VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new); | |
4737 | VM_BUG_ON_FOLIO(folio_nr_pages(old) != folio_nr_pages(new), new); | |
a6ab9c82 | 4738 | VM_BUG_ON_FOLIO(folio_test_lru(old), old); |
85ce2c51 NP |
4739 | |
4740 | if (mem_cgroup_disabled()) | |
4741 | return; | |
4742 | ||
4743 | memcg = folio_memcg(old); | |
8cba9576 NP |
4744 | /* |
4745 | * Note that it is normal to see !memcg for a hugetlb folio. | |
4746 | * For example, it could have been allocated when memory_hugetlb_accounting | |
4747 | * was not selected. | |
4748 | */ | |
4749 | VM_WARN_ON_ONCE_FOLIO(!folio_test_hugetlb(old) && !memcg, old); | |
85ce2c51 NP |
4750 | if (!memcg) |
4751 | return; | |
4752 | ||
4753 | /* Transfer the charge and the css ref */ | |
4754 | commit_charge(new, memcg); | |
4755 | old->memcg_data = 0; | |
4756 | } | |
4757 | ||
ef12947c | 4758 | DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key); |
11092087 JW |
4759 | EXPORT_SYMBOL(memcg_sockets_enabled_key); |
4760 | ||
2d758073 | 4761 | void mem_cgroup_sk_alloc(struct sock *sk) |
11092087 JW |
4762 | { |
4763 | struct mem_cgroup *memcg; | |
4764 | ||
2d758073 JW |
4765 | if (!mem_cgroup_sockets_enabled) |
4766 | return; | |
4767 | ||
e876ecc6 | 4768 | /* Do not associate the sock with an unrelated interrupted task's memcg. */ | |
086f694a | 4769 | if (!in_task()) |
e876ecc6 SB |
4770 | return; |
4771 | ||
11092087 JW |
4772 | rcu_read_lock(); |
4773 | memcg = mem_cgroup_from_task(current); | |
7848ed62 | 4774 | if (mem_cgroup_is_root(memcg)) |
f7e1cb6e | 4775 | goto out; |
773e9ae7 | 4776 | if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg1_tcpmem_active(memcg)) |
f7e1cb6e | 4777 | goto out; |
8965aa28 | 4778 | if (css_tryget(&memcg->css)) |
11092087 | 4779 | sk->sk_memcg = memcg; |
f7e1cb6e | 4780 | out: |
11092087 JW |
4781 | rcu_read_unlock(); |
4782 | } | |
11092087 | 4783 | |
2d758073 | 4784 | void mem_cgroup_sk_free(struct sock *sk) |
11092087 | 4785 | { |
2d758073 JW |
4786 | if (sk->sk_memcg) |
4787 | css_put(&sk->sk_memcg->css); | |
11092087 JW |
4788 | } |
4789 | ||
4790 | /** | |
4791 | * mem_cgroup_charge_skmem - charge socket memory | |
4792 | * @memcg: memcg to charge | |
4793 | * @nr_pages: number of pages to charge | |
4b1327be | 4794 | * @gfp_mask: reclaim mode |
11092087 JW |
4795 | * |
4796 | * Charges @nr_pages to @memcg. Returns %true if the charge fits within | |
4b1327be | 4797 | * @memcg's configured limit, %false if it does not. | |
11092087 | 4798 | */ |
4b1327be WW |
4799 | bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages, |
4800 | gfp_t gfp_mask) | |
11092087 | 4801 | { |
773e9ae7 RG |
4802 | if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) |
4803 | return memcg1_charge_skmem(memcg, nr_pages, gfp_mask); | |
d886f4e4 | 4804 | |
4b1327be WW |
4805 | if (try_charge(memcg, gfp_mask, nr_pages) == 0) { |
4806 | mod_memcg_state(memcg, MEMCG_SOCK, nr_pages); | |
f7e1cb6e | 4807 | return true; |
4b1327be | 4808 | } |
f7e1cb6e | 4809 | |
11092087 JW |
4810 | return false; |
4811 | } | |
4812 | ||
4813 | /** | |
4814 | * mem_cgroup_uncharge_skmem - uncharge socket memory | |
b7701a5f MR |
4815 | * @memcg: memcg to uncharge |
4816 | * @nr_pages: number of pages to uncharge | |
11092087 JW |
4817 | */ |
4818 | void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) | |
4819 | { | |
f7e1cb6e | 4820 | if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) { |
773e9ae7 | 4821 | memcg1_uncharge_skmem(memcg, nr_pages); |
f7e1cb6e JW |
4822 | return; |
4823 | } | |
d886f4e4 | 4824 | |
c9019e9b | 4825 | mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages); |
b2807f07 | 4826 | |
475d0487 | 4827 | refill_stock(memcg, nr_pages); |
11092087 JW |
4828 | } |
4829 | ||
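/*
 * Illustrative sketch (simplified from the networking callers): how the
 * charge/uncharge pair above is expected to bracket a socket buffer's
 * lifetime. Error handling and real buffer management are elided.
 */
static bool __maybe_unused skmem_charge_sketch(struct sock *sk, unsigned int nr_pages)
{
	if (!mem_cgroup_sockets_enabled || !sk->sk_memcg)
		return true;	/* no memcg accounting on this socket */

	if (!mem_cgroup_charge_skmem(sk->sk_memcg, nr_pages,
				     GFP_NOWAIT | __GFP_NOWARN))
		return false;	/* over limit: caller should back off */

	/* ... buffer in use; on teardown: */
	mem_cgroup_uncharge_skmem(sk->sk_memcg, nr_pages);
	return true;
}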
f7e1cb6e JW |
4830 | static int __init cgroup_memory(char *s) |
4831 | { | |
4832 | char *token; | |
4833 | ||
4834 | while ((token = strsep(&s, ",")) != NULL) { | |
4835 | if (!*token) | |
4836 | continue; | |
4837 | if (!strcmp(token, "nosocket")) | |
4838 | cgroup_memory_nosocket = true; | |
04823c83 VD |
4839 | if (!strcmp(token, "nokmem")) |
4840 | cgroup_memory_nokmem = true; | |
b6c1a8af YS |
4841 | if (!strcmp(token, "nobpf")) |
4842 | cgroup_memory_nobpf = true; | |
f7e1cb6e | 4843 | } |
460a79e1 | 4844 | return 1; |
f7e1cb6e JW |
4845 | } |
4846 | __setup("cgroup.memory=", cgroup_memory); | |
11092087 | 4847 | |
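/*
 * Usage note: the option above is parsed from the kernel command line,
 * e.g. booting with "cgroup.memory=nosocket,nokmem" disables both
 * socket and kernel memory accounting; unrecognized tokens are ignored.
 */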
2d11085e | 4848 | /* |
1081312f MH |
4849 | * subsys_initcall() for memory controller. |
4850 | * | |
308167fc SAS |
4851 | * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this |
4852 | * context because of lock dependencies (cgroup_lock -> cpu hotplug) but | |
4853 | * basically everything that doesn't depend on a specific mem_cgroup structure | |
4854 | * should be initialized from here. | |
2d11085e MH |
4855 | */ |
4856 | static int __init mem_cgroup_init(void) | |
4857 | { | |
d12f6d22 | 4858 | int cpu; |
95a045f6 | 4859 | |
f3344adf MS |
4860 | /* |
4861 | * Currently s32 type (can refer to struct batched_lruvec_stat) is | |
4862 | * used for per-memcg-per-cpu caching of per-node statistics. In order | |
4863 | * to work fine, we should make sure that the overfill threshold can't | |
4864 | * exceed S32_MAX / PAGE_SIZE. | |
4865 | */ | |
4866 | BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S32_MAX / PAGE_SIZE); | |
4867 | ||
308167fc SAS |
4868 | cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL, |
4869 | memcg_hotplug_cpu_dead); | |
95a045f6 JW |
4870 | |
4871 | for_each_possible_cpu(cpu) | |
4872 | INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work, | |
4873 | drain_local_stock); | |
4874 | ||
2d11085e MH |
4875 | return 0; |
4876 | } | |
4877 | subsys_initcall(mem_cgroup_init); | |
21afa38e | 4878 | |
e55b9f96 | 4879 | #ifdef CONFIG_SWAP |
358c07fc AB |
4880 | static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg) |
4881 | { | |
1c2d479a | 4882 | while (!refcount_inc_not_zero(&memcg->id.ref)) { |
358c07fc AB |
4883 | /* |
4884 | * The root cgroup cannot be destroyed, so its refcount must | |
4885 | * always be >= 1. | |
4886 | */ | |
7848ed62 | 4887 | if (WARN_ON_ONCE(mem_cgroup_is_root(memcg))) { |
358c07fc AB |
4888 | VM_BUG_ON(1); |
4889 | break; | |
4890 | } | |
4891 | memcg = parent_mem_cgroup(memcg); | |
4892 | if (!memcg) | |
4893 | memcg = root_mem_cgroup; | |
4894 | } | |
4895 | return memcg; | |
4896 | } | |
4897 | ||
21afa38e JW |
4898 | /** |
4899 | * mem_cgroup_swapout - transfer a memsw charge to swap | |
3ecb0087 | 4900 | * @folio: folio whose memsw charge to transfer |
21afa38e JW |
4901 | * @entry: swap entry to move the charge to |
4902 | * | |
3ecb0087 | 4903 | * Transfer the memsw charge of @folio to @entry. |
21afa38e | 4904 | */ |
3ecb0087 | 4905 | void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry) |
21afa38e | 4906 | { |
1f47b61f | 4907 | struct mem_cgroup *memcg, *swap_memcg; |
d6810d73 | 4908 | unsigned int nr_entries; |
21afa38e JW |
4909 | unsigned short oldid; |
4910 | ||
3ecb0087 MWO |
4911 | VM_BUG_ON_FOLIO(folio_test_lru(folio), folio); |
4912 | VM_BUG_ON_FOLIO(folio_ref_count(folio), folio); | |
21afa38e | 4913 | |
76358ab5 AS |
4914 | if (mem_cgroup_disabled()) |
4915 | return; | |
4916 | ||
b94c4e94 | 4917 | if (!do_memsw_account()) |
21afa38e JW |
4918 | return; |
4919 | ||
3ecb0087 | 4920 | memcg = folio_memcg(folio); |
21afa38e | 4921 | |
3ecb0087 | 4922 | VM_WARN_ON_ONCE_FOLIO(!memcg, folio); |
21afa38e JW |
4923 | if (!memcg) |
4924 | return; | |
4925 | ||
1f47b61f VD |
4926 | /* |
4927 | * In case the memcg owning these pages has been offlined and doesn't | |
4928 | * have an ID allocated to it anymore, charge the closest online | |
4929 | * ancestor for the swap instead and transfer the memory+swap charge. | |
4930 | */ | |
4931 | swap_memcg = mem_cgroup_id_get_online(memcg); | |
3ecb0087 | 4932 | nr_entries = folio_nr_pages(folio); |
d6810d73 YH |
4933 | /* Get references for the tail pages, too */ |
4934 | if (nr_entries > 1) | |
4935 | mem_cgroup_id_get_many(swap_memcg, nr_entries - 1); | |
4936 | oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg), | |
4937 | nr_entries); | |
3ecb0087 | 4938 | VM_BUG_ON_FOLIO(oldid, folio); |
c9019e9b | 4939 | mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries); |
21afa38e | 4940 | |
3ecb0087 | 4941 | folio->memcg_data = 0; |
21afa38e JW |
4942 | |
4943 | if (!mem_cgroup_is_root(memcg)) | |
d6810d73 | 4944 | page_counter_uncharge(&memcg->memory, nr_entries); |
21afa38e | 4945 | |
b25806dc | 4946 | if (memcg != swap_memcg) { |
1f47b61f | 4947 | if (!mem_cgroup_is_root(swap_memcg)) |
d6810d73 YH |
4948 | page_counter_charge(&swap_memcg->memsw, nr_entries); |
4949 | page_counter_uncharge(&memcg->memsw, nr_entries); | |
1f47b61f VD |
4950 | } |
4951 | ||
ce9ce665 SAS |
4952 | /* |
4953 | * Interrupts should be disabled here because the caller holds the | |
b93b0163 | 4954 | * i_pages lock which is taken with interrupts-off. It is |
ce9ce665 | 4955 | * important here to have the interrupts disabled because it is the |
b93b0163 | 4956 | * only synchronisation we have for updating the per-CPU variables. |
ce9ce665 | 4957 | */ |
be3e67b5 | 4958 | memcg_stats_lock(); |
6e0110c2 | 4959 | mem_cgroup_charge_statistics(memcg, -nr_entries); |
be3e67b5 | 4960 | memcg_stats_unlock(); |
cc7b8504 | 4961 | memcg1_check_events(memcg, folio_nid(folio)); |
73f576c0 | 4962 | |
1a3e1f40 | 4963 | css_put(&memcg->css); |
21afa38e JW |
4964 | } |
4965 | ||
38d8b4e6 | 4966 | /** |
e2e3fdc7 MWO |
4967 | * __mem_cgroup_try_charge_swap - try charging swap space for a folio |
4968 | * @folio: folio being added to swap | |
37e84351 VD |
4969 | * @entry: swap entry to charge |
4970 | * | |
e2e3fdc7 | 4971 | * Try to charge @folio's memcg for the swap space at @entry. |
37e84351 VD |
4972 | * |
4973 | * Returns 0 on success, -ENOMEM on failure. | |
4974 | */ | |
e2e3fdc7 | 4975 | int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry) |
37e84351 | 4976 | { |
e2e3fdc7 | 4977 | unsigned int nr_pages = folio_nr_pages(folio); |
37e84351 | 4978 | struct page_counter *counter; |
38d8b4e6 | 4979 | struct mem_cgroup *memcg; |
37e84351 VD |
4980 | unsigned short oldid; |
4981 | ||
b94c4e94 | 4982 | if (do_memsw_account()) |
37e84351 VD |
4983 | return 0; |
4984 | ||
e2e3fdc7 | 4985 | memcg = folio_memcg(folio); |
37e84351 | 4986 | |
e2e3fdc7 | 4987 | VM_WARN_ON_ONCE_FOLIO(!memcg, folio); |
37e84351 VD |
4988 | if (!memcg) |
4989 | return 0; | |
4990 | ||
f3a53a3a TH |
4991 | if (!entry.val) { |
4992 | memcg_memory_event(memcg, MEMCG_SWAP_FAIL); | |
bb98f2c5 | 4993 | return 0; |
f3a53a3a | 4994 | } |
bb98f2c5 | 4995 | |
1f47b61f VD |
4996 | memcg = mem_cgroup_id_get_online(memcg); |
4997 | ||
b25806dc | 4998 | if (!mem_cgroup_is_root(memcg) && |
38d8b4e6 | 4999 | !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) { |
f3a53a3a TH |
5000 | memcg_memory_event(memcg, MEMCG_SWAP_MAX); |
5001 | memcg_memory_event(memcg, MEMCG_SWAP_FAIL); | |
1f47b61f | 5002 | mem_cgroup_id_put(memcg); |
37e84351 | 5003 | return -ENOMEM; |
1f47b61f | 5004 | } |
37e84351 | 5005 | |
38d8b4e6 YH |
5006 | /* Get references for the tail pages, too */ |
5007 | if (nr_pages > 1) | |
5008 | mem_cgroup_id_get_many(memcg, nr_pages - 1); | |
5009 | oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages); | |
e2e3fdc7 | 5010 | VM_BUG_ON_FOLIO(oldid, folio); |
c9019e9b | 5011 | mod_memcg_state(memcg, MEMCG_SWAP, nr_pages); |
37e84351 | 5012 | |
37e84351 VD |
5013 | return 0; |
5014 | } | |
5015 | ||
21afa38e | 5016 | /** |
01c4b28c | 5017 | * __mem_cgroup_uncharge_swap - uncharge swap space |
21afa38e | 5018 | * @entry: swap entry to uncharge |
38d8b4e6 | 5019 | * @nr_pages: the amount of swap space to uncharge |
21afa38e | 5020 | */ |
01c4b28c | 5021 | void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages) |
21afa38e JW |
5022 | { |
5023 | struct mem_cgroup *memcg; | |
5024 | unsigned short id; | |
5025 | ||
38d8b4e6 | 5026 | id = swap_cgroup_record(entry, 0, nr_pages); |
21afa38e | 5027 | rcu_read_lock(); |
adbe427b | 5028 | memcg = mem_cgroup_from_id(id); |
21afa38e | 5029 | if (memcg) { |
b25806dc | 5030 | if (!mem_cgroup_is_root(memcg)) { |
b94c4e94 | 5031 | if (do_memsw_account()) |
38d8b4e6 | 5032 | page_counter_uncharge(&memcg->memsw, nr_pages); |
b94c4e94 JW |
5033 | else |
5034 | page_counter_uncharge(&memcg->swap, nr_pages); | |
37e84351 | 5035 | } |
c9019e9b | 5036 | mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages); |
38d8b4e6 | 5037 | mem_cgroup_id_put_many(memcg, nr_pages); |
21afa38e JW |
5038 | } |
5039 | rcu_read_unlock(); | |
5040 | } | |
5041 | ||
d8b38438 VD |
5042 | long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg) |
5043 | { | |
5044 | long nr_swap_pages = get_nr_swap_pages(); | |
5045 | ||
b25806dc | 5046 | if (mem_cgroup_disabled() || do_memsw_account()) |
d8b38438 | 5047 | return nr_swap_pages; |
7848ed62 | 5048 | for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) |
d8b38438 | 5049 | nr_swap_pages = min_t(long, nr_swap_pages, |
bbec2e15 | 5050 | READ_ONCE(memcg->swap.max) - |
d8b38438 VD |
5051 | page_counter_read(&memcg->swap)); |
5052 | return nr_swap_pages; | |
5053 | } | |
5054 | ||
9202d527 | 5055 | bool mem_cgroup_swap_full(struct folio *folio) |
5ccc5aba VD |
5056 | { |
5057 | struct mem_cgroup *memcg; | |
5058 | ||
9202d527 | 5059 | VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); |
5ccc5aba VD |
5060 | |
5061 | if (vm_swap_full()) | |
5062 | return true; | |
b25806dc | 5063 | if (do_memsw_account()) |
5ccc5aba VD |
5064 | return false; |
5065 | ||
9202d527 | 5066 | memcg = folio_memcg(folio); |
5ccc5aba VD |
5067 | if (!memcg) |
5068 | return false; | |
5069 | ||
7848ed62 | 5070 | for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) { |
4b82ab4f JK |
5071 | unsigned long usage = page_counter_read(&memcg->swap); |
5072 | ||
5073 | if (usage * 2 >= READ_ONCE(memcg->swap.high) || | |
5074 | usage * 2 >= READ_ONCE(memcg->swap.max)) | |
5ccc5aba | 5075 | return true; |
4b82ab4f | 5076 | } |
5ccc5aba VD |
5077 | |
5078 | return false; | |
5079 | } | |
5080 | ||
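/*
 * Worked example for the check above: with memory.swap.max = 1G and
 * memory.swap.high left at "max", the cgroup's swap is reported full
 * once usage reaches 512M, i.e. as soon as usage * 2 >= max.
 */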
eccb52e7 | 5081 | static int __init setup_swap_account(char *s) |
21afa38e | 5082 | { |
118642d7 JW |
5083 | bool res; |
5084 | ||
5085 | if (!kstrtobool(s, &res) && !res) | |
5086 | pr_warn_once("The swapaccount=0 commandline option is deprecated " | |
5087 | "in favor of configuring swap control via cgroupfs. " | |
5088 | "Please report your usecase to [email protected] if you " | |
5089 | "depend on this functionality.\n"); | |
21afa38e JW |
5090 | return 1; |
5091 | } | |
eccb52e7 | 5092 | __setup("swapaccount=", setup_swap_account); |
21afa38e | 5093 | |
37e84351 VD |
5094 | static u64 swap_current_read(struct cgroup_subsys_state *css, |
5095 | struct cftype *cft) | |
5096 | { | |
5097 | struct mem_cgroup *memcg = mem_cgroup_from_css(css); | |
5098 | ||
5099 | return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE; | |
5100 | } | |
5101 | ||
e0e0b412 LD |
5102 | static u64 swap_peak_read(struct cgroup_subsys_state *css, |
5103 | struct cftype *cft) | |
5104 | { | |
5105 | struct mem_cgroup *memcg = mem_cgroup_from_css(css); | |
5106 | ||
5107 | return (u64)memcg->swap.watermark * PAGE_SIZE; | |
5108 | } | |
5109 | ||
4b82ab4f JK |
5110 | static int swap_high_show(struct seq_file *m, void *v) |
5111 | { | |
5112 | return seq_puts_memcg_tunable(m, | |
5113 | READ_ONCE(mem_cgroup_from_seq(m)->swap.high)); | |
5114 | } | |
5115 | ||
5116 | static ssize_t swap_high_write(struct kernfs_open_file *of, | |
5117 | char *buf, size_t nbytes, loff_t off) | |
5118 | { | |
5119 | struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); | |
5120 | unsigned long high; | |
5121 | int err; | |
5122 | ||
5123 | buf = strstrip(buf); | |
5124 | err = page_counter_memparse(buf, "max", &high); | |
5125 | if (err) | |
5126 | return err; | |
5127 | ||
5128 | page_counter_set_high(&memcg->swap, high); | |
5129 | ||
5130 | return nbytes; | |
5131 | } | |
5132 | ||
37e84351 VD |
5133 | static int swap_max_show(struct seq_file *m, void *v) |
5134 | { | |
677dc973 CD |
5135 | return seq_puts_memcg_tunable(m, |
5136 | READ_ONCE(mem_cgroup_from_seq(m)->swap.max)); | |
37e84351 VD |
5137 | } |
5138 | ||
5139 | static ssize_t swap_max_write(struct kernfs_open_file *of, | |
5140 | char *buf, size_t nbytes, loff_t off) | |
5141 | { | |
5142 | struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); | |
5143 | unsigned long max; | |
5144 | int err; | |
5145 | ||
5146 | buf = strstrip(buf); | |
5147 | err = page_counter_memparse(buf, "max", &max); | |
5148 | if (err) | |
5149 | return err; | |
5150 | ||
be09102b | 5151 | xchg(&memcg->swap.max, max); |
37e84351 VD |
5152 | |
5153 | return nbytes; | |
5154 | } | |
5155 | ||
f3a53a3a TH |
5156 | static int swap_events_show(struct seq_file *m, void *v) |
5157 | { | |
aa9694bb | 5158 | struct mem_cgroup *memcg = mem_cgroup_from_seq(m); |
f3a53a3a | 5159 | |
4b82ab4f JK |
5160 | seq_printf(m, "high %lu\n", |
5161 | atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH])); | |
f3a53a3a TH |
5162 | seq_printf(m, "max %lu\n", |
5163 | atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX])); | |
5164 | seq_printf(m, "fail %lu\n", | |
5165 | atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL])); | |
5166 | ||
5167 | return 0; | |
5168 | } | |
5169 | ||
37e84351 VD |
5170 | static struct cftype swap_files[] = { |
5171 | { | |
5172 | .name = "swap.current", | |
5173 | .flags = CFTYPE_NOT_ON_ROOT, | |
5174 | .read_u64 = swap_current_read, | |
5175 | }, | |
4b82ab4f JK |
5176 | { |
5177 | .name = "swap.high", | |
5178 | .flags = CFTYPE_NOT_ON_ROOT, | |
5179 | .seq_show = swap_high_show, | |
5180 | .write = swap_high_write, | |
5181 | }, | |
37e84351 VD |
5182 | { |
5183 | .name = "swap.max", | |
5184 | .flags = CFTYPE_NOT_ON_ROOT, | |
5185 | .seq_show = swap_max_show, | |
5186 | .write = swap_max_write, | |
5187 | }, | |
e0e0b412 LD |
5188 | { |
5189 | .name = "swap.peak", | |
5190 | .flags = CFTYPE_NOT_ON_ROOT, | |
5191 | .read_u64 = swap_peak_read, | |
5192 | }, | |
f3a53a3a TH |
5193 | { |
5194 | .name = "swap.events", | |
5195 | .flags = CFTYPE_NOT_ON_ROOT, | |
5196 | .file_offset = offsetof(struct mem_cgroup, swap_events_file), | |
5197 | .seq_show = swap_events_show, | |
5198 | }, | |
37e84351 VD |
5199 | { } /* terminate */ |
5200 | }; | |
5201 | ||
3a3b7fec | 5202 | #ifdef CONFIG_ZSWAP |
f4840ccf JW |
5203 | /** |
5204 | * obj_cgroup_may_zswap - check if this cgroup can zswap | |
5205 | * @objcg: the object cgroup | |
5206 | * | |
5207 | * Check if the hierarchical zswap limit has been reached. | |
5208 | * | |
5209 | * This doesn't check for specific headroom, and it is not atomic | |
5210 | * either. But with zswap, the size of the allocation is only known | |
be16dd76 | 5211 | * once compression has occurred, and this optimistic pre-check avoids |
f4840ccf JW |
5212 | * spending cycles on compression when there is already no room left |
5213 | * or zswap is disabled altogether somewhere in the hierarchy. | |
5214 | */ | |
5215 | bool obj_cgroup_may_zswap(struct obj_cgroup *objcg) | |
5216 | { | |
5217 | struct mem_cgroup *memcg, *original_memcg; | |
5218 | bool ret = true; | |
5219 | ||
5220 | if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) | |
5221 | return true; | |
5222 | ||
5223 | original_memcg = get_mem_cgroup_from_objcg(objcg); | |
7848ed62 | 5224 | for (memcg = original_memcg; !mem_cgroup_is_root(memcg); |
f4840ccf JW |
5225 | memcg = parent_mem_cgroup(memcg)) { |
5226 | unsigned long max = READ_ONCE(memcg->zswap_max); | |
5227 | unsigned long pages; | |
5228 | ||
5229 | if (max == PAGE_COUNTER_MAX) | |
5230 | continue; | |
5231 | if (max == 0) { | |
5232 | ret = false; | |
5233 | break; | |
5234 | } | |
5235 | ||
7d7ef0a4 YA |
5236 | /* |
5237 | * mem_cgroup_flush_stats() ignores small changes. Use | |
5238 | * do_flush_stats() directly to get accurate stats for charging. | |
5239 | */ | |
5240 | do_flush_stats(memcg); | |
f4840ccf JW |
5241 | pages = memcg_page_state(memcg, MEMCG_ZSWAP_B) / PAGE_SIZE; |
5242 | if (pages < max) | |
5243 | continue; | |
5244 | ret = false; | |
5245 | break; | |
5246 | } | |
5247 | mem_cgroup_put(original_memcg); | |
5248 | return ret; | |
5249 | } | |
5250 | ||
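/*
 * Illustrative sketch of the expected call order, loosely following
 * zswap_store(): check the limit first, compress, then force the
 * charge (the real caller runs in PF_MEMALLOC reclaim context). The
 * compressor below is a hypothetical stand-in.
 */
extern size_t example_compress(struct folio *folio);	/* hypothetical */

static bool __maybe_unused zswap_charge_sketch(struct folio *folio)
{
	struct obj_cgroup *objcg = get_obj_cgroup_from_folio(folio);
	bool ret = false;
	size_t len;

	if (objcg && !obj_cgroup_may_zswap(objcg))
		goto out;		/* hierarchy limit already reached */

	len = example_compress(folio);
	if (!len)
		goto out;

	if (objcg)
		obj_cgroup_charge_zswap(objcg, len);
	ret = true;
out:
	if (objcg)
		obj_cgroup_put(objcg);
	return ret;
}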
5251 | /** | |
5252 | * obj_cgroup_charge_zswap - charge compression backend memory | |
5253 | * @objcg: the object cgroup | |
5254 | * @size: size of compressed object | |
5255 | * | |
3a1060c2 | 5256 | * This forces the charge after obj_cgroup_may_zswap() allowed |
f4840ccf JW |
5257 | * compression and storage in zswap for this cgroup to go ahead. | |
5258 | */ | |
5259 | void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size) | |
5260 | { | |
5261 | struct mem_cgroup *memcg; | |
5262 | ||
5263 | if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) | |
5264 | return; | |
5265 | ||
5266 | VM_WARN_ON_ONCE(!(current->flags & PF_MEMALLOC)); | |
5267 | ||
5268 | /* PF_MEMALLOC context, charging must succeed */ | |
5269 | if (obj_cgroup_charge(objcg, GFP_KERNEL, size)) | |
5270 | VM_WARN_ON_ONCE(1); | |
5271 | ||
5272 | rcu_read_lock(); | |
5273 | memcg = obj_cgroup_memcg(objcg); | |
5274 | mod_memcg_state(memcg, MEMCG_ZSWAP_B, size); | |
5275 | mod_memcg_state(memcg, MEMCG_ZSWAPPED, 1); | |
5276 | rcu_read_unlock(); | |
5277 | } | |
5278 | ||
5279 | /** | |
5280 | * obj_cgroup_uncharge_zswap - uncharge compression backend memory | |
5281 | * @objcg: the object cgroup | |
5282 | * @size: size of compressed object | |
5283 | * | |
5284 | * Uncharges zswap memory on page-in. | |
5285 | */ | |
5286 | void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size) | |
5287 | { | |
5288 | struct mem_cgroup *memcg; | |
5289 | ||
5290 | if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) | |
5291 | return; | |
5292 | ||
5293 | obj_cgroup_uncharge(objcg, size); | |
5294 | ||
5295 | rcu_read_lock(); | |
5296 | memcg = obj_cgroup_memcg(objcg); | |
5297 | mod_memcg_state(memcg, MEMCG_ZSWAP_B, -size); | |
5298 | mod_memcg_state(memcg, MEMCG_ZSWAPPED, -1); | |
5299 | rcu_read_unlock(); | |
5300 | } | |
5301 | ||
501a06fe NP |
5302 | bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg) |
5303 | { | |
5304 | /* if zswap is disabled, do not block pages going to the swapping device */ | |
2b33a97c | 5305 | return !zswap_is_enabled() || !memcg || READ_ONCE(memcg->zswap_writeback); |
501a06fe NP |
5306 | } |
5307 | ||
f4840ccf JW |
5308 | static u64 zswap_current_read(struct cgroup_subsys_state *css, |
5309 | struct cftype *cft) | |
5310 | { | |
7d7ef0a4 YA |
5311 | struct mem_cgroup *memcg = mem_cgroup_from_css(css); |
5312 | ||
5313 | mem_cgroup_flush_stats(memcg); | |
5314 | return memcg_page_state(memcg, MEMCG_ZSWAP_B); | |
f4840ccf JW |
5315 | } |
5316 | ||
5317 | static int zswap_max_show(struct seq_file *m, void *v) | |
5318 | { | |
5319 | return seq_puts_memcg_tunable(m, | |
5320 | READ_ONCE(mem_cgroup_from_seq(m)->zswap_max)); | |
5321 | } | |
5322 | ||
5323 | static ssize_t zswap_max_write(struct kernfs_open_file *of, | |
5324 | char *buf, size_t nbytes, loff_t off) | |
5325 | { | |
5326 | struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); | |
5327 | unsigned long max; | |
5328 | int err; | |
5329 | ||
5330 | buf = strstrip(buf); | |
5331 | err = page_counter_memparse(buf, "max", &max); | |
5332 | if (err) | |
5333 | return err; | |
5334 | ||
5335 | xchg(&memcg->zswap_max, max); | |
5336 | ||
5337 | return nbytes; | |
5338 | } | |
5339 | ||
501a06fe NP |
5340 | static int zswap_writeback_show(struct seq_file *m, void *v) |
5341 | { | |
5342 | struct mem_cgroup *memcg = mem_cgroup_from_seq(m); | |
5343 | ||
5344 | seq_printf(m, "%d\n", READ_ONCE(memcg->zswap_writeback)); | |
5345 | return 0; | |
5346 | } | |
5347 | ||
5348 | static ssize_t zswap_writeback_write(struct kernfs_open_file *of, | |
5349 | char *buf, size_t nbytes, loff_t off) | |
5350 | { | |
5351 | struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); | |
5352 | int zswap_writeback; | |
5353 | ssize_t parse_ret = kstrtoint(strstrip(buf), 0, &zswap_writeback); | |
5354 | ||
5355 | if (parse_ret) | |
5356 | return parse_ret; | |
5357 | ||
5358 | if (zswap_writeback != 0 && zswap_writeback != 1) | |
5359 | return -EINVAL; | |
5360 | ||
5361 | WRITE_ONCE(memcg->zswap_writeback, zswap_writeback); | |
5362 | return nbytes; | |
5363 | } | |
5364 | ||
f4840ccf JW |
5365 | static struct cftype zswap_files[] = { |
5366 | { | |
5367 | .name = "zswap.current", | |
5368 | .flags = CFTYPE_NOT_ON_ROOT, | |
5369 | .read_u64 = zswap_current_read, | |
5370 | }, | |
5371 | { | |
5372 | .name = "zswap.max", | |
5373 | .flags = CFTYPE_NOT_ON_ROOT, | |
5374 | .seq_show = zswap_max_show, | |
5375 | .write = zswap_max_write, | |
5376 | }, | |
501a06fe NP |
5377 | { |
5378 | .name = "zswap.writeback", | |
5379 | .seq_show = zswap_writeback_show, | |
5380 | .write = zswap_writeback_write, | |
5381 | }, | |
f4840ccf JW |
5382 | { } /* terminate */ |
5383 | }; | |
3a3b7fec | 5384 | #endif /* CONFIG_ZSWAP */ |
f4840ccf | 5385 | |
21afa38e JW |
5386 | static int __init mem_cgroup_swap_init(void) |
5387 | { | |
2d1c4980 | 5388 | if (mem_cgroup_disabled()) |
eccb52e7 JW |
5389 | return 0; |
5390 | ||
5391 | WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files)); | |
e93d4166 | 5392 | #ifdef CONFIG_MEMCG_V1 |
eccb52e7 | 5393 | WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files)); |
e93d4166 | 5394 | #endif |
3a3b7fec | 5395 | #ifdef CONFIG_ZSWAP |
f4840ccf JW |
5396 | WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, zswap_files)); |
5397 | #endif | |
21afa38e JW |
5398 | return 0; |
5399 | } | |
b25806dc | 5400 | subsys_initcall(mem_cgroup_swap_init); |
21afa38e | 5401 | |
e55b9f96 | 5402 | #endif /* CONFIG_SWAP */ |