// SPDX-License-Identifier: GPL-2.0-or-later
/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <[email protected]>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <[email protected]>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Kernel Memory Controller
 * Copyright (C) 2012 Parallels Inc. and Google Inc.
 * Authors: Glauber Costa and Suleiman Souhlal
 *
 * Native page reclaim
 * Charge lifetime sanitation
 * Lockless page tracking & accounting
 * Unified hierarchy configuration model
 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
 *
 * Per memcg lru locking
 * Copyright (C) 2020 Alibaba, Inc, Alex Shi
 */

#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/pagewalk.h>
#include <linux/sched/mm.h>
#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/vm_event_item.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/poll.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmpressure.h>
#include <linux/memremap.h>
#include <linux/mm_inline.h>
#include <linux/swap_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/lockdep.h>
#include <linux/file.h>
#include <linux/resume_user_mode.h>
#include <linux/psi.h>
#include <linux/seq_buf.h>
#include <linux/sched/isolation.h>
#include <linux/kmemleak.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
#include "slab.h"
#include "swap.h"

#include <linux/uaccess.h>

#include <trace/events/vmscan.h>

struct cgroup_subsys memory_cgrp_subsys __read_mostly;
EXPORT_SYMBOL(memory_cgrp_subsys);

struct mem_cgroup *root_mem_cgroup __read_mostly;

/* Active memory cgroup to use from an interrupt context */
DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);
EXPORT_PER_CPU_SYMBOL_GPL(int_active_memcg);

/* Socket memory accounting disabled? */
static bool cgroup_memory_nosocket __ro_after_init;

/* Kernel memory accounting disabled? */
static bool cgroup_memory_nokmem __ro_after_init;

/* BPF memory accounting disabled? */
static bool cgroup_memory_nobpf __ro_after_init;

#ifdef CONFIG_CGROUP_WRITEBACK
static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
#endif

/* Whether legacy memory+swap accounting is active */
static bool do_memsw_account(void)
{
	return !cgroup_subsys_on_dfl(memory_cgrp_subsys);
}

#define THRESHOLDS_EVENTS_TARGET 128
#define SOFTLIMIT_EVENTS_TARGET 1024

/*
 * Cgroups above their limits are maintained in an RB-tree, independent of
 * their hierarchy representation
 */

struct mem_cgroup_tree_per_node {
	struct rb_root rb_root;
	struct rb_node *rb_rightmost;
	spinlock_t lock;
};

struct mem_cgroup_tree {
	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;

/* for OOM */
struct mem_cgroup_eventfd_list {
	struct list_head list;
	struct eventfd_ctx *eventfd;
};

/*
 * cgroup_event represents events which userspace wants to receive.
 */
struct mem_cgroup_event {
	/*
	 * memcg which the event belongs to.
	 */
	struct mem_cgroup *memcg;
	/*
	 * eventfd to signal userspace about the event.
	 */
	struct eventfd_ctx *eventfd;
	/*
	 * Each of these stored in a list by the cgroup.
	 */
	struct list_head list;
	/*
	 * register_event() callback will be used to add a new userspace
	 * waiter for changes related to this event. Use eventfd_signal()
	 * on eventfd to send notification to userspace.
	 */
	int (*register_event)(struct mem_cgroup *memcg,
			      struct eventfd_ctx *eventfd, const char *args);
	/*
	 * unregister_event() callback will be called when userspace closes
	 * the eventfd or when the cgroup is removed. This callback must be
	 * set if you want to provide notification functionality.
	 */
	void (*unregister_event)(struct mem_cgroup *memcg,
				 struct eventfd_ctx *eventfd);
	/*
	 * All fields below are needed to unregister the event when
	 * userspace closes the eventfd.
	 */
	poll_table pt;
	wait_queue_head_t *wqh;
	wait_queue_entry_t wait;
	struct work_struct remove;
};

static void mem_cgroup_threshold(struct mem_cgroup *memcg);
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);

/* Stuff for moving charges at task migration. */
/*
 * Types of charges to be moved.
 */
#define MOVE_ANON	0x1U
#define MOVE_FILE	0x2U
#define MOVE_MASK	(MOVE_ANON | MOVE_FILE)

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
	spinlock_t lock; /* for from, to */
	struct mm_struct *mm;
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	unsigned long flags;
	unsigned long precharge;
	unsigned long moved_charge;
	unsigned long moved_swap;
	struct task_struct *moving_task;	/* a task moving charges */
	wait_queue_head_t waitq;		/* a waitq for other context */
} mc = {
	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};

/*
 * Maximum loops in mem_cgroup_soft_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define MEM_CGROUP_MAX_RECLAIM_LOOPS		100
#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2

/* for encoding cft->private value on file */
enum res_type {
	_MEM,
	_MEMSWAP,
	_KMEM,
	_TCP,
};

#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
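/*
 * Illustrative note (derived from the macros above): MEMFILE_PRIVATE(x, val)
 * packs the res_type into bits 16-31 and the attribute value into bits 0-15;
 * MEMFILE_TYPE() and MEMFILE_ATTR() simply mask the two halves back out.
 */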

/*
 * Iteration constructs for visiting all cgroups (under a tree). If
 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 * be used for reference counting.
 */
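/*
 * For example, mem_cgroup_scan_tasks() further down in this file walks a
 * subtree with for_each_mem_cgroup_tree() and calls mem_cgroup_iter_break()
 * when its callback asks to stop the walk early.
 */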
#define for_each_mem_cgroup_tree(iter, root)		\
	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(root, iter, NULL))

#define for_each_mem_cgroup(iter)			\
	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(NULL, iter, NULL))

static inline bool task_is_dying(void)
{
	return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
		(current->flags & PF_EXITING);
}

/* Some nice accessors for the vmpressure. */
struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
	if (!memcg)
		memcg = root_mem_cgroup;
	return &memcg->vmpressure;
}

struct mem_cgroup *vmpressure_to_memcg(struct vmpressure *vmpr)
{
	return container_of(vmpr, struct mem_cgroup, vmpressure);
}

#define CURRENT_OBJCG_UPDATE_BIT 0
#define CURRENT_OBJCG_UPDATE_FLAG (1UL << CURRENT_OBJCG_UPDATE_BIT)

#ifdef CONFIG_MEMCG_KMEM
static DEFINE_SPINLOCK(objcg_lock);

bool mem_cgroup_kmem_disabled(void)
{
	return cgroup_memory_nokmem;
}

static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
				      unsigned int nr_pages);

static void obj_cgroup_release(struct percpu_ref *ref)
{
	struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
	unsigned int nr_bytes;
	unsigned int nr_pages;
	unsigned long flags;

	/*
	 * At this point all allocated objects are freed, and
	 * objcg->nr_charged_bytes can't have an arbitrary byte value.
	 * However, it can be PAGE_SIZE or (x * PAGE_SIZE).
	 *
	 * The following sequence can lead to it:
	 * 1) CPU0: objcg == stock->cached_objcg
	 * 2) CPU1: we do a small allocation (e.g. 92 bytes),
	 *          PAGE_SIZE bytes are charged
	 * 3) CPU1: a process from another memcg is allocating something,
	 *          the stock is flushed,
	 *          objcg->nr_charged_bytes = PAGE_SIZE - 92
	 * 4) CPU0: we do release this object,
	 *          92 bytes are added to stock->nr_bytes
	 * 5) CPU0: stock is flushed,
	 *          92 bytes are added to objcg->nr_charged_bytes
	 *
	 * In the result, nr_charged_bytes == PAGE_SIZE.
	 * This page will be uncharged in obj_cgroup_release().
	 */
	nr_bytes = atomic_read(&objcg->nr_charged_bytes);
	WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
	nr_pages = nr_bytes >> PAGE_SHIFT;

	if (nr_pages)
		obj_cgroup_uncharge_pages(objcg, nr_pages);

	spin_lock_irqsave(&objcg_lock, flags);
	list_del(&objcg->list);
	spin_unlock_irqrestore(&objcg_lock, flags);

	percpu_ref_exit(ref);
	kfree_rcu(objcg, rcu);
}

static struct obj_cgroup *obj_cgroup_alloc(void)
{
	struct obj_cgroup *objcg;
	int ret;

	objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL);
	if (!objcg)
		return NULL;

	ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0,
			      GFP_KERNEL);
	if (ret) {
		kfree(objcg);
		return NULL;
	}
	INIT_LIST_HEAD(&objcg->list);
	return objcg;
}

static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
				  struct mem_cgroup *parent)
{
	struct obj_cgroup *objcg, *iter;

	objcg = rcu_replace_pointer(memcg->objcg, NULL, true);

	spin_lock_irq(&objcg_lock);

	/* 1) Ready to reparent active objcg. */
	list_add(&objcg->list, &memcg->objcg_list);
	/* 2) Reparent active objcg and already reparented objcgs to parent. */
	list_for_each_entry(iter, &memcg->objcg_list, list)
		WRITE_ONCE(iter->memcg, parent);
	/* 3) Move already reparented objcgs to the parent's list */
	list_splice(&memcg->objcg_list, &parent->objcg_list);

	spin_unlock_irq(&objcg_lock);

	percpu_ref_kill(&objcg->refcnt);
}

/*
 * A lot of the calls to the cache allocation functions are expected to be
 * inlined by the compiler. Since the calls to memcg_slab_pre_alloc_hook() are
 * conditional to this static branch, we'll have to allow modules that do
 * kmem_cache_alloc and the like to see this symbol as well.
 */
DEFINE_STATIC_KEY_FALSE(memcg_kmem_online_key);
EXPORT_SYMBOL(memcg_kmem_online_key);

DEFINE_STATIC_KEY_FALSE(memcg_bpf_enabled_key);
EXPORT_SYMBOL(memcg_bpf_enabled_key);
#endif

/**
 * mem_cgroup_css_from_folio - css of the memcg associated with a folio
 * @folio: folio of interest
 *
 * If memcg is bound to the default hierarchy, css of the memcg associated
 * with @folio is returned. The returned css remains associated with @folio
 * until it is released.
 *
 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
 * is returned.
 */
struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio)
{
	struct mem_cgroup *memcg = folio_memcg(folio);

	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		memcg = root_mem_cgroup;

	return &memcg->css;
}

/**
 * page_cgroup_ino - return inode number of the memcg a page is charged to
 * @page: the page
 *
 * Look up the closest online ancestor of the memory cgroup @page is charged to
 * and return its inode number or 0 if @page is not charged to any cgroup. It
 * is safe to call this function without holding a reference to @page.
 *
 * Note, this function is inherently racy, because there is nothing to prevent
 * the cgroup inode from getting torn down and potentially reallocated a moment
 * after page_cgroup_ino() returns, so it only should be used by callers that
 * do not care (such as procfs interfaces).
 */
ino_t page_cgroup_ino(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long ino = 0;

	rcu_read_lock();
	/* page_folio() is racy here, but the entire function is racy anyway */
	memcg = folio_memcg_check(page_folio(page));

	while (memcg && !(memcg->css.flags & CSS_ONLINE))
		memcg = parent_mem_cgroup(memcg);
	if (memcg)
		ino = cgroup_ino(memcg->css.cgroup);
	rcu_read_unlock();
	return ino;
}

static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
					 struct mem_cgroup_tree_per_node *mctz,
					 unsigned long new_usage_in_excess)
{
	struct rb_node **p = &mctz->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct mem_cgroup_per_node *mz_node;
	bool rightmost = true;

	if (mz->on_tree)
		return;

	mz->usage_in_excess = new_usage_in_excess;
	if (!mz->usage_in_excess)
		return;
	while (*p) {
		parent = *p;
		mz_node = rb_entry(parent, struct mem_cgroup_per_node,
				   tree_node);
		if (mz->usage_in_excess < mz_node->usage_in_excess) {
			p = &(*p)->rb_left;
			rightmost = false;
		} else {
			p = &(*p)->rb_right;
		}
	}

	if (rightmost)
		mctz->rb_rightmost = &mz->tree_node;

	rb_link_node(&mz->tree_node, parent, p);
	rb_insert_color(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = true;
}

static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
					 struct mem_cgroup_tree_per_node *mctz)
{
	if (!mz->on_tree)
		return;

	if (&mz->tree_node == mctz->rb_rightmost)
		mctz->rb_rightmost = rb_prev(&mz->tree_node);

	rb_erase(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = false;
}

static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
				       struct mem_cgroup_tree_per_node *mctz)
{
	unsigned long flags;

	spin_lock_irqsave(&mctz->lock, flags);
	__mem_cgroup_remove_exceeded(mz, mctz);
	spin_unlock_irqrestore(&mctz->lock, flags);
}

static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
{
	unsigned long nr_pages = page_counter_read(&memcg->memory);
	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
	unsigned long excess = 0;

	if (nr_pages > soft_limit)
		excess = nr_pages - soft_limit;

	return excess;
}

static void mem_cgroup_update_tree(struct mem_cgroup *memcg, int nid)
{
	unsigned long excess;
	struct mem_cgroup_per_node *mz;
	struct mem_cgroup_tree_per_node *mctz;

	if (lru_gen_enabled()) {
		if (soft_limit_excess(memcg))
			lru_gen_soft_reclaim(memcg, nid);
		return;
	}

	mctz = soft_limit_tree.rb_tree_per_node[nid];
	if (!mctz)
		return;
	/*
	 * Necessary to update all ancestors when hierarchy is used,
	 * because their event counter is not touched.
	 */
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		mz = memcg->nodeinfo[nid];
		excess = soft_limit_excess(memcg);
		/*
		 * We have to update the tree if mz is on RB-tree or
		 * mem is over its softlimit.
		 */
		if (excess || mz->on_tree) {
			unsigned long flags;

			spin_lock_irqsave(&mctz->lock, flags);
			/* if on-tree, remove it */
			if (mz->on_tree)
				__mem_cgroup_remove_exceeded(mz, mctz);
			/*
			 * Insert again. mz->usage_in_excess will be updated.
			 * If excess is 0, no tree ops.
			 */
			__mem_cgroup_insert_exceeded(mz, mctz, excess);
			spin_unlock_irqrestore(&mctz->lock, flags);
		}
	}
}

static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
{
	struct mem_cgroup_tree_per_node *mctz;
	struct mem_cgroup_per_node *mz;
	int nid;

	for_each_node(nid) {
		mz = memcg->nodeinfo[nid];
		mctz = soft_limit_tree.rb_tree_per_node[nid];
		if (mctz)
			mem_cgroup_remove_exceeded(mz, mctz);
	}
}

static struct mem_cgroup_per_node *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{
	struct mem_cgroup_per_node *mz;

retry:
	mz = NULL;
	if (!mctz->rb_rightmost)
		goto done;		/* Nothing to reclaim from */

	mz = rb_entry(mctz->rb_rightmost,
		      struct mem_cgroup_per_node, tree_node);
	/*
	 * Remove the node now but someone else can add it back,
	 * we will add it back at the end of reclaim to its correct
	 * position in the tree.
	 */
	__mem_cgroup_remove_exceeded(mz, mctz);
	if (!soft_limit_excess(mz->memcg) ||
	    !css_tryget(&mz->memcg->css))
		goto retry;
done:
	return mz;
}

static struct mem_cgroup_per_node *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{
	struct mem_cgroup_per_node *mz;

	spin_lock_irq(&mctz->lock);
	mz = __mem_cgroup_largest_soft_limit_node(mctz);
	spin_unlock_irq(&mctz->lock);
	return mz;
}

/* Subset of vm_event_item to report for memcg event stats */
static const unsigned int memcg_vm_event_stat[] = {
	PGPGIN,
	PGPGOUT,
	PGSCAN_KSWAPD,
	PGSCAN_DIRECT,
	PGSCAN_KHUGEPAGED,
	PGSTEAL_KSWAPD,
	PGSTEAL_DIRECT,
	PGSTEAL_KHUGEPAGED,
	PGFAULT,
	PGMAJFAULT,
	PGREFILL,
	PGACTIVATE,
	PGDEACTIVATE,
	PGLAZYFREE,
	PGLAZYFREED,
#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
	ZSWPIN,
	ZSWPOUT,
	ZSWPWB,
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	THP_FAULT_ALLOC,
	THP_COLLAPSE_ALLOC,
	THP_SWPOUT,
	THP_SWPOUT_FALLBACK,
#endif
};

#define NR_MEMCG_EVENTS ARRAY_SIZE(memcg_vm_event_stat)
static int mem_cgroup_events_index[NR_VM_EVENT_ITEMS] __read_mostly;

static void init_memcg_events(void)
{
	int i;

	for (i = 0; i < NR_MEMCG_EVENTS; ++i)
		mem_cgroup_events_index[memcg_vm_event_stat[i]] = i + 1;
}

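/*
 * Note: init_memcg_events() above stores each index biased by one, so a zero
 * entry in mem_cgroup_events_index[] means "event not tracked" and the helper
 * below maps it back to -1.
 */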
static inline int memcg_events_index(enum vm_event_item idx)
{
	return mem_cgroup_events_index[idx] - 1;
}

struct memcg_vmstats_percpu {
	/* Stats updates since the last flush */
	unsigned int stats_updates;

	/* Cached pointers for fast iteration in memcg_rstat_updated() */
	struct memcg_vmstats_percpu *parent;
	struct memcg_vmstats *vmstats;

	/* The above should fit a single cacheline for memcg_rstat_updated() */

	/* Local (CPU and cgroup) page state & events */
	long state[MEMCG_NR_STAT];
	unsigned long events[NR_MEMCG_EVENTS];

	/* Delta calculation for lockless upward propagation */
	long state_prev[MEMCG_NR_STAT];
	unsigned long events_prev[NR_MEMCG_EVENTS];

	/* Cgroup1: threshold notifications & softlimit tree updates */
	unsigned long nr_page_events;
	unsigned long targets[MEM_CGROUP_NTARGETS];
} ____cacheline_aligned;

struct memcg_vmstats {
	/* Aggregated (CPU and subtree) page state & events */
	long state[MEMCG_NR_STAT];
	unsigned long events[NR_MEMCG_EVENTS];

	/* Non-hierarchical (CPU aggregated) page state & events */
	long state_local[MEMCG_NR_STAT];
	unsigned long events_local[NR_MEMCG_EVENTS];

	/* Pending child counts during tree propagation */
	long state_pending[MEMCG_NR_STAT];
	unsigned long events_pending[NR_MEMCG_EVENTS];

	/* Stats updates since the last flush */
	atomic64_t stats_updates;
};

/*
 * memcg and lruvec stats flushing
 *
 * Many codepaths leading to stats update or read are performance sensitive and
 * adding stats flushing in such codepaths is not desirable. So, to optimize the
 * flushing the kernel does:
 *
 * 1) Periodically and asynchronously flush the stats every 2 seconds to not let
 *    the rstat update tree grow unbounded.
 *
 * 2) Flush the stats synchronously on the reader side only when there are more
 *    than (MEMCG_CHARGE_BATCH * nr_cpus) update events. This lets the stats be
 *    out of sync by at most (MEMCG_CHARGE_BATCH * nr_cpus) events, but only for
 *    2 seconds due to (1).
 */
static void flush_memcg_stats_dwork(struct work_struct *w);
static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
static u64 flush_last_time;

#define FLUSH_TIME (2UL*HZ)

/*
 * Accessors to ensure that preemption is disabled on PREEMPT_RT because it
 * cannot rely on this as part of an acquired spinlock_t lock. These functions
 * are never used in hardirq context on PREEMPT_RT and therefore disabling
 * preemption is sufficient.
 */
static void memcg_stats_lock(void)
{
	preempt_disable_nested();
	VM_WARN_ON_IRQS_ENABLED();
}

static void __memcg_stats_lock(void)
{
	preempt_disable_nested();
}

static void memcg_stats_unlock(void)
{
	preempt_enable_nested();
}

static bool memcg_vmstats_needs_flush(struct memcg_vmstats *vmstats)
{
	return atomic64_read(&vmstats->stats_updates) >
		MEMCG_CHARGE_BATCH * num_online_cpus();
}

static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
{
	struct memcg_vmstats_percpu *statc;
	int cpu = smp_processor_id();

	if (!val)
		return;

	cgroup_rstat_updated(memcg->css.cgroup, cpu);
	statc = this_cpu_ptr(memcg->vmstats_percpu);
	for (; statc; statc = statc->parent) {
		statc->stats_updates += abs(val);
		if (statc->stats_updates < MEMCG_CHARGE_BATCH)
			continue;

		/*
		 * If @memcg is already flush-able, increasing stats_updates is
		 * redundant. Avoid the overhead of the atomic update.
		 */
		if (!memcg_vmstats_needs_flush(statc->vmstats))
			atomic64_add(statc->stats_updates,
				     &statc->vmstats->stats_updates);
		statc->stats_updates = 0;
	}
}

static void do_flush_stats(struct mem_cgroup *memcg)
{
	if (mem_cgroup_is_root(memcg))
		WRITE_ONCE(flush_last_time, jiffies_64);

	cgroup_rstat_flush(memcg->css.cgroup);
}

/*
 * mem_cgroup_flush_stats - flush the stats of a memory cgroup subtree
 * @memcg: root of the subtree to flush
 *
 * Flushing is serialized by the underlying global rstat lock. There is also a
 * minimum amount of work to be done even if there are no stat updates to flush.
 * Hence, we only flush the stats if the updates delta exceeds a threshold. This
 * avoids unnecessary work and contention on the underlying lock.
 */
void mem_cgroup_flush_stats(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return;

	if (!memcg)
		memcg = root_mem_cgroup;

	if (memcg_vmstats_needs_flush(memcg->vmstats))
		do_flush_stats(memcg);
}

void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg)
{
	/* Only flush if the periodic flusher is one full cycle late */
	if (time_after64(jiffies_64, READ_ONCE(flush_last_time) + 2*FLUSH_TIME))
		mem_cgroup_flush_stats(memcg);
}

static void flush_memcg_stats_dwork(struct work_struct *w)
{
	/*
	 * Deliberately ignore memcg_vmstats_needs_flush() here so that flushing
	 * in latency-sensitive paths is as cheap as possible.
	 */
	do_flush_stats(root_mem_cgroup);
	queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME);
}

SB
785unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
786{
787 long x = READ_ONCE(memcg->vmstats->state[idx]);
788#ifdef CONFIG_SMP
789 if (x < 0)
790 x = 0;
791#endif
792 return x;
793}
794
7bd5bc3c
YA
795static int memcg_page_state_unit(int item);
796
797/*
798 * Normalize the value passed into memcg_rstat_updated() to be in pages. Round
799 * up non-zero sub-page updates to 1 page as zero page updates are ignored.
800 */
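/*
 * For example, assuming PAGE_SIZE == 4096: a +512 update to a byte-counted
 * item (unit 1) computes 512 / 4096 == 0 and is bumped up to 1 page, while
 * updates to page-counted items are passed through unchanged.
 */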
static int memcg_state_val_in_pages(int idx, int val)
{
	int unit = memcg_page_state_unit(idx);

	if (!val || unit == PAGE_SIZE)
		return val;
	else
		return max(val * unit / PAGE_SIZE, 1UL);
}

/**
 * __mod_memcg_state - update cgroup memory statistics
 * @memcg: the memory cgroup
 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
 * @val: delta to add to the counter, can be negative
 */
void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
{
	if (mem_cgroup_disabled())
		return;

	__this_cpu_add(memcg->vmstats_percpu->state[idx], val);
	memcg_rstat_updated(memcg, memcg_state_val_in_pages(idx, val));
}

/* idx can be of type enum memcg_stat_item or node_stat_item. */
static unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx)
{
	long x = READ_ONCE(memcg->vmstats->state_local[idx]);

#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			      int val)
{
	struct mem_cgroup_per_node *pn;
	struct mem_cgroup *memcg;

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	memcg = pn->memcg;

	/*
	 * The callers from rmap rely on disabled preemption because they never
	 * update their counter from interrupt context. For these two counters
	 * we check that the update is never performed from an interrupt
	 * context, while other callers need to have interrupts disabled.
	 */
	__memcg_stats_lock();
	if (IS_ENABLED(CONFIG_DEBUG_VM)) {
		switch (idx) {
		case NR_ANON_MAPPED:
		case NR_FILE_MAPPED:
		case NR_ANON_THPS:
		case NR_SHMEM_PMDMAPPED:
		case NR_FILE_PMDMAPPED:
			WARN_ON_ONCE(!in_task());
			break;
		default:
			VM_WARN_ON_IRQS_ENABLED();
		}
	}

	/* Update memcg */
	__this_cpu_add(memcg->vmstats_percpu->state[idx], val);

	/* Update lruvec */
	__this_cpu_add(pn->lruvec_stats_percpu->state[idx], val);

	memcg_rstat_updated(memcg, memcg_state_val_in_pages(idx, val));
	memcg_stats_unlock();
}

/**
 * __mod_lruvec_state - update lruvec memory statistics
 * @lruvec: the lruvec
 * @idx: the stat item
 * @val: delta to add to the counter, can be negative
 *
 * The lruvec is the intersection of the NUMA node and a cgroup. This
 * function updates all three counters that are affected by a
 * change of state at this level: per-node, per-cgroup, per-lruvec.
 */
void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			int val)
{
	/* Update node */
	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);

	/* Update memcg and lruvec */
	if (!mem_cgroup_disabled())
		__mod_memcg_lruvec_state(lruvec, idx, val);
}

void __lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx,
			     int val)
{
	struct mem_cgroup *memcg;
	pg_data_t *pgdat = folio_pgdat(folio);
	struct lruvec *lruvec;

	rcu_read_lock();
	memcg = folio_memcg(folio);
	/* Untracked pages have no memcg, no lruvec. Update only the node */
	if (!memcg) {
		rcu_read_unlock();
		__mod_node_page_state(pgdat, idx, val);
		return;
	}

	lruvec = mem_cgroup_lruvec(memcg, pgdat);
	__mod_lruvec_state(lruvec, idx, val);
	rcu_read_unlock();
}
EXPORT_SYMBOL(__lruvec_stat_mod_folio);

void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
{
	pg_data_t *pgdat = page_pgdat(virt_to_page(p));
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	rcu_read_lock();
	memcg = mem_cgroup_from_slab_obj(p);

	/*
	 * Untracked pages have no memcg, no lruvec. Update only the
	 * node. If we reparent the slab objects to the root memcg,
	 * when we free the slab object, we need to update the per-memcg
	 * vmstats to keep it correct for the root memcg.
	 */
	if (!memcg) {
		__mod_node_page_state(pgdat, idx, val);
	} else {
		lruvec = mem_cgroup_lruvec(memcg, pgdat);
		__mod_lruvec_state(lruvec, idx, val);
	}
	rcu_read_unlock();
}

/**
 * __count_memcg_events - account VM events in a cgroup
 * @memcg: the memory cgroup
 * @idx: the event item
 * @count: the number of events that occurred
 */
void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
			  unsigned long count)
{
	int index = memcg_events_index(idx);

	if (mem_cgroup_disabled() || index < 0)
		return;

	memcg_stats_lock();
	__this_cpu_add(memcg->vmstats_percpu->events[index], count);
	memcg_rstat_updated(memcg, count);
	memcg_stats_unlock();
}

static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
{
	int index = memcg_events_index(event);

	if (index < 0)
		return 0;
	return READ_ONCE(memcg->vmstats->events[index]);
}

static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
{
	int index = memcg_events_index(event);

	if (index < 0)
		return 0;

	return READ_ONCE(memcg->vmstats->events_local[index]);
}

static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
					 int nr_pages)
{
	/* pagein of a big page is an event. So, ignore page size */
	if (nr_pages > 0)
		__count_memcg_events(memcg, PGPGIN, 1);
	else {
		__count_memcg_events(memcg, PGPGOUT, 1);
		nr_pages = -nr_pages; /* for event */
	}

	__this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages);
}

static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
				       enum mem_cgroup_events_target target)
{
	unsigned long val, next;

	val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events);
	next = __this_cpu_read(memcg->vmstats_percpu->targets[target]);
	/* from time_after() in jiffies.h */
	if ((long)(next - val) < 0) {
		switch (target) {
		case MEM_CGROUP_TARGET_THRESH:
			next = val + THRESHOLDS_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_SOFTLIMIT:
			next = val + SOFTLIMIT_EVENTS_TARGET;
			break;
		default:
			break;
		}
		__this_cpu_write(memcg->vmstats_percpu->targets[target], next);
		return true;
	}
	return false;
}

/*
 * Check events in order.
 */
static void memcg_check_events(struct mem_cgroup *memcg, int nid)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		return;

	/* threshold event is triggered in finer grain than soft limit */
	if (unlikely(mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_THRESH))) {
		bool do_softlimit;

		do_softlimit = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_SOFTLIMIT);
		mem_cgroup_threshold(memcg);
		if (unlikely(do_softlimit))
			mem_cgroup_update_tree(memcg, nid);
	}
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
}
EXPORT_SYMBOL(mem_cgroup_from_task);

static __always_inline struct mem_cgroup *active_memcg(void)
{
	if (!in_task())
		return this_cpu_read(int_active_memcg);
	else
		return current->active_memcg;
}

/**
 * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg.
 * @mm: mm from which memcg should be extracted. It can be NULL.
 *
 * Obtain a reference on mm->memcg and return it if successful. If mm
 * is NULL, then the memcg is chosen as follows:
 * 1) The active memcg, if set.
 * 2) current->mm->memcg, if available
 * 3) root memcg
 * If mem_cgroup is disabled, NULL is returned.
 */
struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return NULL;

	/*
	 * Page cache insertions can happen without an
	 * actual mm context, e.g. during disk probing
	 * on boot, loopback IO, acct() writes etc.
	 *
	 * No need to css_get on root memcg as the reference
	 * counting is disabled on the root level in the
	 * cgroup core. See CSS_NO_REF.
	 */
	if (unlikely(!mm)) {
		memcg = active_memcg();
		if (unlikely(memcg)) {
			/* remote memcg must hold a ref */
			css_get(&memcg->css);
			return memcg;
		}
		mm = current->mm;
		if (unlikely(!mm))
			return root_mem_cgroup;
	}

	rcu_read_lock();
	do {
		memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
		if (unlikely(!memcg))
			memcg = root_mem_cgroup;
	} while (!css_tryget(&memcg->css));
	rcu_read_unlock();
	return memcg;
}
EXPORT_SYMBOL(get_mem_cgroup_from_mm);

/**
 * get_mem_cgroup_from_current - Obtain a reference on current task's memcg.
 */
struct mem_cgroup *get_mem_cgroup_from_current(void)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return NULL;

again:
	rcu_read_lock();
	memcg = mem_cgroup_from_task(current);
	if (!css_tryget(&memcg->css)) {
		rcu_read_unlock();
		goto again;
	}
	rcu_read_unlock();
	return memcg;
}

/**
 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 * @root: hierarchy root
 * @prev: previously returned memcg, NULL on first invocation
 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 *
 * Returns references to children of the hierarchy below @root, or
 * @root itself, or %NULL after a full round-trip.
 *
 * Caller must pass the return value in @prev on subsequent
 * invocations for reference counting, or use mem_cgroup_iter_break()
 * to cancel a hierarchy walk before the round-trip is complete.
 *
 * Reclaimers can specify a node in @reclaim to divide up the memcgs
 * in the hierarchy among all concurrent reclaimers operating on the
 * same node.
 */
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
				   struct mem_cgroup *prev,
				   struct mem_cgroup_reclaim_cookie *reclaim)
{
	struct mem_cgroup_reclaim_iter *iter;
	struct cgroup_subsys_state *css = NULL;
	struct mem_cgroup *memcg = NULL;
	struct mem_cgroup *pos = NULL;

	if (mem_cgroup_disabled())
		return NULL;

	if (!root)
		root = root_mem_cgroup;

	rcu_read_lock();

	if (reclaim) {
		struct mem_cgroup_per_node *mz;

		mz = root->nodeinfo[reclaim->pgdat->node_id];
		iter = &mz->iter;

		/*
		 * On start, join the current reclaim iteration cycle.
		 * Exit when a concurrent walker completes it.
		 */
		if (!prev)
			reclaim->generation = iter->generation;
		else if (reclaim->generation != iter->generation)
			goto out_unlock;

		while (1) {
			pos = READ_ONCE(iter->position);
			if (!pos || css_tryget(&pos->css))
				break;
			/*
			 * css reference reached zero, so iter->position will
			 * be cleared by ->css_released. However, we should not
			 * rely on this happening soon, because ->css_released
			 * is called from a work queue, and by busy-waiting we
			 * might block it. So we clear iter->position right
			 * away.
			 */
			(void)cmpxchg(&iter->position, pos, NULL);
		}
	} else if (prev) {
		pos = prev;
	}

	if (pos)
		css = &pos->css;

	for (;;) {
		css = css_next_descendant_pre(css, &root->css);
		if (!css) {
			/*
			 * Reclaimers share the hierarchy walk, and a
			 * new one might jump in right at the end of
			 * the hierarchy - make sure they see at least
			 * one group and restart from the beginning.
			 */
			if (!prev)
				continue;
			break;
		}

		/*
		 * Verify the css and acquire a reference. The root
		 * is provided by the caller, so we know it's alive
		 * and kicking, and don't take an extra reference.
		 */
		if (css == &root->css || css_tryget(css)) {
			memcg = mem_cgroup_from_css(css);
			break;
		}
	}

	if (reclaim) {
		/*
		 * The position could have already been updated by a competing
		 * thread, so check that the value hasn't changed since we read
		 * it to avoid reclaiming from the same cgroup twice.
		 */
		(void)cmpxchg(&iter->position, pos, memcg);

		if (pos)
			css_put(&pos->css);

		if (!memcg)
			iter->generation++;
	}

out_unlock:
	rcu_read_unlock();
	if (prev && prev != root)
		css_put(&prev->css);

	return memcg;
}

/**
 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
 * @root: hierarchy root
 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
 */
void mem_cgroup_iter_break(struct mem_cgroup *root,
			   struct mem_cgroup *prev)
{
	if (!root)
		root = root_mem_cgroup;
	if (prev && prev != root)
		css_put(&prev->css);
}

static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
					   struct mem_cgroup *dead_memcg)
{
	struct mem_cgroup_reclaim_iter *iter;
	struct mem_cgroup_per_node *mz;
	int nid;

	for_each_node(nid) {
		mz = from->nodeinfo[nid];
		iter = &mz->iter;
		cmpxchg(&iter->position, dead_memcg, NULL);
	}
}

static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
{
	struct mem_cgroup *memcg = dead_memcg;
	struct mem_cgroup *last;

	do {
		__invalidate_reclaim_iterators(memcg, dead_memcg);
		last = memcg;
	} while ((memcg = parent_mem_cgroup(memcg)));

	/*
	 * When cgroup1 non-hierarchy mode is used,
	 * parent_mem_cgroup() does not walk all the way up to the
	 * cgroup root (root_mem_cgroup). So we have to handle
	 * dead_memcg from cgroup root separately.
	 */
	if (!mem_cgroup_is_root(last))
		__invalidate_reclaim_iterators(root_mem_cgroup,
					       dead_memcg);
}

/**
 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
 * @memcg: hierarchy root
 * @fn: function to call for each task
 * @arg: argument passed to @fn
 *
 * This function iterates over tasks attached to @memcg or to any of its
 * descendants and calls @fn for each task. If @fn returns a non-zero
 * value, the function breaks the iteration loop. Otherwise, it will iterate
 * over all tasks and return 0.
 *
 * This function must not be called for the root memory cgroup.
 */
void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
			   int (*fn)(struct task_struct *, void *), void *arg)
{
	struct mem_cgroup *iter;
	int ret = 0;

	BUG_ON(mem_cgroup_is_root(memcg));

	for_each_mem_cgroup_tree(iter, memcg) {
		struct css_task_iter it;
		struct task_struct *task;

		css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
		while (!ret && (task = css_task_iter_next(&it)))
			ret = fn(task, arg);
		css_task_iter_end(&it);
		if (ret) {
			mem_cgroup_iter_break(memcg, iter);
			break;
		}
	}
}

#ifdef CONFIG_DEBUG_VM
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	memcg = folio_memcg(folio);

	if (!memcg)
		VM_BUG_ON_FOLIO(!mem_cgroup_is_root(lruvec_memcg(lruvec)), folio);
	else
		VM_BUG_ON_FOLIO(lruvec_memcg(lruvec) != memcg, folio);
}
#endif

/**
 * folio_lruvec_lock - Lock the lruvec for a folio.
 * @folio: Pointer to the folio.
 *
 * These functions are safe to use under any of the following conditions:
 * - folio locked
 * - folio_test_lru false
 * - folio_memcg_lock()
 * - folio frozen (refcount of 0)
 *
 * Return: The lruvec this folio is on with its lock held.
 */
struct lruvec *folio_lruvec_lock(struct folio *folio)
{
	struct lruvec *lruvec = folio_lruvec(folio);

	spin_lock(&lruvec->lru_lock);
	lruvec_memcg_debug(lruvec, folio);

	return lruvec;
}

/**
 * folio_lruvec_lock_irq - Lock the lruvec for a folio.
 * @folio: Pointer to the folio.
 *
 * These functions are safe to use under any of the following conditions:
 * - folio locked
 * - folio_test_lru false
 * - folio_memcg_lock()
 * - folio frozen (refcount of 0)
 *
 * Return: The lruvec this folio is on with its lock held and interrupts
 * disabled.
 */
struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
{
	struct lruvec *lruvec = folio_lruvec(folio);

	spin_lock_irq(&lruvec->lru_lock);
	lruvec_memcg_debug(lruvec, folio);

	return lruvec;
}

/**
 * folio_lruvec_lock_irqsave - Lock the lruvec for a folio.
 * @folio: Pointer to the folio.
 * @flags: Pointer to irqsave flags.
 *
 * These functions are safe to use under any of the following conditions:
 * - folio locked
 * - folio_test_lru false
 * - folio_memcg_lock()
 * - folio frozen (refcount of 0)
 *
 * Return: The lruvec this folio is on with its lock held and interrupts
 * disabled.
 */
struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
					 unsigned long *flags)
{
	struct lruvec *lruvec = folio_lruvec(folio);

	spin_lock_irqsave(&lruvec->lru_lock, *flags);
	lruvec_memcg_debug(lruvec, folio);

	return lruvec;
}

/**
 * mem_cgroup_update_lru_size - account for adding or removing an lru page
 * @lruvec: mem_cgroup per zone lru vector
 * @lru: index of lru list the page is sitting on
 * @zid: zone id of the accounted pages
 * @nr_pages: positive when adding or negative when removing
 *
 * This function must be called under lru_lock, just before a page is added
 * to or just after a page is removed from an lru list.
 */
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int zid, int nr_pages)
{
	struct mem_cgroup_per_node *mz;
	unsigned long *lru_size;
	long size;

	if (mem_cgroup_disabled())
		return;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	lru_size = &mz->lru_zone_size[zid][lru];

	if (nr_pages < 0)
		*lru_size += nr_pages;

	size = *lru_size;
	if (WARN_ONCE(size < 0,
		"%s(%p, %d, %d): lru_size %ld\n",
		__func__, lruvec, lru, nr_pages, size)) {
		VM_BUG_ON(1);
		*lru_size = 0;
	}

	if (nr_pages > 0)
		*lru_size += nr_pages;
}

/**
 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
 * @memcg: the memory cgroup
 *
 * Returns the maximum amount of memory @memcg can be charged with, in
 * pages.
 */
static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
{
	unsigned long margin = 0;
	unsigned long count;
	unsigned long limit;

	count = page_counter_read(&memcg->memory);
	limit = READ_ONCE(memcg->memory.max);
	if (count < limit)
		margin = limit - count;

	if (do_memsw_account()) {
		count = page_counter_read(&memcg->memsw);
		limit = READ_ONCE(memcg->memsw.max);
		if (count < limit)
			margin = min(margin, limit - count);
		else
			margin = 0;
	}

	return margin;
}

/*
 * A routine for checking whether "mem" is under move_account() or not.
 *
 * Checking a cgroup is mc.from or mc.to or under hierarchy of
 * moving cgroups. This is for waiting at high-memory pressure
 * caused by "move".
 */
static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
{
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	bool ret = false;
	/*
	 * Unlike task_move routines, we access mc.to, mc.from not under
	 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
	 */
	spin_lock(&mc.lock);
	from = mc.from;
	to = mc.to;
	if (!from)
		goto unlock;

	ret = mem_cgroup_is_descendant(from, memcg) ||
		mem_cgroup_is_descendant(to, memcg);
unlock:
	spin_unlock(&mc.lock);
	return ret;
}

static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
{
	if (mc.moving_task && current != mc.moving_task) {
		if (mem_cgroup_under_move(memcg)) {
			DEFINE_WAIT(wait);
			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
			/* moving charge context might have finished. */
			if (mc.moving_task)
				schedule();
			finish_wait(&mc.waitq, &wait);
			return true;
		}
	}
	return false;
}

struct memory_stat {
	const char *name;
	unsigned int idx;
};

static const struct memory_stat memory_stats[] = {
	{ "anon", NR_ANON_MAPPED },
	{ "file", NR_FILE_PAGES },
	{ "kernel", MEMCG_KMEM },
	{ "kernel_stack", NR_KERNEL_STACK_KB },
	{ "pagetables", NR_PAGETABLE },
	{ "sec_pagetables", NR_SECONDARY_PAGETABLE },
	{ "percpu", MEMCG_PERCPU_B },
	{ "sock", MEMCG_SOCK },
	{ "vmalloc", MEMCG_VMALLOC },
	{ "shmem", NR_SHMEM },
#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
	{ "zswap", MEMCG_ZSWAP_B },
	{ "zswapped", MEMCG_ZSWAPPED },
#endif
	{ "file_mapped", NR_FILE_MAPPED },
	{ "file_dirty", NR_FILE_DIRTY },
	{ "file_writeback", NR_WRITEBACK },
#ifdef CONFIG_SWAP
	{ "swapcached", NR_SWAPCACHE },
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	{ "anon_thp", NR_ANON_THPS },
	{ "file_thp", NR_FILE_THPS },
	{ "shmem_thp", NR_SHMEM_THPS },
#endif
	{ "inactive_anon", NR_INACTIVE_ANON },
	{ "active_anon", NR_ACTIVE_ANON },
	{ "inactive_file", NR_INACTIVE_FILE },
	{ "active_file", NR_ACTIVE_FILE },
	{ "unevictable", NR_UNEVICTABLE },
	{ "slab_reclaimable", NR_SLAB_RECLAIMABLE_B },
	{ "slab_unreclaimable", NR_SLAB_UNRECLAIMABLE_B },

	/* The memory events */
	{ "workingset_refault_anon", WORKINGSET_REFAULT_ANON },
	{ "workingset_refault_file", WORKINGSET_REFAULT_FILE },
	{ "workingset_activate_anon", WORKINGSET_ACTIVATE_ANON },
	{ "workingset_activate_file", WORKINGSET_ACTIVATE_FILE },
	{ "workingset_restore_anon", WORKINGSET_RESTORE_ANON },
	{ "workingset_restore_file", WORKINGSET_RESTORE_FILE },
	{ "workingset_nodereclaim", WORKINGSET_NODERECLAIM },
};

/* The actual unit of the state item, not the same as the output unit */
static int memcg_page_state_unit(int item)
{
	switch (item) {
	case MEMCG_PERCPU_B:
	case MEMCG_ZSWAP_B:
	case NR_SLAB_RECLAIMABLE_B:
	case NR_SLAB_UNRECLAIMABLE_B:
		return 1;
	case NR_KERNEL_STACK_KB:
		return SZ_1K;
	default:
		return PAGE_SIZE;
	}
}

/* Translate stat items to the correct unit for memory.stat output */
static int memcg_page_state_output_unit(int item)
{
	/*
	 * Workingset state is actually in pages, but we export it to userspace
	 * as a scalar count of events, so special case it here.
	 */
	switch (item) {
	case WORKINGSET_REFAULT_ANON:
	case WORKINGSET_REFAULT_FILE:
	case WORKINGSET_ACTIVATE_ANON:
	case WORKINGSET_ACTIVATE_FILE:
	case WORKINGSET_RESTORE_ANON:
	case WORKINGSET_RESTORE_FILE:
	case WORKINGSET_NODERECLAIM:
		return 1;
	default:
		return memcg_page_state_unit(item);
	}
}

static inline unsigned long memcg_page_state_output(struct mem_cgroup *memcg,
						    int item)
{
	return memcg_page_state(memcg, item) *
		memcg_page_state_output_unit(item);
}

static inline unsigned long memcg_page_state_local_output(
		struct mem_cgroup *memcg, int item)
{
	return memcg_page_state_local(memcg, item) *
		memcg_page_state_output_unit(item);
}

static void memcg_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
{
	int i;

	/*
	 * Provide statistics on the state of the memory subsystem as
	 * well as cumulative event counters that show past behavior.
	 *
	 * This list is ordered following a combination of these gradients:
	 * 1) generic big picture -> specifics and details
	 * 2) reflecting userspace activity -> reflecting kernel heuristics
	 *
	 * Current memory state:
	 */
	mem_cgroup_flush_stats(memcg);

	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
		u64 size;

		size = memcg_page_state_output(memcg, memory_stats[i].idx);
		seq_buf_printf(s, "%s %llu\n", memory_stats[i].name, size);

		if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) {
			size += memcg_page_state_output(memcg,
							NR_SLAB_RECLAIMABLE_B);
			seq_buf_printf(s, "slab %llu\n", size);
		}
	}

	/* Accumulated memory events */
	seq_buf_printf(s, "pgscan %lu\n",
		       memcg_events(memcg, PGSCAN_KSWAPD) +
		       memcg_events(memcg, PGSCAN_DIRECT) +
		       memcg_events(memcg, PGSCAN_KHUGEPAGED));
	seq_buf_printf(s, "pgsteal %lu\n",
		       memcg_events(memcg, PGSTEAL_KSWAPD) +
		       memcg_events(memcg, PGSTEAL_DIRECT) +
		       memcg_events(memcg, PGSTEAL_KHUGEPAGED));

	for (i = 0; i < ARRAY_SIZE(memcg_vm_event_stat); i++) {
		if (memcg_vm_event_stat[i] == PGPGIN ||
		    memcg_vm_event_stat[i] == PGPGOUT)
			continue;

		seq_buf_printf(s, "%s %lu\n",
			       vm_event_name(memcg_vm_event_stat[i]),
			       memcg_events(memcg, memcg_vm_event_stat[i]));
	}

	/* The above should easily fit into one page */
	WARN_ON_ONCE(seq_buf_has_overflowed(s));
}

static void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s);

static void memory_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
{
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		memcg_stat_format(memcg, s);
	else
		memcg1_stat_format(memcg, s);
	WARN_ON_ONCE(seq_buf_has_overflowed(s));
}

e222432b 1705/**
f0c867d9 1706 * mem_cgroup_print_oom_context: Print OOM information relevant to
1707 * memory controller.
e222432b
BS
1708 * @memcg: The memory cgroup that went over limit
1709 * @p: Task that is going to be killed
1710 *
1711 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1712 * enabled
1713 */
f0c867d9 1714void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
e222432b 1715{
e222432b
BS
1716 rcu_read_lock();
1717
f0c867d9 1718 if (memcg) {
1719 pr_cont(",oom_memcg=");
1720 pr_cont_cgroup_path(memcg->css.cgroup);
1721 } else
1722 pr_cont(",global_oom");
2415b9f5 1723 if (p) {
f0c867d9 1724 pr_cont(",task_memcg=");
2415b9f5 1725 pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
2415b9f5 1726 }
e222432b 1727 rcu_read_unlock();
f0c867d9 1728}
1729
1730/**
1731 * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
1732 * memory controller.
1733 * @memcg: The memory cgroup that went over limit
1734 */
1735void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
1736{
68aaee14
TH
1737 /* Use a static buffer, as the caller is holding oom_lock. */
1738 static char buf[PAGE_SIZE];
5b42360c 1739 struct seq_buf s;
68aaee14
TH
1740
1741 lockdep_assert_held(&oom_lock);
e222432b 1742
3e32cb2e
JW
1743 pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1744 K((u64)page_counter_read(&memcg->memory)),
15b42562 1745 K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt);
c8713d0b
JW
1746 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1747 pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
1748 K((u64)page_counter_read(&memcg->swap)),
32d087cd 1749 K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt);
c8713d0b
JW
1750 else {
1751 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1752 K((u64)page_counter_read(&memcg->memsw)),
1753 K((u64)memcg->memsw.max), memcg->memsw.failcnt);
1754 pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1755 K((u64)page_counter_read(&memcg->kmem)),
1756 K((u64)memcg->kmem.max), memcg->kmem.failcnt);
58cf188e 1757 }
c8713d0b
JW
1758
1759 pr_info("Memory cgroup stats for ");
1760 pr_cont_cgroup_path(memcg->css.cgroup);
1761 pr_cont(":");
5b42360c
YA
1762 seq_buf_init(&s, buf, sizeof(buf));
1763 memory_stat_format(memcg, &s);
1764 seq_buf_do_printk(&s, KERN_INFO);
e222432b
BS
1765}
1766
a63d83f4
DR
1767/*
1768 * Return the memory (and swap, if configured) limit for a memcg.
1769 */
bbec2e15 1770unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
a63d83f4 1771{
8d387a5f
WL
1772 unsigned long max = READ_ONCE(memcg->memory.max);
1773
b94c4e94 1774 if (do_memsw_account()) {
8d387a5f
WL
1775 if (mem_cgroup_swappiness(memcg)) {
1776 /* Calculate swap excess capacity from memsw limit */
1777 unsigned long swap = READ_ONCE(memcg->memsw.max) - max;
1778
1779 max += min(swap, (unsigned long)total_swap_pages);
1780 }
b94c4e94
JW
1781 } else {
1782 if (mem_cgroup_swappiness(memcg))
1783 max += min(READ_ONCE(memcg->swap.max),
1784 (unsigned long)total_swap_pages);
9a5a8f19 1785 }
bbec2e15 1786 return max;
a63d83f4
DR
1787}
1788
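/*
 * Worked example (illustrative only, not part of the original file): with
 * memory.max at 1G and memsw.max at 1.5G under cgroup1 memsw accounting, a
 * non-zero swappiness and plenty of physical swap, the memsw branch above
 * reports 1G plus min(0.5G, total swap) worth of pages, i.e. 1.5G.  On
 * cgroup2 with swap.max at 256M it reports 1G + min(256M, total swap)
 * instead, and with swappiness == 0 only memory.max is reported in either
 * case.
 */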
9783aa99
CD
1789unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
1790{
1791 return page_counter_read(&memcg->memory);
1792}
1793
b6e6edcf 1794static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
19965460 1795 int order)
9cbb78bb 1796{
6e0fc46d
DR
1797 struct oom_control oc = {
1798 .zonelist = NULL,
1799 .nodemask = NULL,
2a966b77 1800 .memcg = memcg,
6e0fc46d
DR
1801 .gfp_mask = gfp_mask,
1802 .order = order,
6e0fc46d 1803 };
1378b37d 1804 bool ret = true;
9cbb78bb 1805
7775face
TH
1806 if (mutex_lock_killable(&oom_lock))
1807 return true;
1378b37d
YS
1808
1809 if (mem_cgroup_margin(memcg) >= (1 << order))
1810 goto unlock;
1811
7775face
TH
1812 /*
1813 * A few threads which were not waiting at mutex_lock_killable() can
1814 * fail to bail out. Therefore, check again after holding oom_lock.
1815 */
a4ebf1b6 1816 ret = task_is_dying() || out_of_memory(&oc);
1378b37d
YS
1817
1818unlock:
dc56401f 1819 mutex_unlock(&oom_lock);
7c5f64f8 1820 return ret;
9cbb78bb
DR
1821}
1822
0608f43d 1823static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
ef8f2327 1824 pg_data_t *pgdat,
0608f43d
AM
1825 gfp_t gfp_mask,
1826 unsigned long *total_scanned)
1827{
1828 struct mem_cgroup *victim = NULL;
1829 int total = 0;
1830 int loop = 0;
1831 unsigned long excess;
1832 unsigned long nr_scanned;
1833 struct mem_cgroup_reclaim_cookie reclaim = {
ef8f2327 1834 .pgdat = pgdat,
0608f43d
AM
1835 };
1836
3e32cb2e 1837 excess = soft_limit_excess(root_memcg);
0608f43d
AM
1838
1839 while (1) {
1840 victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1841 if (!victim) {
1842 loop++;
1843 if (loop >= 2) {
1844 /*
1845 * If we have not been able to reclaim
1846 * anything, it might be because there are
1847 * no reclaimable pages under this hierarchy
1848 */
1849 if (!total)
1850 break;
1851 /*
1852 * We want to do more targeted reclaim.
1853 * excess >> 2 is not so aggressive that we
1854 * reclaim too much, nor so small that we keep
1855 * coming back to reclaim from this cgroup
1856 */
1857 if (total >= (excess >> 2) ||
1858 (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1859 break;
1860 }
1861 continue;
1862 }
a9dd0a83 1863 total += mem_cgroup_shrink_node(victim, gfp_mask, false,
ef8f2327 1864 pgdat, &nr_scanned);
0608f43d 1865 *total_scanned += nr_scanned;
3e32cb2e 1866 if (!soft_limit_excess(root_memcg))
0608f43d 1867 break;
6d61ef40 1868 }
0608f43d
AM
1869 mem_cgroup_iter_break(root_memcg, victim);
1870 return total;
6d61ef40
BS
1871}
1872
0056f4e6
JW
1873#ifdef CONFIG_LOCKDEP
1874static struct lockdep_map memcg_oom_lock_dep_map = {
1875 .name = "memcg_oom_lock",
1876};
1877#endif
1878
fb2a6fc5
JW
1879static DEFINE_SPINLOCK(memcg_oom_lock);
1880
867578cb
KH
1881/*
1882 * Check whether the OOM killer is already running under our hierarchy.
1883 * If someone else is already running it, return false.
1884 */
fb2a6fc5 1885static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
867578cb 1886{
79dfdacc 1887 struct mem_cgroup *iter, *failed = NULL;
a636b327 1888
fb2a6fc5
JW
1889 spin_lock(&memcg_oom_lock);
1890
9f3a0d09 1891 for_each_mem_cgroup_tree(iter, memcg) {
23751be0 1892 if (iter->oom_lock) {
79dfdacc
MH
1893 /*
1894 * this subtree of our hierarchy is already locked,
1895 * so we cannot take the lock.
1896 */
79dfdacc 1897 failed = iter;
9f3a0d09
JW
1898 mem_cgroup_iter_break(memcg, iter);
1899 break;
23751be0
JW
1900 } else
1901 iter->oom_lock = true;
7d74b06f 1902 }
867578cb 1903
fb2a6fc5
JW
1904 if (failed) {
1905 /*
1906 * OK, we failed to lock the whole subtree so we have
1907 * to clean up what we set up to the failing subtree
1908 */
1909 for_each_mem_cgroup_tree(iter, memcg) {
1910 if (iter == failed) {
1911 mem_cgroup_iter_break(memcg, iter);
1912 break;
1913 }
1914 iter->oom_lock = false;
79dfdacc 1915 }
0056f4e6
JW
1916 } else
1917 mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
fb2a6fc5
JW
1918
1919 spin_unlock(&memcg_oom_lock);
1920
1921 return !failed;
a636b327 1922}
0b7f569e 1923
fb2a6fc5 1924static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
0b7f569e 1925{
7d74b06f
KH
1926 struct mem_cgroup *iter;
1927
fb2a6fc5 1928 spin_lock(&memcg_oom_lock);
5facae4f 1929 mutex_release(&memcg_oom_lock_dep_map, _RET_IP_);
c0ff4b85 1930 for_each_mem_cgroup_tree(iter, memcg)
79dfdacc 1931 iter->oom_lock = false;
fb2a6fc5 1932 spin_unlock(&memcg_oom_lock);
79dfdacc
MH
1933}
1934
c0ff4b85 1935static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
79dfdacc
MH
1936{
1937 struct mem_cgroup *iter;
1938
c2b42d3c 1939 spin_lock(&memcg_oom_lock);
c0ff4b85 1940 for_each_mem_cgroup_tree(iter, memcg)
c2b42d3c
TH
1941 iter->under_oom++;
1942 spin_unlock(&memcg_oom_lock);
79dfdacc
MH
1943}
1944
c0ff4b85 1945static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
79dfdacc
MH
1946{
1947 struct mem_cgroup *iter;
1948
867578cb 1949 /*
f0953a1b 1950 * Be careful about under_oom underflows because a child memcg
7a52d4d8 1951 * could have been added after mem_cgroup_mark_under_oom.
867578cb 1952 */
c2b42d3c 1953 spin_lock(&memcg_oom_lock);
c0ff4b85 1954 for_each_mem_cgroup_tree(iter, memcg)
c2b42d3c
TH
1955 if (iter->under_oom > 0)
1956 iter->under_oom--;
1957 spin_unlock(&memcg_oom_lock);
0b7f569e
KH
1958}
1959
867578cb
KH
1960static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1961
dc98df5a 1962struct oom_wait_info {
d79154bb 1963 struct mem_cgroup *memcg;
ac6424b9 1964 wait_queue_entry_t wait;
dc98df5a
KH
1965};
1966
ac6424b9 1967static int memcg_oom_wake_function(wait_queue_entry_t *wait,
dc98df5a
KH
1968 unsigned mode, int sync, void *arg)
1969{
d79154bb
HD
1970 struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
1971 struct mem_cgroup *oom_wait_memcg;
dc98df5a
KH
1972 struct oom_wait_info *oom_wait_info;
1973
1974 oom_wait_info = container_of(wait, struct oom_wait_info, wait);
d79154bb 1975 oom_wait_memcg = oom_wait_info->memcg;
dc98df5a 1976
2314b42d
JW
1977 if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
1978 !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
dc98df5a 1979 return 0;
dc98df5a
KH
1980 return autoremove_wake_function(wait, mode, sync, arg);
1981}
1982
c0ff4b85 1983static void memcg_oom_recover(struct mem_cgroup *memcg)
3c11ecf4 1984{
c2b42d3c
TH
1985 /*
1986 * For the following lockless ->under_oom test, the only required
1987 * guarantee is that it must see the state asserted by an OOM when
1988 * this function is called as a result of userland actions
1989 * triggered by the notification of the OOM. This is trivially
1990 * achieved by invoking mem_cgroup_mark_under_oom() before
1991 * triggering notification.
1992 */
1993 if (memcg && memcg->under_oom)
f4b90b70 1994 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
3c11ecf4
KH
1995}
1996
becdf89d
SB
1997/*
1998 * Returns true if it successfully killed one or more processes, though in
1999 * some corner cases it can return true even without killing any process.
2000 */
2001static bool mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
0b7f569e 2002{
becdf89d 2003 bool locked, ret;
7056d3a3 2004
29ef680a 2005 if (order > PAGE_ALLOC_COSTLY_ORDER)
becdf89d 2006 return false;
29ef680a 2007
7a1adfdd
RG
2008 memcg_memory_event(memcg, MEMCG_OOM);
2009
867578cb 2010 /*
49426420
JW
2011 * We are in the middle of the charge context here, so we
2012 * don't want to block when potentially sitting on a callstack
2013 * that holds all kinds of filesystem and mm locks.
2014 *
29ef680a
MH
2015 * cgroup1 allows disabling the OOM killer and waiting for outside
2016 * handling until the charge can succeed; remember the context and put
2017 * the task to sleep at the end of the page fault when all locks are
2018 * released.
49426420 2019 *
29ef680a
MH
2020 * On the other hand, in-kernel OOM killer allows for an async victim
2021 * memory reclaim (oom_reaper) and that means that we are not solely
2022 * relying on the oom victim to make a forward progress and we can
2023 * invoke the oom killer here.
2024 *
2025 * Please note that mem_cgroup_out_of_memory might fail to find a
2026 * victim and then we have to bail out from the charge path.
867578cb 2027 */
17c56de6 2028 if (READ_ONCE(memcg->oom_kill_disable)) {
becdf89d
SB
2029 if (current->in_user_fault) {
2030 css_get(&memcg->css);
2031 current->memcg_in_oom = memcg;
2032 current->memcg_oom_gfp_mask = mask;
2033 current->memcg_oom_order = order;
2034 }
2035 return false;
29ef680a
MH
2036 }
2037
7056d3a3
MH
2038 mem_cgroup_mark_under_oom(memcg);
2039
2040 locked = mem_cgroup_oom_trylock(memcg);
2041
2042 if (locked)
2043 mem_cgroup_oom_notify(memcg);
2044
2045 mem_cgroup_unmark_under_oom(memcg);
becdf89d 2046 ret = mem_cgroup_out_of_memory(memcg, mask, order);
7056d3a3
MH
2047
2048 if (locked)
2049 mem_cgroup_oom_unlock(memcg);
29ef680a 2050
7056d3a3 2051 return ret;
3812c8c8
JW
2052}
2053
2054/**
2055 * mem_cgroup_oom_synchronize - complete memcg OOM handling
49426420 2056 * @handle: actually kill/wait or just clean up the OOM state
3812c8c8 2057 *
49426420
JW
2058 * This has to be called at the end of a page fault if the memcg OOM
2059 * handler was enabled.
3812c8c8 2060 *
49426420 2061 * Memcg supports userspace OOM handling where failed allocations must
3812c8c8
JW
2062 * sleep on a waitqueue until the userspace task resolves the
2063 * situation. Sleeping directly in the charge context with all kinds
2064 * of locks held is not a good idea, instead we remember an OOM state
2065 * in the task and mem_cgroup_oom_synchronize() has to be called at
49426420 2066 * the end of the page fault to complete the OOM handling.
3812c8c8
JW
2067 *
2068 * Returns %true if an ongoing memcg OOM situation was detected and
49426420 2069 * completed, %false otherwise.
3812c8c8 2070 */
49426420 2071bool mem_cgroup_oom_synchronize(bool handle)
3812c8c8 2072{
626ebc41 2073 struct mem_cgroup *memcg = current->memcg_in_oom;
3812c8c8 2074 struct oom_wait_info owait;
49426420 2075 bool locked;
3812c8c8
JW
2076
2077 /* OOM is global, do not handle */
3812c8c8 2078 if (!memcg)
49426420 2079 return false;
3812c8c8 2080
7c5f64f8 2081 if (!handle)
49426420 2082 goto cleanup;
3812c8c8
JW
2083
2084 owait.memcg = memcg;
2085 owait.wait.flags = 0;
2086 owait.wait.func = memcg_oom_wake_function;
2087 owait.wait.private = current;
2055da97 2088 INIT_LIST_HEAD(&owait.wait.entry);
867578cb 2089
3812c8c8 2090 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
49426420
JW
2091 mem_cgroup_mark_under_oom(memcg);
2092
2093 locked = mem_cgroup_oom_trylock(memcg);
2094
2095 if (locked)
2096 mem_cgroup_oom_notify(memcg);
2097
857f2139
HX
2098 schedule();
2099 mem_cgroup_unmark_under_oom(memcg);
2100 finish_wait(&memcg_oom_waitq, &owait.wait);
49426420 2101
18b1d18b 2102 if (locked)
fb2a6fc5 2103 mem_cgroup_oom_unlock(memcg);
49426420 2104cleanup:
626ebc41 2105 current->memcg_in_oom = NULL;
3812c8c8 2106 css_put(&memcg->css);
867578cb 2107 return true;
0b7f569e
KH
2108}
2109
3d8b38eb
RG
2110/**
2111 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
2112 * @victim: task to be killed by the OOM killer
2113 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
2114 *
2115 * Returns a pointer to a memory cgroup, which has to be cleaned up
2116 * by killing all belonging OOM-killable tasks.
2117 *
2118 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
2119 */
2120struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
2121 struct mem_cgroup *oom_domain)
2122{
2123 struct mem_cgroup *oom_group = NULL;
2124 struct mem_cgroup *memcg;
2125
2126 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
2127 return NULL;
2128
2129 if (!oom_domain)
2130 oom_domain = root_mem_cgroup;
2131
2132 rcu_read_lock();
2133
2134 memcg = mem_cgroup_from_task(victim);
7848ed62 2135 if (mem_cgroup_is_root(memcg))
3d8b38eb
RG
2136 goto out;
2137
48fe267c
RG
2138 /*
2139 * If the victim task has been asynchronously moved to a different
2140 * memory cgroup, we might end up killing tasks outside oom_domain.
2141 * In this case it's better to ignore memory.group.oom.
2142 */
2143 if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain)))
2144 goto out;
2145
3d8b38eb
RG
2146 /*
2147 * Traverse the memory cgroup hierarchy from the victim task's
2148 * cgroup up to the OOMing cgroup (or root) to find the
2149 * highest-level memory cgroup with oom.group set.
2150 */
2151 for (; memcg; memcg = parent_mem_cgroup(memcg)) {
eaf7b66b 2152 if (READ_ONCE(memcg->oom_group))
3d8b38eb
RG
2153 oom_group = memcg;
2154
2155 if (memcg == oom_domain)
2156 break;
2157 }
2158
2159 if (oom_group)
2160 css_get(&oom_group->css);
2161out:
2162 rcu_read_unlock();
2163
2164 return oom_group;
2165}
2166
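/*
 * Usage sketch (illustrative only, not part of the original file): roughly
 * how an OOM-killing path can consume the helper above: look up the group
 * for the chosen victim, act on every task in it, then drop the reference.
 * kill_memcg_task() and kill_oom_group() are made-up names for this sketch.
 */
static int kill_memcg_task(struct task_struct *task, void *arg)
{
	/* deliver SIGKILL (or equivalent) to @task here */
	return 0;
}

static void kill_oom_group(struct task_struct *victim, struct mem_cgroup *oom_memcg)
{
	struct mem_cgroup *oom_group;

	oom_group = mem_cgroup_get_oom_group(victim, oom_memcg);
	if (!oom_group)
		return;

	mem_cgroup_print_oom_group(oom_group);
	mem_cgroup_scan_tasks(oom_group, kill_memcg_task, NULL);
	mem_cgroup_put(oom_group);
}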
2167void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
2168{
2169 pr_info("Tasks in ");
2170 pr_cont_cgroup_path(memcg->css.cgroup);
2171 pr_cont(" are going to be killed due to memory.oom.group set\n");
2172}
2173
d7365e78 2174/**
f70ad448
MWO
2175 * folio_memcg_lock - Bind a folio to its memcg.
2176 * @folio: The folio.
32047e2a 2177 *
f70ad448 2178 * This function prevents unlocked LRU folios from being moved to
739f79fc
JW
2179 * another cgroup.
2180 *
f70ad448
MWO
2181 * It ensures lifetime of the bound memcg. The caller is responsible
2182 * for the lifetime of the folio.
d69b042f 2183 */
f70ad448 2184void folio_memcg_lock(struct folio *folio)
89c06bd5
KH
2185{
2186 struct mem_cgroup *memcg;
6de22619 2187 unsigned long flags;
89c06bd5 2188
6de22619
JW
2189 /*
2190 * The RCU lock is held throughout the transaction. The fast
2191 * path can get away without acquiring the memcg->move_lock
2192 * because page moving starts with an RCU grace period.
739f79fc 2193 */
d7365e78
JW
2194 rcu_read_lock();
2195
2196 if (mem_cgroup_disabled())
1c824a68 2197 return;
89c06bd5 2198again:
f70ad448 2199 memcg = folio_memcg(folio);
29833315 2200 if (unlikely(!memcg))
1c824a68 2201 return;
d7365e78 2202
20ad50d6
AS
2203#ifdef CONFIG_PROVE_LOCKING
2204 local_irq_save(flags);
2205 might_lock(&memcg->move_lock);
2206 local_irq_restore(flags);
2207#endif
2208
bdcbb659 2209 if (atomic_read(&memcg->moving_account) <= 0)
1c824a68 2210 return;
89c06bd5 2211
6de22619 2212 spin_lock_irqsave(&memcg->move_lock, flags);
f70ad448 2213 if (memcg != folio_memcg(folio)) {
6de22619 2214 spin_unlock_irqrestore(&memcg->move_lock, flags);
89c06bd5
KH
2215 goto again;
2216 }
6de22619
JW
2217
2218 /*
1c824a68
JW
2219 * When charge migration first begins, we can have multiple
2220 * critical sections holding the fast-path RCU lock and one
2221 * holding the slowpath move_lock. Track the task that holds the
6c77b607 2222 * move_lock for folio_memcg_unlock().
6de22619
JW
2223 */
2224 memcg->move_lock_task = current;
2225 memcg->move_lock_flags = flags;
89c06bd5 2226}
f70ad448 2227
f70ad448 2228static void __folio_memcg_unlock(struct mem_cgroup *memcg)
89c06bd5 2229{
6de22619
JW
2230 if (memcg && memcg->move_lock_task == current) {
2231 unsigned long flags = memcg->move_lock_flags;
2232
2233 memcg->move_lock_task = NULL;
2234 memcg->move_lock_flags = 0;
2235
2236 spin_unlock_irqrestore(&memcg->move_lock, flags);
2237 }
89c06bd5 2238
d7365e78 2239 rcu_read_unlock();
89c06bd5 2240}
739f79fc
JW
2241
2242/**
f70ad448
MWO
2243 * folio_memcg_unlock - Release the binding between a folio and its memcg.
2244 * @folio: The folio.
2245 *
2246 * This releases the binding created by folio_memcg_lock(). This does
2247 * not change the accounting of this folio to its memcg, but it does
2248 * permit others to change it.
739f79fc 2249 */
f70ad448 2250void folio_memcg_unlock(struct folio *folio)
739f79fc 2251{
f70ad448
MWO
2252 __folio_memcg_unlock(folio_memcg(folio));
2253}
9da7b521 2254
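/*
 * Usage sketch (illustrative only, not part of the original file): a caller
 * typically holds the binding across a flag change plus the matching stat
 * update so both hit the same cgroup.  example_account_dirty() is a made-up
 * name; the dirty-accounting path in __folio_mark_dirty() follows a similar
 * pattern.
 */
static void example_account_dirty(struct folio *folio)
{
	folio_memcg_lock(folio);
	if (!folio_test_set_dirty(folio))
		lruvec_stat_mod_folio(folio, NR_FILE_DIRTY,
				      folio_nr_pages(folio));
	folio_memcg_unlock(folio);
}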
fead2b86 2255struct memcg_stock_pcp {
56751146 2256 local_lock_t stock_lock;
fead2b86
MH
2257 struct mem_cgroup *cached; /* this is never the root cgroup */
2258 unsigned int nr_pages;
2259
bf4f0599
RG
2260#ifdef CONFIG_MEMCG_KMEM
2261 struct obj_cgroup *cached_objcg;
68ac5b3c 2262 struct pglist_data *cached_pgdat;
bf4f0599 2263 unsigned int nr_bytes;
68ac5b3c
WL
2264 int nr_slab_reclaimable_b;
2265 int nr_slab_unreclaimable_b;
bf4f0599
RG
2266#endif
2267
cdec2e42 2268 struct work_struct work;
26fe6168 2269 unsigned long flags;
a0db00fc 2270#define FLUSHING_CACHED_CHARGE 0
cdec2e42 2271};
56751146
SAS
2272static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock) = {
2273 .stock_lock = INIT_LOCAL_LOCK(stock_lock),
2274};
9f50fad6 2275static DEFINE_MUTEX(percpu_charge_mutex);
cdec2e42 2276
bf4f0599 2277#ifdef CONFIG_MEMCG_KMEM
56751146 2278static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock);
bf4f0599
RG
2279static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2280 struct mem_cgroup *root_memcg);
a8c49af3 2281static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages);
bf4f0599
RG
2282
2283#else
56751146 2284static inline struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock)
bf4f0599 2285{
56751146 2286 return NULL;
bf4f0599
RG
2287}
2288static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2289 struct mem_cgroup *root_memcg)
2290{
2291 return false;
2292}
a8c49af3
YA
2293static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages)
2294{
2295}
bf4f0599
RG
2296#endif
2297
a0956d54
SS
2298/**
2299 * consume_stock: Try to consume stocked charge on this cpu.
2300 * @memcg: memcg to consume from.
2301 * @nr_pages: how many pages to charge.
2302 *
2303 * The charges will only happen if @memcg matches the current cpu's memcg
2304 * stock, and at least @nr_pages are available in that stock. If the stock
2305 * cannot service the charge, the slow path will refill it afterwards.
2306 *
2307 * Returns true if successful, false otherwise.
cdec2e42 2308 */
a0956d54 2309static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
cdec2e42
KH
2310{
2311 struct memcg_stock_pcp *stock;
db2ba40c 2312 unsigned long flags;
3e32cb2e 2313 bool ret = false;
cdec2e42 2314
a983b5eb 2315 if (nr_pages > MEMCG_CHARGE_BATCH)
3e32cb2e 2316 return ret;
a0956d54 2317
56751146 2318 local_lock_irqsave(&memcg_stock.stock_lock, flags);
db2ba40c
JW
2319
2320 stock = this_cpu_ptr(&memcg_stock);
f785a8f2 2321 if (memcg == READ_ONCE(stock->cached) && stock->nr_pages >= nr_pages) {
a0956d54 2322 stock->nr_pages -= nr_pages;
3e32cb2e
JW
2323 ret = true;
2324 }
db2ba40c 2325
56751146 2326 local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
db2ba40c 2327
cdec2e42
KH
2328 return ret;
2329}
2330
2331/*
3e32cb2e 2332 * Drain the stock cached in the percpu area and reset the cached information.
cdec2e42
KH
2333 */
2334static void drain_stock(struct memcg_stock_pcp *stock)
2335{
f785a8f2 2336 struct mem_cgroup *old = READ_ONCE(stock->cached);
cdec2e42 2337
1a3e1f40
JW
2338 if (!old)
2339 return;
2340
11c9ea4e 2341 if (stock->nr_pages) {
3e32cb2e 2342 page_counter_uncharge(&old->memory, stock->nr_pages);
7941d214 2343 if (do_memsw_account())
3e32cb2e 2344 page_counter_uncharge(&old->memsw, stock->nr_pages);
11c9ea4e 2345 stock->nr_pages = 0;
cdec2e42 2346 }
1a3e1f40
JW
2347
2348 css_put(&old->css);
f785a8f2 2349 WRITE_ONCE(stock->cached, NULL);
cdec2e42
KH
2350}
2351
cdec2e42
KH
2352static void drain_local_stock(struct work_struct *dummy)
2353{
db2ba40c 2354 struct memcg_stock_pcp *stock;
56751146 2355 struct obj_cgroup *old = NULL;
db2ba40c
JW
2356 unsigned long flags;
2357
72f0184c 2358 /*
5c49cf9a
MH
2359 * The only protection from cpu hotplug (memcg_hotplug_cpu_dead) vs.
2360 * drain_stock races is that we always operate on local CPU stock
2361 * here with IRQs disabled.
72f0184c 2362 */
56751146 2363 local_lock_irqsave(&memcg_stock.stock_lock, flags);
db2ba40c
JW
2364
2365 stock = this_cpu_ptr(&memcg_stock);
56751146 2366 old = drain_obj_stock(stock);
cdec2e42 2367 drain_stock(stock);
26fe6168 2368 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
db2ba40c 2369
56751146
SAS
2370 local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2371 if (old)
2372 obj_cgroup_put(old);
cdec2e42
KH
2373}
2374
2375/*
3e32cb2e 2376 * Cache charges (nr_pages) in the local per-CPU area.
320cc51d 2377 * They will be consumed later by the consume_stock() function.
cdec2e42 2378 */
af9a3b69 2379static void __refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
cdec2e42 2380{
db2ba40c 2381 struct memcg_stock_pcp *stock;
cdec2e42 2382
db2ba40c 2383 stock = this_cpu_ptr(&memcg_stock);
f785a8f2 2384 if (READ_ONCE(stock->cached) != memcg) { /* reset if necessary */
cdec2e42 2385 drain_stock(stock);
1a3e1f40 2386 css_get(&memcg->css);
f785a8f2 2387 WRITE_ONCE(stock->cached, memcg);
cdec2e42 2388 }
11c9ea4e 2389 stock->nr_pages += nr_pages;
db2ba40c 2390
a983b5eb 2391 if (stock->nr_pages > MEMCG_CHARGE_BATCH)
475d0487 2392 drain_stock(stock);
af9a3b69
JW
2393}
2394
2395static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2396{
2397 unsigned long flags;
475d0487 2398
56751146 2399 local_lock_irqsave(&memcg_stock.stock_lock, flags);
af9a3b69 2400 __refill_stock(memcg, nr_pages);
56751146 2401 local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
cdec2e42
KH
2402}
2403
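/*
 * Illustrative note (not part of the original file): the batching the stock
 * provides.  With MEMCG_CHARGE_BATCH == 64, a one-page charge that misses
 * the stock charges a full batch to the page counters in try_charge_memcg()
 * and parks the 63-page remainder here via refill_stock(), so the next 63
 * one-page charges from this CPU are served by consume_stock() without
 * touching the shared page counters.
 */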
2404/*
c0ff4b85 2405 * Drains all per-CPU charge caches for given root_memcg resp. subtree
6d3d6aa2 2406 * of the hierarchy under it.
cdec2e42 2407 */
6d3d6aa2 2408static void drain_all_stock(struct mem_cgroup *root_memcg)
cdec2e42 2409{
26fe6168 2410 int cpu, curcpu;
d38144b7 2411
6d3d6aa2
JW
2412 /* If someone's already draining, avoid running more workers. */
2413 if (!mutex_trylock(&percpu_charge_mutex))
2414 return;
72f0184c
MH
2415 /*
2416 * Notify other cpus that a system-wide "drain" is running.
2417 * We do not care about races with the cpu hotplug because cpu down
2418 * as well as workers from this path always operate on the local
2419 * per-cpu data. CPU up doesn't touch memcg_stock at all.
2420 */
0790ed62
SAS
2421 migrate_disable();
2422 curcpu = smp_processor_id();
cdec2e42
KH
2423 for_each_online_cpu(cpu) {
2424 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
c0ff4b85 2425 struct mem_cgroup *memcg;
e1a366be 2426 bool flush = false;
26fe6168 2427
e1a366be 2428 rcu_read_lock();
f785a8f2 2429 memcg = READ_ONCE(stock->cached);
e1a366be
RG
2430 if (memcg && stock->nr_pages &&
2431 mem_cgroup_is_descendant(memcg, root_memcg))
2432 flush = true;
27fb0956 2433 else if (obj_stock_flush_required(stock, root_memcg))
bf4f0599 2434 flush = true;
e1a366be
RG
2435 rcu_read_unlock();
2436
2437 if (flush &&
2438 !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
d1a05b69
MH
2439 if (cpu == curcpu)
2440 drain_local_stock(&stock->work);
6a792697 2441 else if (!cpu_is_isolated(cpu))
d1a05b69
MH
2442 schedule_work_on(cpu, &stock->work);
2443 }
cdec2e42 2444 }
0790ed62 2445 migrate_enable();
9f50fad6 2446 mutex_unlock(&percpu_charge_mutex);
cdec2e42
KH
2447}
2448
2cd21c89
JW
2449static int memcg_hotplug_cpu_dead(unsigned int cpu)
2450{
2451 struct memcg_stock_pcp *stock;
a3d4c05a 2452
2cd21c89
JW
2453 stock = &per_cpu(memcg_stock, cpu);
2454 drain_stock(stock);
a3d4c05a 2455
308167fc 2456 return 0;
cdec2e42
KH
2457}
2458
b3ff9291
CD
2459static unsigned long reclaim_high(struct mem_cgroup *memcg,
2460 unsigned int nr_pages,
2461 gfp_t gfp_mask)
f7e1cb6e 2462{
b3ff9291
CD
2463 unsigned long nr_reclaimed = 0;
2464
f7e1cb6e 2465 do {
e22c6ed9
JW
2466 unsigned long pflags;
2467
d1663a90
JK
2468 if (page_counter_read(&memcg->memory) <=
2469 READ_ONCE(memcg->memory.high))
f7e1cb6e 2470 continue;
e22c6ed9 2471
e27be240 2472 memcg_memory_event(memcg, MEMCG_HIGH);
e22c6ed9
JW
2473
2474 psi_memstall_enter(&pflags);
b3ff9291 2475 nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages,
73b73bac 2476 gfp_mask,
55ab834a 2477 MEMCG_RECLAIM_MAY_SWAP);
e22c6ed9 2478 psi_memstall_leave(&pflags);
4bf17307
CD
2479 } while ((memcg = parent_mem_cgroup(memcg)) &&
2480 !mem_cgroup_is_root(memcg));
b3ff9291
CD
2481
2482 return nr_reclaimed;
f7e1cb6e
JW
2483}
2484
2485static void high_work_func(struct work_struct *work)
2486{
2487 struct mem_cgroup *memcg;
2488
2489 memcg = container_of(work, struct mem_cgroup, high_work);
a983b5eb 2490 reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
f7e1cb6e
JW
2491}
2492
0e4b01df
CD
2493/*
2494 * Clamp the maximum sleep time per allocation batch to 2 seconds. This is
2495 * enough to still cause a significant slowdown in most cases, while still
2496 * allowing diagnostics and tracing to proceed without becoming stuck.
2497 */
2498#define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ)
2499
2500/*
2501 * When calculating the delay, we use these shifts on either side of the
2502 * exponentiation to maintain precision and scale to a reasonable number of
2503 * jiffies (see the table below).
2504 *
2505 * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
2506 * overage ratio to a delay.
ac5ddd0f 2507 * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
0e4b01df
CD
2508 * proposed penalty in order to reduce to a reasonable number of jiffies, and
2509 * to produce a reasonable delay curve.
2510 *
2511 * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a
2512 * reasonable delay curve compared to precision-adjusted overage, not
2513 * penalising heavily at first, but still making sure that growth beyond the
2514 * limit penalises misbehaviour cgroups by slowing them down exponentially. For
2515 * example, with a high of 100 megabytes:
2516 *
2517 * +-------+------------------------+
2518 * | usage | time to allocate in ms |
2519 * +-------+------------------------+
2520 * | 100M | 0 |
2521 * | 101M | 6 |
2522 * | 102M | 25 |
2523 * | 103M | 57 |
2524 * | 104M | 102 |
2525 * | 105M | 159 |
2526 * | 106M | 230 |
2527 * | 107M | 313 |
2528 * | 108M | 409 |
2529 * | 109M | 518 |
2530 * | 110M | 639 |
2531 * | 111M | 774 |
2532 * | 112M | 921 |
2533 * | 113M | 1081 |
2534 * | 114M | 1254 |
2535 * | 115M | 1439 |
2536 * | 116M | 1638 |
2537 * | 117M | 1849 |
2538 * | 118M | 2000 |
2539 * | 119M | 2000 |
2540 * | 120M | 2000 |
2541 * +-------+------------------------+
2542 */
2543#define MEMCG_DELAY_PRECISION_SHIFT 20
2544#define MEMCG_DELAY_SCALING_SHIFT 14
2545
8a5dbc65 2546static u64 calculate_overage(unsigned long usage, unsigned long high)
b23afb93 2547{
8a5dbc65 2548 u64 overage;
b23afb93 2549
8a5dbc65
JK
2550 if (usage <= high)
2551 return 0;
e26733e0 2552
8a5dbc65
JK
2553 /*
2554 * Prevent division by 0 in overage calculation by acting as if
2555 * it was a threshold of 1 page
2556 */
2557 high = max(high, 1UL);
9b8b1754 2558
8a5dbc65
JK
2559 overage = usage - high;
2560 overage <<= MEMCG_DELAY_PRECISION_SHIFT;
2561 return div64_u64(overage, high);
2562}
e26733e0 2563
8a5dbc65
JK
2564static u64 mem_find_max_overage(struct mem_cgroup *memcg)
2565{
2566 u64 overage, max_overage = 0;
e26733e0 2567
8a5dbc65
JK
2568 do {
2569 overage = calculate_overage(page_counter_read(&memcg->memory),
d1663a90 2570 READ_ONCE(memcg->memory.high));
8a5dbc65 2571 max_overage = max(overage, max_overage);
e26733e0
CD
2572 } while ((memcg = parent_mem_cgroup(memcg)) &&
2573 !mem_cgroup_is_root(memcg));
2574
8a5dbc65
JK
2575 return max_overage;
2576}
2577
4b82ab4f
JK
2578static u64 swap_find_max_overage(struct mem_cgroup *memcg)
2579{
2580 u64 overage, max_overage = 0;
2581
2582 do {
2583 overage = calculate_overage(page_counter_read(&memcg->swap),
2584 READ_ONCE(memcg->swap.high));
2585 if (overage)
2586 memcg_memory_event(memcg, MEMCG_SWAP_HIGH);
2587 max_overage = max(overage, max_overage);
2588 } while ((memcg = parent_mem_cgroup(memcg)) &&
2589 !mem_cgroup_is_root(memcg));
2590
2591 return max_overage;
2592}
2593
8a5dbc65
JK
2594/*
2595 * Get the number of jiffies that we should penalise a mischievous cgroup which
2596 * is exceeding its memory.high by checking both it and its ancestors.
2597 */
2598static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
2599 unsigned int nr_pages,
2600 u64 max_overage)
2601{
2602 unsigned long penalty_jiffies;
2603
e26733e0
CD
2604 if (!max_overage)
2605 return 0;
0e4b01df
CD
2606
2607 /*
0e4b01df
CD
2608 * We use overage compared to memory.high to calculate the number of
2609 * jiffies to sleep (penalty_jiffies). Ideally this value should be
2610 * fairly lenient on small overages, and increasingly harsh when the
2611 * memcg in question makes it clear that it has no intention of stopping
2612 * its crazy behaviour, so we exponentially increase the delay based on
2613 * overage amount.
2614 */
e26733e0
CD
2615 penalty_jiffies = max_overage * max_overage * HZ;
2616 penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT;
2617 penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT;
0e4b01df
CD
2618
2619 /*
2620 * Factor in the task's own contribution to the overage, such that four
2621 * N-sized allocations are throttled approximately the same as one
2622 * 4N-sized allocation.
2623 *
2624 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or
2625 * larger the current charge batch is than that.
2626 */
ff144e69 2627 return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH;
e26733e0
CD
2628}
2629
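/*
 * Worked example (illustrative only, not part of the original file): with a
 * 4K page size, HZ == 1000 and memory.high == 100M (25600 pages), a usage of
 * 101M (25856 pages) gives
 *
 *   overage = ((25856 - 25600) << MEMCG_DELAY_PRECISION_SHIFT) / 25600
 *           ~= 10485
 *   penalty = (10485 * 10485 * HZ)
 *             >> MEMCG_DELAY_PRECISION_SHIFT >> MEMCG_DELAY_SCALING_SHIFT
 *           ~= 6 jiffies
 *
 * which is the ~6ms row of the table further above.  The result is then
 * scaled by nr_pages / MEMCG_CHARGE_BATCH at the end of
 * calculate_high_delay() and clamped to MEMCG_MAX_HIGH_DELAY_JIFFIES in
 * mem_cgroup_handle_over_high().
 */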
2630/*
63fd3270
JW
2631 * Reclaims memory over the high limit. Called directly from
2632 * try_charge() (context permitting), as well as from the userland
2633 * return path where reclaim is always able to block.
e26733e0 2634 */
9ea9cb00 2635void mem_cgroup_handle_over_high(gfp_t gfp_mask)
e26733e0
CD
2636{
2637 unsigned long penalty_jiffies;
2638 unsigned long pflags;
b3ff9291 2639 unsigned long nr_reclaimed;
e26733e0 2640 unsigned int nr_pages = current->memcg_nr_pages_over_high;
d977aa93 2641 int nr_retries = MAX_RECLAIM_RETRIES;
e26733e0 2642 struct mem_cgroup *memcg;
b3ff9291 2643 bool in_retry = false;
e26733e0
CD
2644
2645 if (likely(!nr_pages))
2646 return;
2647
2648 memcg = get_mem_cgroup_from_mm(current->mm);
e26733e0
CD
2649 current->memcg_nr_pages_over_high = 0;
2650
b3ff9291 2651retry_reclaim:
63fd3270
JW
2652 /*
2653 * Bail if the task is already exiting. Unlike memory.max,
2654 * memory.high enforcement isn't as strict, and there is no
2655 * OOM killer involved, which means the excess could already
2656 * be much bigger (and still growing) than it could for
2657 * memory.max; the dying task could get stuck in fruitless
2658 * reclaim for a long time, which isn't desirable.
2659 */
2660 if (task_is_dying())
2661 goto out;
2662
b3ff9291
CD
2663 /*
2664 * The allocating task should reclaim at least the batch size, but for
2665 * subsequent retries we only want to do what's necessary to prevent oom
2666 * or breaching resource isolation.
2667 *
2668 * This is distinct from memory.max or page allocator behaviour because
2669 * memory.high is currently batched, whereas memory.max and the page
2670 * allocator run every time an allocation is made.
2671 */
2672 nr_reclaimed = reclaim_high(memcg,
2673 in_retry ? SWAP_CLUSTER_MAX : nr_pages,
9ea9cb00 2674 gfp_mask);
b3ff9291 2675
e26733e0
CD
2676 /*
2677 * memory.high is breached and reclaim is unable to keep up. Throttle
2678 * allocators proactively to slow down excessive growth.
2679 */
8a5dbc65
JK
2680 penalty_jiffies = calculate_high_delay(memcg, nr_pages,
2681 mem_find_max_overage(memcg));
0e4b01df 2682
4b82ab4f
JK
2683 penalty_jiffies += calculate_high_delay(memcg, nr_pages,
2684 swap_find_max_overage(memcg));
2685
ff144e69
JK
2686 /*
2687 * Clamp the max delay per usermode return so as to still keep the
2688 * application moving forwards and also permit diagnostics, albeit
2689 * extremely slowly.
2690 */
2691 penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);
2692
0e4b01df
CD
2693 /*
2694 * Don't sleep if the amount of jiffies this memcg owes us is so low
2695 * that it's not even worth doing, in an attempt to be nice to those who
2696 * go only a small amount over their memory.high value and maybe haven't
2697 * been aggressively reclaimed enough yet.
2698 */
2699 if (penalty_jiffies <= HZ / 100)
2700 goto out;
2701
b3ff9291
CD
2702 /*
2703 * If reclaim is making forward progress but we're still over
2704 * memory.high, we want to encourage that rather than doing allocator
2705 * throttling.
2706 */
2707 if (nr_reclaimed || nr_retries--) {
2708 in_retry = true;
2709 goto retry_reclaim;
2710 }
2711
0e4b01df 2712 /*
63fd3270
JW
2713 * Reclaim didn't manage to push usage below the limit, slow
2714 * this allocating task down.
2715 *
0e4b01df
CD
2716 * If we exit early, we're guaranteed to die (since
2717 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't
2718 * need to account for any ill-begotten jiffies to pay them off later.
2719 */
2720 psi_memstall_enter(&pflags);
2721 schedule_timeout_killable(penalty_jiffies);
2722 psi_memstall_leave(&pflags);
2723
2724out:
2725 css_put(&memcg->css);
b23afb93
TH
2726}
2727
c5c8b16b
MS
2728static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
2729 unsigned int nr_pages)
8a9f3ccd 2730{
a983b5eb 2731 unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
d977aa93 2732 int nr_retries = MAX_RECLAIM_RETRIES;
6539cc05 2733 struct mem_cgroup *mem_over_limit;
3e32cb2e 2734 struct page_counter *counter;
6539cc05 2735 unsigned long nr_reclaimed;
a4ebf1b6 2736 bool passed_oom = false;
73b73bac 2737 unsigned int reclaim_options = MEMCG_RECLAIM_MAY_SWAP;
b70a2a21 2738 bool drained = false;
d6e103a7 2739 bool raised_max_event = false;
e22c6ed9 2740 unsigned long pflags;
a636b327 2741
6539cc05 2742retry:
b6b6cc72 2743 if (consume_stock(memcg, nr_pages))
10d53c74 2744 return 0;
8a9f3ccd 2745
7941d214 2746 if (!do_memsw_account() ||
6071ca52
JW
2747 page_counter_try_charge(&memcg->memsw, batch, &counter)) {
2748 if (page_counter_try_charge(&memcg->memory, batch, &counter))
6539cc05 2749 goto done_restock;
7941d214 2750 if (do_memsw_account())
3e32cb2e
JW
2751 page_counter_uncharge(&memcg->memsw, batch);
2752 mem_over_limit = mem_cgroup_from_counter(counter, memory);
3fbe7244 2753 } else {
3e32cb2e 2754 mem_over_limit = mem_cgroup_from_counter(counter, memsw);
73b73bac 2755 reclaim_options &= ~MEMCG_RECLAIM_MAY_SWAP;
3fbe7244 2756 }
7a81b88c 2757
6539cc05
JW
2758 if (batch > nr_pages) {
2759 batch = nr_pages;
2760 goto retry;
2761 }
6d61ef40 2762
89a28483
JW
2763 /*
2764 * Prevent unbounded recursion when reclaim operations need to
2765 * allocate memory. This might exceed the limits temporarily,
2766 * but we prefer facilitating memory reclaim and getting back
2767 * under the limit over triggering OOM kills in these cases.
2768 */
2769 if (unlikely(current->flags & PF_MEMALLOC))
2770 goto force;
2771
06b078fc
JW
2772 if (unlikely(task_in_memcg_oom(current)))
2773 goto nomem;
2774
d0164adc 2775 if (!gfpflags_allow_blocking(gfp_mask))
6539cc05 2776 goto nomem;
4b534334 2777
e27be240 2778 memcg_memory_event(mem_over_limit, MEMCG_MAX);
d6e103a7 2779 raised_max_event = true;
241994ed 2780
e22c6ed9 2781 psi_memstall_enter(&pflags);
b70a2a21 2782 nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
55ab834a 2783 gfp_mask, reclaim_options);
e22c6ed9 2784 psi_memstall_leave(&pflags);
6539cc05 2785
61e02c74 2786 if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
6539cc05 2787 goto retry;
28c34c29 2788
b70a2a21 2789 if (!drained) {
6d3d6aa2 2790 drain_all_stock(mem_over_limit);
b70a2a21
JW
2791 drained = true;
2792 goto retry;
2793 }
2794
28c34c29
JW
2795 if (gfp_mask & __GFP_NORETRY)
2796 goto nomem;
6539cc05
JW
2797 /*
2798 * Even though the limit is exceeded at this point, reclaim
2799 * may have been able to free some pages. Retry the charge
2800 * before killing the task.
2801 *
2802 * Only for regular pages, though: huge pages are rather
2803 * unlikely to succeed so close to the limit, and we fall back
2804 * to regular pages anyway in case of failure.
2805 */
61e02c74 2806 if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
6539cc05
JW
2807 goto retry;
2808 /*
2809 * During task move, charges can be double-counted. So it's better
2810 * to wait until the task move finishes if one is in progress.
2811 */
2812 if (mem_cgroup_wait_acct_move(mem_over_limit))
2813 goto retry;
2814
9b130619
JW
2815 if (nr_retries--)
2816 goto retry;
2817
38d38493 2818 if (gfp_mask & __GFP_RETRY_MAYFAIL)
29ef680a
MH
2819 goto nomem;
2820
a4ebf1b6
VA
2821 /* Avoid endless loop for tasks bypassed by the oom killer */
2822 if (passed_oom && task_is_dying())
2823 goto nomem;
6539cc05 2824
29ef680a
MH
2825 /*
2826 * Keep retrying as long as the memcg OOM killer is able to make
2827 * forward progress, or bypass the charge if the OOM killer
2828 * couldn't make any progress.
2829 */
becdf89d
SB
2830 if (mem_cgroup_oom(mem_over_limit, gfp_mask,
2831 get_order(nr_pages * PAGE_SIZE))) {
a4ebf1b6 2832 passed_oom = true;
d977aa93 2833 nr_retries = MAX_RECLAIM_RETRIES;
29ef680a 2834 goto retry;
29ef680a 2835 }
7a81b88c 2836nomem:
1461e8c2
SB
2837 /*
2838 * Memcg doesn't have a dedicated reserve for atomic
2839 * allocations. But like the global atomic pool, we need to
2840 * put the burden of reclaim on regular allocation requests
2841 * and let these go through as privileged allocations.
2842 */
2843 if (!(gfp_mask & (__GFP_NOFAIL | __GFP_HIGH)))
3168ecbe 2844 return -ENOMEM;
10d53c74 2845force:
d6e103a7
RG
2846 /*
2847 * If the allocation has to be enforced, don't forget to raise
2848 * a MEMCG_MAX event.
2849 */
2850 if (!raised_max_event)
2851 memcg_memory_event(mem_over_limit, MEMCG_MAX);
2852
10d53c74
TH
2853 /*
2854 * The allocation either can't fail or will lead to more memory
2855 * being freed very soon. Allow memory usage to go over the limit
2856 * temporarily by force charging it.
2857 */
2858 page_counter_charge(&memcg->memory, nr_pages);
7941d214 2859 if (do_memsw_account())
10d53c74 2860 page_counter_charge(&memcg->memsw, nr_pages);
10d53c74
TH
2861
2862 return 0;
6539cc05
JW
2863
2864done_restock:
2865 if (batch > nr_pages)
2866 refill_stock(memcg, batch - nr_pages);
b23afb93 2867
241994ed 2868 /*
b23afb93
TH
2869 * If the hierarchy is above the normal consumption range, schedule
2870 * reclaim on returning to userland. We can perform reclaim here
71baba4b 2871 * if __GFP_RECLAIM is set, but let's always punt for simplicity and so that
b23afb93
TH
2872 * GFP_KERNEL can consistently be used during reclaim. @memcg is
2873 * not recorded as it most likely matches current's and won't
2874 * change in the meantime. As high limit is checked again before
2875 * reclaim, the cost of mismatch is negligible.
241994ed
JW
2876 */
2877 do {
4b82ab4f
JK
2878 bool mem_high, swap_high;
2879
2880 mem_high = page_counter_read(&memcg->memory) >
2881 READ_ONCE(memcg->memory.high);
2882 swap_high = page_counter_read(&memcg->swap) >
2883 READ_ONCE(memcg->swap.high);
2884
2885 /* Don't bother a random interrupted task */
086f694a 2886 if (!in_task()) {
4b82ab4f 2887 if (mem_high) {
f7e1cb6e
JW
2888 schedule_work(&memcg->high_work);
2889 break;
2890 }
4b82ab4f
JK
2891 continue;
2892 }
2893
2894 if (mem_high || swap_high) {
2895 /*
2896 * The allocating tasks in this cgroup will need to do
2897 * reclaim or be throttled to prevent further growth
2898 * of the memory or swap footprints.
2899 *
2900 * Target some best-effort fairness between the tasks,
2901 * and distribute reclaim work and delay penalties
2902 * based on how much each task is actually allocating.
2903 */
9516a18a 2904 current->memcg_nr_pages_over_high += batch;
b23afb93
TH
2905 set_notify_resume(current);
2906 break;
2907 }
241994ed 2908 } while ((memcg = parent_mem_cgroup(memcg)));
10d53c74 2909
63fd3270
JW
2910 /*
2911 * Reclaim is set up above to be called from the userland
2912 * return path. But also attempt synchronous reclaim to avoid
2913 * excessive overrun while the task is still inside the
2914 * kernel. If this is successful, the return path will see it
2915 * when it rechecks the overage and simply bail out.
2916 */
c9afe31e
SB
2917 if (current->memcg_nr_pages_over_high > MEMCG_CHARGE_BATCH &&
2918 !(current->flags & PF_MEMALLOC) &&
63fd3270 2919 gfpflags_allow_blocking(gfp_mask))
9ea9cb00 2920 mem_cgroup_handle_over_high(gfp_mask);
10d53c74 2921 return 0;
7a81b88c 2922}
8a9f3ccd 2923
c5c8b16b
MS
2924static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2925 unsigned int nr_pages)
2926{
2927 if (mem_cgroup_is_root(memcg))
2928 return 0;
2929
2930 return try_charge_memcg(memcg, gfp_mask, nr_pages);
2931}
2932
4b569387
NP
2933/**
2934 * mem_cgroup_cancel_charge() - cancel an uncommitted try_charge() call.
2935 * @memcg: memcg previously charged.
2936 * @nr_pages: number of pages previously charged.
2937 */
2938void mem_cgroup_cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
a3032a2c 2939{
ce00a967
JW
2940 if (mem_cgroup_is_root(memcg))
2941 return;
2942
3e32cb2e 2943 page_counter_uncharge(&memcg->memory, nr_pages);
7941d214 2944 if (do_memsw_account())
3e32cb2e 2945 page_counter_uncharge(&memcg->memsw, nr_pages);
d01dd17f
KH
2946}
2947
118f2875 2948static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
0a31bc97 2949{
118f2875 2950 VM_BUG_ON_FOLIO(folio_memcg(folio), folio);
0a31bc97 2951 /*
a5eb011a 2952 * Any of the following ensures page's memcg stability:
0a31bc97 2953 *
a0b5b414
JW
2954 * - the page lock
2955 * - LRU isolation
6c77b607 2956 * - folio_memcg_lock()
a0b5b414 2957 * - exclusive reference
018ee47f 2958 * - mem_cgroup_trylock_pages()
0a31bc97 2959 */
118f2875 2960 folio->memcg_data = (unsigned long)memcg;
7a81b88c 2961}
66e1707b 2962
4b569387
NP
2963/**
2964 * mem_cgroup_commit_charge - commit a previously successful try_charge().
2965 * @folio: folio to commit the charge to.
2966 * @memcg: memcg previously charged.
2967 */
2968void mem_cgroup_commit_charge(struct folio *folio, struct mem_cgroup *memcg)
2969{
2970 css_get(&memcg->css);
2971 commit_charge(folio, memcg);
2972
2973 local_irq_disable();
2974 mem_cgroup_charge_statistics(memcg, folio_nr_pages(folio));
2975 memcg_check_events(memcg, folio_nid(folio));
2976 local_irq_enable();
2977}
2978
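/*
 * Usage sketch (illustrative only, not part of the original file): how the
 * try/commit/cancel trio fits together for a caller that charges before it
 * has a folio in hand.  example_charged_alloc() is a made-up name and the
 * error handling is reduced to the bare minimum.
 */
static struct folio *example_charged_alloc(gfp_t gfp, unsigned int order)
{
	struct mem_cgroup *memcg = get_mem_cgroup_from_mm(current->mm);
	struct folio *folio;

	/* Reserve the pages against the memcg first. */
	if (try_charge(memcg, gfp, 1 << order)) {
		css_put(&memcg->css);
		return NULL;
	}

	folio = folio_alloc(gfp, order);
	if (!folio) {
		/* Allocation failed: hand the reserved pages back. */
		mem_cgroup_cancel_charge(memcg, 1 << order);
		css_put(&memcg->css);
		return NULL;
	}

	/* Bind the reserved charge to the new folio. */
	mem_cgroup_commit_charge(folio, memcg);
	css_put(&memcg->css);
	return folio;
}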
84c07d11 2979#ifdef CONFIG_MEMCG_KMEM
41eb5df1
WL
2980/*
2981 * The allocated objcg pointers array is not accounted directly.
2982 * Moreover, it should not come from a DMA buffer and is not readily
2983 * reclaimable. So those GFP bits should be masked off.
2984 */
24948e3b
RG
2985#define OBJCGS_CLEAR_MASK (__GFP_DMA | __GFP_RECLAIMABLE | \
2986 __GFP_ACCOUNT | __GFP_NOFAIL)
41eb5df1 2987
a7ebf564
WL
2988/*
2989 * mod_objcg_mlstate() may be called with irq enabled, so
2990 * mod_memcg_lruvec_state() should be used.
2991 */
2992static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
2993 struct pglist_data *pgdat,
2994 enum node_stat_item idx, int nr)
2995{
2996 struct mem_cgroup *memcg;
2997 struct lruvec *lruvec;
2998
2999 rcu_read_lock();
3000 memcg = obj_cgroup_memcg(objcg);
3001 lruvec = mem_cgroup_lruvec(memcg, pgdat);
3002 mod_memcg_lruvec_state(lruvec, idx, nr);
3003 rcu_read_unlock();
3004}
3005
4b5f8d9a
VB
3006int memcg_alloc_slab_cgroups(struct slab *slab, struct kmem_cache *s,
3007 gfp_t gfp, bool new_slab)
10befea9 3008{
4b5f8d9a 3009 unsigned int objects = objs_per_slab(s, slab);
2e9bd483 3010 unsigned long memcg_data;
10befea9
RG
3011 void *vec;
3012
41eb5df1 3013 gfp &= ~OBJCGS_CLEAR_MASK;
10befea9 3014 vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp,
4b5f8d9a 3015 slab_nid(slab));
10befea9
RG
3016 if (!vec)
3017 return -ENOMEM;
3018
2e9bd483 3019 memcg_data = (unsigned long) vec | MEMCG_DATA_OBJCGS;
4b5f8d9a 3020 if (new_slab) {
2e9bd483 3021 /*
4b5f8d9a
VB
3022 * If the slab is brand new and nobody can yet access its
3023 * memcg_data, no synchronization is required and memcg_data can
3024 * be simply assigned.
2e9bd483 3025 */
4b5f8d9a
VB
3026 slab->memcg_data = memcg_data;
3027 } else if (cmpxchg(&slab->memcg_data, 0, memcg_data)) {
2e9bd483 3028 /*
4b5f8d9a
VB
3029 * If the slab is already in use, somebody can allocate and
3030 * assign obj_cgroups in parallel. In this case the existing
2e9bd483
RG
3031 * objcg vector should be reused.
3032 */
10befea9 3033 kfree(vec);
2e9bd483
RG
3034 return 0;
3035 }
10befea9 3036
2e9bd483 3037 kmemleak_not_leak(vec);
10befea9
RG
3038 return 0;
3039}
3040
fc4db90f
RG
3041static __always_inline
3042struct mem_cgroup *mem_cgroup_from_obj_folio(struct folio *folio, void *p)
8380ce47 3043{
8380ce47 3044 /*
9855609b
RG
3045 * Slab objects are accounted individually, not per-page.
3046 * Memcg membership data for each individual object is saved in
4b5f8d9a 3047 * slab->memcg_data.
8380ce47 3048 */
4b5f8d9a
VB
3049 if (folio_test_slab(folio)) {
3050 struct obj_cgroup **objcgs;
3051 struct slab *slab;
9855609b
RG
3052 unsigned int off;
3053
4b5f8d9a
VB
3054 slab = folio_slab(folio);
3055 objcgs = slab_objcgs(slab);
3056 if (!objcgs)
3057 return NULL;
3058
3059 off = obj_to_index(slab->slab_cache, slab, p);
3060 if (objcgs[off])
3061 return obj_cgroup_memcg(objcgs[off]);
10befea9
RG
3062
3063 return NULL;
9855609b 3064 }
8380ce47 3065
bcfe06bf 3066 /*
becacb04 3067 * folio_memcg_check() is used here, because in theory we can encounter
4b5f8d9a
VB
3068 * a folio where the slab flag has been cleared already, but
3069 * slab->memcg_data has not been freed yet.
becacb04 3070 * folio_memcg_check() will guarantee that a proper memory
bcfe06bf
RG
3071 * cgroup pointer or NULL will be returned.
3072 */
becacb04 3073 return folio_memcg_check(folio);
8380ce47
RG
3074}
3075
fc4db90f
RG
3076/*
3077 * Returns a pointer to the memory cgroup to which the kernel object is charged.
3078 *
3079 * A passed kernel object can be a slab object, vmalloc object or a generic
3080 * kernel page, so different mechanisms for getting the memory cgroup pointer
3081 * should be used.
3082 *
3083 * In certain cases (e.g. kernel stacks or large kmallocs with SLUB) the caller
3084 * cannot know for sure how the kernel object is implemented.
3085 * mem_cgroup_from_obj() can be safely used in such cases.
3086 *
3087 * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
3088 * cgroup_mutex, etc.
3089 */
3090struct mem_cgroup *mem_cgroup_from_obj(void *p)
3091{
3092 struct folio *folio;
3093
3094 if (mem_cgroup_disabled())
3095 return NULL;
3096
3097 if (unlikely(is_vmalloc_addr(p)))
3098 folio = page_folio(vmalloc_to_page(p));
3099 else
3100 folio = virt_to_folio(p);
3101
3102 return mem_cgroup_from_obj_folio(folio, p);
3103}
3104
3105/*
3106 * Returns a pointer to the memory cgroup to which the kernel object is charged.
3107 * Similar to mem_cgroup_from_obj(), but faster and not suitable for objects
3108 * allocated using vmalloc().
3109 *
3110 * A passed kernel object must be a slab object or a generic kernel page.
3111 *
3112 * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
3113 * cgroup_mutex, etc.
3114 */
3115struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
3116{
3117 if (mem_cgroup_disabled())
3118 return NULL;
3119
3120 return mem_cgroup_from_obj_folio(virt_to_folio(p), p);
3121}
3122
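/*
 * Usage sketch (illustrative only, not part of the original file): callers
 * usually wrap the lookup in rcu_read_lock() to keep the returned memcg
 * alive while it is inspected.  example_obj_memcg_id() is a made-up name.
 */
static unsigned short example_obj_memcg_id(void *p)
{
	struct mem_cgroup *memcg;
	unsigned short id = 0;

	rcu_read_lock();
	memcg = mem_cgroup_from_slab_obj(p);
	if (memcg)
		id = mem_cgroup_id(memcg);
	rcu_read_unlock();

	return id;
}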
f4840ccf
JW
3123static struct obj_cgroup *__get_obj_cgroup_from_memcg(struct mem_cgroup *memcg)
3124{
3125 struct obj_cgroup *objcg = NULL;
3126
7848ed62 3127 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
f4840ccf 3128 objcg = rcu_dereference(memcg->objcg);
7d0715d0 3129 if (likely(objcg && obj_cgroup_tryget(objcg)))
f4840ccf
JW
3130 break;
3131 objcg = NULL;
3132 }
3133 return objcg;
3134}
3135
1aacbd35
RG
3136static struct obj_cgroup *current_objcg_update(void)
3137{
3138 struct mem_cgroup *memcg;
3139 struct obj_cgroup *old, *objcg = NULL;
3140
3141 do {
3142 /* Atomically drop the update bit. */
3143 old = xchg(&current->objcg, NULL);
3144 if (old) {
3145 old = (struct obj_cgroup *)
3146 ((unsigned long)old & ~CURRENT_OBJCG_UPDATE_FLAG);
3147 if (old)
3148 obj_cgroup_put(old);
3149
3150 old = NULL;
3151 }
3152
3153 /* If new objcg is NULL, no reason for the second atomic update. */
3154 if (!current->mm || (current->flags & PF_KTHREAD))
3155 return NULL;
3156
3157 /*
3158 * Release the objcg pointer from the previous iteration,
3159 * if try_cmpxchg() below fails.
3160 */
3161 if (unlikely(objcg)) {
3162 obj_cgroup_put(objcg);
3163 objcg = NULL;
3164 }
3165
3166 /*
3167 * Obtain the new objcg pointer. The current task can be
3168 * asynchronously moved to another memcg and the previous
3169 * memcg can be offlined. So let's get the memcg pointer
3170 * and try to get a reference to the objcg under an rcu read lock.
3171 */
3172
3173 rcu_read_lock();
3174 memcg = mem_cgroup_from_task(current);
3175 objcg = __get_obj_cgroup_from_memcg(memcg);
3176 rcu_read_unlock();
3177
3178 /*
3179 * Try to set up a new objcg pointer atomically. If it
3180 * fails, it means the update flag was set concurrently, so
3181 * the whole procedure should be repeated.
3182 */
3183 } while (!try_cmpxchg(&current->objcg, &old, objcg));
3184
3185 return objcg;
3186}
3187
e86828e5
RG
3188__always_inline struct obj_cgroup *current_obj_cgroup(void)
3189{
3190 struct mem_cgroup *memcg;
3191 struct obj_cgroup *objcg;
3192
3193 if (in_task()) {
3194 memcg = current->active_memcg;
3195 if (unlikely(memcg))
3196 goto from_memcg;
3197
3198 objcg = READ_ONCE(current->objcg);
3199 if (unlikely((unsigned long)objcg & CURRENT_OBJCG_UPDATE_FLAG))
3200 objcg = current_objcg_update();
3201 /*
3202 * The objcg reference is kept by the task, so it's safe
3203 * for the current task to use the objcg.
3204 */
3205 return objcg;
3206 }
3207
3208 memcg = this_cpu_read(int_active_memcg);
3209 if (unlikely(memcg))
3210 goto from_memcg;
3211
3212 return NULL;
3213
3214from_memcg:
5f79489a 3215 objcg = NULL;
e86828e5
RG
3216 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
3217 /*
3218 * Memcg pointer is protected by scope (see set_active_memcg())
3219 * and is pinning the corresponding objcg, so objcg can't go
3220 * away and can be used within the scope without any additional
3221 * protection.
3222 */
3223 objcg = rcu_dereference_check(memcg->objcg, 1);
3224 if (likely(objcg))
3225 break;
e86828e5
RG
3226 }
3227
3228 return objcg;
3229}
3230
074e3e26 3231struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio)
f4840ccf
JW
3232{
3233 struct obj_cgroup *objcg;
3234
f7a449f7 3235 if (!memcg_kmem_online())
f4840ccf
JW
3236 return NULL;
3237
074e3e26
MWO
3238 if (folio_memcg_kmem(folio)) {
3239 objcg = __folio_objcg(folio);
f4840ccf
JW
3240 obj_cgroup_get(objcg);
3241 } else {
3242 struct mem_cgroup *memcg;
bf4f0599 3243
f4840ccf 3244 rcu_read_lock();
074e3e26 3245 memcg = __folio_memcg(folio);
f4840ccf
JW
3246 if (memcg)
3247 objcg = __get_obj_cgroup_from_memcg(memcg);
3248 else
3249 objcg = NULL;
3250 rcu_read_unlock();
3251 }
bf4f0599
RG
3252 return objcg;
3253}
3254
a8c49af3
YA
3255static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages)
3256{
3257 mod_memcg_state(memcg, MEMCG_KMEM, nr_pages);
3258 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
3259 if (nr_pages > 0)
3260 page_counter_charge(&memcg->kmem, nr_pages);
3261 else
3262 page_counter_uncharge(&memcg->kmem, -nr_pages);
3263 }
3264}
3265
3266
f1286fae
MS
3267/*
3268 * obj_cgroup_uncharge_pages: uncharge a number of kernel pages from an objcg
3269 * @objcg: object cgroup to uncharge
3270 * @nr_pages: number of pages to uncharge
3271 */
e74d2259
MS
3272static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
3273 unsigned int nr_pages)
3274{
3275 struct mem_cgroup *memcg;
3276
3277 memcg = get_mem_cgroup_from_objcg(objcg);
e74d2259 3278
a8c49af3 3279 memcg_account_kmem(memcg, -nr_pages);
f1286fae 3280 refill_stock(memcg, nr_pages);
e74d2259 3281
e74d2259 3282 css_put(&memcg->css);
e74d2259
MS
3283}
3284
f1286fae
MS
3285/*
3286 * obj_cgroup_charge_pages: charge a number of kernel pages to an objcg
3287 * @objcg: object cgroup to charge
45264778 3288 * @gfp: reclaim mode
92d0510c 3289 * @nr_pages: number of pages to charge
45264778
VD
3290 *
3291 * Returns 0 on success, an error code on failure.
3292 */
f1286fae
MS
3293static int obj_cgroup_charge_pages(struct obj_cgroup *objcg, gfp_t gfp,
3294 unsigned int nr_pages)
7ae1e1d0 3295{
f1286fae 3296 struct mem_cgroup *memcg;
7ae1e1d0
GC
3297 int ret;
3298
f1286fae
MS
3299 memcg = get_mem_cgroup_from_objcg(objcg);
3300
c5c8b16b 3301 ret = try_charge_memcg(memcg, gfp, nr_pages);
52c29b04 3302 if (ret)
f1286fae 3303 goto out;
52c29b04 3304
a8c49af3 3305 memcg_account_kmem(memcg, nr_pages);
f1286fae
MS
3306out:
3307 css_put(&memcg->css);
4b13f64d 3308
f1286fae 3309 return ret;
4b13f64d
RG
3310}
3311
/**
 * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
 * @page: page to charge
 * @gfp: reclaim mode
 * @order: allocation order
 *
 * Returns 0 on success, an error code on failure.
 */
int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
{
	struct obj_cgroup *objcg;
	int ret = 0;

	objcg = current_obj_cgroup();
	if (objcg) {
		ret = obj_cgroup_charge_pages(objcg, gfp, 1 << order);
		if (!ret) {
			obj_cgroup_get(objcg);
			page->memcg_data = (unsigned long)objcg |
				MEMCG_DATA_KMEM;
			return 0;
		}
	}
	return ret;
}

/**
 * __memcg_kmem_uncharge_page: uncharge a kmem page
 * @page: page to uncharge
 * @order: allocation order
 */
void __memcg_kmem_uncharge_page(struct page *page, int order)
{
	struct folio *folio = page_folio(page);
	struct obj_cgroup *objcg;
	unsigned int nr_pages = 1 << order;

	if (!folio_memcg_kmem(folio))
		return;

	objcg = __folio_objcg(folio);
	obj_cgroup_uncharge_pages(objcg, nr_pages);
	folio->memcg_data = 0;
	obj_cgroup_put(objcg);
}
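/*
 * A minimal usage sketch of the two helpers above, as a hypothetical
 * driver-style allocation path (the names alloc_tracked_pages() and
 * free_tracked_pages() and the GFP choice are illustrative, not part of
 * this file):
 *
 *	static struct page *alloc_tracked_pages(int order)
 *	{
 *		struct page *page = alloc_pages(GFP_KERNEL, order);
 *
 *		if (page && __memcg_kmem_charge_page(page, GFP_KERNEL, order)) {
 *			__free_pages(page, order);
 *			return NULL;
 *		}
 *		return page;	// page->memcg_data now pins the objcg
 *	}
 *
 *	static void free_tracked_pages(struct page *page, int order)
 *	{
 *		__memcg_kmem_uncharge_page(page, order);
 *		__free_pages(page, order);
 *	}
 *
 * In-tree callers normally get this pairing implicitly by allocating with
 * __GFP_ACCOUNT rather than calling the helpers directly.
 */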

void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
		     enum node_stat_item idx, int nr)
{
	struct memcg_stock_pcp *stock;
	struct obj_cgroup *old = NULL;
	unsigned long flags;
	int *bytes;

	local_lock_irqsave(&memcg_stock.stock_lock, flags);
	stock = this_cpu_ptr(&memcg_stock);

	/*
	 * Save vmstat data in stock and skip vmstat array update unless
	 * accumulating over a page of vmstat data or when pgdat or idx
	 * changes.
	 */
	if (READ_ONCE(stock->cached_objcg) != objcg) {
		old = drain_obj_stock(stock);
		obj_cgroup_get(objcg);
		stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
			? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
		WRITE_ONCE(stock->cached_objcg, objcg);
		stock->cached_pgdat = pgdat;
	} else if (stock->cached_pgdat != pgdat) {
		/* Flush the existing cached vmstat data */
		struct pglist_data *oldpg = stock->cached_pgdat;

		if (stock->nr_slab_reclaimable_b) {
			mod_objcg_mlstate(objcg, oldpg, NR_SLAB_RECLAIMABLE_B,
					  stock->nr_slab_reclaimable_b);
			stock->nr_slab_reclaimable_b = 0;
		}
		if (stock->nr_slab_unreclaimable_b) {
			mod_objcg_mlstate(objcg, oldpg, NR_SLAB_UNRECLAIMABLE_B,
					  stock->nr_slab_unreclaimable_b);
			stock->nr_slab_unreclaimable_b = 0;
		}
		stock->cached_pgdat = pgdat;
	}

	bytes = (idx == NR_SLAB_RECLAIMABLE_B) ? &stock->nr_slab_reclaimable_b
					       : &stock->nr_slab_unreclaimable_b;
	/*
	 * Even for large object >= PAGE_SIZE, the vmstat data will still be
	 * cached locally at least once before pushing it out.
	 */
	if (!*bytes) {
		*bytes = nr;
		nr = 0;
	} else {
		*bytes += nr;
		if (abs(*bytes) > PAGE_SIZE) {
			nr = *bytes;
			*bytes = 0;
		} else {
			nr = 0;
		}
	}
	if (nr)
		mod_objcg_mlstate(objcg, pgdat, idx, nr);

	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
	if (old)
		obj_cgroup_put(old);
}
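/*
 * Worked example of the batching above (illustrative numbers): with a
 * 4096-byte PAGE_SIZE, repeated mod_objcg_state(objcg, pgdat,
 * NR_SLAB_RECLAIMABLE_B, 512) calls only accumulate into
 * stock->nr_slab_reclaimable_b; the node vmstat is touched once the
 * cached total exceeds 4096 bytes, or earlier if the objcg or the pgdat
 * changes, or when the stock is drained.
 */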

static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
{
	struct memcg_stock_pcp *stock;
	unsigned long flags;
	bool ret = false;

	local_lock_irqsave(&memcg_stock.stock_lock, flags);

	stock = this_cpu_ptr(&memcg_stock);
	if (objcg == READ_ONCE(stock->cached_objcg) && stock->nr_bytes >= nr_bytes) {
		stock->nr_bytes -= nr_bytes;
		ret = true;
	}

	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);

	return ret;
}

static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock)
{
	struct obj_cgroup *old = READ_ONCE(stock->cached_objcg);

	if (!old)
		return NULL;

	if (stock->nr_bytes) {
		unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT;
		unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);

		if (nr_pages) {
			struct mem_cgroup *memcg;

			memcg = get_mem_cgroup_from_objcg(old);

			memcg_account_kmem(memcg, -nr_pages);
			__refill_stock(memcg, nr_pages);

			css_put(&memcg->css);
		}

		/*
		 * The leftover is flushed to the centralized per-memcg value.
		 * On the next attempt to refill obj stock it will be moved
		 * to a per-cpu stock (probably, on another CPU), see
		 * refill_obj_stock().
		 *
		 * How often it's flushed is a trade-off between the memory
		 * limit enforcement accuracy and potential CPU contention,
		 * so it might be changed in the future.
		 */
		atomic_add(nr_bytes, &old->nr_charged_bytes);
		stock->nr_bytes = 0;
	}

	/*
	 * Flush the vmstat data in current stock
	 */
	if (stock->nr_slab_reclaimable_b || stock->nr_slab_unreclaimable_b) {
		if (stock->nr_slab_reclaimable_b) {
			mod_objcg_mlstate(old, stock->cached_pgdat,
					  NR_SLAB_RECLAIMABLE_B,
					  stock->nr_slab_reclaimable_b);
			stock->nr_slab_reclaimable_b = 0;
		}
		if (stock->nr_slab_unreclaimable_b) {
			mod_objcg_mlstate(old, stock->cached_pgdat,
					  NR_SLAB_UNRECLAIMABLE_B,
					  stock->nr_slab_unreclaimable_b);
			stock->nr_slab_unreclaimable_b = 0;
		}
		stock->cached_pgdat = NULL;
	}

	WRITE_ONCE(stock->cached_objcg, NULL);
	/*
	 * The `old' objcg needs to be released by the caller via
	 * obj_cgroup_put() outside of memcg_stock_pcp::stock_lock.
	 */
	return old;
}

static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
				     struct mem_cgroup *root_memcg)
{
	struct obj_cgroup *objcg = READ_ONCE(stock->cached_objcg);
	struct mem_cgroup *memcg;

	if (objcg) {
		memcg = obj_cgroup_memcg(objcg);
		if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
			return true;
	}

	return false;
}

static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
			     bool allow_uncharge)
{
	struct memcg_stock_pcp *stock;
	struct obj_cgroup *old = NULL;
	unsigned long flags;
	unsigned int nr_pages = 0;

	local_lock_irqsave(&memcg_stock.stock_lock, flags);

	stock = this_cpu_ptr(&memcg_stock);
	if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */
		old = drain_obj_stock(stock);
		obj_cgroup_get(objcg);
		WRITE_ONCE(stock->cached_objcg, objcg);
		stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
			? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
		allow_uncharge = true;	/* Allow uncharge when objcg changes */
	}
	stock->nr_bytes += nr_bytes;

	if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) {
		nr_pages = stock->nr_bytes >> PAGE_SHIFT;
		stock->nr_bytes &= (PAGE_SIZE - 1);
	}

	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
	if (old)
		obj_cgroup_put(old);

	if (nr_pages)
		obj_cgroup_uncharge_pages(objcg, nr_pages);
}

int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
{
	unsigned int nr_pages, nr_bytes;
	int ret;

	if (consume_obj_stock(objcg, size))
		return 0;

	/*
	 * In theory, objcg->nr_charged_bytes can have enough
	 * pre-charged bytes to satisfy the allocation. However,
	 * flushing objcg->nr_charged_bytes requires two atomic
	 * operations, and objcg->nr_charged_bytes can't be big.
	 * The shared objcg->nr_charged_bytes can also become a
	 * performance bottleneck if all tasks of the same memcg are
	 * trying to update it. So it's better to ignore it and try
	 * to grab some new pages. The stock's nr_bytes will be flushed to
	 * objcg->nr_charged_bytes later on when objcg changes.
	 *
	 * The stock's nr_bytes may contain enough pre-charged bytes
	 * to allow one less page from being charged, but we can't rely
	 * on the pre-charged bytes not being changed outside of
	 * consume_obj_stock() or refill_obj_stock(). So ignore those
	 * pre-charged bytes as well when charging pages. To avoid a
	 * page uncharge right after a page charge, we set the
	 * allow_uncharge flag to false when calling refill_obj_stock()
	 * to temporarily allow the pre-charged bytes to exceed the page
	 * size limit. The maximum reachable value of the pre-charged
	 * bytes is (sizeof(object) + PAGE_SIZE - 2) if there is no data
	 * race.
	 */
	nr_pages = size >> PAGE_SHIFT;
	nr_bytes = size & (PAGE_SIZE - 1);

	if (nr_bytes)
		nr_pages += 1;

	ret = obj_cgroup_charge_pages(objcg, gfp, nr_pages);
	if (!ret && nr_bytes)
		refill_obj_stock(objcg, PAGE_SIZE - nr_bytes, false);

	return ret;
}
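/*
 * Worked example of the byte accounting above (illustrative sizes,
 * assuming a 4096-byte PAGE_SIZE): obj_cgroup_charge(objcg, GFP_KERNEL,
 * 4296) computes nr_pages = 1 and nr_bytes = 200, rounds the page charge
 * up to 2 pages, and then puts the unused PAGE_SIZE - 200 = 3896 bytes
 * into the per-cpu stock so that later sub-page charges from the same
 * objcg can be satisfied without touching the page counters. A matching
 * obj_cgroup_uncharge(objcg, 4296) returns the bytes to the stock and,
 * once more than a page has accumulated there, uncharges whole pages
 * again.
 */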

void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
{
	refill_obj_stock(objcg, size, true);
}

#endif /* CONFIG_MEMCG_KMEM */

/*
 * Because page_memcg(head) is not set on tails, set it now.
 */
void split_page_memcg(struct page *head, int old_order, int new_order)
{
	struct folio *folio = page_folio(head);
	struct mem_cgroup *memcg = folio_memcg(folio);
	int i;
	unsigned int old_nr = 1 << old_order;
	unsigned int new_nr = 1 << new_order;

	if (mem_cgroup_disabled() || !memcg)
		return;

	for (i = new_nr; i < old_nr; i += new_nr)
		folio_page(folio, i)->memcg_data = folio->memcg_data;

	if (folio_memcg_kmem(folio))
		obj_cgroup_get_many(__folio_objcg(folio), old_nr / new_nr - 1);
	else
		css_get_many(&memcg->css, old_nr / new_nr - 1);
}
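/*
 * Reference-count arithmetic for the split above (illustrative orders):
 * splitting an old_order = 9 folio into new_order = 0 pages copies
 * memcg_data to every tail and takes 512 / 1 - 1 = 511 extra objcg or
 * css references, one for each new head beyond the original; splitting
 * the same folio to new_order = 3 copies memcg_data at a stride of 8 and
 * takes 512 / 8 - 1 = 63 extra references.
 */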

#ifdef CONFIG_SWAP
/**
 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
 * @entry: swap entry to be moved
 * @from: mem_cgroup which the entry is moved from
 * @to: mem_cgroup which the entry is moved to
 *
 * It succeeds only when the swap_cgroup's record for this entry is the same
 * as the mem_cgroup's id of @from.
 *
 * Returns 0 on success, -EINVAL on failure.
 *
 * The caller must have charged to @to, IOW, called page_counter_charge() on
 * both res and memsw, and called css_get().
 */
static int mem_cgroup_move_swap_account(swp_entry_t entry,
					struct mem_cgroup *from, struct mem_cgroup *to)
{
	unsigned short old_id, new_id;

	old_id = mem_cgroup_id(from);
	new_id = mem_cgroup_id(to);

	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
		mod_memcg_state(from, MEMCG_SWAP, -1);
		mod_memcg_state(to, MEMCG_SWAP, 1);
		return 0;
	}
	return -EINVAL;
}
#else
static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
					struct mem_cgroup *from, struct mem_cgroup *to)
{
	return -EINVAL;
}
#endif

bbec2e15 3667static DEFINE_MUTEX(memcg_max_mutex);
f212ad7c 3668
bbec2e15
RG
3669static int mem_cgroup_resize_max(struct mem_cgroup *memcg,
3670 unsigned long max, bool memsw)
628f4235 3671{
3e32cb2e 3672 bool enlarge = false;
bb4a7ea2 3673 bool drained = false;
3e32cb2e 3674 int ret;
c054a78c
YZ
3675 bool limits_invariant;
3676 struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory;
81d39c20 3677
3e32cb2e 3678 do {
628f4235
KH
3679 if (signal_pending(current)) {
3680 ret = -EINTR;
3681 break;
3682 }
3e32cb2e 3683
bbec2e15 3684 mutex_lock(&memcg_max_mutex);
c054a78c
YZ
3685 /*
3686 * Make sure that the new limit (memsw or memory limit) doesn't
bbec2e15 3687 * break our basic invariant rule memory.max <= memsw.max.
c054a78c 3688 */
15b42562 3689 limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) :
bbec2e15 3690 max <= memcg->memsw.max;
c054a78c 3691 if (!limits_invariant) {
bbec2e15 3692 mutex_unlock(&memcg_max_mutex);
8c7c6e34 3693 ret = -EINVAL;
8c7c6e34
KH
3694 break;
3695 }
bbec2e15 3696 if (max > counter->max)
3e32cb2e 3697 enlarge = true;
bbec2e15
RG
3698 ret = page_counter_set_max(counter, max);
3699 mutex_unlock(&memcg_max_mutex);
8c7c6e34
KH
3700
3701 if (!ret)
3702 break;
3703
bb4a7ea2
SB
3704 if (!drained) {
3705 drain_all_stock(memcg);
3706 drained = true;
3707 continue;
3708 }
3709
73b73bac 3710 if (!try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL,
55ab834a 3711 memsw ? 0 : MEMCG_RECLAIM_MAY_SWAP)) {
1ab5c056
AR
3712 ret = -EBUSY;
3713 break;
3714 }
3715 } while (true);
3e32cb2e 3716
3c11ecf4
KH
3717 if (!ret && enlarge)
3718 memcg_oom_recover(memcg);
3e32cb2e 3719
628f4235
KH
3720 return ret;
3721}
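/*
 * The loop above backs the cgroup v1 limit files; for example, shrinking
 * a group's limit from userspace (the path is illustrative):
 *
 *	echo 512M > /sys/fs/cgroup/memory/<group>/memory.limit_in_bytes
 *
 * repeatedly drains the per-cpu stocks and calls
 * try_to_free_mem_cgroup_pages() until usage fits under the new max, and
 * gives up with -EBUSY if reclaim cannot make enough progress.
 */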
3722
ef8f2327 3723unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
0608f43d
AM
3724 gfp_t gfp_mask,
3725 unsigned long *total_scanned)
3726{
3727 unsigned long nr_reclaimed = 0;
ef8f2327 3728 struct mem_cgroup_per_node *mz, *next_mz = NULL;
0608f43d
AM
3729 unsigned long reclaimed;
3730 int loop = 0;
ef8f2327 3731 struct mem_cgroup_tree_per_node *mctz;
3e32cb2e 3732 unsigned long excess;
0608f43d 3733
e4dde56c
YZ
3734 if (lru_gen_enabled())
3735 return 0;
3736
0608f43d
AM
3737 if (order > 0)
3738 return 0;
3739
2ab082ba 3740 mctz = soft_limit_tree.rb_tree_per_node[pgdat->node_id];
d6507ff5
MH
3741
3742 /*
3743 * Do not even bother to check the largest node if the root
3744 * is empty. Do it lockless to prevent lock bouncing. Races
3745 * are acceptable as soft limit is best effort anyway.
3746 */
bfc7228b 3747 if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root))
d6507ff5
MH
3748 return 0;
3749
0608f43d
AM
3750 /*
3751 * This loop can run a while, specially if mem_cgroup's continuously
3752 * keep exceeding their soft limit and putting the system under
3753 * pressure
3754 */
3755 do {
3756 if (next_mz)
3757 mz = next_mz;
3758 else
3759 mz = mem_cgroup_largest_soft_limit_node(mctz);
3760 if (!mz)
3761 break;
3762
ef8f2327 3763 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
d8f65338 3764 gfp_mask, total_scanned);
0608f43d 3765 nr_reclaimed += reclaimed;
0a31bc97 3766 spin_lock_irq(&mctz->lock);
0608f43d
AM
3767
3768 /*
3769 * If we failed to reclaim anything from this memory cgroup
3770 * it is time to move on to the next cgroup
3771 */
3772 next_mz = NULL;
bc2f2e7f
VD
3773 if (!reclaimed)
3774 next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
3775
3e32cb2e 3776 excess = soft_limit_excess(mz->memcg);
0608f43d
AM
3777 /*
3778 * One school of thought says that we should not add
3779 * back the node to the tree if reclaim returns 0.
3780 * But our reclaim could return 0, simply because due
3781 * to priority we are exposing a smaller subset of
3782 * memory to reclaim from. Consider this as a longer
3783 * term TODO.
3784 */
3785 /* If excess == 0, no tree ops */
cf2c8127 3786 __mem_cgroup_insert_exceeded(mz, mctz, excess);
0a31bc97 3787 spin_unlock_irq(&mctz->lock);
0608f43d
AM
3788 css_put(&mz->memcg->css);
3789 loop++;
3790 /*
3791 * Could not reclaim anything and there are no more
3792 * mem cgroups to try or we seem to be looping without
3793 * reclaiming anything.
3794 */
3795 if (!nr_reclaimed &&
3796 (next_mz == NULL ||
3797 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
3798 break;
3799 } while (!nr_reclaimed);
3800 if (next_mz)
3801 css_put(&next_mz->memcg->css);
3802 return nr_reclaimed;
3803}
3804
c26251f9 3805/*
51038171 3806 * Reclaims as many pages from the given memcg as possible.
c26251f9
MH
3807 *
3808 * Caller is responsible for holding css reference for memcg.
3809 */
3810static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
3811{
d977aa93 3812 int nr_retries = MAX_RECLAIM_RETRIES;
c26251f9 3813
c1e862c1
KH
3814 /* we call try-to-free pages for make this cgroup empty */
3815 lru_add_drain_all();
d12c60f6
JS
3816
3817 drain_all_stock(memcg);
3818
f817ed48 3819 /* try to free all pages in this cgroup */
3e32cb2e 3820 while (nr_retries && page_counter_read(&memcg->memory)) {
c26251f9
MH
3821 if (signal_pending(current))
3822 return -EINTR;
3823
73b73bac 3824 if (!try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL,
55ab834a 3825 MEMCG_RECLAIM_MAY_SWAP))
f817ed48 3826 nr_retries--;
f817ed48 3827 }
ab5196c2
MH
3828
3829 return 0;
cc847582
KH
3830}
3831
6770c64e
TH
3832static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
3833 char *buf, size_t nbytes,
3834 loff_t off)
c1e862c1 3835{
6770c64e 3836 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
c26251f9 3837
d8423011
MH
3838 if (mem_cgroup_is_root(memcg))
3839 return -EINVAL;
6770c64e 3840 return mem_cgroup_force_empty(memcg) ?: nbytes;
c1e862c1
KH
3841}
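/*
 * Example of triggering the handler above from userspace (the cgroup v1
 * path is illustrative):
 *
 *	echo 1 > /sys/fs/cgroup/memory/<group>/memory.force_empty
 *
 * Any written value works: the content is ignored, the target just has
 * to be a non-root memcg, and the kernel then tries to reclaim every
 * page charged to it.
 */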
3842
182446d0
TH
3843static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
3844 struct cftype *cft)
18f59ea7 3845{
bef8620c 3846 return 1;
18f59ea7
BS
3847}
3848
182446d0
TH
3849static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
3850 struct cftype *cft, u64 val)
18f59ea7 3851{
bef8620c 3852 if (val == 1)
0b8f73e1 3853 return 0;
567fb435 3854
bef8620c
RG
3855 pr_warn_once("Non-hierarchical mode is deprecated. "
3856 "Please report your usecase to [email protected] if you "
3857 "depend on this functionality.\n");
567fb435 3858
bef8620c 3859 return -EINVAL;
18f59ea7
BS
3860}
3861
6f646156 3862static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
ce00a967 3863{
42a30035 3864 unsigned long val;
ce00a967 3865
3e32cb2e 3866 if (mem_cgroup_is_root(memcg)) {
a2174e95 3867 /*
f82a7a86
YA
3868 * Approximate root's usage from global state. This isn't
3869 * perfect, but the root usage was always an approximation.
a2174e95 3870 */
f82a7a86
YA
3871 val = global_node_page_state(NR_FILE_PAGES) +
3872 global_node_page_state(NR_ANON_MAPPED);
42a30035 3873 if (swap)
f82a7a86 3874 val += total_swap_pages - get_nr_swap_pages();
3e32cb2e 3875 } else {
ce00a967 3876 if (!swap)
3e32cb2e 3877 val = page_counter_read(&memcg->memory);
ce00a967 3878 else
3e32cb2e 3879 val = page_counter_read(&memcg->memsw);
ce00a967 3880 }
c12176d3 3881 return val;
ce00a967
JW
3882}
3883
3e32cb2e
JW
3884enum {
3885 RES_USAGE,
3886 RES_LIMIT,
3887 RES_MAX_USAGE,
3888 RES_FAILCNT,
3889 RES_SOFT_LIMIT,
3890};
ce00a967 3891
791badbd 3892static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
05b84301 3893 struct cftype *cft)
8cdea7c0 3894{
182446d0 3895 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3e32cb2e 3896 struct page_counter *counter;
af36f906 3897
3e32cb2e 3898 switch (MEMFILE_TYPE(cft->private)) {
8c7c6e34 3899 case _MEM:
3e32cb2e
JW
3900 counter = &memcg->memory;
3901 break;
8c7c6e34 3902 case _MEMSWAP:
3e32cb2e
JW
3903 counter = &memcg->memsw;
3904 break;
510fc4e1 3905 case _KMEM:
3e32cb2e 3906 counter = &memcg->kmem;
510fc4e1 3907 break;
d55f90bf 3908 case _TCP:
0db15298 3909 counter = &memcg->tcpmem;
d55f90bf 3910 break;
8c7c6e34
KH
3911 default:
3912 BUG();
8c7c6e34 3913 }
3e32cb2e
JW
3914
3915 switch (MEMFILE_ATTR(cft->private)) {
3916 case RES_USAGE:
3917 if (counter == &memcg->memory)
c12176d3 3918 return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
3e32cb2e 3919 if (counter == &memcg->memsw)
c12176d3 3920 return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
3e32cb2e
JW
3921 return (u64)page_counter_read(counter) * PAGE_SIZE;
3922 case RES_LIMIT:
bbec2e15 3923 return (u64)counter->max * PAGE_SIZE;
3e32cb2e
JW
3924 case RES_MAX_USAGE:
3925 return (u64)counter->watermark * PAGE_SIZE;
3926 case RES_FAILCNT:
3927 return counter->failcnt;
3928 case RES_SOFT_LIMIT:
2178e20c 3929 return (u64)READ_ONCE(memcg->soft_limit) * PAGE_SIZE;
3e32cb2e
JW
3930 default:
3931 BUG();
3932 }
8cdea7c0 3933}
510fc4e1 3934
6b0ba2ab
FS
3935/*
3936 * This function doesn't do anything useful. Its only job is to provide a read
3937 * handler for a file so that cgroup_file_mode() will add read permissions.
3938 */
3939static int mem_cgroup_dummy_seq_show(__always_unused struct seq_file *m,
3940 __always_unused void *v)
3941{
3942 return -EINVAL;
3943}
3944
84c07d11 3945#ifdef CONFIG_MEMCG_KMEM
567e9ab2 3946static int memcg_online_kmem(struct mem_cgroup *memcg)
d6441637 3947{
bf4f0599 3948 struct obj_cgroup *objcg;
d6441637 3949
9c94bef9 3950 if (mem_cgroup_kmem_disabled())
b313aeee
VD
3951 return 0;
3952
da0efe30
MS
3953 if (unlikely(mem_cgroup_is_root(memcg)))
3954 return 0;
d6441637 3955
bf4f0599 3956 objcg = obj_cgroup_alloc();
f9c69d63 3957 if (!objcg)
bf4f0599 3958 return -ENOMEM;
f9c69d63 3959
bf4f0599
RG
3960 objcg->memcg = memcg;
3961 rcu_assign_pointer(memcg->objcg, objcg);
675d6c9b
RG
3962 obj_cgroup_get(objcg);
3963 memcg->orig_objcg = objcg;
bf4f0599 3964
f7a449f7 3965 static_branch_enable(&memcg_kmem_online_key);
d648bcc7 3966
f9c69d63 3967 memcg->kmemcg_id = memcg->id.id;
0b8f73e1
JW
3968
3969 return 0;
d6441637
VD
3970}
3971
8e0a8912
JW
3972static void memcg_offline_kmem(struct mem_cgroup *memcg)
3973{
64268868 3974 struct mem_cgroup *parent;
8e0a8912 3975
9c94bef9 3976 if (mem_cgroup_kmem_disabled())
da0efe30
MS
3977 return;
3978
3979 if (unlikely(mem_cgroup_is_root(memcg)))
8e0a8912 3980 return;
9855609b 3981
8e0a8912
JW
3982 parent = parent_mem_cgroup(memcg);
3983 if (!parent)
3984 parent = root_mem_cgroup;
3985
bf4f0599 3986 memcg_reparent_objcgs(memcg, parent);
fb2f2b0a 3987
8e0a8912 3988 /*
64268868
MS
3989 * After we have finished memcg_reparent_objcgs(), all list_lrus
3990 * corresponding to this cgroup are guaranteed to remain empty.
3991 * The ordering is imposed by list_lru_node->lock taken by
1f391eb2 3992 * memcg_reparent_list_lrus().
8e0a8912 3993 */
1f391eb2 3994 memcg_reparent_list_lrus(memcg, parent);
8e0a8912 3995}
d6441637 3996#else
0b8f73e1 3997static int memcg_online_kmem(struct mem_cgroup *memcg)
127424c8
JW
3998{
3999 return 0;
4000}
4001static void memcg_offline_kmem(struct mem_cgroup *memcg)
4002{
4003}
84c07d11 4004#endif /* CONFIG_MEMCG_KMEM */
127424c8 4005
bbec2e15 4006static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max)
d55f90bf
VD
4007{
4008 int ret;
4009
bbec2e15 4010 mutex_lock(&memcg_max_mutex);
d55f90bf 4011
bbec2e15 4012 ret = page_counter_set_max(&memcg->tcpmem, max);
d55f90bf
VD
4013 if (ret)
4014 goto out;
4015
0db15298 4016 if (!memcg->tcpmem_active) {
d55f90bf
VD
4017 /*
4018 * The active flag needs to be written after the static_key
4019 * update. This is what guarantees that the socket activation
2d758073
JW
4020 * function is the last one to run. See mem_cgroup_sk_alloc()
4021 * for details, and note that we don't mark any socket as
4022 * belonging to this memcg until that flag is up.
d55f90bf
VD
4023 *
4024 * We need to do this, because static_keys will span multiple
4025 * sites, but we can't control their order. If we mark a socket
4026 * as accounted, but the accounting functions are not patched in
4027 * yet, we'll lose accounting.
4028 *
2d758073 4029 * We never race with the readers in mem_cgroup_sk_alloc(),
d55f90bf
VD
4030 * because when this value change, the code to process it is not
4031 * patched in yet.
4032 */
4033 static_branch_inc(&memcg_sockets_enabled_key);
0db15298 4034 memcg->tcpmem_active = true;
d55f90bf
VD
4035 }
4036out:
bbec2e15 4037 mutex_unlock(&memcg_max_mutex);
d55f90bf
VD
4038 return ret;
4039}
d55f90bf 4040
628f4235
KH
4041/*
4042 * The user of this function is...
4043 * RES_LIMIT.
4044 */
451af504
TH
4045static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
4046 char *buf, size_t nbytes, loff_t off)
8cdea7c0 4047{
451af504 4048 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3e32cb2e 4049 unsigned long nr_pages;
628f4235
KH
4050 int ret;
4051
451af504 4052 buf = strstrip(buf);
650c5e56 4053 ret = page_counter_memparse(buf, "-1", &nr_pages);
3e32cb2e
JW
4054 if (ret)
4055 return ret;
af36f906 4056
3e32cb2e 4057 switch (MEMFILE_ATTR(of_cft(of)->private)) {
628f4235 4058 case RES_LIMIT:
4b3bde4c
BS
4059 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
4060 ret = -EINVAL;
4061 break;
4062 }
3e32cb2e
JW
4063 switch (MEMFILE_TYPE(of_cft(of)->private)) {
4064 case _MEM:
bbec2e15 4065 ret = mem_cgroup_resize_max(memcg, nr_pages, false);
8c7c6e34 4066 break;
3e32cb2e 4067 case _MEMSWAP:
bbec2e15 4068 ret = mem_cgroup_resize_max(memcg, nr_pages, true);
296c81d8 4069 break;
4597648f
MH
4070 case _KMEM:
4071 pr_warn_once("kmem.limit_in_bytes is deprecated and will be removed. "
4072 "Writing any value to this file has no effect. "
4073 "Please report your usecase to [email protected] if you "
4074 "depend on this functionality.\n");
4075 ret = 0;
4076 break;
d55f90bf 4077 case _TCP:
bbec2e15 4078 ret = memcg_update_tcp_max(memcg, nr_pages);
d55f90bf 4079 break;
3e32cb2e 4080 }
296c81d8 4081 break;
3e32cb2e 4082 case RES_SOFT_LIMIT:
2343e88d
SAS
4083 if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
4084 ret = -EOPNOTSUPP;
4085 } else {
2178e20c 4086 WRITE_ONCE(memcg->soft_limit, nr_pages);
2343e88d
SAS
4087 ret = 0;
4088 }
628f4235
KH
4089 break;
4090 }
451af504 4091 return ret ?: nbytes;
8cdea7c0
BS
4092}
4093
6770c64e
TH
4094static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
4095 size_t nbytes, loff_t off)
c84872e1 4096{
6770c64e 4097 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3e32cb2e 4098 struct page_counter *counter;
c84872e1 4099
3e32cb2e
JW
4100 switch (MEMFILE_TYPE(of_cft(of)->private)) {
4101 case _MEM:
4102 counter = &memcg->memory;
4103 break;
4104 case _MEMSWAP:
4105 counter = &memcg->memsw;
4106 break;
4107 case _KMEM:
4108 counter = &memcg->kmem;
4109 break;
d55f90bf 4110 case _TCP:
0db15298 4111 counter = &memcg->tcpmem;
d55f90bf 4112 break;
3e32cb2e
JW
4113 default:
4114 BUG();
4115 }
af36f906 4116
3e32cb2e 4117 switch (MEMFILE_ATTR(of_cft(of)->private)) {
29f2a4da 4118 case RES_MAX_USAGE:
3e32cb2e 4119 page_counter_reset_watermark(counter);
29f2a4da
PE
4120 break;
4121 case RES_FAILCNT:
3e32cb2e 4122 counter->failcnt = 0;
29f2a4da 4123 break;
3e32cb2e
JW
4124 default:
4125 BUG();
29f2a4da 4126 }
f64c3f54 4127
6770c64e 4128 return nbytes;
c84872e1
PE
4129}
4130
182446d0 4131static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
7dc74be0
DN
4132 struct cftype *cft)
4133{
182446d0 4134 return mem_cgroup_from_css(css)->move_charge_at_immigrate;
7dc74be0
DN
4135}
4136
02491447 4137#ifdef CONFIG_MMU
182446d0 4138static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
7dc74be0
DN
4139 struct cftype *cft, u64 val)
4140{
182446d0 4141 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
7dc74be0 4142
da34a848
JW
4143 pr_warn_once("Cgroup memory moving (move_charge_at_immigrate) is deprecated. "
4144 "Please report your usecase to [email protected] if you "
4145 "depend on this functionality.\n");
4146
1dfab5ab 4147 if (val & ~MOVE_MASK)
7dc74be0 4148 return -EINVAL;
ee5e8472 4149
7dc74be0 4150 /*
ee5e8472
GC
4151 * No kind of locking is needed in here, because ->can_attach() will
4152 * check this value once in the beginning of the process, and then carry
4153 * on with stale data. This means that changes to this value will only
4154 * affect task migrations starting after the change.
7dc74be0 4155 */
c0ff4b85 4156 memcg->move_charge_at_immigrate = val;
7dc74be0
DN
4157 return 0;
4158}
02491447 4159#else
182446d0 4160static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
02491447
DN
4161 struct cftype *cft, u64 val)
4162{
4163 return -ENOSYS;
4164}
4165#endif
7dc74be0 4166
406eb0c9 4167#ifdef CONFIG_NUMA
113b7dfd
JW
4168
4169#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
4170#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
4171#define LRU_ALL ((1 << NR_LRU_LISTS) - 1)
4172
4173static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
dd8657b6 4174 int nid, unsigned int lru_mask, bool tree)
113b7dfd 4175{
867e5e1d 4176 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
113b7dfd
JW
4177 unsigned long nr = 0;
4178 enum lru_list lru;
4179
4180 VM_BUG_ON((unsigned)nid >= nr_node_ids);
4181
4182 for_each_lru(lru) {
4183 if (!(BIT(lru) & lru_mask))
4184 continue;
dd8657b6
SB
4185 if (tree)
4186 nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru);
4187 else
4188 nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru);
113b7dfd
JW
4189 }
4190 return nr;
4191}
4192
4193static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
dd8657b6
SB
4194 unsigned int lru_mask,
4195 bool tree)
113b7dfd
JW
4196{
4197 unsigned long nr = 0;
4198 enum lru_list lru;
4199
4200 for_each_lru(lru) {
4201 if (!(BIT(lru) & lru_mask))
4202 continue;
dd8657b6
SB
4203 if (tree)
4204 nr += memcg_page_state(memcg, NR_LRU_BASE + lru);
4205 else
4206 nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru);
113b7dfd
JW
4207 }
4208 return nr;
4209}
4210
2da8ca82 4211static int memcg_numa_stat_show(struct seq_file *m, void *v)
406eb0c9 4212{
25485de6
GT
4213 struct numa_stat {
4214 const char *name;
4215 unsigned int lru_mask;
4216 };
4217
4218 static const struct numa_stat stats[] = {
4219 { "total", LRU_ALL },
4220 { "file", LRU_ALL_FILE },
4221 { "anon", LRU_ALL_ANON },
4222 { "unevictable", BIT(LRU_UNEVICTABLE) },
4223 };
4224 const struct numa_stat *stat;
406eb0c9 4225 int nid;
aa9694bb 4226 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
406eb0c9 4227
7d7ef0a4 4228 mem_cgroup_flush_stats(memcg);
2d146aa3 4229
25485de6 4230 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
dd8657b6
SB
4231 seq_printf(m, "%s=%lu", stat->name,
4232 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
4233 false));
4234 for_each_node_state(nid, N_MEMORY)
4235 seq_printf(m, " N%d=%lu", nid,
4236 mem_cgroup_node_nr_lru_pages(memcg, nid,
4237 stat->lru_mask, false));
25485de6 4238 seq_putc(m, '\n');
406eb0c9 4239 }
406eb0c9 4240
071aee13 4241 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
dd8657b6
SB
4242
4243 seq_printf(m, "hierarchical_%s=%lu", stat->name,
4244 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
4245 true));
4246 for_each_node_state(nid, N_MEMORY)
4247 seq_printf(m, " N%d=%lu", nid,
4248 mem_cgroup_node_nr_lru_pages(memcg, nid,
4249 stat->lru_mask, true));
071aee13 4250 seq_putc(m, '\n');
406eb0c9 4251 }
406eb0c9 4252
406eb0c9
YH
4253 return 0;
4254}
4255#endif /* CONFIG_NUMA */
4256
c8713d0b 4257static const unsigned int memcg1_stats[] = {
0d1c2072 4258 NR_FILE_PAGES,
be5d0a74 4259 NR_ANON_MAPPED,
468c3982
JW
4260#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4261 NR_ANON_THPS,
4262#endif
c8713d0b
JW
4263 NR_SHMEM,
4264 NR_FILE_MAPPED,
4265 NR_FILE_DIRTY,
4266 NR_WRITEBACK,
e09b0b61
YS
4267 WORKINGSET_REFAULT_ANON,
4268 WORKINGSET_REFAULT_FILE,
72a14e82 4269#ifdef CONFIG_SWAP
c8713d0b 4270 MEMCG_SWAP,
72a14e82
LS
4271 NR_SWAPCACHE,
4272#endif
c8713d0b
JW
4273};
4274
4275static const char *const memcg1_stat_names[] = {
4276 "cache",
4277 "rss",
468c3982 4278#ifdef CONFIG_TRANSPARENT_HUGEPAGE
c8713d0b 4279 "rss_huge",
468c3982 4280#endif
c8713d0b
JW
4281 "shmem",
4282 "mapped_file",
4283 "dirty",
4284 "writeback",
e09b0b61
YS
4285 "workingset_refault_anon",
4286 "workingset_refault_file",
72a14e82 4287#ifdef CONFIG_SWAP
c8713d0b 4288 "swap",
72a14e82
LS
4289 "swapcached",
4290#endif
c8713d0b
JW
4291};
4292
df0e53d0 4293/* Universal VM events cgroup1 shows, original sort order */
8dd53fd3 4294static const unsigned int memcg1_events[] = {
df0e53d0
JW
4295 PGPGIN,
4296 PGPGOUT,
4297 PGFAULT,
4298 PGMAJFAULT,
4299};
4300
dddb44ff 4301static void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
d2ceb9b7 4302{
3e32cb2e 4303 unsigned long memory, memsw;
af7c4b0e
JW
4304 struct mem_cgroup *mi;
4305 unsigned int i;
406eb0c9 4306
71cd3113 4307 BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));
70bc068c 4308
7d7ef0a4 4309 mem_cgroup_flush_stats(memcg);
2d146aa3 4310
71cd3113 4311 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
468c3982
JW
4312 unsigned long nr;
4313
ff841a06
YA
4314 nr = memcg_page_state_local_output(memcg, memcg1_stats[i]);
4315 seq_buf_printf(s, "%s %lu\n", memcg1_stat_names[i], nr);
1dd3a273 4316 }
7b854121 4317
df0e53d0 4318 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
dddb44ff
YA
4319 seq_buf_printf(s, "%s %lu\n", vm_event_name(memcg1_events[i]),
4320 memcg_events_local(memcg, memcg1_events[i]));
af7c4b0e
JW
4321
4322 for (i = 0; i < NR_LRU_LISTS; i++)
dddb44ff
YA
4323 seq_buf_printf(s, "%s %lu\n", lru_list_name(i),
4324 memcg_page_state_local(memcg, NR_LRU_BASE + i) *
4325 PAGE_SIZE);
af7c4b0e 4326
14067bb3 4327 /* Hierarchical information */
3e32cb2e
JW
4328 memory = memsw = PAGE_COUNTER_MAX;
4329 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
15b42562
CD
4330 memory = min(memory, READ_ONCE(mi->memory.max));
4331 memsw = min(memsw, READ_ONCE(mi->memsw.max));
fee7b548 4332 }
dddb44ff
YA
4333 seq_buf_printf(s, "hierarchical_memory_limit %llu\n",
4334 (u64)memory * PAGE_SIZE);
840ea53a
LS
4335 seq_buf_printf(s, "hierarchical_memsw_limit %llu\n",
4336 (u64)memsw * PAGE_SIZE);
7f016ee8 4337
8de7ecc6 4338 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
7de2e9f1 4339 unsigned long nr;
4340
ff841a06 4341 nr = memcg_page_state_output(memcg, memcg1_stats[i]);
dddb44ff 4342 seq_buf_printf(s, "total_%s %llu\n", memcg1_stat_names[i],
ff841a06 4343 (u64)nr);
af7c4b0e
JW
4344 }
4345
8de7ecc6 4346 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
dddb44ff
YA
4347 seq_buf_printf(s, "total_%s %llu\n",
4348 vm_event_name(memcg1_events[i]),
4349 (u64)memcg_events(memcg, memcg1_events[i]));
af7c4b0e 4350
8de7ecc6 4351 for (i = 0; i < NR_LRU_LISTS; i++)
dddb44ff
YA
4352 seq_buf_printf(s, "total_%s %llu\n", lru_list_name(i),
4353 (u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
4354 PAGE_SIZE);
14067bb3 4355
7f016ee8 4356#ifdef CONFIG_DEBUG_VM
7f016ee8 4357 {
ef8f2327
MG
4358 pg_data_t *pgdat;
4359 struct mem_cgroup_per_node *mz;
1431d4d1
JW
4360 unsigned long anon_cost = 0;
4361 unsigned long file_cost = 0;
7f016ee8 4362
ef8f2327 4363 for_each_online_pgdat(pgdat) {
a3747b53 4364 mz = memcg->nodeinfo[pgdat->node_id];
7f016ee8 4365
1431d4d1
JW
4366 anon_cost += mz->lruvec.anon_cost;
4367 file_cost += mz->lruvec.file_cost;
ef8f2327 4368 }
dddb44ff
YA
4369 seq_buf_printf(s, "anon_cost %lu\n", anon_cost);
4370 seq_buf_printf(s, "file_cost %lu\n", file_cost);
7f016ee8
KM
4371 }
4372#endif
d2ceb9b7
KH
4373}
4374
182446d0
TH
4375static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
4376 struct cftype *cft)
a7885eb8 4377{
182446d0 4378 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
a7885eb8 4379
1f4c025b 4380 return mem_cgroup_swappiness(memcg);
a7885eb8
KM
4381}
4382
182446d0
TH
4383static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
4384 struct cftype *cft, u64 val)
a7885eb8 4385{
182446d0 4386 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
a7885eb8 4387
37bc3cb9 4388 if (val > 200)
a7885eb8
KM
4389 return -EINVAL;
4390
a4792030 4391 if (!mem_cgroup_is_root(memcg))
82b3aa26 4392 WRITE_ONCE(memcg->swappiness, val);
3dae7fec 4393 else
82b3aa26 4394 WRITE_ONCE(vm_swappiness, val);
068b38c1 4395
a7885eb8
KM
4396 return 0;
4397}
4398
2e72b634
KS
4399static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
4400{
4401 struct mem_cgroup_threshold_ary *t;
3e32cb2e 4402 unsigned long usage;
2e72b634
KS
4403 int i;
4404
4405 rcu_read_lock();
4406 if (!swap)
2c488db2 4407 t = rcu_dereference(memcg->thresholds.primary);
2e72b634 4408 else
2c488db2 4409 t = rcu_dereference(memcg->memsw_thresholds.primary);
2e72b634
KS
4410
4411 if (!t)
4412 goto unlock;
4413
ce00a967 4414 usage = mem_cgroup_usage(memcg, swap);
2e72b634
KS
4415
4416 /*
748dad36 4417 * current_threshold points to threshold just below or equal to usage.
2e72b634
KS
4418 * If it's not true, a threshold was crossed after last
4419 * call of __mem_cgroup_threshold().
4420 */
5407a562 4421 i = t->current_threshold;
2e72b634
KS
4422
4423 /*
4424 * Iterate backward over array of thresholds starting from
4425 * current_threshold and check if a threshold is crossed.
4426 * If none of thresholds below usage is crossed, we read
4427 * only one element of the array here.
4428 */
4429 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
3652117f 4430 eventfd_signal(t->entries[i].eventfd);
2e72b634
KS
4431
4432 /* i = current_threshold + 1 */
4433 i++;
4434
4435 /*
4436 * Iterate forward over array of thresholds starting from
4437 * current_threshold+1 and check if a threshold is crossed.
4438 * If none of thresholds above usage is crossed, we read
4439 * only one element of the array here.
4440 */
4441 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
3652117f 4442 eventfd_signal(t->entries[i].eventfd);
2e72b634
KS
4443
4444 /* Update current_threshold */
5407a562 4445 t->current_threshold = i - 1;
2e72b634
KS
4446unlock:
4447 rcu_read_unlock();
4448}
4449
4450static void mem_cgroup_threshold(struct mem_cgroup *memcg)
4451{
ad4ca5f4
KS
4452 while (memcg) {
4453 __mem_cgroup_threshold(memcg, false);
7941d214 4454 if (do_memsw_account())
ad4ca5f4
KS
4455 __mem_cgroup_threshold(memcg, true);
4456
4457 memcg = parent_mem_cgroup(memcg);
4458 }
2e72b634
KS
4459}
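/*
 * Worked example of the threshold walk in __mem_cgroup_threshold()
 * (illustrative values): with registered thresholds of 4M, 8M and 16M
 * and usage previously below 4M, a rise to 10M signals the 4M and 8M
 * eventfds on the upward scan and leaves current_threshold at the 8M
 * entry; a later drop to 6M signals the 8M eventfd again on the
 * downward scan, because it has now been crossed in the other
 * direction, and moves current_threshold back to the 4M entry.
 */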
4460
4461static int compare_thresholds(const void *a, const void *b)
4462{
4463 const struct mem_cgroup_threshold *_a = a;
4464 const struct mem_cgroup_threshold *_b = b;
4465
2bff24a3
GT
4466 if (_a->threshold > _b->threshold)
4467 return 1;
4468
4469 if (_a->threshold < _b->threshold)
4470 return -1;
4471
4472 return 0;
2e72b634
KS
4473}
4474
c0ff4b85 4475static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
9490ff27
KH
4476{
4477 struct mem_cgroup_eventfd_list *ev;
4478
2bcf2e92
MH
4479 spin_lock(&memcg_oom_lock);
4480
c0ff4b85 4481 list_for_each_entry(ev, &memcg->oom_notify, list)
3652117f 4482 eventfd_signal(ev->eventfd);
2bcf2e92
MH
4483
4484 spin_unlock(&memcg_oom_lock);
9490ff27
KH
4485 return 0;
4486}
4487
c0ff4b85 4488static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
9490ff27 4489{
7d74b06f
KH
4490 struct mem_cgroup *iter;
4491
c0ff4b85 4492 for_each_mem_cgroup_tree(iter, memcg)
7d74b06f 4493 mem_cgroup_oom_notify_cb(iter);
9490ff27
KH
4494}
4495
59b6f873 4496static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
347c4a87 4497 struct eventfd_ctx *eventfd, const char *args, enum res_type type)
2e72b634 4498{
2c488db2
KS
4499 struct mem_cgroup_thresholds *thresholds;
4500 struct mem_cgroup_threshold_ary *new;
3e32cb2e
JW
4501 unsigned long threshold;
4502 unsigned long usage;
2c488db2 4503 int i, size, ret;
2e72b634 4504
650c5e56 4505 ret = page_counter_memparse(args, "-1", &threshold);
2e72b634
KS
4506 if (ret)
4507 return ret;
4508
4509 mutex_lock(&memcg->thresholds_lock);
2c488db2 4510
05b84301 4511 if (type == _MEM) {
2c488db2 4512 thresholds = &memcg->thresholds;
ce00a967 4513 usage = mem_cgroup_usage(memcg, false);
05b84301 4514 } else if (type == _MEMSWAP) {
2c488db2 4515 thresholds = &memcg->memsw_thresholds;
ce00a967 4516 usage = mem_cgroup_usage(memcg, true);
05b84301 4517 } else
2e72b634
KS
4518 BUG();
4519
2e72b634 4520 /* Check if a threshold crossed before adding a new one */
2c488db2 4521 if (thresholds->primary)
2e72b634
KS
4522 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
4523
2c488db2 4524 size = thresholds->primary ? thresholds->primary->size + 1 : 1;
2e72b634
KS
4525
4526 /* Allocate memory for new array of thresholds */
67b8046f 4527 new = kmalloc(struct_size(new, entries, size), GFP_KERNEL);
2c488db2 4528 if (!new) {
2e72b634
KS
4529 ret = -ENOMEM;
4530 goto unlock;
4531 }
2c488db2 4532 new->size = size;
2e72b634
KS
4533
4534 /* Copy thresholds (if any) to new array */
e90342e6
GS
4535 if (thresholds->primary)
4536 memcpy(new->entries, thresholds->primary->entries,
4537 flex_array_size(new, entries, size - 1));
2c488db2 4538
2e72b634 4539 /* Add new threshold */
2c488db2
KS
4540 new->entries[size - 1].eventfd = eventfd;
4541 new->entries[size - 1].threshold = threshold;
2e72b634
KS
4542
4543 /* Sort thresholds. Registering of new threshold isn't time-critical */
61e604e6 4544 sort(new->entries, size, sizeof(*new->entries),
2e72b634
KS
4545 compare_thresholds, NULL);
4546
4547 /* Find current threshold */
2c488db2 4548 new->current_threshold = -1;
2e72b634 4549 for (i = 0; i < size; i++) {
748dad36 4550 if (new->entries[i].threshold <= usage) {
2e72b634 4551 /*
2c488db2
KS
4552 * new->current_threshold will not be used until
4553 * rcu_assign_pointer(), so it's safe to increment
2e72b634
KS
4554 * it here.
4555 */
2c488db2 4556 ++new->current_threshold;
748dad36
SZ
4557 } else
4558 break;
2e72b634
KS
4559 }
4560
2c488db2
KS
4561 /* Free old spare buffer and save old primary buffer as spare */
4562 kfree(thresholds->spare);
4563 thresholds->spare = thresholds->primary;
4564
4565 rcu_assign_pointer(thresholds->primary, new);
2e72b634 4566
907860ed 4567 /* To be sure that nobody uses thresholds */
2e72b634
KS
4568 synchronize_rcu();
4569
2e72b634
KS
4570unlock:
4571 mutex_unlock(&memcg->thresholds_lock);
4572
4573 return ret;
4574}
4575
59b6f873 4576static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
347c4a87
TH
4577 struct eventfd_ctx *eventfd, const char *args)
4578{
59b6f873 4579 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
347c4a87
TH
4580}
4581
59b6f873 4582static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
347c4a87
TH
4583 struct eventfd_ctx *eventfd, const char *args)
4584{
59b6f873 4585 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
347c4a87
TH
4586}
4587
59b6f873 4588static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
347c4a87 4589 struct eventfd_ctx *eventfd, enum res_type type)
2e72b634 4590{
2c488db2
KS
4591 struct mem_cgroup_thresholds *thresholds;
4592 struct mem_cgroup_threshold_ary *new;
3e32cb2e 4593 unsigned long usage;
7d36665a 4594 int i, j, size, entries;
2e72b634
KS
4595
4596 mutex_lock(&memcg->thresholds_lock);
05b84301
JW
4597
4598 if (type == _MEM) {
2c488db2 4599 thresholds = &memcg->thresholds;
ce00a967 4600 usage = mem_cgroup_usage(memcg, false);
05b84301 4601 } else if (type == _MEMSWAP) {
2c488db2 4602 thresholds = &memcg->memsw_thresholds;
ce00a967 4603 usage = mem_cgroup_usage(memcg, true);
05b84301 4604 } else
2e72b634
KS
4605 BUG();
4606
371528ca
AV
4607 if (!thresholds->primary)
4608 goto unlock;
4609
2e72b634
KS
4610 /* Check if a threshold crossed before removing */
4611 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
4612
4613 /* Calculate new number of threshold */
7d36665a 4614 size = entries = 0;
2c488db2
KS
4615 for (i = 0; i < thresholds->primary->size; i++) {
4616 if (thresholds->primary->entries[i].eventfd != eventfd)
2e72b634 4617 size++;
7d36665a
CX
4618 else
4619 entries++;
2e72b634
KS
4620 }
4621
2c488db2 4622 new = thresholds->spare;
907860ed 4623
7d36665a
CX
4624 /* If no items related to eventfd have been cleared, nothing to do */
4625 if (!entries)
4626 goto unlock;
4627
2e72b634
KS
4628 /* Set thresholds array to NULL if we don't have thresholds */
4629 if (!size) {
2c488db2
KS
4630 kfree(new);
4631 new = NULL;
907860ed 4632 goto swap_buffers;
2e72b634
KS
4633 }
4634
2c488db2 4635 new->size = size;
2e72b634
KS
4636
4637 /* Copy thresholds and find current threshold */
2c488db2
KS
4638 new->current_threshold = -1;
4639 for (i = 0, j = 0; i < thresholds->primary->size; i++) {
4640 if (thresholds->primary->entries[i].eventfd == eventfd)
2e72b634
KS
4641 continue;
4642
2c488db2 4643 new->entries[j] = thresholds->primary->entries[i];
748dad36 4644 if (new->entries[j].threshold <= usage) {
2e72b634 4645 /*
2c488db2 4646 * new->current_threshold will not be used
2e72b634
KS
4647 * until rcu_assign_pointer(), so it's safe to increment
4648 * it here.
4649 */
2c488db2 4650 ++new->current_threshold;
2e72b634
KS
4651 }
4652 j++;
4653 }
4654
907860ed 4655swap_buffers:
2c488db2
KS
4656 /* Swap primary and spare array */
4657 thresholds->spare = thresholds->primary;
8c757763 4658
2c488db2 4659 rcu_assign_pointer(thresholds->primary, new);
2e72b634 4660
907860ed 4661 /* To be sure that nobody uses thresholds */
2e72b634 4662 synchronize_rcu();
6611d8d7
MC
4663
4664 /* If all events are unregistered, free the spare array */
4665 if (!new) {
4666 kfree(thresholds->spare);
4667 thresholds->spare = NULL;
4668 }
371528ca 4669unlock:
2e72b634 4670 mutex_unlock(&memcg->thresholds_lock);
2e72b634 4671}
c1e862c1 4672
59b6f873 4673static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
347c4a87
TH
4674 struct eventfd_ctx *eventfd)
4675{
59b6f873 4676 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
347c4a87
TH
4677}
4678
59b6f873 4679static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
347c4a87
TH
4680 struct eventfd_ctx *eventfd)
4681{
59b6f873 4682 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
347c4a87
TH
4683}
4684
59b6f873 4685static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
347c4a87 4686 struct eventfd_ctx *eventfd, const char *args)
9490ff27 4687{
9490ff27 4688 struct mem_cgroup_eventfd_list *event;
9490ff27 4689
9490ff27
KH
4690 event = kmalloc(sizeof(*event), GFP_KERNEL);
4691 if (!event)
4692 return -ENOMEM;
4693
1af8efe9 4694 spin_lock(&memcg_oom_lock);
9490ff27
KH
4695
4696 event->eventfd = eventfd;
4697 list_add(&event->list, &memcg->oom_notify);
4698
4699 /* already in OOM ? */
c2b42d3c 4700 if (memcg->under_oom)
3652117f 4701 eventfd_signal(eventfd);
1af8efe9 4702 spin_unlock(&memcg_oom_lock);
9490ff27
KH
4703
4704 return 0;
4705}
4706
59b6f873 4707static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
347c4a87 4708 struct eventfd_ctx *eventfd)
9490ff27 4709{
9490ff27 4710 struct mem_cgroup_eventfd_list *ev, *tmp;
9490ff27 4711
1af8efe9 4712 spin_lock(&memcg_oom_lock);
9490ff27 4713
c0ff4b85 4714 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
9490ff27
KH
4715 if (ev->eventfd == eventfd) {
4716 list_del(&ev->list);
4717 kfree(ev);
4718 }
4719 }
4720
1af8efe9 4721 spin_unlock(&memcg_oom_lock);
9490ff27
KH
4722}
4723
2da8ca82 4724static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
3c11ecf4 4725{
aa9694bb 4726 struct mem_cgroup *memcg = mem_cgroup_from_seq(sf);
3c11ecf4 4727
17c56de6 4728 seq_printf(sf, "oom_kill_disable %d\n", READ_ONCE(memcg->oom_kill_disable));
c2b42d3c 4729 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
fe6bdfc8
RG
4730 seq_printf(sf, "oom_kill %lu\n",
4731 atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL]));
3c11ecf4
KH
4732 return 0;
4733}
4734
182446d0 4735static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
3c11ecf4
KH
4736 struct cftype *cft, u64 val)
4737{
182446d0 4738 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3c11ecf4
KH
4739
4740 /* cannot set to root cgroup and only 0 and 1 are allowed */
a4792030 4741 if (mem_cgroup_is_root(memcg) || !((val == 0) || (val == 1)))
3c11ecf4
KH
4742 return -EINVAL;
4743
17c56de6 4744 WRITE_ONCE(memcg->oom_kill_disable, val);
4d845ebf 4745 if (!val)
c0ff4b85 4746 memcg_oom_recover(memcg);
3dae7fec 4747
3c11ecf4
KH
4748 return 0;
4749}
4750
52ebea74
TH
4751#ifdef CONFIG_CGROUP_WRITEBACK
4752
3a8e9ac8
TH
4753#include <trace/events/writeback.h>
4754
841710aa
TH
4755static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4756{
4757 return wb_domain_init(&memcg->cgwb_domain, gfp);
4758}
4759
4760static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4761{
4762 wb_domain_exit(&memcg->cgwb_domain);
4763}
4764
2529bb3a
TH
4765static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4766{
4767 wb_domain_size_changed(&memcg->cgwb_domain);
4768}
4769
841710aa
TH
4770struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
4771{
4772 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4773
4774 if (!memcg->css.parent)
4775 return NULL;
4776
4777 return &memcg->cgwb_domain;
4778}
4779
c2aa723a
TH
4780/**
4781 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
4782 * @wb: bdi_writeback in question
c5edf9cd
TH
4783 * @pfilepages: out parameter for number of file pages
4784 * @pheadroom: out parameter for number of allocatable pages according to memcg
c2aa723a
TH
4785 * @pdirty: out parameter for number of dirty pages
4786 * @pwriteback: out parameter for number of pages under writeback
4787 *
c5edf9cd
TH
4788 * Determine the numbers of file, headroom, dirty, and writeback pages in
4789 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom
4790 * is a bit more involved.
c2aa723a 4791 *
c5edf9cd
TH
4792 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the
4793 * headroom is calculated as the lowest headroom of itself and the
4794 * ancestors. Note that this doesn't consider the actual amount of
4795 * available memory in the system. The caller should further cap
4796 * *@pheadroom accordingly.
c2aa723a 4797 */
c5edf9cd
TH
4798void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
4799 unsigned long *pheadroom, unsigned long *pdirty,
4800 unsigned long *pwriteback)
c2aa723a
TH
4801{
4802 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4803 struct mem_cgroup *parent;
c2aa723a 4804
d9b3ce87 4805 mem_cgroup_flush_stats_ratelimited(memcg);
c2aa723a 4806
2d146aa3
JW
4807 *pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
4808 *pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
4809 *pfilepages = memcg_page_state(memcg, NR_INACTIVE_FILE) +
4810 memcg_page_state(memcg, NR_ACTIVE_FILE);
c2aa723a 4811
2d146aa3 4812 *pheadroom = PAGE_COUNTER_MAX;
c2aa723a 4813 while ((parent = parent_mem_cgroup(memcg))) {
15b42562 4814 unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
d1663a90 4815 READ_ONCE(memcg->memory.high));
c2aa723a
TH
4816 unsigned long used = page_counter_read(&memcg->memory);
4817
c5edf9cd 4818 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
c2aa723a
TH
4819 memcg = parent;
4820 }
c2aa723a
TH
4821}
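/*
 * Worked headroom example for the walk above (illustrative limits):
 * with a child memcg at max = 200M, high = 150M, used = 120M under a
 * parent at max = 1G, high unset and used = 800M, the child contributes
 * min(200M, 150M) - 120M = 30M and the parent 1G - 800M = 224M, so
 * *pheadroom ends up as the smaller of the two, 30M. The caller still
 * has to clamp this against the memory actually available in the
 * system.
 */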
4822
97b27821
TH
4823/*
4824 * Foreign dirty flushing
4825 *
4826 * There's an inherent mismatch between memcg and writeback. The former
f0953a1b 4827 * tracks ownership per-page while the latter per-inode. This was a
97b27821
TH
4828 * deliberate design decision because honoring per-page ownership in the
4829 * writeback path is complicated, may lead to higher CPU and IO overheads
4830 * and deemed unnecessary given that write-sharing an inode across
4831 * different cgroups isn't a common use-case.
4832 *
4833 * Combined with inode majority-writer ownership switching, this works well
4834 * enough in most cases but there are some pathological cases. For
4835 * example, let's say there are two cgroups A and B which keep writing to
4836 * different but confined parts of the same inode. B owns the inode and
4837 * A's memory is limited far below B's. A's dirty ratio can rise enough to
4838 * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid
4839 * triggering background writeback. A will be slowed down without a way to
4840 * make writeback of the dirty pages happen.
4841 *
f0953a1b 4842 * Conditions like the above can lead to a cgroup getting repeatedly and
97b27821 4843 * severely throttled after making some progress after each
f0953a1b 4844 * dirty_expire_interval while the underlying IO device is almost
97b27821
TH
4845 * completely idle.
4846 *
4847 * Solving this problem completely requires matching the ownership tracking
4848 * granularities between memcg and writeback in either direction. However,
4849 * the more egregious behaviors can be avoided by simply remembering the
4850 * most recent foreign dirtying events and initiating remote flushes on
4851 * them when local writeback isn't enough to keep the memory clean enough.
4852 *
4853 * The following two functions implement such mechanism. When a foreign
4854 * page - a page whose memcg and writeback ownerships don't match - is
4855 * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning
4856 * bdi_writeback on the page owning memcg. When balance_dirty_pages()
4857 * decides that the memcg needs to sleep due to high dirty ratio, it calls
4858 * mem_cgroup_flush_foreign() which queues writeback on the recorded
4859 * foreign bdi_writebacks which haven't expired. Both the numbers of
4860 * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
4861 * limited to MEMCG_CGWB_FRN_CNT.
4862 *
4863 * The mechanism only remembers IDs and doesn't hold any object references.
4864 * As being wrong occasionally doesn't matter, updates and accesses to the
4865 * records are lockless and racy.
4866 */
9d8053fc 4867void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
97b27821
TH
4868 struct bdi_writeback *wb)
4869{
9d8053fc 4870 struct mem_cgroup *memcg = folio_memcg(folio);
97b27821
TH
4871 struct memcg_cgwb_frn *frn;
4872 u64 now = get_jiffies_64();
4873 u64 oldest_at = now;
4874 int oldest = -1;
4875 int i;
4876
9d8053fc 4877 trace_track_foreign_dirty(folio, wb);
3a8e9ac8 4878
97b27821
TH
4879 /*
4880 * Pick the slot to use. If there is already a slot for @wb, keep
4881 * using it. If not replace the oldest one which isn't being
4882 * written out.
4883 */
4884 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4885 frn = &memcg->cgwb_frn[i];
4886 if (frn->bdi_id == wb->bdi->id &&
4887 frn->memcg_id == wb->memcg_css->id)
4888 break;
4889 if (time_before64(frn->at, oldest_at) &&
4890 atomic_read(&frn->done.cnt) == 1) {
4891 oldest = i;
4892 oldest_at = frn->at;
4893 }
4894 }
4895
4896 if (i < MEMCG_CGWB_FRN_CNT) {
4897 /*
4898 * Re-using an existing one. Update timestamp lazily to
4899 * avoid making the cacheline hot. We want them to be
4900 * reasonably up-to-date and significantly shorter than
4901 * dirty_expire_interval as that's what expires the record.
4902 * Use the shorter of 1s and dirty_expire_interval / 8.
4903 */
4904 unsigned long update_intv =
4905 min_t(unsigned long, HZ,
4906 msecs_to_jiffies(dirty_expire_interval * 10) / 8);
4907
4908 if (time_before64(frn->at, now - update_intv))
4909 frn->at = now;
4910 } else if (oldest >= 0) {
4911 /* replace the oldest free one */
4912 frn = &memcg->cgwb_frn[oldest];
4913 frn->bdi_id = wb->bdi->id;
4914 frn->memcg_id = wb->memcg_css->id;
4915 frn->at = now;
4916 }
4917}
4918
4919/* issue foreign writeback flushes for recorded foreign dirtying events */
4920void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
4921{
4922 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4923 unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10);
4924 u64 now = jiffies_64;
4925 int i;
4926
4927 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4928 struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i];
4929
4930 /*
4931 * If the record is older than dirty_expire_interval,
4932 * writeback on it has already started. No need to kick it
4933 * off again. Also, don't start a new one if there's
4934 * already one in flight.
4935 */
4936 if (time_after64(frn->at, now - intv) &&
4937 atomic_read(&frn->done.cnt) == 1) {
4938 frn->at = 0;
3a8e9ac8 4939 trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id);
7490a2d2 4940 cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id,
97b27821
TH
4941 WB_REASON_FOREIGN_FLUSH,
4942 &frn->done);
4943 }
4944 }
4945}
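/*
 * Putting the two halves together (illustrative scenario): if cgroup A
 * keeps dirtying pages of an inode whose writeback is owned by cgroup
 * B's bdi_writeback, mem_cgroup_track_foreign_dirty_slowpath() records
 * B's (bdi id, memcg css id) pair in one of A's cgwb_frn slots; when A
 * later stalls in balance_dirty_pages(), mem_cgroup_flush_foreign()
 * finds the still-fresh record and kicks cgroup_writeback_by_id() on
 * B's writeback so that A's dirty pages actually get cleaned.
 */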
4946
841710aa
TH
4947#else /* CONFIG_CGROUP_WRITEBACK */
4948
4949static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4950{
4951 return 0;
4952}
4953
4954static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4955{
4956}
4957
2529bb3a
TH
4958static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4959{
4960}
4961
52ebea74
TH
4962#endif /* CONFIG_CGROUP_WRITEBACK */
4963
3bc942f3
TH
4964/*
4965 * DO NOT USE IN NEW FILES.
4966 *
4967 * "cgroup.event_control" implementation.
4968 *
4969 * This is way over-engineered. It tries to support fully configurable
4970 * events for each user. Such level of flexibility is completely
4971 * unnecessary especially in the light of the planned unified hierarchy.
4972 *
4973 * Please deprecate this and replace with something simpler if at all
4974 * possible.
4975 */
4976
79bd9814
TH
4977/*
4978 * Unregister event and free resources.
4979 *
4980 * Gets called from workqueue.
4981 */
3bc942f3 4982static void memcg_event_remove(struct work_struct *work)
79bd9814 4983{
4984 struct mem_cgroup_event *event =
4985 container_of(work, struct mem_cgroup_event, remove);
59b6f873 4986 struct mem_cgroup *memcg = event->memcg;
4987
4988 remove_wait_queue(event->wqh, &event->wait);
4989
59b6f873 4990 event->unregister_event(memcg, event->eventfd);
79bd9814
TH
4991
4992 /* Notify userspace the event is going away. */
3652117f 4993 eventfd_signal(event->eventfd);
79bd9814
TH
4994
4995 eventfd_ctx_put(event->eventfd);
4996 kfree(event);
59b6f873 4997 css_put(&memcg->css);
79bd9814
TH
4998}
4999
5000/*
a9a08845 5001 * Gets called on EPOLLHUP on eventfd when user closes it.
5002 *
5003 * Called with wqh->lock held and interrupts disabled.
5004 */
ac6424b9 5005static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode,
3bc942f3 5006 int sync, void *key)
79bd9814 5007{
3bc942f3
TH
5008 struct mem_cgroup_event *event =
5009 container_of(wait, struct mem_cgroup_event, wait);
59b6f873 5010 struct mem_cgroup *memcg = event->memcg;
3ad6f93e 5011 __poll_t flags = key_to_poll(key);
79bd9814 5012
a9a08845 5013 if (flags & EPOLLHUP) {
5014 /*
5015 * If the event has been detached at cgroup removal, we
5016 * can simply return knowing the other side will cleanup
5017 * for us.
5018 *
5019 * We can't race against event freeing since the other
5020 * side will require wqh->lock via remove_wait_queue(),
5021 * which we hold.
5022 */
fba94807 5023 spin_lock(&memcg->event_list_lock);
79bd9814
TH
5024 if (!list_empty(&event->list)) {
5025 list_del_init(&event->list);
5026 /*
5027 * We are in atomic context, but cgroup_event_remove()
5028 * may sleep, so we have to call it in workqueue.
5029 */
5030 schedule_work(&event->remove);
5031 }
fba94807 5032 spin_unlock(&memcg->event_list_lock);
79bd9814
TH
5033 }
5034
5035 return 0;
5036}
5037
3bc942f3 5038static void memcg_event_ptable_queue_proc(struct file *file,
79bd9814
TH
5039 wait_queue_head_t *wqh, poll_table *pt)
5040{
3bc942f3
TH
5041 struct mem_cgroup_event *event =
5042 container_of(pt, struct mem_cgroup_event, pt);
79bd9814
TH
5043
5044 event->wqh = wqh;
5045 add_wait_queue(wqh, &event->wait);
5046}
5047
5048/*
5049 * DO NOT USE IN NEW FILES.
5050 *
5051 * Parse input and register new cgroup event handler.
5052 *
5053 * Input must be in format '<event_fd> <control_fd> <args>'.
5054 * Interpretation of args is defined by control file implementation.
5055 */
451af504
TH
5056static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
5057 char *buf, size_t nbytes, loff_t off)
79bd9814 5058{
451af504 5059 struct cgroup_subsys_state *css = of_css(of);
fba94807 5060 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3bc942f3 5061 struct mem_cgroup_event *event;
79bd9814
TH
5062 struct cgroup_subsys_state *cfile_css;
5063 unsigned int efd, cfd;
5064 struct fd efile;
5065 struct fd cfile;
4a7ba45b 5066 struct dentry *cdentry;
fba94807 5067 const char *name;
79bd9814
TH
5068 char *endp;
5069 int ret;
5070
2343e88d
SAS
5071 if (IS_ENABLED(CONFIG_PREEMPT_RT))
5072 return -EOPNOTSUPP;
5073
451af504
TH
5074 buf = strstrip(buf);
5075
5076 efd = simple_strtoul(buf, &endp, 10);
79bd9814
TH
5077 if (*endp != ' ')
5078 return -EINVAL;
451af504 5079 buf = endp + 1;
79bd9814 5080
451af504 5081 cfd = simple_strtoul(buf, &endp, 10);
79bd9814
TH
5082 if ((*endp != ' ') && (*endp != '\0'))
5083 return -EINVAL;
451af504 5084 buf = endp + 1;
79bd9814
TH
5085
5086 event = kzalloc(sizeof(*event), GFP_KERNEL);
5087 if (!event)
5088 return -ENOMEM;
5089
59b6f873 5090 event->memcg = memcg;
79bd9814 5091 INIT_LIST_HEAD(&event->list);
3bc942f3
TH
5092 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
5093 init_waitqueue_func_entry(&event->wait, memcg_event_wake);
5094 INIT_WORK(&event->remove, memcg_event_remove);
79bd9814
TH
5095
5096 efile = fdget(efd);
5097 if (!efile.file) {
5098 ret = -EBADF;
5099 goto out_kfree;
5100 }
5101
5102 event->eventfd = eventfd_ctx_fileget(efile.file);
5103 if (IS_ERR(event->eventfd)) {
5104 ret = PTR_ERR(event->eventfd);
5105 goto out_put_efile;
5106 }
5107
5108 cfile = fdget(cfd);
5109 if (!cfile.file) {
5110 ret = -EBADF;
5111 goto out_put_eventfd;
5112 }
5113
        /* the process needs read permission on the control file */
5115 /* AV: shouldn't we check that it's been opened for read instead? */
02f92b38 5116 ret = file_permission(cfile.file, MAY_READ);
79bd9814
TH
5117 if (ret < 0)
5118 goto out_put_cfile;
5119
4a7ba45b
TH
5120 /*
5121 * The control file must be a regular cgroup1 file. As a regular cgroup
5122 * file can't be renamed, it's safe to access its name afterwards.
5123 */
5124 cdentry = cfile.file->f_path.dentry;
5125 if (cdentry->d_sb->s_type != &cgroup_fs_type || !d_is_reg(cdentry)) {
5126 ret = -EINVAL;
5127 goto out_put_cfile;
5128 }
5129
fba94807
TH
5130 /*
5131 * Determine the event callbacks and set them in @event. This used
5132 * to be done via struct cftype but cgroup core no longer knows
5133 * about these events. The following is crude but the whole thing
5134 * is for compatibility anyway.
5135 *
5136 * DO NOT ADD NEW FILES.
fba94807 5137 */
4a7ba45b 5138 name = cdentry->d_name.name;
fba94807
TH
5139
5140 if (!strcmp(name, "memory.usage_in_bytes")) {
5141 event->register_event = mem_cgroup_usage_register_event;
5142 event->unregister_event = mem_cgroup_usage_unregister_event;
5143 } else if (!strcmp(name, "memory.oom_control")) {
5144 event->register_event = mem_cgroup_oom_register_event;
5145 event->unregister_event = mem_cgroup_oom_unregister_event;
5146 } else if (!strcmp(name, "memory.pressure_level")) {
5147 event->register_event = vmpressure_register_event;
5148 event->unregister_event = vmpressure_unregister_event;
5149 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
5150 event->register_event = memsw_cgroup_usage_register_event;
5151 event->unregister_event = memsw_cgroup_usage_unregister_event;
fba94807
TH
5152 } else {
5153 ret = -EINVAL;
5154 goto out_put_cfile;
5155 }
5156
79bd9814 5157 /*
5158 * Verify @cfile should belong to @css. Also, remaining events are
5159 * automatically removed on cgroup destruction but the removal is
5160 * asynchronous, so take an extra ref on @css.
79bd9814 5161 */
4a7ba45b 5162 cfile_css = css_tryget_online_from_dir(cdentry->d_parent,
ec903c0c 5163 &memory_cgrp_subsys);
79bd9814 5164 ret = -EINVAL;
5a17f543 5165 if (IS_ERR(cfile_css))
79bd9814 5166 goto out_put_cfile;
5a17f543
TH
5167 if (cfile_css != css) {
5168 css_put(cfile_css);
79bd9814 5169 goto out_put_cfile;
5a17f543 5170 }
79bd9814 5171
451af504 5172 ret = event->register_event(memcg, event->eventfd, buf);
79bd9814
TH
5173 if (ret)
5174 goto out_put_css;
5175
9965ed17 5176 vfs_poll(efile.file, &event->pt);
79bd9814 5177
4ba9515d 5178 spin_lock_irq(&memcg->event_list_lock);
fba94807 5179 list_add(&event->list, &memcg->event_list);
4ba9515d 5180 spin_unlock_irq(&memcg->event_list_lock);
79bd9814
TH
5181
5182 fdput(cfile);
5183 fdput(efile);
5184
451af504 5185 return nbytes;
79bd9814
TH
5186
5187out_put_css:
b5557c4c 5188 css_put(css);
79bd9814
TH
5189out_put_cfile:
5190 fdput(cfile);
5191out_put_eventfd:
5192 eventfd_ctx_put(event->eventfd);
5193out_put_efile:
5194 fdput(efile);
5195out_kfree:
5196 kfree(event);
5197
5198 return ret;
5199}
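/*
 * Illustrative userspace sketch (not part of this file): how the legacy
 * cgroup.event_control interface above is typically driven.  The cgroup
 * path is hypothetical; the string written is the "<event_fd> <control_fd>
 * <args>" format parsed by memcg_write_event_control(), here registering a
 * usage threshold of 100M against memory.usage_in_bytes:
 *
 *	int efd = eventfd(0, 0);
 *	int cfd = open("/sys/fs/cgroup/memory/grp/memory.usage_in_bytes",
 *		       O_RDONLY);
 *	int ctl = open("/sys/fs/cgroup/memory/grp/cgroup.event_control",
 *		       O_WRONLY);
 *	char buf[64];
 *	uint64_t cnt;
 *
 *	snprintf(buf, sizeof(buf), "%d %d %llu", efd, cfd,
 *		 (unsigned long long)(100 << 20));
 *	write(ctl, buf, strlen(buf));
 *	read(efd, &cnt, sizeof(cnt));	(blocks until the threshold fires)
 */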
5200
bc3dcb85 5201#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_SLUB_DEBUG)
c29b5b3d
MS
5202static int mem_cgroup_slab_show(struct seq_file *m, void *p)
5203{
5204 /*
5205 * Deprecated.
df4ae285 5206 * Please, take a look at tools/cgroup/memcg_slabinfo.py .
c29b5b3d
MS
5207 */
5208 return 0;
5209}
5210#endif
5211
dddb44ff
YA
5212static int memory_stat_show(struct seq_file *m, void *v);
5213
241994ed 5214static struct cftype mem_cgroup_legacy_files[] = {
8cdea7c0 5215 {
0eea1030 5216 .name = "usage_in_bytes",
8c7c6e34 5217 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
791badbd 5218 .read_u64 = mem_cgroup_read_u64,
8cdea7c0 5219 },
c84872e1
PE
5220 {
5221 .name = "max_usage_in_bytes",
8c7c6e34 5222 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
6770c64e 5223 .write = mem_cgroup_reset,
791badbd 5224 .read_u64 = mem_cgroup_read_u64,
c84872e1 5225 },
8cdea7c0 5226 {
0eea1030 5227 .name = "limit_in_bytes",
8c7c6e34 5228 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
451af504 5229 .write = mem_cgroup_write,
791badbd 5230 .read_u64 = mem_cgroup_read_u64,
8cdea7c0 5231 },
296c81d8
BS
5232 {
5233 .name = "soft_limit_in_bytes",
5234 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
451af504 5235 .write = mem_cgroup_write,
791badbd 5236 .read_u64 = mem_cgroup_read_u64,
296c81d8 5237 },
8cdea7c0
BS
5238 {
5239 .name = "failcnt",
8c7c6e34 5240 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
6770c64e 5241 .write = mem_cgroup_reset,
791badbd 5242 .read_u64 = mem_cgroup_read_u64,
8cdea7c0 5243 },
d2ceb9b7
KH
5244 {
5245 .name = "stat",
dddb44ff 5246 .seq_show = memory_stat_show,
d2ceb9b7 5247 },
c1e862c1
KH
5248 {
5249 .name = "force_empty",
6770c64e 5250 .write = mem_cgroup_force_empty_write,
c1e862c1 5251 },
18f59ea7
BS
5252 {
5253 .name = "use_hierarchy",
5254 .write_u64 = mem_cgroup_hierarchy_write,
5255 .read_u64 = mem_cgroup_hierarchy_read,
5256 },
79bd9814 5257 {
3bc942f3 5258 .name = "cgroup.event_control", /* XXX: for compat */
451af504 5259 .write = memcg_write_event_control,
7dbdb199 5260 .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
79bd9814 5261 },
a7885eb8
KM
5262 {
5263 .name = "swappiness",
5264 .read_u64 = mem_cgroup_swappiness_read,
5265 .write_u64 = mem_cgroup_swappiness_write,
5266 },
7dc74be0
DN
5267 {
5268 .name = "move_charge_at_immigrate",
5269 .read_u64 = mem_cgroup_move_charge_read,
5270 .write_u64 = mem_cgroup_move_charge_write,
5271 },
9490ff27
KH
5272 {
5273 .name = "oom_control",
2da8ca82 5274 .seq_show = mem_cgroup_oom_control_read,
3c11ecf4 5275 .write_u64 = mem_cgroup_oom_control_write,
9490ff27 5276 },
70ddf637
AV
5277 {
5278 .name = "pressure_level",
6b0ba2ab 5279 .seq_show = mem_cgroup_dummy_seq_show,
70ddf637 5280 },
406eb0c9
YH
5281#ifdef CONFIG_NUMA
5282 {
5283 .name = "numa_stat",
2da8ca82 5284 .seq_show = memcg_numa_stat_show,
406eb0c9
YH
5285 },
5286#endif
4597648f
MH
5287 {
5288 .name = "kmem.limit_in_bytes",
5289 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
5290 .write = mem_cgroup_write,
5291 .read_u64 = mem_cgroup_read_u64,
5292 },
510fc4e1
GC
5293 {
5294 .name = "kmem.usage_in_bytes",
5295 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
791badbd 5296 .read_u64 = mem_cgroup_read_u64,
510fc4e1
GC
5297 },
5298 {
5299 .name = "kmem.failcnt",
5300 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
6770c64e 5301 .write = mem_cgroup_reset,
791badbd 5302 .read_u64 = mem_cgroup_read_u64,
510fc4e1
GC
5303 },
5304 {
5305 .name = "kmem.max_usage_in_bytes",
5306 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
6770c64e 5307 .write = mem_cgroup_reset,
791badbd 5308 .read_u64 = mem_cgroup_read_u64,
510fc4e1 5309 },
bc3dcb85 5310#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_SLUB_DEBUG)
749c5415
GC
5311 {
5312 .name = "kmem.slabinfo",
c29b5b3d 5313 .seq_show = mem_cgroup_slab_show,
749c5415
GC
5314 },
5315#endif
d55f90bf
VD
5316 {
5317 .name = "kmem.tcp.limit_in_bytes",
5318 .private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
5319 .write = mem_cgroup_write,
5320 .read_u64 = mem_cgroup_read_u64,
5321 },
5322 {
5323 .name = "kmem.tcp.usage_in_bytes",
5324 .private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
5325 .read_u64 = mem_cgroup_read_u64,
5326 },
5327 {
5328 .name = "kmem.tcp.failcnt",
5329 .private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
5330 .write = mem_cgroup_reset,
5331 .read_u64 = mem_cgroup_read_u64,
5332 },
5333 {
5334 .name = "kmem.tcp.max_usage_in_bytes",
5335 .private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
5336 .write = mem_cgroup_reset,
5337 .read_u64 = mem_cgroup_read_u64,
5338 },
6bc10349 5339 { }, /* terminate */
af36f906 5340};
8c7c6e34 5341
5342/*
5343 * Private memory cgroup IDR
5344 *
5345 * Swap-out records and page cache shadow entries need to store memcg
5346 * references in constrained space, so we maintain an ID space that is
5347 * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of
5348 * memory-controlled cgroups to 64k.
5349 *
b8f2935f 5350 * However, there usually are many references to the offline CSS after
5351 * the cgroup has been destroyed, such as page cache or reclaimable
5352 * slab objects, that don't need to hang on to the ID. We want to keep
5353 * those dead CSS from occupying IDs, or we might quickly exhaust the
5354 * relatively small ID space and prevent the creation of new cgroups
5355 * even when there are much fewer than 64k cgroups - possibly none.
5356 *
5357 * Maintain a private 16-bit ID space for memcg, and allow the ID to
5358 * be freed and recycled when it's no longer needed, which is usually
5359 * when the CSS is offlined.
5360 *
5361 * The only exception to that are records of swapped out tmpfs/shmem
5362 * pages that need to be attributed to live ancestors on swapin. But
5363 * those references are manageable from userspace.
5364 */
5365
60b1e24c 5366#define MEM_CGROUP_ID_MAX ((1UL << MEM_CGROUP_ID_SHIFT) - 1)
73f576c0
JW
5367static DEFINE_IDR(mem_cgroup_idr);
5368
7e97de0b
KT
5369static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
5370{
5371 if (memcg->id.id > 0) {
5372 idr_remove(&mem_cgroup_idr, memcg->id.id);
5373 memcg->id.id = 0;
5374 }
5375}
5376
c1514c0a
VF
5377static void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg,
5378 unsigned int n)
73f576c0 5379{
1c2d479a 5380 refcount_add(n, &memcg->id.ref);
73f576c0
JW
5381}
5382
615d66c3 5383static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
73f576c0 5384{
1c2d479a 5385 if (refcount_sub_and_test(n, &memcg->id.ref)) {
7e97de0b 5386 mem_cgroup_id_remove(memcg);
73f576c0
JW
5387
5388 /* Memcg ID pins CSS */
5389 css_put(&memcg->css);
5390 }
5391}
5392
615d66c3
VD
5393static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
5394{
5395 mem_cgroup_id_put_many(memcg, 1);
5396}
5397
5398/**
5399 * mem_cgroup_from_id - look up a memcg from a memcg id
5400 * @id: the memcg id to look up
5401 *
5402 * Caller must hold rcu_read_lock().
5403 */
5404struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
5405{
5406 WARN_ON_ONCE(!rcu_read_lock_held());
5407 return idr_find(&mem_cgroup_idr, id);
5408}
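/*
 * Illustrative sketch (not part of this file): a typical caller pins the
 * css before leaving the RCU read-side critical section.  The function
 * name below is hypothetical.
 */
#if 0
static struct mem_cgroup *example_get_memcg_by_id(unsigned short id)
{
        struct mem_cgroup *memcg;

        rcu_read_lock();
        memcg = mem_cgroup_from_id(id);
        if (memcg && !css_tryget_online(&memcg->css))
                memcg = NULL;
        rcu_read_unlock();

        return memcg;   /* the caller drops the reference with css_put() */
}
#endif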
5409
c15187a4
RG
5410#ifdef CONFIG_SHRINKER_DEBUG
5411struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino)
5412{
5413 struct cgroup *cgrp;
5414 struct cgroup_subsys_state *css;
5415 struct mem_cgroup *memcg;
5416
5417 cgrp = cgroup_get_from_id(ino);
fa7e439c 5418 if (IS_ERR(cgrp))
c0f2df49 5419 return ERR_CAST(cgrp);
c15187a4
RG
5420
5421 css = cgroup_get_e_css(cgrp, &memory_cgrp_subsys);
5422 if (css)
5423 memcg = container_of(css, struct mem_cgroup, css);
5424 else
5425 memcg = ERR_PTR(-ENOENT);
5426
5427 cgroup_put(cgrp);
5428
5429 return memcg;
5430}
5431#endif
5432
ef8f2327 5433static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
6d12e2d8
KH
5434{
5435 struct mem_cgroup_per_node *pn;
8c9bb398
WY
5436
5437 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, node);
6d12e2d8
KH
5438 if (!pn)
5439 return 1;
1ecaab2b 5440
7e1c0d6f
SB
5441 pn->lruvec_stats_percpu = alloc_percpu_gfp(struct lruvec_stats_percpu,
5442 GFP_KERNEL_ACCOUNT);
5443 if (!pn->lruvec_stats_percpu) {
00f3ca2c
JW
5444 kfree(pn);
5445 return 1;
5446 }
5447
ef8f2327 5448 lruvec_init(&pn->lruvec);
ef8f2327
MG
5449 pn->memcg = memcg;
5450
54f72fe0 5451 memcg->nodeinfo[node] = pn;
6d12e2d8
KH
5452 return 0;
5453}
5454
ef8f2327 5455static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
1ecaab2b 5456{
00f3ca2c
JW
5457 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
5458
4eaf431f
MH
5459 if (!pn)
5460 return;
5461
7e1c0d6f 5462 free_percpu(pn->lruvec_stats_percpu);
00f3ca2c 5463 kfree(pn);
1ecaab2b
KH
5464}
5465
40e952f9 5466static void __mem_cgroup_free(struct mem_cgroup *memcg)
59927fb9 5467{
c8b2a36f 5468 int node;
59927fb9 5469
675d6c9b
RG
5470 if (memcg->orig_objcg)
5471 obj_cgroup_put(memcg->orig_objcg);
5472
c8b2a36f 5473 for_each_node(node)
ef8f2327 5474 free_mem_cgroup_per_node_info(memcg, node);
410f8e82 5475 kfree(memcg->vmstats);
871789d4 5476 free_percpu(memcg->vmstats_percpu);
8ff69e2c 5477 kfree(memcg);
59927fb9 5478}
3afe36b1 5479
40e952f9
TE
5480static void mem_cgroup_free(struct mem_cgroup *memcg)
5481{
ec1c86b2 5482 lru_gen_exit_memcg(memcg);
40e952f9
TE
5483 memcg_wb_domain_exit(memcg);
5484 __mem_cgroup_free(memcg);
5485}
5486
9cee7e8e 5487static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
8cdea7c0 5488{
9cee7e8e 5489 struct memcg_vmstats_percpu *statc, *pstatc;
d142e3e6 5490 struct mem_cgroup *memcg;
9cee7e8e 5491 int node, cpu;
97b27821 5492 int __maybe_unused i;
11d67612 5493 long error = -ENOMEM;
8cdea7c0 5494
06b2c3b0 5495 memcg = kzalloc(struct_size(memcg, nodeinfo, nr_node_ids), GFP_KERNEL);
c0ff4b85 5496 if (!memcg)
11d67612 5497 return ERR_PTR(error);
0b8f73e1 5498
73f576c0 5499 memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
be740503 5500 1, MEM_CGROUP_ID_MAX + 1, GFP_KERNEL);
11d67612
YS
5501 if (memcg->id.id < 0) {
5502 error = memcg->id.id;
73f576c0 5503 goto fail;
11d67612 5504 }
73f576c0 5505
410f8e82
SB
5506 memcg->vmstats = kzalloc(sizeof(struct memcg_vmstats), GFP_KERNEL);
5507 if (!memcg->vmstats)
5508 goto fail;
5509
3e38e0aa
RG
5510 memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu,
5511 GFP_KERNEL_ACCOUNT);
871789d4 5512 if (!memcg->vmstats_percpu)
0b8f73e1 5513 goto fail;
78fb7466 5514
9cee7e8e
YA
5515 for_each_possible_cpu(cpu) {
5516 if (parent)
5517 pstatc = per_cpu_ptr(parent->vmstats_percpu, cpu);
5518 statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
5519 statc->parent = parent ? pstatc : NULL;
5520 statc->vmstats = memcg->vmstats;
5521 }
5522
3ed28fa1 5523 for_each_node(node)
ef8f2327 5524 if (alloc_mem_cgroup_per_node_info(memcg, node))
0b8f73e1 5525 goto fail;
f64c3f54 5526
0b8f73e1
JW
5527 if (memcg_wb_domain_init(memcg, GFP_KERNEL))
5528 goto fail;
28dbc4b6 5529
f7e1cb6e 5530 INIT_WORK(&memcg->high_work, high_work_func);
d142e3e6 5531 INIT_LIST_HEAD(&memcg->oom_notify);
d142e3e6
GC
5532 mutex_init(&memcg->thresholds_lock);
5533 spin_lock_init(&memcg->move_lock);
70ddf637 5534 vmpressure_init(&memcg->vmpressure);
fba94807
TH
5535 INIT_LIST_HEAD(&memcg->event_list);
5536 spin_lock_init(&memcg->event_list_lock);
d886f4e4 5537 memcg->socket_pressure = jiffies;
84c07d11 5538#ifdef CONFIG_MEMCG_KMEM
900a38f0 5539 memcg->kmemcg_id = -1;
bf4f0599 5540 INIT_LIST_HEAD(&memcg->objcg_list);
900a38f0 5541#endif
52ebea74
TH
5542#ifdef CONFIG_CGROUP_WRITEBACK
5543 INIT_LIST_HEAD(&memcg->cgwb_list);
97b27821
TH
5544 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5545 memcg->cgwb_frn[i].done =
5546 __WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq);
87eaceb3
YS
5547#endif
5548#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5549 spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
5550 INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
5551 memcg->deferred_split_queue.split_queue_len = 0;
52ebea74 5552#endif
ec1c86b2 5553 lru_gen_init_memcg(memcg);
0b8f73e1
JW
5554 return memcg;
5555fail:
7e97de0b 5556 mem_cgroup_id_remove(memcg);
40e952f9 5557 __mem_cgroup_free(memcg);
11d67612 5558 return ERR_PTR(error);
d142e3e6
GC
5559}
5560
0b8f73e1
JW
5561static struct cgroup_subsys_state * __ref
5562mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
d142e3e6 5563{
0b8f73e1 5564 struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
b87d8cef 5565 struct mem_cgroup *memcg, *old_memcg;
d142e3e6 5566
b87d8cef 5567 old_memcg = set_active_memcg(parent);
9cee7e8e 5568 memcg = mem_cgroup_alloc(parent);
b87d8cef 5569 set_active_memcg(old_memcg);
11d67612
YS
5570 if (IS_ERR(memcg))
5571 return ERR_CAST(memcg);
d142e3e6 5572
d1663a90 5573 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
2178e20c 5574 WRITE_ONCE(memcg->soft_limit, PAGE_COUNTER_MAX);
f4840ccf
JW
5575#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
5576 memcg->zswap_max = PAGE_COUNTER_MAX;
501a06fe
NP
5577 WRITE_ONCE(memcg->zswap_writeback,
5578 !parent || READ_ONCE(parent->zswap_writeback));
f4840ccf 5579#endif
4b82ab4f 5580 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
0b8f73e1 5581 if (parent) {
82b3aa26 5582 WRITE_ONCE(memcg->swappiness, mem_cgroup_swappiness(parent));
17c56de6 5583 WRITE_ONCE(memcg->oom_kill_disable, READ_ONCE(parent->oom_kill_disable));
bef8620c 5584
3e32cb2e 5585 page_counter_init(&memcg->memory, &parent->memory);
37e84351 5586 page_counter_init(&memcg->swap, &parent->swap);
3e32cb2e 5587 page_counter_init(&memcg->kmem, &parent->kmem);
0db15298 5588 page_counter_init(&memcg->tcpmem, &parent->tcpmem);
18f59ea7 5589 } else {
8278f1c7 5590 init_memcg_events();
bef8620c
RG
5591 page_counter_init(&memcg->memory, NULL);
5592 page_counter_init(&memcg->swap, NULL);
5593 page_counter_init(&memcg->kmem, NULL);
5594 page_counter_init(&memcg->tcpmem, NULL);
d6441637 5595
0b8f73e1
JW
5596 root_mem_cgroup = memcg;
5597 return &memcg->css;
5598 }
5599
f7e1cb6e 5600 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
ef12947c 5601 static_branch_inc(&memcg_sockets_enabled_key);
f7e1cb6e 5602
b6c1a8af
YS
5603#if defined(CONFIG_MEMCG_KMEM)
5604 if (!cgroup_memory_nobpf)
5605 static_branch_inc(&memcg_bpf_enabled_key);
5606#endif
5607
0b8f73e1 5608 return &memcg->css;
0b8f73e1
JW
5609}
5610
73f576c0 5611static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
0b8f73e1 5612{
58fa2a55
VD
5613 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5614
da0efe30
MS
5615 if (memcg_online_kmem(memcg))
5616 goto remove_id;
5617
0a4465d3 5618 /*
e4262c4f 5619 * A memcg must be visible for expand_shrinker_info()
0a4465d3
KT
5620 * by the time the maps are allocated. So, we allocate maps
5621 * here, when for_each_mem_cgroup() can't skip it.
5622 */
da0efe30
MS
5623 if (alloc_shrinker_info(memcg))
5624 goto offline_kmem;
0a4465d3 5625
13ef7424 5626 if (unlikely(mem_cgroup_is_root(memcg)) && !mem_cgroup_disabled())
aa48e47e 5627 queue_delayed_work(system_unbound_wq, &stats_flush_dwork,
396faf88 5628 FLUSH_TIME);
e4dde56c 5629 lru_gen_online_memcg(memcg);
6f0df8e1
JW
5630
5631 /* Online state pins memcg ID, memcg ID pins CSS */
5632 refcount_set(&memcg->id.ref, 1);
5633 css_get(css);
5634
5635 /*
5636 * Ensure mem_cgroup_from_id() works once we're fully online.
5637 *
5638 * We could do this earlier and require callers to filter with
5639 * css_tryget_online(). But right now there are no users that
5640 * need earlier access, and the workingset code relies on the
5641 * cgroup tree linkage (mem_cgroup_get_nr_swap_pages()). So
5642 * publish it here at the end of onlining. This matches the
5643 * regular ID destruction during offlining.
5644 */
5645 idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
5646
2f7dd7a4 5647 return 0;
da0efe30
MS
5648offline_kmem:
5649 memcg_offline_kmem(memcg);
5650remove_id:
5651 mem_cgroup_id_remove(memcg);
5652 return -ENOMEM;
8cdea7c0
BS
5653}
5654
eb95419b 5655static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
df878fb0 5656{
eb95419b 5657 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3bc942f3 5658 struct mem_cgroup_event *event, *tmp;
79bd9814
TH
5659
5660 /*
5661 * Unregister events and notify userspace.
5662 * Notify userspace about cgroup removing only after rmdir of cgroup
5663 * directory to avoid race between userspace and kernelspace.
5664 */
4ba9515d 5665 spin_lock_irq(&memcg->event_list_lock);
fba94807 5666 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
79bd9814
TH
5667 list_del_init(&event->list);
5668 schedule_work(&event->remove);
5669 }
4ba9515d 5670 spin_unlock_irq(&memcg->event_list_lock);
ec64f515 5671
bf8d5d52 5672 page_counter_set_min(&memcg->memory, 0);
23067153 5673 page_counter_set_low(&memcg->memory, 0);
63677c74 5674
a65b0e76
DC
5675 zswap_memcg_offline_cleanup(memcg);
5676
567e9ab2 5677 memcg_offline_kmem(memcg);
a178015c 5678 reparent_shrinker_deferred(memcg);
52ebea74 5679 wb_memcg_offline(memcg);
e4dde56c 5680 lru_gen_offline_memcg(memcg);
73f576c0 5681
591edfb1
RG
5682 drain_all_stock(memcg);
5683
73f576c0 5684 mem_cgroup_id_put(memcg);
df878fb0
KH
5685}
5686
6df38689
VD
5687static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
5688{
5689 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5690
5691 invalidate_reclaim_iterators(memcg);
e4dde56c 5692 lru_gen_release_memcg(memcg);
6df38689
VD
5693}
5694
eb95419b 5695static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
8cdea7c0 5696{
eb95419b 5697 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
97b27821 5698 int __maybe_unused i;
c268e994 5699
97b27821
TH
5700#ifdef CONFIG_CGROUP_WRITEBACK
5701 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5702 wb_wait_for_completion(&memcg->cgwb_frn[i].done);
5703#endif
f7e1cb6e 5704 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
ef12947c 5705 static_branch_dec(&memcg_sockets_enabled_key);
127424c8 5706
0db15298 5707 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
d55f90bf 5708 static_branch_dec(&memcg_sockets_enabled_key);
3893e302 5709
b6c1a8af
YS
5710#if defined(CONFIG_MEMCG_KMEM)
5711 if (!cgroup_memory_nobpf)
5712 static_branch_dec(&memcg_bpf_enabled_key);
5713#endif
5714
0b8f73e1
JW
5715 vmpressure_cleanup(&memcg->vmpressure);
5716 cancel_work_sync(&memcg->high_work);
5717 mem_cgroup_remove_from_trees(memcg);
e4262c4f 5718 free_shrinker_info(memcg);
0b8f73e1 5719 mem_cgroup_free(memcg);
8cdea7c0
BS
5720}
5721
1ced953b
TH
5722/**
5723 * mem_cgroup_css_reset - reset the states of a mem_cgroup
5724 * @css: the target css
5725 *
5726 * Reset the states of the mem_cgroup associated with @css. This is
5727 * invoked when the userland requests disabling on the default hierarchy
5728 * but the memcg is pinned through dependency. The memcg should stop
5729 * applying policies and should revert to the vanilla state as it may be
5730 * made visible again.
5731 *
5732 * The current implementation only resets the essential configurations.
5733 * This needs to be expanded to cover all the visible parts.
5734 */
5735static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
5736{
5737 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5738
bbec2e15
RG
5739 page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
5740 page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
bbec2e15
RG
5741 page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
5742 page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
bf8d5d52 5743 page_counter_set_min(&memcg->memory, 0);
23067153 5744 page_counter_set_low(&memcg->memory, 0);
d1663a90 5745 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
2178e20c 5746 WRITE_ONCE(memcg->soft_limit, PAGE_COUNTER_MAX);
4b82ab4f 5747 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
2529bb3a 5748 memcg_wb_domain_size_changed(memcg);
1ced953b
TH
5749}
5750
2d146aa3
JW
5751static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
5752{
5753 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5754 struct mem_cgroup *parent = parent_mem_cgroup(memcg);
5755 struct memcg_vmstats_percpu *statc;
f82e6bf9 5756 long delta, delta_cpu, v;
7e1c0d6f 5757 int i, nid;
2d146aa3
JW
5758
5759 statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
5760
5761 for (i = 0; i < MEMCG_NR_STAT; i++) {
5762 /*
5763 * Collect the aggregated propagation counts of groups
5764 * below us. We're in a per-cpu loop here and this is
5765 * a global counter, so the first cycle will get them.
5766 */
410f8e82 5767 delta = memcg->vmstats->state_pending[i];
2d146aa3 5768 if (delta)
410f8e82 5769 memcg->vmstats->state_pending[i] = 0;
2d146aa3
JW
5770
5771 /* Add CPU changes on this level since the last flush */
f82e6bf9 5772 delta_cpu = 0;
2d146aa3
JW
5773 v = READ_ONCE(statc->state[i]);
5774 if (v != statc->state_prev[i]) {
f82e6bf9
YA
5775 delta_cpu = v - statc->state_prev[i];
5776 delta += delta_cpu;
2d146aa3
JW
5777 statc->state_prev[i] = v;
5778 }
5779
2d146aa3 5780 /* Aggregate counts on this level and propagate upwards */
f82e6bf9
YA
5781 if (delta_cpu)
5782 memcg->vmstats->state_local[i] += delta_cpu;
5783
5784 if (delta) {
5785 memcg->vmstats->state[i] += delta;
5786 if (parent)
5787 parent->vmstats->state_pending[i] += delta;
5788 }
2d146aa3
JW
5789 }
5790
8278f1c7 5791 for (i = 0; i < NR_MEMCG_EVENTS; i++) {
410f8e82 5792 delta = memcg->vmstats->events_pending[i];
2d146aa3 5793 if (delta)
410f8e82 5794 memcg->vmstats->events_pending[i] = 0;
2d146aa3 5795
f82e6bf9 5796 delta_cpu = 0;
2d146aa3
JW
5797 v = READ_ONCE(statc->events[i]);
5798 if (v != statc->events_prev[i]) {
f82e6bf9
YA
5799 delta_cpu = v - statc->events_prev[i];
5800 delta += delta_cpu;
2d146aa3
JW
5801 statc->events_prev[i] = v;
5802 }
5803
f82e6bf9
YA
5804 if (delta_cpu)
5805 memcg->vmstats->events_local[i] += delta_cpu;
2d146aa3 5806
f82e6bf9
YA
5807 if (delta) {
5808 memcg->vmstats->events[i] += delta;
5809 if (parent)
5810 parent->vmstats->events_pending[i] += delta;
5811 }
2d146aa3 5812 }
7e1c0d6f
SB
5813
5814 for_each_node_state(nid, N_MEMORY) {
5815 struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid];
5816 struct mem_cgroup_per_node *ppn = NULL;
5817 struct lruvec_stats_percpu *lstatc;
5818
5819 if (parent)
5820 ppn = parent->nodeinfo[nid];
5821
5822 lstatc = per_cpu_ptr(pn->lruvec_stats_percpu, cpu);
5823
5824 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
5825 delta = pn->lruvec_stats.state_pending[i];
5826 if (delta)
5827 pn->lruvec_stats.state_pending[i] = 0;
5828
f82e6bf9 5829 delta_cpu = 0;
7e1c0d6f
SB
5830 v = READ_ONCE(lstatc->state[i]);
5831 if (v != lstatc->state_prev[i]) {
f82e6bf9
YA
5832 delta_cpu = v - lstatc->state_prev[i];
5833 delta += delta_cpu;
7e1c0d6f
SB
5834 lstatc->state_prev[i] = v;
5835 }
5836
f82e6bf9
YA
5837 if (delta_cpu)
5838 pn->lruvec_stats.state_local[i] += delta_cpu;
7e1c0d6f 5839
f82e6bf9
YA
5840 if (delta) {
5841 pn->lruvec_stats.state[i] += delta;
5842 if (ppn)
5843 ppn->lruvec_stats.state_pending[i] += delta;
5844 }
7e1c0d6f
SB
5845 }
5846 }
8d59d221
YA
5847 statc->stats_updates = 0;
5848 /* We are in a per-cpu loop here, only do the atomic write once */
5849 if (atomic64_read(&memcg->vmstats->stats_updates))
5850 atomic64_set(&memcg->vmstats->stats_updates, 0);
2d146aa3
JW
5851}
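/*
 * Illustrative note (not part of the original source): for a single counter
 * on a single CPU, if statc->state[i] moved from 10 to 25 since the previous
 * flush, delta_cpu is 15 and is folded into this memcg's state_local[];
 * assuming nothing was already pending, delta is also 15, so state[i] grows
 * by 15 and the same 15 is queued in the parent's state_pending[] for the
 * parent's own flush pass to pick up.
 */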
5852
02491447 5853#ifdef CONFIG_MMU
7dc74be0 5854/* Handlers for move charge at task migration. */
854ffa8d 5855static int mem_cgroup_do_precharge(unsigned long count)
7dc74be0 5856{
05b84301 5857 int ret;
9476db97 5858
d0164adc
MG
5859 /* Try a single bulk charge without reclaim first, kswapd may wake */
5860 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
9476db97 5861 if (!ret) {
854ffa8d 5862 mc.precharge += count;
854ffa8d
DN
5863 return ret;
5864 }
9476db97 5865
3674534b 5866 /* Try charges one by one with reclaim, but do not retry */
854ffa8d 5867 while (count--) {
3674534b 5868 ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
38c5d72f 5869 if (ret)
38c5d72f 5870 return ret;
854ffa8d 5871 mc.precharge++;
9476db97 5872 cond_resched();
854ffa8d 5873 }
9476db97 5874 return 0;
4ffef5fe
DN
5875}
5876
4ffef5fe 5877union mc_target {
b46777da 5878 struct folio *folio;
02491447 5879 swp_entry_t ent;
4ffef5fe
DN
5880};
5881
4ffef5fe 5882enum mc_target_type {
8d32ff84 5883 MC_TARGET_NONE = 0,
4ffef5fe 5884 MC_TARGET_PAGE,
02491447 5885 MC_TARGET_SWAP,
c733a828 5886 MC_TARGET_DEVICE,
4ffef5fe
DN
5887};
5888
90254a65
DN
5889static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
5890 unsigned long addr, pte_t ptent)
4ffef5fe 5891{
25b2995a 5892 struct page *page = vm_normal_page(vma, addr, ptent);
4ffef5fe 5893
58f341f7 5894 if (!page)
90254a65
DN
5895 return NULL;
5896 if (PageAnon(page)) {
1dfab5ab 5897 if (!(mc.flags & MOVE_ANON))
90254a65 5898 return NULL;
1dfab5ab
JW
5899 } else {
5900 if (!(mc.flags & MOVE_FILE))
5901 return NULL;
5902 }
58f341f7 5903 get_page(page);
90254a65
DN
5904
5905 return page;
5906}
5907
c733a828 5908#if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE)
90254a65 5909static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
48406ef8 5910 pte_t ptent, swp_entry_t *entry)
90254a65 5911{
90254a65
DN
5912 struct page *page = NULL;
5913 swp_entry_t ent = pte_to_swp_entry(ptent);
5914
9a137153 5915 if (!(mc.flags & MOVE_ANON))
90254a65 5916 return NULL;
c733a828
JG
5917
5918 /*
27674ef6
CH
5919 * Handle device private pages that are not accessible by the CPU, but
5920 * stored as special swap entries in the page table.
c733a828
JG
5921 */
5922 if (is_device_private_entry(ent)) {
af5cdaf8 5923 page = pfn_swap_entry_to_page(ent);
27674ef6 5924 if (!get_page_unless_zero(page))
c733a828
JG
5925 return NULL;
5926 return page;
5927 }
5928
9a137153
RC
5929 if (non_swap_entry(ent))
5930 return NULL;
5931
4b91355e 5932 /*
cb691e2f 5933 * Because swap_cache_get_folio() updates some statistics counter,
4b91355e
KH
5934 * we call find_get_page() with swapper_space directly.
5935 */
f6ab1f7f 5936 page = find_get_page(swap_address_space(ent), swp_offset(ent));
2d1c4980 5937 entry->val = ent.val;
90254a65
DN
5938
5939 return page;
5940}
4b91355e
KH
5941#else
5942static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
48406ef8 5943 pte_t ptent, swp_entry_t *entry)
4b91355e
KH
5944{
5945 return NULL;
5946}
5947#endif
90254a65 5948
87946a72 5949static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
48384b0b 5950 unsigned long addr, pte_t ptent)
87946a72 5951{
524984ff
MWO
5952 unsigned long index;
5953 struct folio *folio;
5954
87946a72
DN
5955 if (!vma->vm_file) /* anonymous vma */
5956 return NULL;
1dfab5ab 5957 if (!(mc.flags & MOVE_FILE))
87946a72
DN
5958 return NULL;
5959
524984ff 5960 /* folio is moved even if it's not RSS of this task(page-faulted). */
aa3b1895 5961 /* shmem/tmpfs may report page out on swap: account for that too. */
524984ff
MWO
5962 index = linear_page_index(vma, addr);
5963 folio = filemap_get_incore_folio(vma->vm_file->f_mapping, index);
66dabbb6 5964 if (IS_ERR(folio))
524984ff
MWO
5965 return NULL;
5966 return folio_file_page(folio, index);
87946a72
DN
5967}
5968
b1b0deab 5969/**
b267e1a3
MWO
5970 * mem_cgroup_move_account - move account of the folio
5971 * @folio: The folio.
25843c2b 5972 * @compound: charge the page as compound or small page
b267e1a3
MWO
5973 * @from: mem_cgroup which the folio is moved from.
5974 * @to: mem_cgroup which the folio is moved to. @from != @to.
b1b0deab 5975 *
b267e1a3 5976 * The folio must be locked and not on the LRU.
b1b0deab
CG
5977 *
5978 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
5979 * from old cgroup.
5980 */
b267e1a3 5981static int mem_cgroup_move_account(struct folio *folio,
f627c2f5 5982 bool compound,
b1b0deab
CG
5983 struct mem_cgroup *from,
5984 struct mem_cgroup *to)
5985{
ae8af438
KK
5986 struct lruvec *from_vec, *to_vec;
5987 struct pglist_data *pgdat;
fcce4672 5988 unsigned int nr_pages = compound ? folio_nr_pages(folio) : 1;
8e88bd2d 5989 int nid, ret;
b1b0deab
CG
5990
5991 VM_BUG_ON(from == to);
4e0cf05f 5992 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
fcce4672 5993 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
9c325215 5994 VM_BUG_ON(compound && !folio_test_large(folio));
b1b0deab 5995
b1b0deab 5996 ret = -EINVAL;
fcce4672 5997 if (folio_memcg(folio) != from)
4e0cf05f 5998 goto out;
b1b0deab 5999
fcce4672 6000 pgdat = folio_pgdat(folio);
867e5e1d
JW
6001 from_vec = mem_cgroup_lruvec(from, pgdat);
6002 to_vec = mem_cgroup_lruvec(to, pgdat);
ae8af438 6003
fcce4672 6004 folio_memcg_lock(folio);
b1b0deab 6005
fcce4672
MWO
6006 if (folio_test_anon(folio)) {
6007 if (folio_mapped(folio)) {
be5d0a74
JW
6008 __mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages);
6009 __mod_lruvec_state(to_vec, NR_ANON_MAPPED, nr_pages);
6199277b 6010 if (folio_test_pmd_mappable(folio)) {
69473e5d
MS
6011 __mod_lruvec_state(from_vec, NR_ANON_THPS,
6012 -nr_pages);
6013 __mod_lruvec_state(to_vec, NR_ANON_THPS,
6014 nr_pages);
468c3982 6015 }
be5d0a74
JW
6016 }
6017 } else {
0d1c2072
JW
6018 __mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages);
6019 __mod_lruvec_state(to_vec, NR_FILE_PAGES, nr_pages);
6020
fcce4672 6021 if (folio_test_swapbacked(folio)) {
0d1c2072
JW
6022 __mod_lruvec_state(from_vec, NR_SHMEM, -nr_pages);
6023 __mod_lruvec_state(to_vec, NR_SHMEM, nr_pages);
6024 }
6025
fcce4672 6026 if (folio_mapped(folio)) {
49e50d27
JW
6027 __mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages);
6028 __mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages);
6029 }
b1b0deab 6030
fcce4672
MWO
6031 if (folio_test_dirty(folio)) {
6032 struct address_space *mapping = folio_mapping(folio);
c4843a75 6033
f56753ac 6034 if (mapping_can_writeback(mapping)) {
49e50d27
JW
6035 __mod_lruvec_state(from_vec, NR_FILE_DIRTY,
6036 -nr_pages);
6037 __mod_lruvec_state(to_vec, NR_FILE_DIRTY,
6038 nr_pages);
6039 }
c4843a75
GT
6040 }
6041 }
6042
c449deb2
HD
6043#ifdef CONFIG_SWAP
6044 if (folio_test_swapcache(folio)) {
6045 __mod_lruvec_state(from_vec, NR_SWAPCACHE, -nr_pages);
6046 __mod_lruvec_state(to_vec, NR_SWAPCACHE, nr_pages);
6047 }
6048#endif
fcce4672 6049 if (folio_test_writeback(folio)) {
ae8af438
KK
6050 __mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages);
6051 __mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages);
b1b0deab
CG
6052 }
6053
6054 /*
abb242f5
JW
6055 * All state has been migrated, let's switch to the new memcg.
6056 *
bcfe06bf 6057 * It is safe to change page's memcg here because the page
abb242f5
JW
6058 * is referenced, charged, isolated, and locked: we can't race
6059 * with (un)charging, migration, LRU putback, or anything else
bcfe06bf 6060 * that would rely on a stable page's memory cgroup.
abb242f5 6061 *
6c77b607 6062 * Note that folio_memcg_lock is a memcg lock, not a page lock,
bcfe06bf 6063 * to save space. As soon as we switch page's memory cgroup to a
abb242f5
JW
6064 * new memcg that isn't locked, the above state can change
6065 * concurrently again. Make sure we're truly done with it.
b1b0deab 6066 */
abb242f5 6067 smp_mb();
b1b0deab 6068
1a3e1f40
JW
6069 css_get(&to->css);
6070 css_put(&from->css);
6071
fcce4672 6072 folio->memcg_data = (unsigned long)to;
87eaceb3 6073
f70ad448 6074 __folio_memcg_unlock(from);
b1b0deab
CG
6075
6076 ret = 0;
fcce4672 6077 nid = folio_nid(folio);
b1b0deab
CG
6078
6079 local_irq_disable();
6e0110c2 6080 mem_cgroup_charge_statistics(to, nr_pages);
8e88bd2d 6081 memcg_check_events(to, nid);
6e0110c2 6082 mem_cgroup_charge_statistics(from, -nr_pages);
8e88bd2d 6083 memcg_check_events(from, nid);
b1b0deab 6084 local_irq_enable();
b1b0deab
CG
6085out:
6086 return ret;
6087}
6088
7cf7806c
LR
6089/**
6090 * get_mctgt_type - get target type of moving charge
 * @vma: the vma to which the pte to be checked belongs
 * @addr: the address corresponding to the pte to be checked
 * @ptent: the pte to be checked
 * @target: the pointer where the target page or swap entry will be stored
 *	(can be NULL)
6095 *
853f62a3
MWO
6096 * Context: Called with pte lock held.
6097 * Return:
6098 * * MC_TARGET_NONE - If the pte is not a target for move charge.
6099 * * MC_TARGET_PAGE - If the page corresponding to this pte is a target for
b46777da 6100 * move charge. If @target is not NULL, the folio is stored in target->folio
853f62a3
MWO
6101 * with extra refcnt taken (Caller should release it).
6102 * * MC_TARGET_SWAP - If the swap entry corresponding to this pte is a
6103 * target for charge migration. If @target is not NULL, the entry is
6104 * stored in target->ent.
 * * MC_TARGET_DEVICE - Like MC_TARGET_PAGE but the page is device memory and
 *   thus not on the LRU.  For now such a page is charged like a regular
 *   page would be, as it is just special memory taking the place of a
 *   regular page.  See Documentation/vm/hmm.txt and include/linux/hmm.h
7cf7806c 6109 */
8d32ff84 6110static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
90254a65
DN
6111 unsigned long addr, pte_t ptent, union mc_target *target)
6112{
6113 struct page *page = NULL;
b67fa6e4 6114 struct folio *folio;
8d32ff84 6115 enum mc_target_type ret = MC_TARGET_NONE;
90254a65
DN
6116 swp_entry_t ent = { .val = 0 };
6117
6118 if (pte_present(ptent))
6119 page = mc_handle_present_pte(vma, addr, ptent);
5c041f5d
PX
6120 else if (pte_none_mostly(ptent))
6121 /*
6122 * PTE markers should be treated as a none pte here, separated
6123 * from other swap handling below.
6124 */
6125 page = mc_handle_file_pte(vma, addr, ptent);
90254a65 6126 else if (is_swap_pte(ptent))
48406ef8 6127 page = mc_handle_swap_pte(vma, ptent, &ent);
90254a65 6128
b67fa6e4
MWO
6129 if (page)
6130 folio = page_folio(page);
4e0cf05f 6131 if (target && page) {
b67fa6e4
MWO
6132 if (!folio_trylock(folio)) {
6133 folio_put(folio);
4e0cf05f
JW
6134 return ret;
6135 }
6136 /*
6137 * page_mapped() must be stable during the move. This
6138 * pte is locked, so if it's present, the page cannot
6139 * become unmapped. If it isn't, we have only partial
6140 * control over the mapped state: the page lock will
6141 * prevent new faults against pagecache and swapcache,
6142 * so an unmapped page cannot become mapped. However,
6143 * if the page is already mapped elsewhere, it can
6144 * unmap, and there is nothing we can do about it.
6145 * Alas, skip moving the page in this case.
6146 */
6147 if (!pte_present(ptent) && page_mapped(page)) {
b67fa6e4
MWO
6148 folio_unlock(folio);
6149 folio_put(folio);
4e0cf05f
JW
6150 return ret;
6151 }
6152 }
6153
90254a65 6154 if (!page && !ent.val)
8d32ff84 6155 return ret;
02491447 6156 if (page) {
02491447 6157 /*
0a31bc97 6158 * Do only loose check w/o serialization.
1306a85a 6159 * mem_cgroup_move_account() checks the page is valid or
0a31bc97 6160 * not under LRU exclusion.
02491447 6161 */
b67fa6e4 6162 if (folio_memcg(folio) == mc.from) {
02491447 6163 ret = MC_TARGET_PAGE;
b67fa6e4
MWO
6164 if (folio_is_device_private(folio) ||
6165 folio_is_device_coherent(folio))
c733a828 6166 ret = MC_TARGET_DEVICE;
02491447 6167 if (target)
b67fa6e4 6168 target->folio = folio;
02491447 6169 }
4e0cf05f
JW
6170 if (!ret || !target) {
6171 if (target)
b67fa6e4
MWO
6172 folio_unlock(folio);
6173 folio_put(folio);
4e0cf05f 6174 }
02491447 6175 }
3e14a57b
YH
6176 /*
6177 * There is a swap entry and a page doesn't exist or isn't charged.
6178 * But we cannot move a tail-page in a THP.
6179 */
6180 if (ent.val && !ret && (!page || !PageTransCompound(page)) &&
34c00c31 6181 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
7f0f1546
KH
6182 ret = MC_TARGET_SWAP;
6183 if (target)
6184 target->ent = ent;
4ffef5fe 6185 }
4ffef5fe
DN
6186 return ret;
6187}
6188
12724850
NH
6189#ifdef CONFIG_TRANSPARENT_HUGEPAGE
6190/*
d6810d73
YH
6191 * We don't consider PMD mapped swapping or file mapped pages because THP does
6192 * not support them for now.
12724850
NH
6193 * Caller should make sure that pmd_trans_huge(pmd) is true.
6194 */
6195static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
6196 unsigned long addr, pmd_t pmd, union mc_target *target)
6197{
6198 struct page *page = NULL;
f6c7590b 6199 struct folio *folio;
12724850
NH
6200 enum mc_target_type ret = MC_TARGET_NONE;
6201
84c3fc4e
ZY
6202 if (unlikely(is_swap_pmd(pmd))) {
6203 VM_BUG_ON(thp_migration_supported() &&
6204 !is_pmd_migration_entry(pmd));
6205 return ret;
6206 }
12724850 6207 page = pmd_page(pmd);
309381fe 6208 VM_BUG_ON_PAGE(!page || !PageHead(page), page);
f6c7590b 6209 folio = page_folio(page);
1dfab5ab 6210 if (!(mc.flags & MOVE_ANON))
12724850 6211 return ret;
f6c7590b 6212 if (folio_memcg(folio) == mc.from) {
12724850
NH
6213 ret = MC_TARGET_PAGE;
6214 if (target) {
f6c7590b
MWO
6215 folio_get(folio);
6216 if (!folio_trylock(folio)) {
6217 folio_put(folio);
4e0cf05f
JW
6218 return MC_TARGET_NONE;
6219 }
f6c7590b 6220 target->folio = folio;
12724850
NH
6221 }
6222 }
6223 return ret;
6224}
6225#else
6226static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
6227 unsigned long addr, pmd_t pmd, union mc_target *target)
6228{
6229 return MC_TARGET_NONE;
6230}
6231#endif
6232
4ffef5fe
DN
6233static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
6234 unsigned long addr, unsigned long end,
6235 struct mm_walk *walk)
6236{
26bcd64a 6237 struct vm_area_struct *vma = walk->vma;
4ffef5fe
DN
6238 pte_t *pte;
6239 spinlock_t *ptl;
6240
b6ec57f4
KS
6241 ptl = pmd_trans_huge_lock(pmd, vma);
6242 if (ptl) {
		/*
		 * Note there cannot be MC_TARGET_DEVICE for now as we do not
		 * support transparent huge pages with MEMORY_DEVICE_PRIVATE,
		 * but this might change.
		 */
12724850
NH
6248 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
6249 mc.precharge += HPAGE_PMD_NR;
bf929152 6250 spin_unlock(ptl);
1a5a9906 6251 return 0;
12724850 6252 }
03319327 6253
4ffef5fe 6254 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
04dee9e8
HD
6255 if (!pte)
6256 return 0;
4ffef5fe 6257 for (; addr != end; pte++, addr += PAGE_SIZE)
c33c7948 6258 if (get_mctgt_type(vma, addr, ptep_get(pte), NULL))
4ffef5fe
DN
6259 mc.precharge++; /* increment precharge temporarily */
6260 pte_unmap_unlock(pte - 1, ptl);
6261 cond_resched();
6262
7dc74be0
DN
6263 return 0;
6264}
6265
7b86ac33
CH
6266static const struct mm_walk_ops precharge_walk_ops = {
6267 .pmd_entry = mem_cgroup_count_precharge_pte_range,
49b06385 6268 .walk_lock = PGWALK_RDLOCK,
7b86ac33
CH
6269};
6270
4ffef5fe
DN
6271static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
6272{
6273 unsigned long precharge;
4ffef5fe 6274
d8ed45c5 6275 mmap_read_lock(mm);
ba0aff8e 6276 walk_page_range(mm, 0, ULONG_MAX, &precharge_walk_ops, NULL);
d8ed45c5 6277 mmap_read_unlock(mm);
4ffef5fe
DN
6278
6279 precharge = mc.precharge;
6280 mc.precharge = 0;
6281
6282 return precharge;
6283}
6284
4ffef5fe
DN
6285static int mem_cgroup_precharge_mc(struct mm_struct *mm)
6286{
dfe076b0
DN
6287 unsigned long precharge = mem_cgroup_count_precharge(mm);
6288
6289 VM_BUG_ON(mc.moving_task);
6290 mc.moving_task = current;
6291 return mem_cgroup_do_precharge(precharge);
4ffef5fe
DN
6292}
6293
dfe076b0
DN
6294/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
6295static void __mem_cgroup_clear_mc(void)
4ffef5fe 6296{
2bd9bb20
KH
6297 struct mem_cgroup *from = mc.from;
6298 struct mem_cgroup *to = mc.to;
6299
4ffef5fe 6300 /* we must uncharge all the leftover precharges from mc.to */
854ffa8d 6301 if (mc.precharge) {
4b569387 6302 mem_cgroup_cancel_charge(mc.to, mc.precharge);
854ffa8d
DN
6303 mc.precharge = 0;
6304 }
6305 /*
6306 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
6307 * we must uncharge here.
6308 */
6309 if (mc.moved_charge) {
4b569387 6310 mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
854ffa8d 6311 mc.moved_charge = 0;
4ffef5fe 6312 }
483c30b5
DN
6313 /* we must fixup refcnts and charges */
6314 if (mc.moved_swap) {
483c30b5 6315 /* uncharge swap account from the old cgroup */
ce00a967 6316 if (!mem_cgroup_is_root(mc.from))
3e32cb2e 6317 page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
483c30b5 6318
615d66c3
VD
6319 mem_cgroup_id_put_many(mc.from, mc.moved_swap);
6320
05b84301 6321 /*
3e32cb2e
JW
6322 * we charged both to->memory and to->memsw, so we
6323 * should uncharge to->memory.
05b84301 6324 */
ce00a967 6325 if (!mem_cgroup_is_root(mc.to))
3e32cb2e
JW
6326 page_counter_uncharge(&mc.to->memory, mc.moved_swap);
6327
483c30b5
DN
6328 mc.moved_swap = 0;
6329 }
dfe076b0
DN
6330 memcg_oom_recover(from);
6331 memcg_oom_recover(to);
6332 wake_up_all(&mc.waitq);
6333}
6334
6335static void mem_cgroup_clear_mc(void)
6336{
264a0ae1
TH
6337 struct mm_struct *mm = mc.mm;
6338
dfe076b0
DN
6339 /*
6340 * we must clear moving_task before waking up waiters at the end of
6341 * task migration.
6342 */
6343 mc.moving_task = NULL;
6344 __mem_cgroup_clear_mc();
2bd9bb20 6345 spin_lock(&mc.lock);
4ffef5fe
DN
6346 mc.from = NULL;
6347 mc.to = NULL;
264a0ae1 6348 mc.mm = NULL;
2bd9bb20 6349 spin_unlock(&mc.lock);
264a0ae1
TH
6350
6351 mmput(mm);
4ffef5fe
DN
6352}
6353
1f7dd3e5 6354static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
7dc74be0 6355{
1f7dd3e5 6356 struct cgroup_subsys_state *css;
eed67d75 6357 struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
9f2115f9 6358 struct mem_cgroup *from;
4530eddb 6359 struct task_struct *leader, *p;
9f2115f9 6360 struct mm_struct *mm;
1dfab5ab 6361 unsigned long move_flags;
9f2115f9 6362 int ret = 0;
7dc74be0 6363
1f7dd3e5
TH
6364 /* charge immigration isn't supported on the default hierarchy */
6365 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
9f2115f9
TH
6366 return 0;
6367
4530eddb
TH
6368 /*
6369 * Multi-process migrations only happen on the default hierarchy
6370 * where charge immigration is not used. Perform charge
6371 * immigration if @tset contains a leader and whine if there are
6372 * multiple.
6373 */
6374 p = NULL;
1f7dd3e5 6375 cgroup_taskset_for_each_leader(leader, css, tset) {
4530eddb
TH
6376 WARN_ON_ONCE(p);
6377 p = leader;
1f7dd3e5 6378 memcg = mem_cgroup_from_css(css);
4530eddb
TH
6379 }
6380 if (!p)
6381 return 0;
6382
1f7dd3e5 6383 /*
f0953a1b 6384 * We are now committed to this value whatever it is. Changes in this
1f7dd3e5
TH
6385 * tunable will only affect upcoming migrations, not the current one.
6386 * So we need to save it, and keep it going.
6387 */
6388 move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
6389 if (!move_flags)
6390 return 0;
6391
9f2115f9
TH
6392 from = mem_cgroup_from_task(p);
6393
6394 VM_BUG_ON(from == memcg);
6395
6396 mm = get_task_mm(p);
6397 if (!mm)
6398 return 0;
	/* We move charges only when we move an owner of the mm */
6400 if (mm->owner == p) {
6401 VM_BUG_ON(mc.from);
6402 VM_BUG_ON(mc.to);
6403 VM_BUG_ON(mc.precharge);
6404 VM_BUG_ON(mc.moved_charge);
6405 VM_BUG_ON(mc.moved_swap);
6406
6407 spin_lock(&mc.lock);
264a0ae1 6408 mc.mm = mm;
9f2115f9
TH
6409 mc.from = from;
6410 mc.to = memcg;
6411 mc.flags = move_flags;
6412 spin_unlock(&mc.lock);
6413 /* We set mc.moving_task later */
6414
6415 ret = mem_cgroup_precharge_mc(mm);
6416 if (ret)
6417 mem_cgroup_clear_mc();
264a0ae1
TH
6418 } else {
6419 mmput(mm);
7dc74be0
DN
6420 }
6421 return ret;
6422}
6423
1f7dd3e5 6424static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
7dc74be0 6425{
4e2f245d
JW
6426 if (mc.to)
6427 mem_cgroup_clear_mc();
7dc74be0
DN
6428}
6429
4ffef5fe
DN
6430static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
6431 unsigned long addr, unsigned long end,
6432 struct mm_walk *walk)
7dc74be0 6433{
4ffef5fe 6434 int ret = 0;
26bcd64a 6435 struct vm_area_struct *vma = walk->vma;
4ffef5fe
DN
6436 pte_t *pte;
6437 spinlock_t *ptl;
12724850
NH
6438 enum mc_target_type target_type;
6439 union mc_target target;
b267e1a3 6440 struct folio *folio;
4ffef5fe 6441
b6ec57f4
KS
6442 ptl = pmd_trans_huge_lock(pmd, vma);
6443 if (ptl) {
62ade86a 6444 if (mc.precharge < HPAGE_PMD_NR) {
bf929152 6445 spin_unlock(ptl);
12724850
NH
6446 return 0;
6447 }
6448 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
6449 if (target_type == MC_TARGET_PAGE) {
b46777da 6450 folio = target.folio;
b267e1a3
MWO
6451 if (folio_isolate_lru(folio)) {
6452 if (!mem_cgroup_move_account(folio, true,
1306a85a 6453 mc.from, mc.to)) {
12724850
NH
6454 mc.precharge -= HPAGE_PMD_NR;
6455 mc.moved_charge += HPAGE_PMD_NR;
6456 }
b267e1a3 6457 folio_putback_lru(folio);
12724850 6458 }
b267e1a3
MWO
6459 folio_unlock(folio);
6460 folio_put(folio);
c733a828 6461 } else if (target_type == MC_TARGET_DEVICE) {
b46777da 6462 folio = target.folio;
b267e1a3 6463 if (!mem_cgroup_move_account(folio, true,
c733a828
JG
6464 mc.from, mc.to)) {
6465 mc.precharge -= HPAGE_PMD_NR;
6466 mc.moved_charge += HPAGE_PMD_NR;
6467 }
b267e1a3
MWO
6468 folio_unlock(folio);
6469 folio_put(folio);
12724850 6470 }
bf929152 6471 spin_unlock(ptl);
1a5a9906 6472 return 0;
12724850
NH
6473 }
6474
4ffef5fe
DN
6475retry:
6476 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
04dee9e8
HD
6477 if (!pte)
6478 return 0;
4ffef5fe 6479 for (; addr != end; addr += PAGE_SIZE) {
c33c7948 6480 pte_t ptent = ptep_get(pte++);
c733a828 6481 bool device = false;
02491447 6482 swp_entry_t ent;
4ffef5fe
DN
6483
6484 if (!mc.precharge)
6485 break;
6486
8d32ff84 6487 switch (get_mctgt_type(vma, addr, ptent, &target)) {
c733a828
JG
6488 case MC_TARGET_DEVICE:
6489 device = true;
e4a9bc58 6490 fallthrough;
4ffef5fe 6491 case MC_TARGET_PAGE:
b46777da 6492 folio = target.folio;
53f9263b
KS
6493 /*
6494 * We can have a part of the split pmd here. Moving it
6495 * can be done but it would be too convoluted so simply
6496 * ignore such a partial THP and keep it in original
6497 * memcg. There should be somebody mapping the head.
6498 */
b267e1a3 6499 if (folio_test_large(folio))
53f9263b 6500 goto put;
b267e1a3 6501 if (!device && !folio_isolate_lru(folio))
4ffef5fe 6502 goto put;
b267e1a3 6503 if (!mem_cgroup_move_account(folio, false,
f627c2f5 6504 mc.from, mc.to)) {
4ffef5fe 6505 mc.precharge--;
854ffa8d
DN
6506 /* we uncharge from mc.from later. */
6507 mc.moved_charge++;
4ffef5fe 6508 }
c733a828 6509 if (!device)
b267e1a3 6510 folio_putback_lru(folio);
4e0cf05f 6511put: /* get_mctgt_type() gets & locks the page */
b267e1a3
MWO
6512 folio_unlock(folio);
6513 folio_put(folio);
4ffef5fe 6514 break;
02491447
DN
6515 case MC_TARGET_SWAP:
6516 ent = target.ent;
e91cbb42 6517 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
02491447 6518 mc.precharge--;
8d22a935
HD
6519 mem_cgroup_id_get_many(mc.to, 1);
6520 /* we fixup other refcnts and charges later. */
483c30b5
DN
6521 mc.moved_swap++;
6522 }
02491447 6523 break;
4ffef5fe
DN
6524 default:
6525 break;
6526 }
6527 }
6528 pte_unmap_unlock(pte - 1, ptl);
6529 cond_resched();
6530
6531 if (addr != end) {
6532 /*
6533 * We have consumed all precharges we got in can_attach().
6534 * We try charge one by one, but don't do any additional
6535 * charges to mc.to if we have failed in charge once in attach()
6536 * phase.
6537 */
854ffa8d 6538 ret = mem_cgroup_do_precharge(1);
4ffef5fe
DN
6539 if (!ret)
6540 goto retry;
6541 }
6542
6543 return ret;
6544}
6545
7b86ac33
CH
6546static const struct mm_walk_ops charge_walk_ops = {
6547 .pmd_entry = mem_cgroup_move_charge_pte_range,
49b06385 6548 .walk_lock = PGWALK_RDLOCK,
7b86ac33
CH
6549};
6550
264a0ae1 6551static void mem_cgroup_move_charge(void)
4ffef5fe 6552{
4ffef5fe 6553 lru_add_drain_all();
312722cb 6554 /*
6c77b607 6555 * Signal folio_memcg_lock() to take the memcg's move_lock
81f8c3a4
JW
6556 * while we're moving its pages to another memcg. Then wait
6557 * for already started RCU-only updates to finish.
312722cb
JW
6558 */
6559 atomic_inc(&mc.from->moving_account);
6560 synchronize_rcu();
dfe076b0 6561retry:
d8ed45c5 6562 if (unlikely(!mmap_read_trylock(mc.mm))) {
dfe076b0 6563 /*
		 * Someone holding the mmap_lock might be waiting on the
		 * waitq.  So we cancel all extra charges, wake up all waiters,
6566 * and retry. Because we cancel precharges, we might not be able
6567 * to move enough charges, but moving charge is a best-effort
6568 * feature anyway, so it wouldn't be a big problem.
6569 */
6570 __mem_cgroup_clear_mc();
6571 cond_resched();
6572 goto retry;
6573 }
26bcd64a
NH
6574 /*
6575 * When we have consumed all precharges and failed in doing
6576 * additional charge, the page walk just aborts.
6577 */
ba0aff8e 6578 walk_page_range(mc.mm, 0, ULONG_MAX, &charge_walk_ops, NULL);
d8ed45c5 6579 mmap_read_unlock(mc.mm);
312722cb 6580 atomic_dec(&mc.from->moving_account);
7dc74be0
DN
6581}
6582
264a0ae1 6583static void mem_cgroup_move_task(void)
67e465a7 6584{
264a0ae1
TH
6585 if (mc.to) {
6586 mem_cgroup_move_charge();
a433658c 6587 mem_cgroup_clear_mc();
264a0ae1 6588 }
67e465a7 6589}
1aacbd35 6590
5cfb80a7 6591#else /* !CONFIG_MMU */
1f7dd3e5 6592static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
5cfb80a7
DN
6593{
6594 return 0;
6595}
1f7dd3e5 6596static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
5cfb80a7
DN
6597{
6598}
264a0ae1 6599static void mem_cgroup_move_task(void)
5cfb80a7
DN
6600{
6601}
6602#endif
67e465a7 6603
1aacbd35
RG
6604#ifdef CONFIG_MEMCG_KMEM
6605static void mem_cgroup_fork(struct task_struct *task)
6606{
6607 /*
6608 * Set the update flag to cause task->objcg to be initialized lazily
6609 * on the first allocation. It can be done without any synchronization
	 * because it's always performed on the current task, as is
	 * current_objcg_update().
6612 */
6613 task->objcg = (struct obj_cgroup *)CURRENT_OBJCG_UPDATE_FLAG;
6614}
6615
6616static void mem_cgroup_exit(struct task_struct *task)
6617{
6618 struct obj_cgroup *objcg = task->objcg;
6619
6620 objcg = (struct obj_cgroup *)
6621 ((unsigned long)objcg & ~CURRENT_OBJCG_UPDATE_FLAG);
6622 if (objcg)
6623 obj_cgroup_put(objcg);
6624
6625 /*
6626 * Some kernel allocations can happen after this point,
6627 * but let's ignore them. It can be done without any synchronization
 6628	 * because it's always performed on the current task, as is
 6629	 * current_objcg_update().
6630 */
6631 task->objcg = NULL;
6632}
6633#endif
6634
bd74fdae 6635#ifdef CONFIG_LRU_GEN
1aacbd35 6636static void mem_cgroup_lru_gen_attach(struct cgroup_taskset *tset)
bd74fdae
YZ
6637{
6638 struct task_struct *task;
6639 struct cgroup_subsys_state *css;
6640
6641 /* find the first leader if there is any */
6642 cgroup_taskset_for_each_leader(task, css, tset)
6643 break;
6644
6645 if (!task)
6646 return;
6647
6648 task_lock(task);
6649 if (task->mm && READ_ONCE(task->mm->owner) == task)
6650 lru_gen_migrate_mm(task->mm);
6651 task_unlock(task);
6652}
6653#else
1aacbd35
RG
6654static void mem_cgroup_lru_gen_attach(struct cgroup_taskset *tset) {}
6655#endif /* CONFIG_LRU_GEN */
6656
6657#ifdef CONFIG_MEMCG_KMEM
6658static void mem_cgroup_kmem_attach(struct cgroup_taskset *tset)
6659{
6660 struct task_struct *task;
6661 struct cgroup_subsys_state *css;
6662
6663 cgroup_taskset_for_each(task, css, tset) {
6664 /* atomically set the update bit */
6665 set_bit(CURRENT_OBJCG_UPDATE_BIT, (unsigned long *)&task->objcg);
6666 }
6667}
6668#else
6669static void mem_cgroup_kmem_attach(struct cgroup_taskset *tset) {}
6670#endif /* CONFIG_MEMCG_KMEM */
6671
6672#if defined(CONFIG_LRU_GEN) || defined(CONFIG_MEMCG_KMEM)
bd74fdae
YZ
6673static void mem_cgroup_attach(struct cgroup_taskset *tset)
6674{
1aacbd35
RG
6675 mem_cgroup_lru_gen_attach(tset);
6676 mem_cgroup_kmem_attach(tset);
bd74fdae 6677}
1aacbd35 6678#endif
bd74fdae 6679
677dc973
CD
6680static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
6681{
6682 if (value == PAGE_COUNTER_MAX)
6683 seq_puts(m, "max\n");
6684 else
6685 seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE);
6686
6687 return 0;
6688}
6689
241994ed
JW
6690static u64 memory_current_read(struct cgroup_subsys_state *css,
6691 struct cftype *cft)
6692{
f5fc3c5d
JW
6693 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6694
6695 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
241994ed
JW
6696}
6697
8e20d4b3
GR
6698static u64 memory_peak_read(struct cgroup_subsys_state *css,
6699 struct cftype *cft)
6700{
6701 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6702
6703 return (u64)memcg->memory.watermark * PAGE_SIZE;
6704}
6705
bf8d5d52
RG
6706static int memory_min_show(struct seq_file *m, void *v)
6707{
677dc973
CD
6708 return seq_puts_memcg_tunable(m,
6709 READ_ONCE(mem_cgroup_from_seq(m)->memory.min));
bf8d5d52
RG
6710}
6711
6712static ssize_t memory_min_write(struct kernfs_open_file *of,
6713 char *buf, size_t nbytes, loff_t off)
6714{
6715 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6716 unsigned long min;
6717 int err;
6718
6719 buf = strstrip(buf);
6720 err = page_counter_memparse(buf, "max", &min);
6721 if (err)
6722 return err;
6723
6724 page_counter_set_min(&memcg->memory, min);
6725
6726 return nbytes;
6727}
6728
241994ed
JW
6729static int memory_low_show(struct seq_file *m, void *v)
6730{
677dc973
CD
6731 return seq_puts_memcg_tunable(m,
6732 READ_ONCE(mem_cgroup_from_seq(m)->memory.low));
241994ed
JW
6733}
6734
6735static ssize_t memory_low_write(struct kernfs_open_file *of,
6736 char *buf, size_t nbytes, loff_t off)
6737{
6738 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6739 unsigned long low;
6740 int err;
6741
6742 buf = strstrip(buf);
d2973697 6743 err = page_counter_memparse(buf, "max", &low);
241994ed
JW
6744 if (err)
6745 return err;
6746
23067153 6747 page_counter_set_low(&memcg->memory, low);
241994ed
JW
6748
6749 return nbytes;
6750}
6751
6752static int memory_high_show(struct seq_file *m, void *v)
6753{
d1663a90
JK
6754 return seq_puts_memcg_tunable(m,
6755 READ_ONCE(mem_cgroup_from_seq(m)->memory.high));
241994ed
JW
6756}
6757
6758static ssize_t memory_high_write(struct kernfs_open_file *of,
6759 char *buf, size_t nbytes, loff_t off)
6760{
6761 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
d977aa93 6762 unsigned int nr_retries = MAX_RECLAIM_RETRIES;
8c8c383c 6763 bool drained = false;
241994ed
JW
6764 unsigned long high;
6765 int err;
6766
6767 buf = strstrip(buf);
d2973697 6768 err = page_counter_memparse(buf, "max", &high);
241994ed
JW
6769 if (err)
6770 return err;
6771
e82553c1
JW
6772 page_counter_set_high(&memcg->memory, high);
6773
8c8c383c
JW
6774 for (;;) {
6775 unsigned long nr_pages = page_counter_read(&memcg->memory);
6776 unsigned long reclaimed;
6777
6778 if (nr_pages <= high)
6779 break;
6780
6781 if (signal_pending(current))
6782 break;
6783
6784 if (!drained) {
6785 drain_all_stock(memcg);
6786 drained = true;
6787 continue;
6788 }
6789
6790 reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
55ab834a 6791 GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP);
8c8c383c
JW
6792
6793 if (!reclaimed && !nr_retries--)
6794 break;
6795 }
588083bb 6796
19ce33ac 6797 memcg_wb_domain_size_changed(memcg);
241994ed
JW
6798 return nbytes;
6799}
6800
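/*
 * Illustrative usage sketch for the memory.high handler above (the shell
 * command and the 4 KiB page size are assumptions, not taken from this
 * file):
 *
 *     echo 512M > memory.high
 *
 * page_counter_memparse() accepts either "max" or a byte value, so this
 * sets the high limit to 131072 pages (512 MiB / 4 KiB).  If the group
 * currently uses 600 MiB, memory_high_write() then tries to reclaim the
 * ~88 MiB of excess before returning, giving up after MAX_RECLAIM_RETRIES
 * passes that reclaim nothing or when a signal is pending.
 */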
6801static int memory_max_show(struct seq_file *m, void *v)
6802{
677dc973
CD
6803 return seq_puts_memcg_tunable(m,
6804 READ_ONCE(mem_cgroup_from_seq(m)->memory.max));
241994ed
JW
6805}
6806
6807static ssize_t memory_max_write(struct kernfs_open_file *of,
6808 char *buf, size_t nbytes, loff_t off)
6809{
6810 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
d977aa93 6811 unsigned int nr_reclaims = MAX_RECLAIM_RETRIES;
b6e6edcf 6812 bool drained = false;
241994ed
JW
6813 unsigned long max;
6814 int err;
6815
6816 buf = strstrip(buf);
d2973697 6817 err = page_counter_memparse(buf, "max", &max);
241994ed
JW
6818 if (err)
6819 return err;
6820
bbec2e15 6821 xchg(&memcg->memory.max, max);
b6e6edcf
JW
6822
6823 for (;;) {
6824 unsigned long nr_pages = page_counter_read(&memcg->memory);
6825
6826 if (nr_pages <= max)
6827 break;
6828
7249c9f0 6829 if (signal_pending(current))
b6e6edcf 6830 break;
b6e6edcf
JW
6831
6832 if (!drained) {
6833 drain_all_stock(memcg);
6834 drained = true;
6835 continue;
6836 }
6837
6838 if (nr_reclaims) {
6839 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
55ab834a 6840 GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP))
b6e6edcf
JW
6841 nr_reclaims--;
6842 continue;
6843 }
6844
e27be240 6845 memcg_memory_event(memcg, MEMCG_OOM);
b6e6edcf
JW
6846 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
6847 break;
6848 }
241994ed 6849
2529bb3a 6850 memcg_wb_domain_size_changed(memcg);
241994ed
JW
6851 return nbytes;
6852}
6853
664dc218
DR
6854/*
6855 * Note: don't forget to update the 'samples/cgroup/memcg_event_listener'
6856 * if any new events become available.
6857 */
1e577f97
SB
6858static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
6859{
6860 seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW]));
6861 seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH]));
6862 seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX]));
6863 seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM]));
6864 seq_printf(m, "oom_kill %lu\n",
6865 atomic_long_read(&events[MEMCG_OOM_KILL]));
b6bf9abb
DS
6866 seq_printf(m, "oom_group_kill %lu\n",
6867 atomic_long_read(&events[MEMCG_OOM_GROUP_KILL]));
1e577f97
SB
6868}
6869
241994ed
JW
6870static int memory_events_show(struct seq_file *m, void *v)
6871{
aa9694bb 6872 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
241994ed 6873
1e577f97
SB
6874 __memory_events_show(m, memcg->memory_events);
6875 return 0;
6876}
6877
6878static int memory_events_local_show(struct seq_file *m, void *v)
6879{
6880 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
241994ed 6881
1e577f97 6882 __memory_events_show(m, memcg->memory_events_local);
241994ed
JW
6883 return 0;
6884}
6885
587d9f72
JW
6886static int memory_stat_show(struct seq_file *m, void *v)
6887{
aa9694bb 6888 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
68aaee14 6889 char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
5b42360c 6890 struct seq_buf s;
1ff9e6e1 6891
c8713d0b
JW
6892 if (!buf)
6893 return -ENOMEM;
5b42360c
YA
6894 seq_buf_init(&s, buf, PAGE_SIZE);
6895 memory_stat_format(memcg, &s);
c8713d0b
JW
6896 seq_puts(m, buf);
6897 kfree(buf);
587d9f72
JW
6898 return 0;
6899}
6900
5f9a4f4a 6901#ifdef CONFIG_NUMA
fff66b79
MS
6902static inline unsigned long lruvec_page_state_output(struct lruvec *lruvec,
6903 int item)
6904{
ff841a06
YA
6905 return lruvec_page_state(lruvec, item) *
6906 memcg_page_state_output_unit(item);
fff66b79
MS
6907}
6908
5f9a4f4a
MS
6909static int memory_numa_stat_show(struct seq_file *m, void *v)
6910{
6911 int i;
6912 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6913
7d7ef0a4 6914 mem_cgroup_flush_stats(memcg);
7e1c0d6f 6915
5f9a4f4a
MS
6916 for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
6917 int nid;
6918
6919 if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS)
6920 continue;
6921
6922 seq_printf(m, "%s", memory_stats[i].name);
6923 for_each_node_state(nid, N_MEMORY) {
6924 u64 size;
6925 struct lruvec *lruvec;
6926
6927 lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
fff66b79
MS
6928 size = lruvec_page_state_output(lruvec,
6929 memory_stats[i].idx);
5f9a4f4a
MS
6930 seq_printf(m, " N%d=%llu", nid, size);
6931 }
6932 seq_putc(m, '\n');
6933 }
6934
6935 return 0;
6936}
6937#endif
6938
3d8b38eb
RG
6939static int memory_oom_group_show(struct seq_file *m, void *v)
6940{
aa9694bb 6941 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
3d8b38eb 6942
eaf7b66b 6943 seq_printf(m, "%d\n", READ_ONCE(memcg->oom_group));
3d8b38eb
RG
6944
6945 return 0;
6946}
6947
6948static ssize_t memory_oom_group_write(struct kernfs_open_file *of,
6949 char *buf, size_t nbytes, loff_t off)
6950{
6951 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6952 int ret, oom_group;
6953
6954 buf = strstrip(buf);
6955 if (!buf)
6956 return -EINVAL;
6957
6958 ret = kstrtoint(buf, 0, &oom_group);
6959 if (ret)
6960 return ret;
6961
6962 if (oom_group != 0 && oom_group != 1)
6963 return -EINVAL;
6964
eaf7b66b 6965 WRITE_ONCE(memcg->oom_group, oom_group);
3d8b38eb
RG
6966
6967 return nbytes;
6968}
6969
94968384
SB
6970static ssize_t memory_reclaim(struct kernfs_open_file *of, char *buf,
6971 size_t nbytes, loff_t off)
6972{
6973 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6974 unsigned int nr_retries = MAX_RECLAIM_RETRIES;
6975 unsigned long nr_to_reclaim, nr_reclaimed = 0;
55ab834a
MH
6976 unsigned int reclaim_options;
6977 int err;
12a5d395
MA
6978
6979 buf = strstrip(buf);
55ab834a
MH
6980 err = page_counter_memparse(buf, "", &nr_to_reclaim);
6981 if (err)
6982 return err;
12a5d395 6983
55ab834a 6984 reclaim_options = MEMCG_RECLAIM_MAY_SWAP | MEMCG_RECLAIM_PROACTIVE;
94968384 6985 while (nr_reclaimed < nr_to_reclaim) {
287d5fed
M
6986 /* Will converge on zero, but reclaim enforces a minimum */
6987 unsigned long batch_size = (nr_to_reclaim - nr_reclaimed) / 4;
94968384
SB
6988 unsigned long reclaimed;
6989
6990 if (signal_pending(current))
6991 return -EINTR;
6992
6993 /*
 6994		 * On the final attempt, drain the percpu lru caches in the
6995 * hope of introducing more evictable pages for
6996 * try_to_free_mem_cgroup_pages().
6997 */
6998 if (!nr_retries)
6999 lru_add_drain_all();
7000
7001 reclaimed = try_to_free_mem_cgroup_pages(memcg,
287d5fed 7002 batch_size, GFP_KERNEL, reclaim_options);
94968384
SB
7003
7004 if (!reclaimed && !nr_retries--)
7005 return -EAGAIN;
7006
7007 nr_reclaimed += reclaimed;
7008 }
7009
7010 return nbytes;
7011}
7012
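/*
 * Illustrative example of the batching above (numbers assume 4 KiB
 * pages): writing "100M" to memory.reclaim asks for
 * nr_to_reclaim = 25600 pages.  Each pass requests a quarter of the
 * remaining target, so the first batch_size is 6400 pages; if that pass
 * reclaims 5000 pages, the next batch is (25600 - 5000) / 4 = 5150
 * pages, and so on until the target is met, a pending signal returns
 * -EINTR, or MAX_RECLAIM_RETRIES passes that reclaim nothing return
 * -EAGAIN.
 */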
241994ed
JW
7013static struct cftype memory_files[] = {
7014 {
7015 .name = "current",
f5fc3c5d 7016 .flags = CFTYPE_NOT_ON_ROOT,
241994ed
JW
7017 .read_u64 = memory_current_read,
7018 },
8e20d4b3
GR
7019 {
7020 .name = "peak",
7021 .flags = CFTYPE_NOT_ON_ROOT,
7022 .read_u64 = memory_peak_read,
7023 },
bf8d5d52
RG
7024 {
7025 .name = "min",
7026 .flags = CFTYPE_NOT_ON_ROOT,
7027 .seq_show = memory_min_show,
7028 .write = memory_min_write,
7029 },
241994ed
JW
7030 {
7031 .name = "low",
7032 .flags = CFTYPE_NOT_ON_ROOT,
7033 .seq_show = memory_low_show,
7034 .write = memory_low_write,
7035 },
7036 {
7037 .name = "high",
7038 .flags = CFTYPE_NOT_ON_ROOT,
7039 .seq_show = memory_high_show,
7040 .write = memory_high_write,
7041 },
7042 {
7043 .name = "max",
7044 .flags = CFTYPE_NOT_ON_ROOT,
7045 .seq_show = memory_max_show,
7046 .write = memory_max_write,
7047 },
7048 {
7049 .name = "events",
7050 .flags = CFTYPE_NOT_ON_ROOT,
472912a2 7051 .file_offset = offsetof(struct mem_cgroup, events_file),
241994ed
JW
7052 .seq_show = memory_events_show,
7053 },
1e577f97
SB
7054 {
7055 .name = "events.local",
7056 .flags = CFTYPE_NOT_ON_ROOT,
7057 .file_offset = offsetof(struct mem_cgroup, events_local_file),
7058 .seq_show = memory_events_local_show,
7059 },
587d9f72
JW
7060 {
7061 .name = "stat",
587d9f72
JW
7062 .seq_show = memory_stat_show,
7063 },
5f9a4f4a
MS
7064#ifdef CONFIG_NUMA
7065 {
7066 .name = "numa_stat",
7067 .seq_show = memory_numa_stat_show,
7068 },
7069#endif
3d8b38eb
RG
7070 {
7071 .name = "oom.group",
7072 .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE,
7073 .seq_show = memory_oom_group_show,
7074 .write = memory_oom_group_write,
7075 },
94968384
SB
7076 {
7077 .name = "reclaim",
7078 .flags = CFTYPE_NS_DELEGATABLE,
7079 .write = memory_reclaim,
7080 },
241994ed
JW
7081 { } /* terminate */
7082};
7083
073219e9 7084struct cgroup_subsys memory_cgrp_subsys = {
92fb9748 7085 .css_alloc = mem_cgroup_css_alloc,
d142e3e6 7086 .css_online = mem_cgroup_css_online,
92fb9748 7087 .css_offline = mem_cgroup_css_offline,
6df38689 7088 .css_released = mem_cgroup_css_released,
92fb9748 7089 .css_free = mem_cgroup_css_free,
1ced953b 7090 .css_reset = mem_cgroup_css_reset,
2d146aa3 7091 .css_rstat_flush = mem_cgroup_css_rstat_flush,
7dc74be0 7092 .can_attach = mem_cgroup_can_attach,
1aacbd35 7093#if defined(CONFIG_LRU_GEN) || defined(CONFIG_MEMCG_KMEM)
bd74fdae 7094 .attach = mem_cgroup_attach,
1aacbd35 7095#endif
7dc74be0 7096 .cancel_attach = mem_cgroup_cancel_attach,
264a0ae1 7097 .post_attach = mem_cgroup_move_task,
1aacbd35
RG
7098#ifdef CONFIG_MEMCG_KMEM
7099 .fork = mem_cgroup_fork,
7100 .exit = mem_cgroup_exit,
7101#endif
241994ed
JW
7102 .dfl_cftypes = memory_files,
7103 .legacy_cftypes = mem_cgroup_legacy_files,
6d12e2d8 7104 .early_init = 0,
8cdea7c0 7105};
c077719b 7106
bc50bcc6
JW
7107/*
7108 * This function calculates an individual cgroup's effective
7109 * protection which is derived from its own memory.min/low, its
7110 * parent's and siblings' settings, as well as the actual memory
7111 * distribution in the tree.
7112 *
7113 * The following rules apply to the effective protection values:
7114 *
7115 * 1. At the first level of reclaim, effective protection is equal to
7116 * the declared protection in memory.min and memory.low.
7117 *
7118 * 2. To enable safe delegation of the protection configuration, at
7119 * subsequent levels the effective protection is capped to the
7120 * parent's effective protection.
7121 *
7122 * 3. To make complex and dynamic subtrees easier to configure, the
7123 * user is allowed to overcommit the declared protection at a given
7124 * level. If that is the case, the parent's effective protection is
7125 * distributed to the children in proportion to how much protection
7126 * they have declared and how much of it they are utilizing.
7127 *
7128 * This makes distribution proportional, but also work-conserving:
7129 * if one cgroup claims much more protection than it uses memory,
7130 * the unused remainder is available to its siblings.
7131 *
7132 * 4. Conversely, when the declared protection is undercommitted at a
7133 * given level, the distribution of the larger parental protection
7134 * budget is NOT proportional. A cgroup's protection from a sibling
7135 * is capped to its own memory.min/low setting.
7136 *
8a931f80
JW
7137 * 5. However, to allow protecting recursive subtrees from each other
7138 * without having to declare each individual cgroup's fixed share
7139 * of the ancestor's claim to protection, any unutilized -
7140 * "floating" - protection from up the tree is distributed in
7141 * proportion to each cgroup's *usage*. This makes the protection
7142 * neutral wrt sibling cgroups and lets them compete freely over
7143 * the shared parental protection budget, but it protects the
7144 * subtree as a whole from neighboring subtrees.
7145 *
7146 * Note that 4. and 5. are not in conflict: 4. is about protecting
7147 * against immediate siblings whereas 5. is about protecting against
7148 * neighboring subtrees.
bc50bcc6
JW
7149 */
7150static unsigned long effective_protection(unsigned long usage,
8a931f80 7151 unsigned long parent_usage,
bc50bcc6
JW
7152 unsigned long setting,
7153 unsigned long parent_effective,
7154 unsigned long siblings_protected)
7155{
7156 unsigned long protected;
8a931f80 7157 unsigned long ep;
bc50bcc6
JW
7158
7159 protected = min(usage, setting);
7160 /*
7161 * If all cgroups at this level combined claim and use more
08e0f49e 7162 * protection than what the parent affords them, distribute
bc50bcc6
JW
7163 * shares in proportion to utilization.
7164 *
7165 * We are using actual utilization rather than the statically
7166 * claimed protection in order to be work-conserving: claimed
7167 * but unused protection is available to siblings that would
7168 * otherwise get a smaller chunk than what they claimed.
7169 */
7170 if (siblings_protected > parent_effective)
7171 return protected * parent_effective / siblings_protected;
7172
7173 /*
7174 * Ok, utilized protection of all children is within what the
7175 * parent affords them, so we know whatever this child claims
7176 * and utilizes is effectively protected.
7177 *
7178 * If there is unprotected usage beyond this value, reclaim
7179 * will apply pressure in proportion to that amount.
7180 *
7181 * If there is unutilized protection, the cgroup will be fully
7182 * shielded from reclaim, but we do return a smaller value for
7183 * protection than what the group could enjoy in theory. This
7184 * is okay. With the overcommit distribution above, effective
7185 * protection is always dependent on how memory is actually
7186 * consumed among the siblings anyway.
7187 */
8a931f80
JW
7188 ep = protected;
7189
7190 /*
7191 * If the children aren't claiming (all of) the protection
7192 * afforded to them by the parent, distribute the remainder in
7193 * proportion to the (unprotected) memory of each cgroup. That
7194 * way, cgroups that aren't explicitly prioritized wrt each
7195 * other compete freely over the allowance, but they are
7196 * collectively protected from neighboring trees.
7197 *
7198 * We're using unprotected memory for the weight so that if
7199 * some cgroups DO claim explicit protection, we don't protect
7200 * the same bytes twice.
cd324edc
JW
7201 *
7202 * Check both usage and parent_usage against the respective
7203 * protected values. One should imply the other, but they
7204 * aren't read atomically - make sure the division is sane.
8a931f80
JW
7205 */
7206 if (!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT))
7207 return ep;
cd324edc
JW
7208 if (parent_effective > siblings_protected &&
7209 parent_usage > siblings_protected &&
7210 usage > protected) {
8a931f80
JW
7211 unsigned long unclaimed;
7212
7213 unclaimed = parent_effective - siblings_protected;
7214 unclaimed *= usage - protected;
7215 unclaimed /= parent_usage - siblings_protected;
7216
7217 ep += unclaimed;
7218 }
7219
7220 return ep;
bc50bcc6
JW
7221}
7222
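/*
 * Worked example for effective_protection() (the numbers are
 * illustrative, not from any real configuration): suppose a parent's
 * effective protection is 100 pages while its children together claim
 * and use siblings_protected = 200 pages.  A child with usage = 80 and
 * setting = 120 gets protected = min(80, 120) = 80; because 200 > 100,
 * its share is scaled to 80 * 100 / 200 = 40 pages.  With
 * CGRP_ROOT_MEMORY_RECURSIVE_PROT set, any unclaimed parental budget is
 * additionally distributed in proportion to each child's unprotected
 * usage, as done in the "unclaimed" branch above.
 */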
241994ed 7223/**
05395718 7224 * mem_cgroup_calculate_protection - check if memory consumption is in the normal range
34c81057 7225 * @root: the top ancestor of the sub-tree being checked
241994ed
JW
7226 * @memcg: the memory cgroup to check
7227 *
23067153
RG
7228 * WARNING: This function is not stateless! It can only be used as part
7229 * of a top-down tree iteration, not for isolated queries.
241994ed 7230 */
45c7f7e1
CD
7231void mem_cgroup_calculate_protection(struct mem_cgroup *root,
7232 struct mem_cgroup *memcg)
241994ed 7233{
8a931f80 7234 unsigned long usage, parent_usage;
23067153
RG
7235 struct mem_cgroup *parent;
7236
241994ed 7237 if (mem_cgroup_disabled())
45c7f7e1 7238 return;
241994ed 7239
34c81057
SC
7240 if (!root)
7241 root = root_mem_cgroup;
22f7496f
YS
7242
7243 /*
7244 * Effective values of the reclaim targets are ignored so they
7245 * can be stale. Have a look at mem_cgroup_protection for more
7246 * details.
7247 * TODO: calculation should be more robust so that we do not need
7248 * that special casing.
7249 */
34c81057 7250 if (memcg == root)
45c7f7e1 7251 return;
241994ed 7252
23067153 7253 usage = page_counter_read(&memcg->memory);
bf8d5d52 7254 if (!usage)
45c7f7e1 7255 return;
bf8d5d52 7256
bf8d5d52 7257 parent = parent_mem_cgroup(memcg);
df2a4196 7258
bc50bcc6 7259 if (parent == root) {
c3d53200 7260 memcg->memory.emin = READ_ONCE(memcg->memory.min);
03960e33 7261 memcg->memory.elow = READ_ONCE(memcg->memory.low);
45c7f7e1 7262 return;
bf8d5d52
RG
7263 }
7264
8a931f80
JW
7265 parent_usage = page_counter_read(&parent->memory);
7266
b3a7822e 7267 WRITE_ONCE(memcg->memory.emin, effective_protection(usage, parent_usage,
c3d53200
CD
7268 READ_ONCE(memcg->memory.min),
7269 READ_ONCE(parent->memory.emin),
b3a7822e 7270 atomic_long_read(&parent->memory.children_min_usage)));
23067153 7271
b3a7822e 7272 WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage,
03960e33
CD
7273 READ_ONCE(memcg->memory.low),
7274 READ_ONCE(parent->memory.elow),
b3a7822e 7275 atomic_long_read(&parent->memory.children_low_usage)));
241994ed
JW
7276}
7277
8f425e4e
MWO
7278static int charge_memcg(struct folio *folio, struct mem_cgroup *memcg,
7279 gfp_t gfp)
0add0c77 7280{
0add0c77
SB
7281 int ret;
7282
4b569387 7283 ret = try_charge(memcg, gfp, folio_nr_pages(folio));
0add0c77
SB
7284 if (ret)
7285 goto out;
7286
4b569387 7287 mem_cgroup_commit_charge(folio, memcg);
0add0c77
SB
7288out:
7289 return ret;
7290}
7291
8f425e4e 7292int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp)
00501b53 7293{
0add0c77
SB
7294 struct mem_cgroup *memcg;
7295 int ret;
00501b53 7296
0add0c77 7297 memcg = get_mem_cgroup_from_mm(mm);
8f425e4e 7298 ret = charge_memcg(folio, memcg, gfp);
0add0c77 7299 css_put(&memcg->css);
2d1c4980 7300
0add0c77
SB
7301 return ret;
7302}
e993d905 7303
8cba9576
NP
7304/**
7305 * mem_cgroup_hugetlb_try_charge - try to charge the memcg for a hugetlb folio
7306 * @memcg: memcg to charge.
7307 * @gfp: reclaim mode.
7308 * @nr_pages: number of pages to charge.
7309 *
7310 * This function is called when allocating a huge page folio to determine if
7311 * the memcg has the capacity for it. It does not commit the charge yet,
7312 * as the hugetlb folio itself has not been obtained from the hugetlb pool.
7313 *
7314 * Once we have obtained the hugetlb folio, we can call
7315 * mem_cgroup_commit_charge() to commit the charge. If we fail to obtain the
7316 * folio, we should instead call mem_cgroup_cancel_charge() to undo the effect
7317 * of try_charge().
7318 *
7319 * Returns 0 on success. Otherwise, an error code is returned.
7320 */
7321int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg, gfp_t gfp,
7322 long nr_pages)
7323{
7324 /*
7325 * If hugetlb memcg charging is not enabled, do not fail hugetlb allocation,
7326 * but do not attempt to commit charge later (or cancel on error) either.
7327 */
7328 if (mem_cgroup_disabled() || !memcg ||
7329 !cgroup_subsys_on_dfl(memory_cgrp_subsys) ||
7330 !(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING))
7331 return -EOPNOTSUPP;
7332
7333 if (try_charge(memcg, gfp, nr_pages))
7334 return -ENOMEM;
7335
7336 return 0;
7337}
7338
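/*
 * Caller-side sketch of the try/commit/cancel protocol described above
 * (pseudocode; the way the folio is obtained from the hugetlb pool is
 * left as a placeholder):
 *
 *     ret = mem_cgroup_hugetlb_try_charge(memcg, gfp, nr_pages);
 *     if (ret == -ENOMEM)
 *             return NULL;                    // over the memcg limit
 *     // ret == -EOPNOTSUPP means the folio is simply not accounted:
 *     // allocate it, but skip both commit and cancel below.
 *     folio = <dequeue a folio from the hugetlb pool>;
 *     if (!ret) {
 *             if (folio)
 *                     mem_cgroup_commit_charge(folio, memcg);
 *             else
 *                     mem_cgroup_cancel_charge(memcg, nr_pages);
 *     }
 */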
0add0c77 7339/**
65995918
MWO
7340 * mem_cgroup_swapin_charge_folio - Charge a newly allocated folio for swapin.
7341 * @folio: folio to charge.
0add0c77
SB
7342 * @mm: mm context of the victim
7343 * @gfp: reclaim mode
65995918 7344 * @entry: swap entry for which the folio is allocated
0add0c77 7345 *
65995918
MWO
7346 * This function charges a folio allocated for swapin. Please call this before
7347 * adding the folio to the swapcache.
0add0c77
SB
7348 *
7349 * Returns 0 on success. Otherwise, an error code is returned.
7350 */
65995918 7351int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
0add0c77
SB
7352 gfp_t gfp, swp_entry_t entry)
7353{
7354 struct mem_cgroup *memcg;
7355 unsigned short id;
7356 int ret;
00501b53 7357
0add0c77
SB
7358 if (mem_cgroup_disabled())
7359 return 0;
00501b53 7360
0add0c77
SB
7361 id = lookup_swap_cgroup_id(entry);
7362 rcu_read_lock();
7363 memcg = mem_cgroup_from_id(id);
7364 if (!memcg || !css_tryget_online(&memcg->css))
7365 memcg = get_mem_cgroup_from_mm(mm);
7366 rcu_read_unlock();
00501b53 7367
8f425e4e 7368 ret = charge_memcg(folio, memcg, gfp);
6abb5a86 7369
0add0c77
SB
7370 css_put(&memcg->css);
7371 return ret;
7372}
00501b53 7373
0add0c77
SB
7374/*
7375 * mem_cgroup_swapin_uncharge_swap - uncharge swap slot
7376 * @entry: swap entry for which the page is charged
7377 *
7378 * Call this function after successfully adding the charged page to swapcache.
7379 *
 7380 * Note: This function assumes the page for which the swap slot is being
 7381 * uncharged is an order-0 page.
7382 */
7383void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
7384{
cae3af62
MS
7385 /*
7386 * Cgroup1's unified memory+swap counter has been charged with the
7387 * new swapcache page, finish the transfer by uncharging the swap
7388 * slot. The swap slot would also get uncharged when it dies, but
7389 * it can stick around indefinitely and we'd count the page twice
7390 * the entire time.
7391 *
7392 * Cgroup2 has separate resource counters for memory and swap,
7393 * so this is a non-issue here. Memory and swap charge lifetimes
7394 * correspond 1:1 to page and swap slot lifetimes: we charge the
7395 * page to memory here, and uncharge swap when the slot is freed.
7396 */
0add0c77 7397 if (!mem_cgroup_disabled() && do_memsw_account()) {
00501b53
JW
7398 /*
7399 * The swap entry might not get freed for a long time,
7400 * let's not wait for it. The page already received a
7401 * memory+swap charge, drop the swap entry duplicate.
7402 */
0add0c77 7403 mem_cgroup_uncharge_swap(entry, 1);
00501b53 7404 }
3fea5a49
JW
7405}
7406
a9d5adee
JG
7407struct uncharge_gather {
7408 struct mem_cgroup *memcg;
b4e0b68f 7409 unsigned long nr_memory;
a9d5adee 7410 unsigned long pgpgout;
a9d5adee 7411 unsigned long nr_kmem;
8e88bd2d 7412 int nid;
a9d5adee
JG
7413};
7414
7415static inline void uncharge_gather_clear(struct uncharge_gather *ug)
747db954 7416{
a9d5adee
JG
7417 memset(ug, 0, sizeof(*ug));
7418}
7419
7420static void uncharge_batch(const struct uncharge_gather *ug)
7421{
747db954
JW
7422 unsigned long flags;
7423
b4e0b68f
MS
7424 if (ug->nr_memory) {
7425 page_counter_uncharge(&ug->memcg->memory, ug->nr_memory);
7941d214 7426 if (do_memsw_account())
b4e0b68f 7427 page_counter_uncharge(&ug->memcg->memsw, ug->nr_memory);
a8c49af3
YA
7428 if (ug->nr_kmem)
7429 memcg_account_kmem(ug->memcg, -ug->nr_kmem);
a9d5adee 7430 memcg_oom_recover(ug->memcg);
ce00a967 7431 }
747db954
JW
7432
7433 local_irq_save(flags);
c9019e9b 7434 __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
b4e0b68f 7435 __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_memory);
8e88bd2d 7436 memcg_check_events(ug->memcg, ug->nid);
747db954 7437 local_irq_restore(flags);
f1796544 7438
c4ed6ebf 7439 /* drop reference from uncharge_folio */
f1796544 7440 css_put(&ug->memcg->css);
a9d5adee
JG
7441}
7442
c4ed6ebf 7443static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug)
a9d5adee 7444{
c4ed6ebf 7445 long nr_pages;
b4e0b68f
MS
7446 struct mem_cgroup *memcg;
7447 struct obj_cgroup *objcg;
9f762dbe 7448
c4ed6ebf 7449 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
a9d5adee 7450
a9d5adee
JG
7451 /*
7452 * Nobody should be changing or seriously looking at
c4ed6ebf
MWO
7453 * folio memcg or objcg at this point, we have fully
7454 * exclusive access to the folio.
a9d5adee 7455 */
fead2b86 7456 if (folio_memcg_kmem(folio)) {
1b7e4464 7457 objcg = __folio_objcg(folio);
b4e0b68f
MS
7458 /*
7459 * This get matches the put at the end of the function and
7460 * kmem pages do not hold memcg references anymore.
7461 */
7462 memcg = get_mem_cgroup_from_objcg(objcg);
7463 } else {
1b7e4464 7464 memcg = __folio_memcg(folio);
b4e0b68f 7465 }
a9d5adee 7466
b4e0b68f
MS
7467 if (!memcg)
7468 return;
7469
7470 if (ug->memcg != memcg) {
a9d5adee
JG
7471 if (ug->memcg) {
7472 uncharge_batch(ug);
7473 uncharge_gather_clear(ug);
7474 }
b4e0b68f 7475 ug->memcg = memcg;
c4ed6ebf 7476 ug->nid = folio_nid(folio);
f1796544
MH
7477
7478 /* pairs with css_put in uncharge_batch */
b4e0b68f 7479 css_get(&memcg->css);
a9d5adee
JG
7480 }
7481
c4ed6ebf 7482 nr_pages = folio_nr_pages(folio);
a9d5adee 7483
fead2b86 7484 if (folio_memcg_kmem(folio)) {
b4e0b68f 7485 ug->nr_memory += nr_pages;
9f762dbe 7486 ug->nr_kmem += nr_pages;
b4e0b68f 7487
c4ed6ebf 7488 folio->memcg_data = 0;
b4e0b68f
MS
7489 obj_cgroup_put(objcg);
7490 } else {
7491 /* LRU pages aren't accounted at the root level */
7492 if (!mem_cgroup_is_root(memcg))
7493 ug->nr_memory += nr_pages;
18b2db3b 7494 ug->pgpgout++;
a9d5adee 7495
c4ed6ebf 7496 folio->memcg_data = 0;
b4e0b68f
MS
7497 }
7498
7499 css_put(&memcg->css);
747db954
JW
7500}
7501
bbc6b703 7502void __mem_cgroup_uncharge(struct folio *folio)
0a31bc97 7503{
a9d5adee
JG
7504 struct uncharge_gather ug;
7505
bbc6b703
MWO
7506 /* Don't touch folio->lru of any random page, pre-check: */
7507 if (!folio_memcg(folio))
0a31bc97
JW
7508 return;
7509
a9d5adee 7510 uncharge_gather_clear(&ug);
bbc6b703 7511 uncharge_folio(folio, &ug);
a9d5adee 7512 uncharge_batch(&ug);
747db954 7513}
0a31bc97 7514
747db954 7515/**
2c8d8f97 7516 * __mem_cgroup_uncharge_list - uncharge a list of pages
747db954
JW
7517 * @page_list: list of pages to uncharge
7518 *
7519 * Uncharge a list of pages previously charged with
2c8d8f97 7520 * __mem_cgroup_charge().
747db954 7521 */
2c8d8f97 7522void __mem_cgroup_uncharge_list(struct list_head *page_list)
747db954 7523{
c41a40b6 7524 struct uncharge_gather ug;
c4ed6ebf 7525 struct folio *folio;
c41a40b6 7526
c41a40b6 7527 uncharge_gather_clear(&ug);
c4ed6ebf
MWO
7528 list_for_each_entry(folio, page_list, lru)
7529 uncharge_folio(folio, &ug);
c41a40b6
MS
7530 if (ug.memcg)
7531 uncharge_batch(&ug);
0a31bc97
JW
7532}
7533
7534/**
85ce2c51 7535 * mem_cgroup_replace_folio - Charge a folio's replacement.
d21bba2b
MWO
7536 * @old: Currently circulating folio.
7537 * @new: Replacement folio.
0a31bc97 7538 *
d21bba2b 7539 * Charge @new as a replacement folio for @old. @old will
85ce2c51
NP
7540 * be uncharged upon free. This is only used by the page cache
7541 * (in replace_page_cache_folio()).
0a31bc97 7542 *
d21bba2b 7543 * Both folios must be locked, @new->mapping must be set up.
0a31bc97 7544 */
85ce2c51 7545void mem_cgroup_replace_folio(struct folio *old, struct folio *new)
0a31bc97 7546{
29833315 7547 struct mem_cgroup *memcg;
d21bba2b 7548 long nr_pages = folio_nr_pages(new);
d93c4130 7549 unsigned long flags;
0a31bc97 7550
d21bba2b
MWO
7551 VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
7552 VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
7553 VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
7554 VM_BUG_ON_FOLIO(folio_nr_pages(old) != nr_pages, new);
0a31bc97
JW
7555
7556 if (mem_cgroup_disabled())
7557 return;
7558
d21bba2b
MWO
7559 /* Page cache replacement: new folio already charged? */
7560 if (folio_memcg(new))
0a31bc97
JW
7561 return;
7562
d21bba2b
MWO
7563 memcg = folio_memcg(old);
7564 VM_WARN_ON_ONCE_FOLIO(!memcg, old);
29833315 7565 if (!memcg)
0a31bc97
JW
7566 return;
7567
44b7a8d3 7568 /* Force-charge the new page. The old one will be freed soon */
8dc87c7d
MS
7569 if (!mem_cgroup_is_root(memcg)) {
7570 page_counter_charge(&memcg->memory, nr_pages);
7571 if (do_memsw_account())
7572 page_counter_charge(&memcg->memsw, nr_pages);
7573 }
0a31bc97 7574
1a3e1f40 7575 css_get(&memcg->css);
d21bba2b 7576 commit_charge(new, memcg);
44b7a8d3 7577
d93c4130 7578 local_irq_save(flags);
6e0110c2 7579 mem_cgroup_charge_statistics(memcg, nr_pages);
d21bba2b 7580 memcg_check_events(memcg, folio_nid(new));
d93c4130 7581 local_irq_restore(flags);
0a31bc97
JW
7582}
7583
85ce2c51
NP
7584/**
7585 * mem_cgroup_migrate - Transfer the memcg data from the old to the new folio.
7586 * @old: Currently circulating folio.
7587 * @new: Replacement folio.
7588 *
7589 * Transfer the memcg data from the old folio to the new folio for migration.
7590 * The old folio's data info will be cleared. Note that the memory counters
7591 * will remain unchanged throughout the process.
7592 *
7593 * Both folios must be locked, @new->mapping must be set up.
7594 */
7595void mem_cgroup_migrate(struct folio *old, struct folio *new)
7596{
7597 struct mem_cgroup *memcg;
7598
7599 VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
7600 VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
7601 VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
7602 VM_BUG_ON_FOLIO(folio_nr_pages(old) != folio_nr_pages(new), new);
7603
7604 if (mem_cgroup_disabled())
7605 return;
7606
7607 memcg = folio_memcg(old);
8cba9576
NP
7608 /*
7609 * Note that it is normal to see !memcg for a hugetlb folio.
 7610	 * For example, it could have been allocated when memory_hugetlb_accounting
7611 * was not selected.
7612 */
7613 VM_WARN_ON_ONCE_FOLIO(!folio_test_hugetlb(old) && !memcg, old);
85ce2c51
NP
7614 if (!memcg)
7615 return;
7616
7617 /* Transfer the charge and the css ref */
7618 commit_charge(new, memcg);
9bcef597
BW
7619 /*
7620 * If the old folio is a large folio and is in the split queue, it needs
7621 * to be removed from the split queue now, in case getting an incorrect
7622 * split queue in destroy_large_folio() after the memcg of the old folio
7623 * is cleared.
7624 *
7625 * In addition, the old folio is about to be freed after migration, so
7626 * removing from the split queue a bit earlier seems reasonable.
7627 */
7628 if (folio_test_large(old) && folio_test_large_rmappable(old))
7629 folio_undo_large_rmappable(old);
85ce2c51
NP
7630 old->memcg_data = 0;
7631}
7632
ef12947c 7633DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
11092087
JW
7634EXPORT_SYMBOL(memcg_sockets_enabled_key);
7635
2d758073 7636void mem_cgroup_sk_alloc(struct sock *sk)
11092087
JW
7637{
7638 struct mem_cgroup *memcg;
7639
2d758073
JW
7640 if (!mem_cgroup_sockets_enabled)
7641 return;
7642
e876ecc6 7643 /* Do not associate the sock with unrelated interrupted task's memcg. */
086f694a 7644 if (!in_task())
e876ecc6
SB
7645 return;
7646
11092087
JW
7647 rcu_read_lock();
7648 memcg = mem_cgroup_from_task(current);
7848ed62 7649 if (mem_cgroup_is_root(memcg))
f7e1cb6e 7650 goto out;
0db15298 7651 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
f7e1cb6e 7652 goto out;
8965aa28 7653 if (css_tryget(&memcg->css))
11092087 7654 sk->sk_memcg = memcg;
f7e1cb6e 7655out:
11092087
JW
7656 rcu_read_unlock();
7657}
11092087 7658
2d758073 7659void mem_cgroup_sk_free(struct sock *sk)
11092087 7660{
2d758073
JW
7661 if (sk->sk_memcg)
7662 css_put(&sk->sk_memcg->css);
11092087
JW
7663}
7664
7665/**
7666 * mem_cgroup_charge_skmem - charge socket memory
7667 * @memcg: memcg to charge
7668 * @nr_pages: number of pages to charge
4b1327be 7669 * @gfp_mask: reclaim mode
11092087
JW
7670 *
7671 * Charges @nr_pages to @memcg. Returns %true if the charge fit within
4b1327be 7672 * @memcg's configured limit, %false if it doesn't.
11092087 7673 */
4b1327be
WW
7674bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
7675 gfp_t gfp_mask)
11092087 7676{
f7e1cb6e 7677 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
0db15298 7678 struct page_counter *fail;
f7e1cb6e 7679
0db15298
JW
7680 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
7681 memcg->tcpmem_pressure = 0;
f7e1cb6e
JW
7682 return true;
7683 }
0db15298 7684 memcg->tcpmem_pressure = 1;
4b1327be
WW
7685 if (gfp_mask & __GFP_NOFAIL) {
7686 page_counter_charge(&memcg->tcpmem, nr_pages);
7687 return true;
7688 }
f7e1cb6e 7689 return false;
11092087 7690 }
d886f4e4 7691
4b1327be
WW
7692 if (try_charge(memcg, gfp_mask, nr_pages) == 0) {
7693 mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
f7e1cb6e 7694 return true;
4b1327be 7695 }
f7e1cb6e 7696
11092087
JW
7697 return false;
7698}
7699
7700/**
7701 * mem_cgroup_uncharge_skmem - uncharge socket memory
b7701a5f
MR
7702 * @memcg: memcg to uncharge
7703 * @nr_pages: number of pages to uncharge
11092087
JW
7704 */
7705void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
7706{
f7e1cb6e 7707 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
0db15298 7708 page_counter_uncharge(&memcg->tcpmem, nr_pages);
f7e1cb6e
JW
7709 return;
7710 }
d886f4e4 7711
c9019e9b 7712 mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
b2807f07 7713
475d0487 7714 refill_stock(memcg, nr_pages);
11092087
JW
7715}
7716
f7e1cb6e
JW
7717static int __init cgroup_memory(char *s)
7718{
7719 char *token;
7720
7721 while ((token = strsep(&s, ",")) != NULL) {
7722 if (!*token)
7723 continue;
7724 if (!strcmp(token, "nosocket"))
7725 cgroup_memory_nosocket = true;
04823c83
VD
7726 if (!strcmp(token, "nokmem"))
7727 cgroup_memory_nokmem = true;
b6c1a8af
YS
7728 if (!strcmp(token, "nobpf"))
7729 cgroup_memory_nobpf = true;
f7e1cb6e 7730 }
460a79e1 7731 return 1;
f7e1cb6e
JW
7732}
7733__setup("cgroup.memory=", cgroup_memory);
11092087 7734
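/*
 * Example (illustrative): booting with
 *
 *     cgroup.memory=nokmem,nosocket
 *
 * makes cgroup_memory() above set cgroup_memory_nokmem and
 * cgroup_memory_nosocket, disabling kernel-memory and socket-memory
 * accounting respectively.  Tokens it does not recognize are silently
 * ignored.
 */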
2d11085e 7735/*
1081312f
MH
7736 * subsys_initcall() for memory controller.
7737 *
308167fc
SAS
7738 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
7739 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
7740 * basically everything that doesn't depend on a specific mem_cgroup structure
7741 * should be initialized from here.
2d11085e
MH
7742 */
7743static int __init mem_cgroup_init(void)
7744{
95a045f6
JW
7745 int cpu, node;
7746
f3344adf
MS
7747 /*
7748 * Currently s32 type (can refer to struct batched_lruvec_stat) is
7749 * used for per-memcg-per-cpu caching of per-node statistics. In order
7750 * to work fine, we should make sure that the overfill threshold can't
7751 * exceed S32_MAX / PAGE_SIZE.
7752 */
7753 BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S32_MAX / PAGE_SIZE);
7754
308167fc
SAS
7755 cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
7756 memcg_hotplug_cpu_dead);
95a045f6
JW
7757
7758 for_each_possible_cpu(cpu)
7759 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
7760 drain_local_stock);
7761
7762 for_each_node(node) {
7763 struct mem_cgroup_tree_per_node *rtpn;
95a045f6 7764
91f0dcce 7765 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, node);
95a045f6 7766
ef8f2327 7767 rtpn->rb_root = RB_ROOT;
fa90b2fd 7768 rtpn->rb_rightmost = NULL;
ef8f2327 7769 spin_lock_init(&rtpn->lock);
95a045f6
JW
7770 soft_limit_tree.rb_tree_per_node[node] = rtpn;
7771 }
7772
2d11085e
MH
7773 return 0;
7774}
7775subsys_initcall(mem_cgroup_init);
21afa38e 7776
e55b9f96 7777#ifdef CONFIG_SWAP
358c07fc
AB
7778static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
7779{
1c2d479a 7780 while (!refcount_inc_not_zero(&memcg->id.ref)) {
358c07fc
AB
7781 /*
 7782		 * The root cgroup cannot be destroyed, so its refcount must
7783 * always be >= 1.
7784 */
7848ed62 7785 if (WARN_ON_ONCE(mem_cgroup_is_root(memcg))) {
358c07fc
AB
7786 VM_BUG_ON(1);
7787 break;
7788 }
7789 memcg = parent_mem_cgroup(memcg);
7790 if (!memcg)
7791 memcg = root_mem_cgroup;
7792 }
7793 return memcg;
7794}
7795
21afa38e
JW
7796/**
7797 * mem_cgroup_swapout - transfer a memsw charge to swap
3ecb0087 7798 * @folio: folio whose memsw charge to transfer
21afa38e
JW
7799 * @entry: swap entry to move the charge to
7800 *
3ecb0087 7801 * Transfer the memsw charge of @folio to @entry.
21afa38e 7802 */
3ecb0087 7803void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
21afa38e 7804{
1f47b61f 7805 struct mem_cgroup *memcg, *swap_memcg;
d6810d73 7806 unsigned int nr_entries;
21afa38e
JW
7807 unsigned short oldid;
7808
3ecb0087
MWO
7809 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
7810 VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
21afa38e 7811
76358ab5
AS
7812 if (mem_cgroup_disabled())
7813 return;
7814
b94c4e94 7815 if (!do_memsw_account())
21afa38e
JW
7816 return;
7817
3ecb0087 7818 memcg = folio_memcg(folio);
21afa38e 7819
3ecb0087 7820 VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
21afa38e
JW
7821 if (!memcg)
7822 return;
7823
1f47b61f
VD
7824 /*
7825 * In case the memcg owning these pages has been offlined and doesn't
7826 * have an ID allocated to it anymore, charge the closest online
7827 * ancestor for the swap instead and transfer the memory+swap charge.
7828 */
7829 swap_memcg = mem_cgroup_id_get_online(memcg);
3ecb0087 7830 nr_entries = folio_nr_pages(folio);
d6810d73
YH
7831 /* Get references for the tail pages, too */
7832 if (nr_entries > 1)
7833 mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
7834 oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg),
7835 nr_entries);
3ecb0087 7836 VM_BUG_ON_FOLIO(oldid, folio);
c9019e9b 7837 mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
21afa38e 7838
3ecb0087 7839 folio->memcg_data = 0;
21afa38e
JW
7840
7841 if (!mem_cgroup_is_root(memcg))
d6810d73 7842 page_counter_uncharge(&memcg->memory, nr_entries);
21afa38e 7843
b25806dc 7844 if (memcg != swap_memcg) {
1f47b61f 7845 if (!mem_cgroup_is_root(swap_memcg))
d6810d73
YH
7846 page_counter_charge(&swap_memcg->memsw, nr_entries);
7847 page_counter_uncharge(&memcg->memsw, nr_entries);
1f47b61f
VD
7848 }
7849
ce9ce665
SAS
7850 /*
7851 * Interrupts should be disabled here because the caller holds the
b93b0163 7852 * i_pages lock which is taken with interrupts-off. It is
ce9ce665 7853 * important here to have the interrupts disabled because it is the
b93b0163 7854 * only synchronisation we have for updating the per-CPU variables.
ce9ce665 7855 */
be3e67b5 7856 memcg_stats_lock();
6e0110c2 7857 mem_cgroup_charge_statistics(memcg, -nr_entries);
be3e67b5 7858 memcg_stats_unlock();
3ecb0087 7859 memcg_check_events(memcg, folio_nid(folio));
73f576c0 7860
1a3e1f40 7861 css_put(&memcg->css);
21afa38e
JW
7862}
7863
38d8b4e6 7864/**
e2e3fdc7
MWO
7865 * __mem_cgroup_try_charge_swap - try charging swap space for a folio
7866 * @folio: folio being added to swap
37e84351
VD
7867 * @entry: swap entry to charge
7868 *
e2e3fdc7 7869 * Try to charge @folio's memcg for the swap space at @entry.
37e84351
VD
7870 *
7871 * Returns 0 on success, -ENOMEM on failure.
7872 */
e2e3fdc7 7873int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry)
37e84351 7874{
e2e3fdc7 7875 unsigned int nr_pages = folio_nr_pages(folio);
37e84351 7876 struct page_counter *counter;
38d8b4e6 7877 struct mem_cgroup *memcg;
37e84351
VD
7878 unsigned short oldid;
7879
b94c4e94 7880 if (do_memsw_account())
37e84351
VD
7881 return 0;
7882
e2e3fdc7 7883 memcg = folio_memcg(folio);
37e84351 7884
e2e3fdc7 7885 VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
37e84351
VD
7886 if (!memcg)
7887 return 0;
7888
f3a53a3a
TH
7889 if (!entry.val) {
7890 memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
bb98f2c5 7891 return 0;
f3a53a3a 7892 }
bb98f2c5 7893
1f47b61f
VD
7894 memcg = mem_cgroup_id_get_online(memcg);
7895
b25806dc 7896 if (!mem_cgroup_is_root(memcg) &&
38d8b4e6 7897 !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
f3a53a3a
TH
7898 memcg_memory_event(memcg, MEMCG_SWAP_MAX);
7899 memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
1f47b61f 7900 mem_cgroup_id_put(memcg);
37e84351 7901 return -ENOMEM;
1f47b61f 7902 }
37e84351 7903
38d8b4e6
YH
7904 /* Get references for the tail pages, too */
7905 if (nr_pages > 1)
7906 mem_cgroup_id_get_many(memcg, nr_pages - 1);
7907 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
e2e3fdc7 7908 VM_BUG_ON_FOLIO(oldid, folio);
c9019e9b 7909 mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
37e84351 7910
37e84351
VD
7911 return 0;
7912}
7913
21afa38e 7914/**
01c4b28c 7915 * __mem_cgroup_uncharge_swap - uncharge swap space
21afa38e 7916 * @entry: swap entry to uncharge
38d8b4e6 7917 * @nr_pages: the amount of swap space to uncharge
21afa38e 7918 */
01c4b28c 7919void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
21afa38e
JW
7920{
7921 struct mem_cgroup *memcg;
7922 unsigned short id;
7923
38d8b4e6 7924 id = swap_cgroup_record(entry, 0, nr_pages);
21afa38e 7925 rcu_read_lock();
adbe427b 7926 memcg = mem_cgroup_from_id(id);
21afa38e 7927 if (memcg) {
b25806dc 7928 if (!mem_cgroup_is_root(memcg)) {
b94c4e94 7929 if (do_memsw_account())
38d8b4e6 7930 page_counter_uncharge(&memcg->memsw, nr_pages);
b94c4e94
JW
7931 else
7932 page_counter_uncharge(&memcg->swap, nr_pages);
37e84351 7933 }
c9019e9b 7934 mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
38d8b4e6 7935 mem_cgroup_id_put_many(memcg, nr_pages);
21afa38e
JW
7936 }
7937 rcu_read_unlock();
7938}
7939
d8b38438
VD
7940long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
7941{
7942 long nr_swap_pages = get_nr_swap_pages();
7943
b25806dc 7944 if (mem_cgroup_disabled() || do_memsw_account())
d8b38438 7945 return nr_swap_pages;
7848ed62 7946 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg))
d8b38438 7947 nr_swap_pages = min_t(long, nr_swap_pages,
bbec2e15 7948 READ_ONCE(memcg->swap.max) -
d8b38438
VD
7949 page_counter_read(&memcg->swap));
7950 return nr_swap_pages;
7951}
7952
9202d527 7953bool mem_cgroup_swap_full(struct folio *folio)
5ccc5aba
VD
7954{
7955 struct mem_cgroup *memcg;
7956
9202d527 7957 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
5ccc5aba
VD
7958
7959 if (vm_swap_full())
7960 return true;
b25806dc 7961 if (do_memsw_account())
5ccc5aba
VD
7962 return false;
7963
9202d527 7964 memcg = folio_memcg(folio);
5ccc5aba
VD
7965 if (!memcg)
7966 return false;
7967
7848ed62 7968 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
4b82ab4f
JK
7969 unsigned long usage = page_counter_read(&memcg->swap);
7970
7971 if (usage * 2 >= READ_ONCE(memcg->swap.high) ||
7972 usage * 2 >= READ_ONCE(memcg->swap.max))
5ccc5aba 7973 return true;
4b82ab4f 7974 }
5ccc5aba
VD
7975
7976 return false;
7977}
7978
eccb52e7 7979static int __init setup_swap_account(char *s)
21afa38e 7980{
118642d7
JW
7981 bool res;
7982
7983 if (!kstrtobool(s, &res) && !res)
7984 pr_warn_once("The swapaccount=0 commandline option is deprecated "
7985 "in favor of configuring swap control via cgroupfs. "
7986 "Please report your usecase to [email protected] if you "
7987 "depend on this functionality.\n");
21afa38e
JW
7988 return 1;
7989}
eccb52e7 7990__setup("swapaccount=", setup_swap_account);
21afa38e 7991
37e84351
VD
7992static u64 swap_current_read(struct cgroup_subsys_state *css,
7993 struct cftype *cft)
7994{
7995 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
7996
7997 return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
7998}
7999
e0e0b412
LD
8000static u64 swap_peak_read(struct cgroup_subsys_state *css,
8001 struct cftype *cft)
8002{
8003 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
8004
8005 return (u64)memcg->swap.watermark * PAGE_SIZE;
8006}
8007
4b82ab4f
JK
8008static int swap_high_show(struct seq_file *m, void *v)
8009{
8010 return seq_puts_memcg_tunable(m,
8011 READ_ONCE(mem_cgroup_from_seq(m)->swap.high));
8012}
8013
8014static ssize_t swap_high_write(struct kernfs_open_file *of,
8015 char *buf, size_t nbytes, loff_t off)
8016{
8017 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
8018 unsigned long high;
8019 int err;
8020
8021 buf = strstrip(buf);
8022 err = page_counter_memparse(buf, "max", &high);
8023 if (err)
8024 return err;
8025
8026 page_counter_set_high(&memcg->swap, high);
8027
8028 return nbytes;
8029}
8030
37e84351
VD
8031static int swap_max_show(struct seq_file *m, void *v)
8032{
677dc973
CD
8033 return seq_puts_memcg_tunable(m,
8034 READ_ONCE(mem_cgroup_from_seq(m)->swap.max));
37e84351
VD
8035}
8036
8037static ssize_t swap_max_write(struct kernfs_open_file *of,
8038 char *buf, size_t nbytes, loff_t off)
8039{
8040 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
8041 unsigned long max;
8042 int err;
8043
8044 buf = strstrip(buf);
8045 err = page_counter_memparse(buf, "max", &max);
8046 if (err)
8047 return err;
8048
be09102b 8049 xchg(&memcg->swap.max, max);
37e84351
VD
8050
8051 return nbytes;
8052}
8053
f3a53a3a
TH
8054static int swap_events_show(struct seq_file *m, void *v)
8055{
aa9694bb 8056 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
f3a53a3a 8057
4b82ab4f
JK
8058 seq_printf(m, "high %lu\n",
8059 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH]));
f3a53a3a
TH
8060 seq_printf(m, "max %lu\n",
8061 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
8062 seq_printf(m, "fail %lu\n",
8063 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));
8064
8065 return 0;
8066}
8067
37e84351
VD
8068static struct cftype swap_files[] = {
8069 {
8070 .name = "swap.current",
8071 .flags = CFTYPE_NOT_ON_ROOT,
8072 .read_u64 = swap_current_read,
8073 },
4b82ab4f
JK
8074 {
8075 .name = "swap.high",
8076 .flags = CFTYPE_NOT_ON_ROOT,
8077 .seq_show = swap_high_show,
8078 .write = swap_high_write,
8079 },
37e84351
VD
8080 {
8081 .name = "swap.max",
8082 .flags = CFTYPE_NOT_ON_ROOT,
8083 .seq_show = swap_max_show,
8084 .write = swap_max_write,
8085 },
e0e0b412
LD
8086 {
8087 .name = "swap.peak",
8088 .flags = CFTYPE_NOT_ON_ROOT,
8089 .read_u64 = swap_peak_read,
8090 },
f3a53a3a
TH
8091 {
8092 .name = "swap.events",
8093 .flags = CFTYPE_NOT_ON_ROOT,
8094 .file_offset = offsetof(struct mem_cgroup, swap_events_file),
8095 .seq_show = swap_events_show,
8096 },
37e84351
VD
8097 { } /* terminate */
8098};
8099
eccb52e7 8100static struct cftype memsw_files[] = {
21afa38e
JW
8101 {
8102 .name = "memsw.usage_in_bytes",
8103 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
8104 .read_u64 = mem_cgroup_read_u64,
8105 },
8106 {
8107 .name = "memsw.max_usage_in_bytes",
8108 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
8109 .write = mem_cgroup_reset,
8110 .read_u64 = mem_cgroup_read_u64,
8111 },
8112 {
8113 .name = "memsw.limit_in_bytes",
8114 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
8115 .write = mem_cgroup_write,
8116 .read_u64 = mem_cgroup_read_u64,
8117 },
8118 {
8119 .name = "memsw.failcnt",
8120 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
8121 .write = mem_cgroup_reset,
8122 .read_u64 = mem_cgroup_read_u64,
8123 },
8124 { }, /* terminate */
8125};
8126
f4840ccf
JW
8127#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
8128/**
8129 * obj_cgroup_may_zswap - check if this cgroup can zswap
8130 * @objcg: the object cgroup
8131 *
8132 * Check if the hierarchical zswap limit has been reached.
8133 *
8134 * This doesn't check for specific headroom, and it is not atomic
8135 * either. But with zswap, the size of the allocation is only known
be16dd76 8136 * once compression has occurred, and this optimistic pre-check avoids
f4840ccf
JW
8137 * spending cycles on compression when there is already no room left
8138 * or zswap is disabled altogether somewhere in the hierarchy.
8139 */
8140bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
8141{
8142 struct mem_cgroup *memcg, *original_memcg;
8143 bool ret = true;
8144
8145 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
8146 return true;
8147
8148 original_memcg = get_mem_cgroup_from_objcg(objcg);
7848ed62 8149 for (memcg = original_memcg; !mem_cgroup_is_root(memcg);
f4840ccf
JW
8150 memcg = parent_mem_cgroup(memcg)) {
8151 unsigned long max = READ_ONCE(memcg->zswap_max);
8152 unsigned long pages;
8153
8154 if (max == PAGE_COUNTER_MAX)
8155 continue;
8156 if (max == 0) {
8157 ret = false;
8158 break;
8159 }
8160
7d7ef0a4
YA
8161 /*
8162 * mem_cgroup_flush_stats() ignores small changes. Use
8163 * do_flush_stats() directly to get accurate stats for charging.
8164 */
8165 do_flush_stats(memcg);
f4840ccf
JW
8166 pages = memcg_page_state(memcg, MEMCG_ZSWAP_B) / PAGE_SIZE;
8167 if (pages < max)
8168 continue;
8169 ret = false;
8170 break;
8171 }
8172 mem_cgroup_put(original_memcg);
8173 return ret;
8174}
8175
8176/**
8177 * obj_cgroup_charge_zswap - charge compression backend memory
8178 * @objcg: the object cgroup
8179 * @size: size of compressed object
8180 *
3a1060c2 8181 * This forces the charge after obj_cgroup_may_zswap() allowed
f4840ccf
JW
 8182 * compression and storage in zswap for this cgroup to go ahead.
8183 */
8184void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size)
8185{
8186 struct mem_cgroup *memcg;
8187
8188 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
8189 return;
8190
8191 VM_WARN_ON_ONCE(!(current->flags & PF_MEMALLOC));
8192
8193 /* PF_MEMALLOC context, charging must succeed */
8194 if (obj_cgroup_charge(objcg, GFP_KERNEL, size))
8195 VM_WARN_ON_ONCE(1);
8196
8197 rcu_read_lock();
8198 memcg = obj_cgroup_memcg(objcg);
8199 mod_memcg_state(memcg, MEMCG_ZSWAP_B, size);
8200 mod_memcg_state(memcg, MEMCG_ZSWAPPED, 1);
8201 rcu_read_unlock();
8202}
8203
8204/**
8205 * obj_cgroup_uncharge_zswap - uncharge compression backend memory
8206 * @objcg: the object cgroup
8207 * @size: size of compressed object
8208 *
8209 * Uncharges zswap memory on page in.
8210 */
8211void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size)
8212{
8213 struct mem_cgroup *memcg;
8214
8215 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
8216 return;
8217
8218 obj_cgroup_uncharge(objcg, size);
8219
8220 rcu_read_lock();
8221 memcg = obj_cgroup_memcg(objcg);
8222 mod_memcg_state(memcg, MEMCG_ZSWAP_B, -size);
8223 mod_memcg_state(memcg, MEMCG_ZSWAPPED, -1);
8224 rcu_read_unlock();
8225}
8226
501a06fe
NP
8227bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg)
8228{
8229 /* if zswap is disabled, do not block pages going to the swapping device */
8230 return !is_zswap_enabled() || !memcg || READ_ONCE(memcg->zswap_writeback);
8231}
8232
f4840ccf
JW
8233static u64 zswap_current_read(struct cgroup_subsys_state *css,
8234 struct cftype *cft)
8235{
7d7ef0a4
YA
8236 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
8237
8238 mem_cgroup_flush_stats(memcg);
8239 return memcg_page_state(memcg, MEMCG_ZSWAP_B);
f4840ccf
JW
8240}
8241
8242static int zswap_max_show(struct seq_file *m, void *v)
8243{
8244 return seq_puts_memcg_tunable(m,
8245 READ_ONCE(mem_cgroup_from_seq(m)->zswap_max));
8246}
8247
8248static ssize_t zswap_max_write(struct kernfs_open_file *of,
8249 char *buf, size_t nbytes, loff_t off)
8250{
8251 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
8252 unsigned long max;
8253 int err;
8254
8255 buf = strstrip(buf);
8256 err = page_counter_memparse(buf, "max", &max);
8257 if (err)
8258 return err;
8259
8260 xchg(&memcg->zswap_max, max);
8261
8262 return nbytes;
8263}
8264
501a06fe
NP
8265static int zswap_writeback_show(struct seq_file *m, void *v)
8266{
8267 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
8268
8269 seq_printf(m, "%d\n", READ_ONCE(memcg->zswap_writeback));
8270 return 0;
8271}
8272
8273static ssize_t zswap_writeback_write(struct kernfs_open_file *of,
8274 char *buf, size_t nbytes, loff_t off)
8275{
8276 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
8277 int zswap_writeback;
8278 ssize_t parse_ret = kstrtoint(strstrip(buf), 0, &zswap_writeback);
8279
8280 if (parse_ret)
8281 return parse_ret;
8282
8283 if (zswap_writeback != 0 && zswap_writeback != 1)
8284 return -EINVAL;
8285
8286 WRITE_ONCE(memcg->zswap_writeback, zswap_writeback);
8287 return nbytes;
8288}
8289
f4840ccf
JW
8290static struct cftype zswap_files[] = {
8291 {
8292 .name = "zswap.current",
8293 .flags = CFTYPE_NOT_ON_ROOT,
8294 .read_u64 = zswap_current_read,
8295 },
8296 {
8297 .name = "zswap.max",
8298 .flags = CFTYPE_NOT_ON_ROOT,
8299 .seq_show = zswap_max_show,
8300 .write = zswap_max_write,
8301 },
501a06fe
NP
8302 {
8303 .name = "zswap.writeback",
8304 .seq_show = zswap_writeback_show,
8305 .write = zswap_writeback_write,
8306 },
f4840ccf
JW
8307 { } /* terminate */
8308};
8309#endif /* CONFIG_MEMCG_KMEM && CONFIG_ZSWAP */
8310
21afa38e
JW
8311static int __init mem_cgroup_swap_init(void)
8312{
2d1c4980 8313 if (mem_cgroup_disabled())
eccb52e7
JW
8314 return 0;
8315
8316 WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files));
8317 WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files));
f4840ccf
JW
8318#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
8319 WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, zswap_files));
8320#endif
21afa38e
JW
8321 return 0;
8322}
b25806dc 8323subsys_initcall(mem_cgroup_swap_init);
21afa38e 8324
e55b9f96 8325#endif /* CONFIG_SWAP */