// SPDX-License-Identifier: GPL-2.0
/*
 * Lockless hierarchical page accounting & limiting
 *
 * Copyright (C) 2014 Red Hat, Inc., Johannes Weiner
 */

#include <linux/page_counter.h>
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/bug.h>
#include <asm/page.h>

static void propagate_protected_usage(struct page_counter *c,
				      unsigned long usage)
{
	unsigned long protected, old_protected;
	unsigned long low, min;
	long delta;

	if (!c->parent)
		return;

	min = READ_ONCE(c->min);
	if (min || atomic_long_read(&c->min_usage)) {
		protected = min(usage, min);
		old_protected = atomic_long_xchg(&c->min_usage, protected);
		delta = protected - old_protected;
		if (delta)
			atomic_long_add(delta, &c->parent->children_min_usage);
	}

	low = READ_ONCE(c->low);
	if (low || atomic_long_read(&c->low_usage)) {
		protected = min(usage, low);
		old_protected = atomic_long_xchg(&c->low_usage, protected);
		delta = protected - old_protected;
		if (delta)
			atomic_long_add(delta, &c->parent->children_low_usage);
	}
}
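
/*
 * Worked example (illustrative, not from the source): suppose c->min
 * is 100 pages and c's usage rises from 80 to 120 pages.  "protected"
 * then moves from 80 to min(120, 100) == 100, the xchg yields a delta
 * of +20, and the parent's children_min_usage grows by 20.  If usage
 * later drops to 60, the delta is -40 and the parent's aggregate
 * shrinks by the same amount, so each parent tracks the sum of its
 * children's protected usage without taking any locks.
 */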

/**
 * page_counter_cancel - take pages out of the local counter
 * @counter: counter
 * @nr_pages: number of pages to cancel
 */
void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages)
{
	long new;

	new = atomic_long_sub_return(nr_pages, &counter->usage);
	propagate_protected_usage(counter, new);
	/* More uncharges than charges? */
	WARN_ON_ONCE(new < 0);
}

/**
 * page_counter_charge - hierarchically charge pages
 * @counter: counter
 * @nr_pages: number of pages to charge
 *
 * NOTE: This does not consider any configured counter limits.
 */
void page_counter_charge(struct page_counter *counter, unsigned long nr_pages)
{
	struct page_counter *c;

	for (c = counter; c; c = c->parent) {
		long new;

		new = atomic_long_add_return(nr_pages, &c->usage);
		propagate_protected_usage(c, new);
		/*
		 * This is indeed racy, but we can live with some
		 * inaccuracy in the watermark.
		 */
		if (new > READ_ONCE(c->watermark))
			WRITE_ONCE(c->watermark, new);
	}
}
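
/*
 * Usage sketch (illustrative, not part of this file): this variant is
 * for callers that must not fail, e.g. when the pages are already in
 * use and only the accounting is catching up.  The counter name below
 * is hypothetical:
 *
 *	page_counter_charge(&my_group->memory, nr_pages);
 *	...
 *	page_counter_uncharge(&my_group->memory, nr_pages);
 */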

/**
 * page_counter_try_charge - try to hierarchically charge pages
 * @counter: counter
 * @nr_pages: number of pages to charge
 * @fail: points first counter to hit its limit, if any
 *
 * Returns %true on success, or %false and @fail if the counter or one
 * of its ancestors has hit its configured limit.
 */
bool page_counter_try_charge(struct page_counter *counter,
			     unsigned long nr_pages,
			     struct page_counter **fail)
{
	struct page_counter *c;

	for (c = counter; c; c = c->parent) {
		long new;
		/*
		 * Charge speculatively to avoid an expensive CAS.  If
		 * a bigger charge fails, it might falsely lock out a
		 * racing smaller charge and send it into reclaim
		 * early, but the error is limited to the difference
		 * between the two sizes, which is less than 2M/4M in
		 * case of a THP locking out a regular page charge.
		 *
		 * The atomic_long_add_return() implies a full memory
		 * barrier between incrementing the count and reading
		 * the limit.  When racing with page_counter_set_max(),
		 * we either see the new limit or the setter sees the
		 * counter has changed and retries.
		 */
		new = atomic_long_add_return(nr_pages, &c->usage);
		if (new > c->max) {
			atomic_long_sub(nr_pages, &c->usage);
			propagate_protected_usage(c, new);
			/*
			 * This is racy, but we can live with some
			 * inaccuracy in the failcnt which is only used
			 * to report stats.
			 */
			data_race(c->failcnt++);
			*fail = c;
			goto failed;
		}
		propagate_protected_usage(c, new);
		/*
		 * Just like with failcnt, we can live with some
		 * inaccuracy in the watermark.
		 */
		if (new > READ_ONCE(c->watermark))
			WRITE_ONCE(c->watermark, new);
	}
	return true;

failed:
	for (c = counter; c != *fail; c = c->parent)
		page_counter_cancel(c, nr_pages);

	return false;
}
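
/*
 * Usage sketch (illustrative; my_counter and my_reclaim() are
 * hypothetical): the typical pattern is to try the charge and, on
 * failure, reclaim from the counter that hit its limit before
 * retrying or giving up:
 *
 *	struct page_counter *fail;
 *
 *	while (!page_counter_try_charge(my_counter, nr_pages, &fail)) {
 *		if (!my_reclaim(fail, nr_pages))
 *			return -ENOMEM;
 *	}
 */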

/**
 * page_counter_uncharge - hierarchically uncharge pages
 * @counter: counter
 * @nr_pages: number of pages to uncharge
 */
void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages)
{
	struct page_counter *c;

	for (c = counter; c; c = c->parent)
		page_counter_cancel(c, nr_pages);
}

/**
 * page_counter_set_max - set the maximum number of pages allowed
 * @counter: counter
 * @nr_pages: limit to set
 *
 * Returns 0 on success, -EBUSY if the current number of pages on the
 * counter already exceeds the specified limit.
 *
 * The caller must serialize invocations on the same counter.
 */
int page_counter_set_max(struct page_counter *counter, unsigned long nr_pages)
{
	for (;;) {
		unsigned long old;
		long usage;

		/*
		 * Update the limit while making sure that it's not
		 * below the concurrently-changing counter value.
		 *
		 * The xchg implies two full memory barriers before
		 * and after, so the read-swap-read is ordered and
		 * ensures coherency with page_counter_try_charge():
		 * that function modifies the count before checking
		 * the limit, so if it sees the old limit, we see the
		 * modified counter and retry.
		 */
		usage = page_counter_read(counter);

		if (usage > nr_pages)
			return -EBUSY;

		old = xchg(&counter->max, nr_pages);

		if (page_counter_read(counter) <= usage)
			return 0;

		counter->max = old;
		cond_resched();
	}
}
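
/*
 * Race sketch (illustrative) of why the usage is re-read after the
 * xchg above.  Suppose usage is 90 pages and the new limit is 100:
 *
 *	page_counter_set_max(100)	page_counter_try_charge(20)
 *	usage = read() == 90, ok
 *					usage: 90 -> 110
 *					110 <= old limit: success
 *	xchg(&max, 100)
 *	read() == 110 > 90: restore
 *	the old limit and retry
 *
 * The retry sees usage 110 > 100 and returns -EBUSY instead of
 * silently leaving the counter above its new limit.
 */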

/**
 * page_counter_set_min - set the amount of protected memory
 * @counter: counter
 * @nr_pages: value to set
 *
 * The caller must serialize invocations on the same counter.
 */
void page_counter_set_min(struct page_counter *counter, unsigned long nr_pages)
{
	struct page_counter *c;

	WRITE_ONCE(counter->min, nr_pages);

	for (c = counter; c; c = c->parent)
		propagate_protected_usage(c, atomic_long_read(&c->usage));
}

/**
 * page_counter_set_low - set the amount of protected memory
 * @counter: counter
 * @nr_pages: value to set
 *
 * The caller must serialize invocations on the same counter.
 */
void page_counter_set_low(struct page_counter *counter, unsigned long nr_pages)
{
	struct page_counter *c;

	WRITE_ONCE(counter->low, nr_pages);

	for (c = counter; c; c = c->parent)
		propagate_protected_usage(c, atomic_long_read(&c->usage));
}

/**
 * page_counter_memparse - memparse() for page counter limits
 * @buf: string to parse
 * @max: string meaning maximum possible value
 * @nr_pages: returns the result in number of pages
 *
 * Returns -EINVAL, or 0 and @nr_pages on success.  @nr_pages will be
 * limited to %PAGE_COUNTER_MAX.
 */
int page_counter_memparse(const char *buf, const char *max,
			  unsigned long *nr_pages)
{
	char *end;
	u64 bytes;

	if (!strcmp(buf, max)) {
		*nr_pages = PAGE_COUNTER_MAX;
		return 0;
	}

	bytes = memparse(buf, &end);
	if (*end != '\0')
		return -EINVAL;

	*nr_pages = min(bytes / PAGE_SIZE, (u64)PAGE_COUNTER_MAX);

	return 0;
}
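
/*
 * Usage sketch (illustrative; @buf typically comes from a cgroup
 * control file write, and "counter" below is hypothetical):
 *
 *	unsigned long nr_pages;
 *
 *	if (page_counter_memparse("512M", "max", &nr_pages))
 *		return -EINVAL;
 *	return page_counter_set_max(counter, nr_pages);
 *
 * With 4K pages, "512M" parses to nr_pages == 131072; the string
 * "max" would instead yield PAGE_COUNTER_MAX.
 */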