// SPDX-License-Identifier: GPL-2.0
/*
 *	linux/mm/mlock.c
 *
 *  (C) Copyright 1995 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 */

#include <linux/capability.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/sched/user.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/pagewalk.h>
#include <linux/mempolicy.h>
#include <linux/syscalls.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/rmap.h>
#include <linux/mmzone.h>
#include <linux/hugetlb.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/secretmem.h>

#include "internal.h"

struct mlock_pvec {
	local_lock_t lock;
	struct pagevec vec;
};

static DEFINE_PER_CPU(struct mlock_pvec, mlock_pvec) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

bool can_do_mlock(void)
{
	if (rlimit(RLIMIT_MEMLOCK) != 0)
		return true;
	if (capable(CAP_IPC_LOCK))
		return true;
	return false;
}
EXPORT_SYMBOL(can_do_mlock);
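
/*
 * Illustrative note, not part of the original file: -EPERM from mlock()
 * is decided here alone - a nonzero RLIMIT_MEMLOCK is enough to proceed
 * even without CAP_IPC_LOCK.  Whether the *amount* requested fits under
 * the limit is checked later, in do_mlock() and mlockall().
 */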

/*
 * Mlocked pages are marked with PageMlocked() flag for efficient testing
 * in vmscan and, possibly, the fault path; and to support semi-accurate
 * statistics.
 *
 * An mlocked page [PageMlocked(page)] is unevictable.  As such, it will
 * be placed on the LRU "unevictable" list, rather than the [in]active lists.
 * The unevictable list is an LRU sibling list to the [in]active lists.
 * PageUnevictable is set to indicate the unevictable state.
 */

static struct lruvec *__mlock_page(struct page *page, struct lruvec *lruvec)
{
	/* There is nothing more we can do while it's off LRU */
	if (!TestClearPageLRU(page))
		return lruvec;

	lruvec = folio_lruvec_relock_irq(page_folio(page), lruvec);

	if (unlikely(page_evictable(page))) {
		/*
		 * This is a little surprising, but quite possible:
		 * PageMlocked must have got cleared already by another CPU.
		 * Could this page be on the Unevictable LRU?  I'm not sure,
		 * but move it now if so.
		 */
		if (PageUnevictable(page)) {
			del_page_from_lru_list(page, lruvec);
			ClearPageUnevictable(page);
			add_page_to_lru_list(page, lruvec);
			__count_vm_events(UNEVICTABLE_PGRESCUED,
					  thp_nr_pages(page));
		}
		goto out;
	}

	if (PageUnevictable(page)) {
		if (PageMlocked(page))
			page->mlock_count++;
		goto out;
	}

	del_page_from_lru_list(page, lruvec);
	ClearPageActive(page);
	SetPageUnevictable(page);
	page->mlock_count = !!PageMlocked(page);
	add_page_to_lru_list(page, lruvec);
	__count_vm_events(UNEVICTABLE_PGCULLED, thp_nr_pages(page));
out:
	SetPageLRU(page);
	return lruvec;
}

static struct lruvec *__mlock_new_page(struct page *page, struct lruvec *lruvec)
{
	VM_BUG_ON_PAGE(PageLRU(page), page);

	lruvec = folio_lruvec_relock_irq(page_folio(page), lruvec);

	/* As above, this is a little surprising, but possible */
	if (unlikely(page_evictable(page)))
		goto out;

	SetPageUnevictable(page);
	page->mlock_count = !!PageMlocked(page);
	__count_vm_events(UNEVICTABLE_PGCULLED, thp_nr_pages(page));
out:
	add_page_to_lru_list(page, lruvec);
	SetPageLRU(page);
	return lruvec;
}

static struct lruvec *__munlock_page(struct page *page, struct lruvec *lruvec)
{
	int nr_pages = thp_nr_pages(page);
	bool isolated = false;

	if (!TestClearPageLRU(page))
		goto munlock;

	isolated = true;
	lruvec = folio_lruvec_relock_irq(page_folio(page), lruvec);

	if (PageUnevictable(page)) {
		/* Then mlock_count is maintained, but might undercount */
		if (page->mlock_count)
			page->mlock_count--;
		if (page->mlock_count)
			goto out;
	}
	/* else assume that was the last mlock: reclaim will fix it if not */

munlock:
	if (TestClearPageMlocked(page)) {
		__mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
		if (isolated || !PageUnevictable(page))
			__count_vm_events(UNEVICTABLE_PGMUNLOCKED, nr_pages);
		else
			__count_vm_events(UNEVICTABLE_PGSTRANDED, nr_pages);
	}

	/* page_evictable() has to be checked *after* clearing Mlocked */
	if (isolated && PageUnevictable(page) && page_evictable(page)) {
		del_page_from_lru_list(page, lruvec);
		ClearPageUnevictable(page);
		add_page_to_lru_list(page, lruvec);
		__count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
	}
out:
	if (isolated)
		SetPageLRU(page);
	return lruvec;
}

/*
 * Flags held in the low bits of a struct page pointer on the mlock_pvec.
 */
#define LRU_PAGE 0x1
#define NEW_PAGE 0x2
static inline struct page *mlock_lru(struct page *page)
{
	return (struct page *)((unsigned long)page + LRU_PAGE);
}

static inline struct page *mlock_new(struct page *page)
{
	return (struct page *)((unsigned long)page + NEW_PAGE);
}
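
/*
 * Illustrative note, not part of the original file: tagging the low bits
 * is safe because struct page is always at least word-aligned, so the
 * bottom two bits of a page pointer are otherwise zero.  mlock_pagevec()
 * below masks the tag off again before using the pointer:
 *
 *	mlock = (unsigned long)page & (LRU_PAGE | NEW_PAGE);
 *	page = (struct page *)((unsigned long)page - mlock);
 */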

/*
 * mlock_pagevec() is derived from pagevec_lru_move_fn():
 * perhaps that can make use of such page pointer flags in future,
 * but for now just keep it for mlock.  We could use three separate
 * pagevecs instead, but one feels better (munlocking a full pagevec
 * does not need to drain mlocking pagevecs first).
 */
static void mlock_pagevec(struct pagevec *pvec)
{
	struct lruvec *lruvec = NULL;
	unsigned long mlock;
	struct page *page;
	int i;

	for (i = 0; i < pagevec_count(pvec); i++) {
		page = pvec->pages[i];
		mlock = (unsigned long)page & (LRU_PAGE | NEW_PAGE);
		page = (struct page *)((unsigned long)page - mlock);
		pvec->pages[i] = page;

		if (mlock & LRU_PAGE)
			lruvec = __mlock_page(page, lruvec);
		else if (mlock & NEW_PAGE)
			lruvec = __mlock_new_page(page, lruvec);
		else
			lruvec = __munlock_page(page, lruvec);
	}

	if (lruvec)
		unlock_page_lruvec_irq(lruvec);
	release_pages(pvec->pages, pvec->nr);
	pagevec_reinit(pvec);
}

void mlock_page_drain_local(void)
{
	struct pagevec *pvec;

	local_lock(&mlock_pvec.lock);
	pvec = this_cpu_ptr(&mlock_pvec.vec);
	if (pagevec_count(pvec))
		mlock_pagevec(pvec);
	local_unlock(&mlock_pvec.lock);
}

void mlock_page_drain_remote(int cpu)
{
	struct pagevec *pvec;

	WARN_ON_ONCE(cpu_online(cpu));
	pvec = &per_cpu(mlock_pvec.vec, cpu);
	if (pagevec_count(pvec))
		mlock_pagevec(pvec);
}

bool need_mlock_page_drain(int cpu)
{
	return pagevec_count(&per_cpu(mlock_pvec.vec, cpu));
}
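
/*
 * Illustrative note, not part of the original file: the _local variant
 * runs on the owning CPU and so takes the local_lock; the _remote variant
 * is only expected for a CPU that has gone offline (hence the
 * WARN_ON_ONCE), whose pagevec can no longer be touched concurrently, so
 * no locking is needed there.
 */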

/**
 * mlock_folio - mlock a folio already on (or temporarily off) LRU
 * @folio: folio to be mlocked.
 */
void mlock_folio(struct folio *folio)
{
	struct pagevec *pvec;

	local_lock(&mlock_pvec.lock);
	pvec = this_cpu_ptr(&mlock_pvec.vec);

	if (!folio_test_set_mlocked(folio)) {
		int nr_pages = folio_nr_pages(folio);

		zone_stat_mod_folio(folio, NR_MLOCK, nr_pages);
		__count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
	}

	folio_get(folio);
	if (!pagevec_add(pvec, mlock_lru(&folio->page)) ||
	    folio_test_large(folio) || lru_cache_disabled())
		mlock_pagevec(pvec);
	local_unlock(&mlock_pvec.lock);
}
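
/*
 * Illustrative note, not part of the original file: the folio_get() here
 * takes a reference held while the folio sits on the pagevec;
 * release_pages() in mlock_pagevec() drops it again.  Large folios and
 * the lru_cache_disabled() case bypass batching and flush immediately.
 */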

/**
 * mlock_new_page - mlock a newly allocated page not yet on LRU
 * @page: page to be mlocked, either a normal page or a THP head.
 */
void mlock_new_page(struct page *page)
{
	struct pagevec *pvec;
	int nr_pages = thp_nr_pages(page);

	local_lock(&mlock_pvec.lock);
	pvec = this_cpu_ptr(&mlock_pvec.vec);
	SetPageMlocked(page);
	mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);
	__count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);

	get_page(page);
	if (!pagevec_add(pvec, mlock_new(page)) ||
	    PageHead(page) || lru_cache_disabled())
		mlock_pagevec(pvec);
	local_unlock(&mlock_pvec.lock);
}

/**
 * munlock_page - munlock a page
 * @page: page to be munlocked, either a normal page or a THP head.
 */
void munlock_page(struct page *page)
{
	struct pagevec *pvec;

	local_lock(&mlock_pvec.lock);
	pvec = this_cpu_ptr(&mlock_pvec.vec);
	/*
	 * TestClearPageMlocked(page) must be left to __munlock_page(),
	 * which will check whether the page is multiply mlocked.
	 */

	get_page(page);
	if (!pagevec_add(pvec, page) ||
	    PageHead(page) || lru_cache_disabled())
		mlock_pagevec(pvec);
	local_unlock(&mlock_pvec.lock);
}

static int mlock_pte_range(pmd_t *pmd, unsigned long addr,
			   unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	spinlock_t *ptl;
	pte_t *start_pte, *pte;
	struct page *page;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		if (!pmd_present(*pmd))
			goto out;
		if (is_huge_zero_pmd(*pmd))
			goto out;
		page = pmd_page(*pmd);
		if (vma->vm_flags & VM_LOCKED)
			mlock_folio(page_folio(page));
		else
			munlock_page(page);
		goto out;
	}

	start_pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (pte = start_pte; addr != end; pte++, addr += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		if (PageTransCompound(page))
			continue;
		if (vma->vm_flags & VM_LOCKED)
			mlock_folio(page_folio(page));
		else
			munlock_page(page);
	}
	pte_unmap(start_pte);
out:
	spin_unlock(ptl);
	cond_resched();
	return 0;
}
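
/*
 * Illustrative note, not part of the original file: in the pte loop above,
 * pte-mapped compound pages (PageTransCompound) are skipped rather than
 * mlocked/munlocked page by page - the mlock_count scheme is maintained
 * on whole compound pages, so only the pmd-mapped THP case is handled,
 * via its head page.
 */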

/*
 * mlock_vma_pages_range() - mlock any pages already in the range,
 * or munlock all pages in the range.
 * @vma - vma containing range to be mlock()ed or munlock()ed
 * @start - start address in @vma of the range
 * @end - end of range in @vma
 * @newflags - the new set of flags for @vma.
 *
 * Called for mlock(), mlock2() and mlockall(), to set @vma VM_LOCKED;
 * called for munlock() and munlockall(), to clear VM_LOCKED from @vma.
 */
static void mlock_vma_pages_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end, vm_flags_t newflags)
{
	static const struct mm_walk_ops mlock_walk_ops = {
		.pmd_entry = mlock_pte_range,
	};

	/*
	 * There is a slight chance that concurrent page migration,
	 * or page reclaim finding a page of this now-VM_LOCKED vma,
	 * will call mlock_vma_page() and raise page's mlock_count:
	 * double counting, leaving the page unevictable indefinitely.
	 * Communicate this danger to mlock_vma_page() with VM_IO,
	 * which is a VM_SPECIAL flag not allowed on VM_LOCKED vmas.
	 * mmap_lock is held in write mode here, so this weird
	 * combination should not be visible to other mmap_lock users;
	 * but WRITE_ONCE so rmap walkers must see VM_IO if VM_LOCKED.
	 */
	if (newflags & VM_LOCKED)
		newflags |= VM_IO;
	WRITE_ONCE(vma->vm_flags, newflags);

	lru_add_drain();
	walk_page_range(vma->vm_mm, start, end, &mlock_walk_ops, NULL);
	lru_add_drain();

	if (newflags & VM_IO) {
		newflags &= ~VM_IO;
		WRITE_ONCE(vma->vm_flags, newflags);
	}
}

/*
 * mlock_fixup - handle mlock[all]/munlock[all] requests.
 *
 * Filters out "special" vmas -- VM_LOCKED never gets set for these, and
 * munlock is a no-op.  However, for some special vmas, we go ahead and
 * populate the ptes.
 *
 * For vmas that pass the filters, merge/split as appropriate.
 */
static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
	unsigned long start, unsigned long end, vm_flags_t newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	pgoff_t pgoff;
	int nr_pages;
	int ret = 0;
	vm_flags_t oldflags = vma->vm_flags;

	if (newflags == oldflags || (oldflags & VM_SPECIAL) ||
	    is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm) ||
	    vma_is_dax(vma) || vma_is_secretmem(vma))
		/* don't set VM_LOCKED or VM_LOCKONFAULT and don't count */
		goto out;

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma),
			  vma->vm_userfaultfd_ctx, anon_vma_name(vma));
	if (*prev) {
		vma = *prev;
		goto success;
	}

	if (start != vma->vm_start) {
		ret = split_vma(mm, vma, start, 1);
		if (ret)
			goto out;
	}

	if (end != vma->vm_end) {
		ret = split_vma(mm, vma, end, 0);
		if (ret)
			goto out;
	}

success:
	/*
	 * Keep track of amount of locked VM.
	 */
	nr_pages = (end - start) >> PAGE_SHIFT;
	if (!(newflags & VM_LOCKED))
		nr_pages = -nr_pages;
	else if (oldflags & VM_LOCKED)
		nr_pages = 0;
	mm->locked_vm += nr_pages;

	/*
	 * vm_flags is protected by the mmap_lock held in write mode.
	 * It's okay if try_to_unmap_one unmaps a page just after we
	 * set VM_LOCKED, populate_vma_page_range will bring it back.
	 */

	if ((newflags & VM_LOCKED) && (oldflags & VM_LOCKED)) {
		/* No work to do, and mlocking twice would be wrong */
		vma->vm_flags = newflags;
	} else {
		mlock_vma_pages_range(vma, start, end, newflags);
	}
out:
	*prev = vma;
	return ret;
}
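
/*
 * Illustrative note, not part of the original file: locking a sub-range
 * of an existing VMA first tries vma_merge() with its neighbours; failing
 * that, split_vma() carves the range out, so a single VMA can end up as
 * up to three (unlocked head, locked middle, unlocked tail).
 */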

static int apply_vma_lock_flags(unsigned long start, size_t len,
				vm_flags_t flags)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct *vma, *prev;
	int error;

	VM_BUG_ON(offset_in_page(start));
	VM_BUG_ON(len != PAGE_ALIGN(len));
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;
	vma = find_vma(current->mm, start);
	if (!vma || vma->vm_start > start)
		return -ENOMEM;

	prev = vma->vm_prev;
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		vm_flags_t newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;

		newflags |= flags;

		/* Here we know that  vma->vm_start <= nstart < vma->vm_end. */
		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mlock_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			break;
		nstart = tmp;
		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			break;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			break;
		}
	}
	return error;
}

/*
 * Go through the vmas and sum up the size of the mlocked pages within
 * the range, as the return value.  Note that the deferred memory locking
 * case, i.e. mlock2(,,MLOCK_ONFAULT), is also counted.
 * Return value: count of previously mlocked pages
 */
static unsigned long count_mm_mlocked_page_nr(struct mm_struct *mm,
		unsigned long start, size_t len)
{
	struct vm_area_struct *vma;
	unsigned long count = 0;

	if (mm == NULL)
		mm = current->mm;

	vma = find_vma(mm, start);
	if (vma == NULL)
		return 0;

	for (; vma ; vma = vma->vm_next) {
		if (start >= vma->vm_end)
			continue;
		if (start + len <= vma->vm_start)
			break;
		if (vma->vm_flags & VM_LOCKED) {
			if (start > vma->vm_start)
				count -= (start - vma->vm_start);
			if (start + len < vma->vm_end) {
				count += start + len - vma->vm_start;
				break;
			}
			count += vma->vm_end - vma->vm_start;
		}
	}

	return count >> PAGE_SHIFT;
}
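
/*
 * Illustrative example, not part of the original file: for a VM_LOCKED
 * vma spanning [0x1000, 0x5000) and a request [0x2000, 0x4000), the loop
 * computes -0x1000 (trimming the part before start) + 0x3000 (up to
 * start + len) = 0x2000 bytes, i.e. two pages already mlocked within the
 * requested range.
 */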

/*
 * convert get_user_pages() return value to posix mlock() error
 */
static int __mlock_posix_error_return(long retval)
{
	if (retval == -EFAULT)
		retval = -ENOMEM;
	else if (retval == -ENOMEM)
		retval = -EAGAIN;
	return retval;
}
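
/*
 * Illustrative note, not part of the original file: POSIX specifies
 * ENOMEM for mlock() when part of the range is not mapped (which
 * get_user_pages() reports as -EFAULT), and EAGAIN when some or all of
 * the memory could not be locked, hence the remapping above.
 */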

static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t flags)
{
	unsigned long locked;
	unsigned long lock_limit;
	int error = -ENOMEM;

	start = untagged_addr(start);

	if (!can_do_mlock())
		return -EPERM;

	len = PAGE_ALIGN(len + (offset_in_page(start)));
	start &= PAGE_MASK;

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;
	locked = len >> PAGE_SHIFT;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;

	locked += current->mm->locked_vm;
	if ((locked > lock_limit) && (!capable(CAP_IPC_LOCK))) {
		/*
		 * It is possible that the regions requested intersect with
		 * previously mlocked areas; that part, already accounted in
		 * "mm->locked_vm", should not be counted again toward the
		 * new mlock increment.  So check and adjust the locked
		 * count if necessary.
		 */
		locked -= count_mm_mlocked_page_nr(current->mm,
				start, len);
	}

	/* check against resource limits */
	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
		error = apply_vma_lock_flags(start, len, flags);

	mmap_write_unlock(current->mm);
	if (error)
		return error;

	error = __mm_populate(start, len, 0);
	if (error)
		return __mlock_posix_error_return(error);
	return 0;
}

SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
{
	return do_mlock(start, len, VM_LOCKED);
}

SYSCALL_DEFINE3(mlock2, unsigned long, start, size_t, len, int, flags)
{
	vm_flags_t vm_flags = VM_LOCKED;

	if (flags & ~MLOCK_ONFAULT)
		return -EINVAL;

	if (flags & MLOCK_ONFAULT)
		vm_flags |= VM_LOCKONFAULT;

	return do_mlock(start, len, vm_flags);
}
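
/*
 * Illustrative note, not part of the original file: mlock2(start, len, 0)
 * behaves exactly like mlock(); passing MLOCK_ONFAULT sets
 * VM_LOCKONFAULT, so pages are locked as they are faulted in rather
 * than being faulted up front.
 */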

SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
{
	int ret;

	start = untagged_addr(start);

	len = PAGE_ALIGN(len + (offset_in_page(start)));
	start &= PAGE_MASK;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;
	ret = apply_vma_lock_flags(start, len, 0);
	mmap_write_unlock(current->mm);

	return ret;
}

/*
 * Take the MCL_* flags passed into mlockall (or 0 if called from munlockall)
 * and translate into the appropriate modifications to mm->def_flags and/or the
 * flags for all current VMAs.
 *
 * There are a couple of subtleties with this.  If mlockall() is called multiple
 * times with different flags, the values do not necessarily stack.  If mlockall
 * is called once including the MCL_FUTURE flag and then a second time without
 * it, VM_LOCKED and VM_LOCKONFAULT will be cleared from mm->def_flags.
 */
static int apply_mlockall_flags(int flags)
{
	struct vm_area_struct *vma, *prev = NULL;
	vm_flags_t to_add = 0;

	current->mm->def_flags &= VM_LOCKED_CLEAR_MASK;
	if (flags & MCL_FUTURE) {
		current->mm->def_flags |= VM_LOCKED;

		if (flags & MCL_ONFAULT)
			current->mm->def_flags |= VM_LOCKONFAULT;

		if (!(flags & MCL_CURRENT))
			goto out;
	}

	if (flags & MCL_CURRENT) {
		to_add |= VM_LOCKED;
		if (flags & MCL_ONFAULT)
			to_add |= VM_LOCKONFAULT;
	}

	for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
		vm_flags_t newflags;

		newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
		newflags |= to_add;

		/* Ignore errors */
		mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
		cond_resched();
	}
out:
	return 0;
}

SYSCALL_DEFINE1(mlockall, int, flags)
{
	unsigned long lock_limit;
	int ret;

	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE | MCL_ONFAULT)) ||
	    flags == MCL_ONFAULT)
		return -EINVAL;

	if (!can_do_mlock())
		return -EPERM;

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;

	ret = -ENOMEM;
	if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
	    capable(CAP_IPC_LOCK))
		ret = apply_mlockall_flags(flags);
	mmap_write_unlock(current->mm);
	if (!ret && (flags & MCL_CURRENT))
		mm_populate(0, TASK_SIZE);

	return ret;
}
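
/*
 * Illustrative note, not part of the original file: MCL_ONFAULT is only a
 * modifier, so mlockall(MCL_ONFAULT) alone is rejected with -EINVAL; it
 * must be combined with MCL_CURRENT and/or MCL_FUTURE, e.g.
 * mlockall(MCL_CURRENT | MCL_ONFAULT).
 */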

SYSCALL_DEFINE0(munlockall)
{
	int ret;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;
	ret = apply_mlockall_flags(0);
	mmap_write_unlock(current->mm);
	return ret;
}

/*
 * Objects with different lifetime than processes (SHM_LOCK and SHM_HUGETLB
 * shm segments) get accounted against the user's ucounts instead.
 */
static DEFINE_SPINLOCK(shmlock_user_lock);

int user_shm_lock(size_t size, struct ucounts *ucounts)
{
	unsigned long lock_limit, locked;
	long memlock;
	int allowed = 0;

	locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	lock_limit = rlimit(RLIMIT_MEMLOCK);
	if (lock_limit != RLIM_INFINITY)
		lock_limit >>= PAGE_SHIFT;
	spin_lock(&shmlock_user_lock);
	memlock = inc_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);

	if ((memlock == LONG_MAX || memlock > lock_limit) && !capable(CAP_IPC_LOCK)) {
		dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);
		goto out;
	}
	if (!get_ucounts(ucounts)) {
		dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);
		allowed = 0;
		goto out;
	}
	allowed = 1;
out:
	spin_unlock(&shmlock_user_lock);
	return allowed;
}
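
/*
 * Illustrative note, not part of the original file: on success the
 * reference taken by get_ucounts() pins the accounting structure until
 * user_shm_unlock() drops it with put_ucounts(); on failure the charge
 * added by inc_rlimit_ucounts() is backed out immediately.
 */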

void user_shm_unlock(size_t size, struct ucounts *ucounts)
{
	spin_lock(&shmlock_user_lock);
	dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
	spin_unlock(&shmlock_user_lock);
	put_ucounts(ucounts);
}