]> Git Repo - J-linux.git/blob - fs/proc/task_mmu.c
Merge tag 'vfs-6.13-rc7.fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs
[J-linux.git] / fs / proc / task_mmu.c
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/pagewalk.h>
3 #include <linux/mm_inline.h>
4 #include <linux/hugetlb.h>
5 #include <linux/huge_mm.h>
6 #include <linux/mount.h>
7 #include <linux/ksm.h>
8 #include <linux/seq_file.h>
9 #include <linux/highmem.h>
10 #include <linux/ptrace.h>
11 #include <linux/slab.h>
12 #include <linux/pagemap.h>
13 #include <linux/mempolicy.h>
14 #include <linux/rmap.h>
15 #include <linux/swap.h>
16 #include <linux/sched/mm.h>
17 #include <linux/swapops.h>
18 #include <linux/mmu_notifier.h>
19 #include <linux/page_idle.h>
20 #include <linux/shmem_fs.h>
21 #include <linux/uaccess.h>
22 #include <linux/pkeys.h>
23 #include <linux/minmax.h>
24 #include <linux/overflow.h>
25 #include <linux/buildid.h>
26
27 #include <asm/elf.h>
28 #include <asm/tlb.h>
29 #include <asm/tlbflush.h>
30 #include "internal.h"
31
32 #define SEQ_PUT_DEC(str, val) \
33                 seq_put_decimal_ull_width(m, str, (val) << (PAGE_SHIFT-10), 8)
34 void task_mem(struct seq_file *m, struct mm_struct *mm)
35 {
36         unsigned long text, lib, swap, anon, file, shmem;
37         unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;
38
39         anon = get_mm_counter(mm, MM_ANONPAGES);
40         file = get_mm_counter(mm, MM_FILEPAGES);
41         shmem = get_mm_counter(mm, MM_SHMEMPAGES);
42
43         /*
44          * Note: to minimize their overhead, mm maintains hiwater_vm and
45          * hiwater_rss only when about to *lower* total_vm or rss.  Any
46          * collector of these hiwater stats must therefore get total_vm
47          * and rss too, which will usually be the higher.  Barriers? not
48          * worth the effort, such snapshots can always be inconsistent.
49          */
50         hiwater_vm = total_vm = mm->total_vm;
51         if (hiwater_vm < mm->hiwater_vm)
52                 hiwater_vm = mm->hiwater_vm;
53         hiwater_rss = total_rss = anon + file + shmem;
54         if (hiwater_rss < mm->hiwater_rss)
55                 hiwater_rss = mm->hiwater_rss;
56
57         /* split executable areas between text and lib */
58         text = PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK);
59         text = min(text, mm->exec_vm << PAGE_SHIFT);
60         lib = (mm->exec_vm << PAGE_SHIFT) - text;
61
62         swap = get_mm_counter(mm, MM_SWAPENTS);
63         SEQ_PUT_DEC("VmPeak:\t", hiwater_vm);
64         SEQ_PUT_DEC(" kB\nVmSize:\t", total_vm);
65         SEQ_PUT_DEC(" kB\nVmLck:\t", mm->locked_vm);
66         SEQ_PUT_DEC(" kB\nVmPin:\t", atomic64_read(&mm->pinned_vm));
67         SEQ_PUT_DEC(" kB\nVmHWM:\t", hiwater_rss);
68         SEQ_PUT_DEC(" kB\nVmRSS:\t", total_rss);
69         SEQ_PUT_DEC(" kB\nRssAnon:\t", anon);
70         SEQ_PUT_DEC(" kB\nRssFile:\t", file);
71         SEQ_PUT_DEC(" kB\nRssShmem:\t", shmem);
72         SEQ_PUT_DEC(" kB\nVmData:\t", mm->data_vm);
73         SEQ_PUT_DEC(" kB\nVmStk:\t", mm->stack_vm);
74         seq_put_decimal_ull_width(m,
75                     " kB\nVmExe:\t", text >> 10, 8);
76         seq_put_decimal_ull_width(m,
77                     " kB\nVmLib:\t", lib >> 10, 8);
78         seq_put_decimal_ull_width(m,
79                     " kB\nVmPTE:\t", mm_pgtables_bytes(mm) >> 10, 8);
80         SEQ_PUT_DEC(" kB\nVmSwap:\t", swap);
81         seq_puts(m, " kB\n");
82         hugetlb_report_usage(m, mm);
83 }
84 #undef SEQ_PUT_DEC
85
86 unsigned long task_vsize(struct mm_struct *mm)
87 {
88         return PAGE_SIZE * mm->total_vm;
89 }
90
91 unsigned long task_statm(struct mm_struct *mm,
92                          unsigned long *shared, unsigned long *text,
93                          unsigned long *data, unsigned long *resident)
94 {
95         *shared = get_mm_counter(mm, MM_FILEPAGES) +
96                         get_mm_counter(mm, MM_SHMEMPAGES);
97         *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
98                                                                 >> PAGE_SHIFT;
99         *data = mm->data_vm + mm->stack_vm;
100         *resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
101         return mm->total_vm;
102 }
103
104 #ifdef CONFIG_NUMA
105 /*
106  * Save get_task_policy() for show_numa_map().
107  */
108 static void hold_task_mempolicy(struct proc_maps_private *priv)
109 {
110         struct task_struct *task = priv->task;
111
112         task_lock(task);
113         priv->task_mempolicy = get_task_policy(task);
114         mpol_get(priv->task_mempolicy);
115         task_unlock(task);
116 }
117 static void release_task_mempolicy(struct proc_maps_private *priv)
118 {
119         mpol_put(priv->task_mempolicy);
120 }
121 #else
122 static void hold_task_mempolicy(struct proc_maps_private *priv)
123 {
124 }
125 static void release_task_mempolicy(struct proc_maps_private *priv)
126 {
127 }
128 #endif
129
130 static struct vm_area_struct *proc_get_vma(struct proc_maps_private *priv,
131                                                 loff_t *ppos)
132 {
133         struct vm_area_struct *vma = vma_next(&priv->iter);
134
135         if (vma) {
136                 *ppos = vma->vm_start;
137         } else {
138                 *ppos = -2UL;
139                 vma = get_gate_vma(priv->mm);
140         }
141
142         return vma;
143 }
144
145 static void *m_start(struct seq_file *m, loff_t *ppos)
146 {
147         struct proc_maps_private *priv = m->private;
148         unsigned long last_addr = *ppos;
149         struct mm_struct *mm;
150
151         /* See m_next(). Zero at the start or after lseek. */
152         if (last_addr == -1UL)
153                 return NULL;
154
155         priv->task = get_proc_task(priv->inode);
156         if (!priv->task)
157                 return ERR_PTR(-ESRCH);
158
159         mm = priv->mm;
160         if (!mm || !mmget_not_zero(mm)) {
161                 put_task_struct(priv->task);
162                 priv->task = NULL;
163                 return NULL;
164         }
165
166         if (mmap_read_lock_killable(mm)) {
167                 mmput(mm);
168                 put_task_struct(priv->task);
169                 priv->task = NULL;
170                 return ERR_PTR(-EINTR);
171         }
172
173         vma_iter_init(&priv->iter, mm, last_addr);
174         hold_task_mempolicy(priv);
175         if (last_addr == -2UL)
176                 return get_gate_vma(mm);
177
178         return proc_get_vma(priv, ppos);
179 }
180
181 static void *m_next(struct seq_file *m, void *v, loff_t *ppos)
182 {
183         if (*ppos == -2UL) {
184                 *ppos = -1UL;
185                 return NULL;
186         }
187         return proc_get_vma(m->private, ppos);
188 }
189
190 static void m_stop(struct seq_file *m, void *v)
191 {
192         struct proc_maps_private *priv = m->private;
193         struct mm_struct *mm = priv->mm;
194
195         if (!priv->task)
196                 return;
197
198         release_task_mempolicy(priv);
199         mmap_read_unlock(mm);
200         mmput(mm);
201         put_task_struct(priv->task);
202         priv->task = NULL;
203 }
204
205 static int proc_maps_open(struct inode *inode, struct file *file,
206                         const struct seq_operations *ops, int psize)
207 {
208         struct proc_maps_private *priv = __seq_open_private(file, ops, psize);
209
210         if (!priv)
211                 return -ENOMEM;
212
213         priv->inode = inode;
214         priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
215         if (IS_ERR(priv->mm)) {
216                 int err = PTR_ERR(priv->mm);
217
218                 seq_release_private(inode, file);
219                 return err;
220         }
221
222         return 0;
223 }
224
225 static int proc_map_release(struct inode *inode, struct file *file)
226 {
227         struct seq_file *seq = file->private_data;
228         struct proc_maps_private *priv = seq->private;
229
230         if (priv->mm)
231                 mmdrop(priv->mm);
232
233         return seq_release_private(inode, file);
234 }
235
236 static int do_maps_open(struct inode *inode, struct file *file,
237                         const struct seq_operations *ops)
238 {
239         return proc_maps_open(inode, file, ops,
240                                 sizeof(struct proc_maps_private));
241 }
242
243 static void get_vma_name(struct vm_area_struct *vma,
244                          const struct path **path,
245                          const char **name,
246                          const char **name_fmt)
247 {
248         struct anon_vma_name *anon_name = vma->vm_mm ? anon_vma_name(vma) : NULL;
249
250         *name = NULL;
251         *path = NULL;
252         *name_fmt = NULL;
253
254         /*
255          * Print the dentry name for named mappings, and a
256          * special [heap] marker for the heap:
257          */
258         if (vma->vm_file) {
259                 /*
260                  * If user named this anon shared memory via
261                  * prctl(PR_SET_VMA ..., use the provided name.
262                  */
263                 if (anon_name) {
264                         *name_fmt = "[anon_shmem:%s]";
265                         *name = anon_name->name;
266                 } else {
267                         *path = file_user_path(vma->vm_file);
268                 }
269                 return;
270         }
271
272         if (vma->vm_ops && vma->vm_ops->name) {
273                 *name = vma->vm_ops->name(vma);
274                 if (*name)
275                         return;
276         }
277
278         *name = arch_vma_name(vma);
279         if (*name)
280                 return;
281
282         if (!vma->vm_mm) {
283                 *name = "[vdso]";
284                 return;
285         }
286
287         if (vma_is_initial_heap(vma)) {
288                 *name = "[heap]";
289                 return;
290         }
291
292         if (vma_is_initial_stack(vma)) {
293                 *name = "[stack]";
294                 return;
295         }
296
297         if (anon_name) {
298                 *name_fmt = "[anon:%s]";
299                 *name = anon_name->name;
300                 return;
301         }
302 }
303
304 static void show_vma_header_prefix(struct seq_file *m,
305                                    unsigned long start, unsigned long end,
306                                    vm_flags_t flags, unsigned long long pgoff,
307                                    dev_t dev, unsigned long ino)
308 {
309         seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
310         seq_put_hex_ll(m, NULL, start, 8);
311         seq_put_hex_ll(m, "-", end, 8);
312         seq_putc(m, ' ');
313         seq_putc(m, flags & VM_READ ? 'r' : '-');
314         seq_putc(m, flags & VM_WRITE ? 'w' : '-');
315         seq_putc(m, flags & VM_EXEC ? 'x' : '-');
316         seq_putc(m, flags & VM_MAYSHARE ? 's' : 'p');
317         seq_put_hex_ll(m, " ", pgoff, 8);
318         seq_put_hex_ll(m, " ", MAJOR(dev), 2);
319         seq_put_hex_ll(m, ":", MINOR(dev), 2);
320         seq_put_decimal_ull(m, " ", ino);
321         seq_putc(m, ' ');
322 }
323
324 static void
325 show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
326 {
327         const struct path *path;
328         const char *name_fmt, *name;
329         vm_flags_t flags = vma->vm_flags;
330         unsigned long ino = 0;
331         unsigned long long pgoff = 0;
332         unsigned long start, end;
333         dev_t dev = 0;
334
335         if (vma->vm_file) {
336                 const struct inode *inode = file_user_inode(vma->vm_file);
337
338                 dev = inode->i_sb->s_dev;
339                 ino = inode->i_ino;
340                 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
341         }
342
343         start = vma->vm_start;
344         end = vma->vm_end;
345         show_vma_header_prefix(m, start, end, flags, pgoff, dev, ino);
346
347         get_vma_name(vma, &path, &name, &name_fmt);
348         if (path) {
349                 seq_pad(m, ' ');
350                 seq_path(m, path, "\n");
351         } else if (name_fmt) {
352                 seq_pad(m, ' ');
353                 seq_printf(m, name_fmt, name);
354         } else if (name) {
355                 seq_pad(m, ' ');
356                 seq_puts(m, name);
357         }
358         seq_putc(m, '\n');
359 }
360
361 static int show_map(struct seq_file *m, void *v)
362 {
363         show_map_vma(m, v);
364         return 0;
365 }
366
367 static const struct seq_operations proc_pid_maps_op = {
368         .start  = m_start,
369         .next   = m_next,
370         .stop   = m_stop,
371         .show   = show_map
372 };
373
374 static int pid_maps_open(struct inode *inode, struct file *file)
375 {
376         return do_maps_open(inode, file, &proc_pid_maps_op);
377 }
378
379 #define PROCMAP_QUERY_VMA_FLAGS (                               \
380                 PROCMAP_QUERY_VMA_READABLE |                    \
381                 PROCMAP_QUERY_VMA_WRITABLE |                    \
382                 PROCMAP_QUERY_VMA_EXECUTABLE |                  \
383                 PROCMAP_QUERY_VMA_SHARED                        \
384 )
385
386 #define PROCMAP_QUERY_VALID_FLAGS_MASK (                        \
387                 PROCMAP_QUERY_COVERING_OR_NEXT_VMA |            \
388                 PROCMAP_QUERY_FILE_BACKED_VMA |                 \
389                 PROCMAP_QUERY_VMA_FLAGS                         \
390 )
391
392 static int query_vma_setup(struct mm_struct *mm)
393 {
394         return mmap_read_lock_killable(mm);
395 }
396
397 static void query_vma_teardown(struct mm_struct *mm, struct vm_area_struct *vma)
398 {
399         mmap_read_unlock(mm);
400 }
401
402 static struct vm_area_struct *query_vma_find_by_addr(struct mm_struct *mm, unsigned long addr)
403 {
404         return find_vma(mm, addr);
405 }
406
407 static struct vm_area_struct *query_matching_vma(struct mm_struct *mm,
408                                                  unsigned long addr, u32 flags)
409 {
410         struct vm_area_struct *vma;
411
412 next_vma:
413         vma = query_vma_find_by_addr(mm, addr);
414         if (!vma)
415                 goto no_vma;
416
417         /* user requested only file-backed VMA, keep iterating */
418         if ((flags & PROCMAP_QUERY_FILE_BACKED_VMA) && !vma->vm_file)
419                 goto skip_vma;
420
421         /* VMA permissions should satisfy query flags */
422         if (flags & PROCMAP_QUERY_VMA_FLAGS) {
423                 u32 perm = 0;
424
425                 if (flags & PROCMAP_QUERY_VMA_READABLE)
426                         perm |= VM_READ;
427                 if (flags & PROCMAP_QUERY_VMA_WRITABLE)
428                         perm |= VM_WRITE;
429                 if (flags & PROCMAP_QUERY_VMA_EXECUTABLE)
430                         perm |= VM_EXEC;
431                 if (flags & PROCMAP_QUERY_VMA_SHARED)
432                         perm |= VM_MAYSHARE;
433
434                 if ((vma->vm_flags & perm) != perm)
435                         goto skip_vma;
436         }
437
438         /* found covering VMA or user is OK with the matching next VMA */
439         if ((flags & PROCMAP_QUERY_COVERING_OR_NEXT_VMA) || vma->vm_start <= addr)
440                 return vma;
441
442 skip_vma:
443         /*
444          * If the user needs closest matching VMA, keep iterating.
445          */
446         addr = vma->vm_end;
447         if (flags & PROCMAP_QUERY_COVERING_OR_NEXT_VMA)
448                 goto next_vma;
449
450 no_vma:
451         return ERR_PTR(-ENOENT);
452 }
453
454 static int do_procmap_query(struct proc_maps_private *priv, void __user *uarg)
455 {
456         struct procmap_query karg;
457         struct vm_area_struct *vma;
458         struct mm_struct *mm;
459         const char *name = NULL;
460         char build_id_buf[BUILD_ID_SIZE_MAX], *name_buf = NULL;
461         __u64 usize;
462         int err;
463
464         if (copy_from_user(&usize, (void __user *)uarg, sizeof(usize)))
465                 return -EFAULT;
466         /* argument struct can never be that large, reject abuse */
467         if (usize > PAGE_SIZE)
468                 return -E2BIG;
469         /* argument struct should have at least query_flags and query_addr fields */
470         if (usize < offsetofend(struct procmap_query, query_addr))
471                 return -EINVAL;
472         err = copy_struct_from_user(&karg, sizeof(karg), uarg, usize);
473         if (err)
474                 return err;
475
476         /* reject unknown flags */
477         if (karg.query_flags & ~PROCMAP_QUERY_VALID_FLAGS_MASK)
478                 return -EINVAL;
479         /* either both buffer address and size are set, or both should be zero */
480         if (!!karg.vma_name_size != !!karg.vma_name_addr)
481                 return -EINVAL;
482         if (!!karg.build_id_size != !!karg.build_id_addr)
483                 return -EINVAL;
484
485         mm = priv->mm;
486         if (!mm || !mmget_not_zero(mm))
487                 return -ESRCH;
488
489         err = query_vma_setup(mm);
490         if (err) {
491                 mmput(mm);
492                 return err;
493         }
494
495         vma = query_matching_vma(mm, karg.query_addr, karg.query_flags);
496         if (IS_ERR(vma)) {
497                 err = PTR_ERR(vma);
498                 vma = NULL;
499                 goto out;
500         }
501
502         karg.vma_start = vma->vm_start;
503         karg.vma_end = vma->vm_end;
504
505         karg.vma_flags = 0;
506         if (vma->vm_flags & VM_READ)
507                 karg.vma_flags |= PROCMAP_QUERY_VMA_READABLE;
508         if (vma->vm_flags & VM_WRITE)
509                 karg.vma_flags |= PROCMAP_QUERY_VMA_WRITABLE;
510         if (vma->vm_flags & VM_EXEC)
511                 karg.vma_flags |= PROCMAP_QUERY_VMA_EXECUTABLE;
512         if (vma->vm_flags & VM_MAYSHARE)
513                 karg.vma_flags |= PROCMAP_QUERY_VMA_SHARED;
514
515         karg.vma_page_size = vma_kernel_pagesize(vma);
516
517         if (vma->vm_file) {
518                 const struct inode *inode = file_user_inode(vma->vm_file);
519
520                 karg.vma_offset = ((__u64)vma->vm_pgoff) << PAGE_SHIFT;
521                 karg.dev_major = MAJOR(inode->i_sb->s_dev);
522                 karg.dev_minor = MINOR(inode->i_sb->s_dev);
523                 karg.inode = inode->i_ino;
524         } else {
525                 karg.vma_offset = 0;
526                 karg.dev_major = 0;
527                 karg.dev_minor = 0;
528                 karg.inode = 0;
529         }
530
531         if (karg.build_id_size) {
532                 __u32 build_id_sz;
533
534                 err = build_id_parse(vma, build_id_buf, &build_id_sz);
535                 if (err) {
536                         karg.build_id_size = 0;
537                 } else {
538                         if (karg.build_id_size < build_id_sz) {
539                                 err = -ENAMETOOLONG;
540                                 goto out;
541                         }
542                         karg.build_id_size = build_id_sz;
543                 }
544         }
545
546         if (karg.vma_name_size) {
547                 size_t name_buf_sz = min_t(size_t, PATH_MAX, karg.vma_name_size);
548                 const struct path *path;
549                 const char *name_fmt;
550                 size_t name_sz = 0;
551
552                 get_vma_name(vma, &path, &name, &name_fmt);
553
554                 if (path || name_fmt || name) {
555                         name_buf = kmalloc(name_buf_sz, GFP_KERNEL);
556                         if (!name_buf) {
557                                 err = -ENOMEM;
558                                 goto out;
559                         }
560                 }
561                 if (path) {
562                         name = d_path(path, name_buf, name_buf_sz);
563                         if (IS_ERR(name)) {
564                                 err = PTR_ERR(name);
565                                 goto out;
566                         }
567                         name_sz = name_buf + name_buf_sz - name;
568                 } else if (name || name_fmt) {
569                         name_sz = 1 + snprintf(name_buf, name_buf_sz, name_fmt ?: "%s", name);
570                         name = name_buf;
571                 }
572                 if (name_sz > name_buf_sz) {
573                         err = -ENAMETOOLONG;
574                         goto out;
575                 }
576                 karg.vma_name_size = name_sz;
577         }
578
579         /* unlock vma or mmap_lock, and put mm_struct before copying data to user */
580         query_vma_teardown(mm, vma);
581         mmput(mm);
582
583         if (karg.vma_name_size && copy_to_user(u64_to_user_ptr(karg.vma_name_addr),
584                                                name, karg.vma_name_size)) {
585                 kfree(name_buf);
586                 return -EFAULT;
587         }
588         kfree(name_buf);
589
590         if (karg.build_id_size && copy_to_user(u64_to_user_ptr(karg.build_id_addr),
591                                                build_id_buf, karg.build_id_size))
592                 return -EFAULT;
593
594         if (copy_to_user(uarg, &karg, min_t(size_t, sizeof(karg), usize)))
595                 return -EFAULT;
596
597         return 0;
598
599 out:
600         query_vma_teardown(mm, vma);
601         mmput(mm);
602         kfree(name_buf);
603         return err;
604 }
605
606 static long procfs_procmap_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
607 {
608         struct seq_file *seq = file->private_data;
609         struct proc_maps_private *priv = seq->private;
610
611         switch (cmd) {
612         case PROCMAP_QUERY:
613                 return do_procmap_query(priv, (void __user *)arg);
614         default:
615                 return -ENOIOCTLCMD;
616         }
617 }
618
619 const struct file_operations proc_pid_maps_operations = {
620         .open           = pid_maps_open,
621         .read           = seq_read,
622         .llseek         = seq_lseek,
623         .release        = proc_map_release,
624         .unlocked_ioctl = procfs_procmap_ioctl,
625         .compat_ioctl   = compat_ptr_ioctl,
626 };
627
628 /*
629  * Proportional Set Size(PSS): my share of RSS.
630  *
631  * PSS of a process is the count of pages it has in memory, where each
632  * page is divided by the number of processes sharing it.  So if a
633  * process has 1000 pages all to itself, and 1000 shared with one other
634  * process, its PSS will be 1500.
635  *
636  * To keep (accumulated) division errors low, we adopt a 64bit
637  * fixed-point pss counter to minimize division errors. So (pss >>
638  * PSS_SHIFT) would be the real byte count.
639  *
640  * A shift of 12 before division means (assuming 4K page size):
641  *      - 1M 3-user-pages add up to 8KB errors;
642  *      - supports mapcount up to 2^24, or 16M;
643  *      - supports PSS up to 2^52 bytes, or 4PB.
644  */
645 #define PSS_SHIFT 12
646
647 #ifdef CONFIG_PROC_PAGE_MONITOR
648 struct mem_size_stats {
649         unsigned long resident;
650         unsigned long shared_clean;
651         unsigned long shared_dirty;
652         unsigned long private_clean;
653         unsigned long private_dirty;
654         unsigned long referenced;
655         unsigned long anonymous;
656         unsigned long lazyfree;
657         unsigned long anonymous_thp;
658         unsigned long shmem_thp;
659         unsigned long file_thp;
660         unsigned long swap;
661         unsigned long shared_hugetlb;
662         unsigned long private_hugetlb;
663         unsigned long ksm;
664         u64 pss;
665         u64 pss_anon;
666         u64 pss_file;
667         u64 pss_shmem;
668         u64 pss_dirty;
669         u64 pss_locked;
670         u64 swap_pss;
671 };
672
673 static void smaps_page_accumulate(struct mem_size_stats *mss,
674                 struct folio *folio, unsigned long size, unsigned long pss,
675                 bool dirty, bool locked, bool private)
676 {
677         mss->pss += pss;
678
679         if (folio_test_anon(folio))
680                 mss->pss_anon += pss;
681         else if (folio_test_swapbacked(folio))
682                 mss->pss_shmem += pss;
683         else
684                 mss->pss_file += pss;
685
686         if (locked)
687                 mss->pss_locked += pss;
688
689         if (dirty || folio_test_dirty(folio)) {
690                 mss->pss_dirty += pss;
691                 if (private)
692                         mss->private_dirty += size;
693                 else
694                         mss->shared_dirty += size;
695         } else {
696                 if (private)
697                         mss->private_clean += size;
698                 else
699                         mss->shared_clean += size;
700         }
701 }
702
703 static void smaps_account(struct mem_size_stats *mss, struct page *page,
704                 bool compound, bool young, bool dirty, bool locked,
705                 bool present)
706 {
707         struct folio *folio = page_folio(page);
708         int i, nr = compound ? compound_nr(page) : 1;
709         unsigned long size = nr * PAGE_SIZE;
710
711         /*
712          * First accumulate quantities that depend only on |size| and the type
713          * of the compound page.
714          */
715         if (folio_test_anon(folio)) {
716                 mss->anonymous += size;
717                 if (!folio_test_swapbacked(folio) && !dirty &&
718                     !folio_test_dirty(folio))
719                         mss->lazyfree += size;
720         }
721
722         if (folio_test_ksm(folio))
723                 mss->ksm += size;
724
725         mss->resident += size;
726         /* Accumulate the size in pages that have been accessed. */
727         if (young || folio_test_young(folio) || folio_test_referenced(folio))
728                 mss->referenced += size;
729
730         /*
731          * Then accumulate quantities that may depend on sharing, or that may
732          * differ page-by-page.
733          *
734          * refcount == 1 for present entries guarantees that the folio is mapped
735          * exactly once. For large folios this implies that exactly one
736          * PTE/PMD/... maps (a part of) this folio.
737          *
738          * Treat all non-present entries (where relying on the mapcount and
739          * refcount doesn't make sense) as "maybe shared, but not sure how
740          * often". We treat device private entries as being fake-present.
741          *
742          * Note that it would not be safe to read the mapcount especially for
743          * pages referenced by migration entries, even with the PTL held.
744          */
745         if (folio_ref_count(folio) == 1 || !present) {
746                 smaps_page_accumulate(mss, folio, size, size << PSS_SHIFT,
747                                       dirty, locked, present);
748                 return;
749         }
750         /*
751          * We obtain a snapshot of the mapcount. Without holding the folio lock
752          * this snapshot can be slightly wrong as we cannot always read the
753          * mapcount atomically.
754          */
755         for (i = 0; i < nr; i++, page++) {
756                 int mapcount = folio_precise_page_mapcount(folio, page);
757                 unsigned long pss = PAGE_SIZE << PSS_SHIFT;
758                 if (mapcount >= 2)
759                         pss /= mapcount;
760                 smaps_page_accumulate(mss, folio, PAGE_SIZE, pss,
761                                 dirty, locked, mapcount < 2);
762         }
763 }
764
765 #ifdef CONFIG_SHMEM
766 static int smaps_pte_hole(unsigned long addr, unsigned long end,
767                           __always_unused int depth, struct mm_walk *walk)
768 {
769         struct mem_size_stats *mss = walk->private;
770         struct vm_area_struct *vma = walk->vma;
771
772         mss->swap += shmem_partial_swap_usage(walk->vma->vm_file->f_mapping,
773                                               linear_page_index(vma, addr),
774                                               linear_page_index(vma, end));
775
776         return 0;
777 }
778 #else
779 #define smaps_pte_hole          NULL
780 #endif /* CONFIG_SHMEM */
781
782 static void smaps_pte_hole_lookup(unsigned long addr, struct mm_walk *walk)
783 {
784 #ifdef CONFIG_SHMEM
785         if (walk->ops->pte_hole) {
786                 /* depth is not used */
787                 smaps_pte_hole(addr, addr + PAGE_SIZE, 0, walk);
788         }
789 #endif
790 }
791
792 static void smaps_pte_entry(pte_t *pte, unsigned long addr,
793                 struct mm_walk *walk)
794 {
795         struct mem_size_stats *mss = walk->private;
796         struct vm_area_struct *vma = walk->vma;
797         bool locked = !!(vma->vm_flags & VM_LOCKED);
798         struct page *page = NULL;
799         bool present = false, young = false, dirty = false;
800         pte_t ptent = ptep_get(pte);
801
802         if (pte_present(ptent)) {
803                 page = vm_normal_page(vma, addr, ptent);
804                 young = pte_young(ptent);
805                 dirty = pte_dirty(ptent);
806                 present = true;
807         } else if (is_swap_pte(ptent)) {
808                 swp_entry_t swpent = pte_to_swp_entry(ptent);
809
810                 if (!non_swap_entry(swpent)) {
811                         int mapcount;
812
813                         mss->swap += PAGE_SIZE;
814                         mapcount = swp_swapcount(swpent);
815                         if (mapcount >= 2) {
816                                 u64 pss_delta = (u64)PAGE_SIZE << PSS_SHIFT;
817
818                                 do_div(pss_delta, mapcount);
819                                 mss->swap_pss += pss_delta;
820                         } else {
821                                 mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
822                         }
823                 } else if (is_pfn_swap_entry(swpent)) {
824                         if (is_device_private_entry(swpent))
825                                 present = true;
826                         page = pfn_swap_entry_to_page(swpent);
827                 }
828         } else {
829                 smaps_pte_hole_lookup(addr, walk);
830                 return;
831         }
832
833         if (!page)
834                 return;
835
836         smaps_account(mss, page, false, young, dirty, locked, present);
837 }
838
839 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
840 static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
841                 struct mm_walk *walk)
842 {
843         struct mem_size_stats *mss = walk->private;
844         struct vm_area_struct *vma = walk->vma;
845         bool locked = !!(vma->vm_flags & VM_LOCKED);
846         struct page *page = NULL;
847         bool present = false;
848         struct folio *folio;
849
850         if (pmd_present(*pmd)) {
851                 page = vm_normal_page_pmd(vma, addr, *pmd);
852                 present = true;
853         } else if (unlikely(thp_migration_supported() && is_swap_pmd(*pmd))) {
854                 swp_entry_t entry = pmd_to_swp_entry(*pmd);
855
856                 if (is_pfn_swap_entry(entry))
857                         page = pfn_swap_entry_to_page(entry);
858         }
859         if (IS_ERR_OR_NULL(page))
860                 return;
861         folio = page_folio(page);
862         if (folio_test_anon(folio))
863                 mss->anonymous_thp += HPAGE_PMD_SIZE;
864         else if (folio_test_swapbacked(folio))
865                 mss->shmem_thp += HPAGE_PMD_SIZE;
866         else if (folio_is_zone_device(folio))
867                 /* pass */;
868         else
869                 mss->file_thp += HPAGE_PMD_SIZE;
870
871         smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd),
872                       locked, present);
873 }
874 #else
875 static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
876                 struct mm_walk *walk)
877 {
878 }
879 #endif
880
881 static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
882                            struct mm_walk *walk)
883 {
884         struct vm_area_struct *vma = walk->vma;
885         pte_t *pte;
886         spinlock_t *ptl;
887
888         ptl = pmd_trans_huge_lock(pmd, vma);
889         if (ptl) {
890                 smaps_pmd_entry(pmd, addr, walk);
891                 spin_unlock(ptl);
892                 goto out;
893         }
894
895         pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
896         if (!pte) {
897                 walk->action = ACTION_AGAIN;
898                 return 0;
899         }
900         for (; addr != end; pte++, addr += PAGE_SIZE)
901                 smaps_pte_entry(pte, addr, walk);
902         pte_unmap_unlock(pte - 1, ptl);
903 out:
904         cond_resched();
905         return 0;
906 }
907
908 static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
909 {
910         /*
911          * Don't forget to update Documentation/ on changes.
912          *
913          * The length of the second argument of mnemonics[]
914          * needs to be 3 instead of previously set 2
915          * (i.e. from [BITS_PER_LONG][2] to [BITS_PER_LONG][3])
916          * to avoid spurious
917          * -Werror=unterminated-string-initialization warning
918          *  with GCC 15
919          */
920         static const char mnemonics[BITS_PER_LONG][3] = {
921                 /*
922                  * In case if we meet a flag we don't know about.
923                  */
924                 [0 ... (BITS_PER_LONG-1)] = "??",
925
926                 [ilog2(VM_READ)]        = "rd",
927                 [ilog2(VM_WRITE)]       = "wr",
928                 [ilog2(VM_EXEC)]        = "ex",
929                 [ilog2(VM_SHARED)]      = "sh",
930                 [ilog2(VM_MAYREAD)]     = "mr",
931                 [ilog2(VM_MAYWRITE)]    = "mw",
932                 [ilog2(VM_MAYEXEC)]     = "me",
933                 [ilog2(VM_MAYSHARE)]    = "ms",
934                 [ilog2(VM_GROWSDOWN)]   = "gd",
935                 [ilog2(VM_PFNMAP)]      = "pf",
936                 [ilog2(VM_LOCKED)]      = "lo",
937                 [ilog2(VM_IO)]          = "io",
938                 [ilog2(VM_SEQ_READ)]    = "sr",
939                 [ilog2(VM_RAND_READ)]   = "rr",
940                 [ilog2(VM_DONTCOPY)]    = "dc",
941                 [ilog2(VM_DONTEXPAND)]  = "de",
942                 [ilog2(VM_LOCKONFAULT)] = "lf",
943                 [ilog2(VM_ACCOUNT)]     = "ac",
944                 [ilog2(VM_NORESERVE)]   = "nr",
945                 [ilog2(VM_HUGETLB)]     = "ht",
946                 [ilog2(VM_SYNC)]        = "sf",
947                 [ilog2(VM_ARCH_1)]      = "ar",
948                 [ilog2(VM_WIPEONFORK)]  = "wf",
949                 [ilog2(VM_DONTDUMP)]    = "dd",
950 #ifdef CONFIG_ARM64_BTI
951                 [ilog2(VM_ARM64_BTI)]   = "bt",
952 #endif
953 #ifdef CONFIG_MEM_SOFT_DIRTY
954                 [ilog2(VM_SOFTDIRTY)]   = "sd",
955 #endif
956                 [ilog2(VM_MIXEDMAP)]    = "mm",
957                 [ilog2(VM_HUGEPAGE)]    = "hg",
958                 [ilog2(VM_NOHUGEPAGE)]  = "nh",
959                 [ilog2(VM_MERGEABLE)]   = "mg",
960                 [ilog2(VM_UFFD_MISSING)]= "um",
961                 [ilog2(VM_UFFD_WP)]     = "uw",
962 #ifdef CONFIG_ARM64_MTE
963                 [ilog2(VM_MTE)]         = "mt",
964                 [ilog2(VM_MTE_ALLOWED)] = "",
965 #endif
966 #ifdef CONFIG_ARCH_HAS_PKEYS
967                 /* These come out via ProtectionKey: */
968                 [ilog2(VM_PKEY_BIT0)]   = "",
969                 [ilog2(VM_PKEY_BIT1)]   = "",
970                 [ilog2(VM_PKEY_BIT2)]   = "",
971 #if VM_PKEY_BIT3
972                 [ilog2(VM_PKEY_BIT3)]   = "",
973 #endif
974 #if VM_PKEY_BIT4
975                 [ilog2(VM_PKEY_BIT4)]   = "",
976 #endif
977 #endif /* CONFIG_ARCH_HAS_PKEYS */
978 #ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
979                 [ilog2(VM_UFFD_MINOR)]  = "ui",
980 #endif /* CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
981 #ifdef CONFIG_ARCH_HAS_USER_SHADOW_STACK
982                 [ilog2(VM_SHADOW_STACK)] = "ss",
983 #endif
984 #if defined(CONFIG_64BIT) || defined(CONFIG_PPC32)
985                 [ilog2(VM_DROPPABLE)] = "dp",
986 #endif
987 #ifdef CONFIG_64BIT
988                 [ilog2(VM_SEALED)] = "sl",
989 #endif
990         };
991         size_t i;
992
993         seq_puts(m, "VmFlags: ");
994         for (i = 0; i < BITS_PER_LONG; i++) {
995                 if (!mnemonics[i][0])
996                         continue;
997                 if (vma->vm_flags & (1UL << i))
998                         seq_printf(m, "%s ", mnemonics[i]);
999         }
1000         seq_putc(m, '\n');
1001 }
1002
1003 #ifdef CONFIG_HUGETLB_PAGE
1004 static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
1005                                  unsigned long addr, unsigned long end,
1006                                  struct mm_walk *walk)
1007 {
1008         struct mem_size_stats *mss = walk->private;
1009         struct vm_area_struct *vma = walk->vma;
1010         pte_t ptent = huge_ptep_get(walk->mm, addr, pte);
1011         struct folio *folio = NULL;
1012         bool present = false;
1013
1014         if (pte_present(ptent)) {
1015                 folio = page_folio(pte_page(ptent));
1016                 present = true;
1017         } else if (is_swap_pte(ptent)) {
1018                 swp_entry_t swpent = pte_to_swp_entry(ptent);
1019
1020                 if (is_pfn_swap_entry(swpent))
1021                         folio = pfn_swap_entry_folio(swpent);
1022         }
1023
1024         if (folio) {
1025                 /* We treat non-present entries as "maybe shared". */
1026                 if (!present || folio_likely_mapped_shared(folio) ||
1027                     hugetlb_pmd_shared(pte))
1028                         mss->shared_hugetlb += huge_page_size(hstate_vma(vma));
1029                 else
1030                         mss->private_hugetlb += huge_page_size(hstate_vma(vma));
1031         }
1032         return 0;
1033 }
1034 #else
1035 #define smaps_hugetlb_range     NULL
1036 #endif /* HUGETLB_PAGE */
1037
1038 static const struct mm_walk_ops smaps_walk_ops = {
1039         .pmd_entry              = smaps_pte_range,
1040         .hugetlb_entry          = smaps_hugetlb_range,
1041         .walk_lock              = PGWALK_RDLOCK,
1042 };
1043
1044 static const struct mm_walk_ops smaps_shmem_walk_ops = {
1045         .pmd_entry              = smaps_pte_range,
1046         .hugetlb_entry          = smaps_hugetlb_range,
1047         .pte_hole               = smaps_pte_hole,
1048         .walk_lock              = PGWALK_RDLOCK,
1049 };
1050
1051 /*
1052  * Gather mem stats from @vma with the indicated beginning
1053  * address @start, and keep them in @mss.
1054  *
1055  * Use vm_start of @vma as the beginning address if @start is 0.
1056  */
1057 static void smap_gather_stats(struct vm_area_struct *vma,
1058                 struct mem_size_stats *mss, unsigned long start)
1059 {
1060         const struct mm_walk_ops *ops = &smaps_walk_ops;
1061
1062         /* Invalid start */
1063         if (start >= vma->vm_end)
1064                 return;
1065
1066         if (vma->vm_file && shmem_mapping(vma->vm_file->f_mapping)) {
1067                 /*
1068                  * For shared or readonly shmem mappings we know that all
1069                  * swapped out pages belong to the shmem object, and we can
1070                  * obtain the swap value much more efficiently. For private
1071                  * writable mappings, we might have COW pages that are
1072                  * not affected by the parent swapped out pages of the shmem
1073                  * object, so we have to distinguish them during the page walk.
1074                  * Unless we know that the shmem object (or the part mapped by
1075                  * our VMA) has no swapped out pages at all.
1076                  */
1077                 unsigned long shmem_swapped = shmem_swap_usage(vma);
1078
1079                 if (!start && (!shmem_swapped || (vma->vm_flags & VM_SHARED) ||
1080                                         !(vma->vm_flags & VM_WRITE))) {
1081                         mss->swap += shmem_swapped;
1082                 } else {
1083                         ops = &smaps_shmem_walk_ops;
1084                 }
1085         }
1086
1087         /* mmap_lock is held in m_start */
1088         if (!start)
1089                 walk_page_vma(vma, ops, mss);
1090         else
1091                 walk_page_range(vma->vm_mm, start, vma->vm_end, ops, mss);
1092 }
1093
1094 #define SEQ_PUT_DEC(str, val) \
1095                 seq_put_decimal_ull_width(m, str, (val) >> 10, 8)
1096
1097 /* Show the contents common for smaps and smaps_rollup */
1098 static void __show_smap(struct seq_file *m, const struct mem_size_stats *mss,
1099         bool rollup_mode)
1100 {
1101         SEQ_PUT_DEC("Rss:            ", mss->resident);
1102         SEQ_PUT_DEC(" kB\nPss:            ", mss->pss >> PSS_SHIFT);
1103         SEQ_PUT_DEC(" kB\nPss_Dirty:      ", mss->pss_dirty >> PSS_SHIFT);
1104         if (rollup_mode) {
1105                 /*
1106                  * These are meaningful only for smaps_rollup, otherwise two of
1107                  * them are zero, and the other one is the same as Pss.
1108                  */
1109                 SEQ_PUT_DEC(" kB\nPss_Anon:       ",
1110                         mss->pss_anon >> PSS_SHIFT);
1111                 SEQ_PUT_DEC(" kB\nPss_File:       ",
1112                         mss->pss_file >> PSS_SHIFT);
1113                 SEQ_PUT_DEC(" kB\nPss_Shmem:      ",
1114                         mss->pss_shmem >> PSS_SHIFT);
1115         }
1116         SEQ_PUT_DEC(" kB\nShared_Clean:   ", mss->shared_clean);
1117         SEQ_PUT_DEC(" kB\nShared_Dirty:   ", mss->shared_dirty);
1118         SEQ_PUT_DEC(" kB\nPrivate_Clean:  ", mss->private_clean);
1119         SEQ_PUT_DEC(" kB\nPrivate_Dirty:  ", mss->private_dirty);
1120         SEQ_PUT_DEC(" kB\nReferenced:     ", mss->referenced);
1121         SEQ_PUT_DEC(" kB\nAnonymous:      ", mss->anonymous);
1122         SEQ_PUT_DEC(" kB\nKSM:            ", mss->ksm);
1123         SEQ_PUT_DEC(" kB\nLazyFree:       ", mss->lazyfree);
1124         SEQ_PUT_DEC(" kB\nAnonHugePages:  ", mss->anonymous_thp);
1125         SEQ_PUT_DEC(" kB\nShmemPmdMapped: ", mss->shmem_thp);
1126         SEQ_PUT_DEC(" kB\nFilePmdMapped:  ", mss->file_thp);
1127         SEQ_PUT_DEC(" kB\nShared_Hugetlb: ", mss->shared_hugetlb);
1128         seq_put_decimal_ull_width(m, " kB\nPrivate_Hugetlb: ",
1129                                   mss->private_hugetlb >> 10, 7);
1130         SEQ_PUT_DEC(" kB\nSwap:           ", mss->swap);
1131         SEQ_PUT_DEC(" kB\nSwapPss:        ",
1132                                         mss->swap_pss >> PSS_SHIFT);
1133         SEQ_PUT_DEC(" kB\nLocked:         ",
1134                                         mss->pss_locked >> PSS_SHIFT);
1135         seq_puts(m, " kB\n");
1136 }
1137
1138 static int show_smap(struct seq_file *m, void *v)
1139 {
1140         struct vm_area_struct *vma = v;
1141         struct mem_size_stats mss = {};
1142
1143         smap_gather_stats(vma, &mss, 0);
1144
1145         show_map_vma(m, vma);
1146
1147         SEQ_PUT_DEC("Size:           ", vma->vm_end - vma->vm_start);
1148         SEQ_PUT_DEC(" kB\nKernelPageSize: ", vma_kernel_pagesize(vma));
1149         SEQ_PUT_DEC(" kB\nMMUPageSize:    ", vma_mmu_pagesize(vma));
1150         seq_puts(m, " kB\n");
1151
1152         __show_smap(m, &mss, false);
1153
1154         seq_printf(m, "THPeligible:    %8u\n",
1155                    !!thp_vma_allowable_orders(vma, vma->vm_flags,
1156                            TVA_SMAPS | TVA_ENFORCE_SYSFS, THP_ORDERS_ALL));
1157
1158         if (arch_pkeys_enabled())
1159                 seq_printf(m, "ProtectionKey:  %8u\n", vma_pkey(vma));
1160         show_smap_vma_flags(m, vma);
1161
1162         return 0;
1163 }
1164
1165 static int show_smaps_rollup(struct seq_file *m, void *v)
1166 {
1167         struct proc_maps_private *priv = m->private;
1168         struct mem_size_stats mss = {};
1169         struct mm_struct *mm = priv->mm;
1170         struct vm_area_struct *vma;
1171         unsigned long vma_start = 0, last_vma_end = 0;
1172         int ret = 0;
1173         VMA_ITERATOR(vmi, mm, 0);
1174
1175         priv->task = get_proc_task(priv->inode);
1176         if (!priv->task)
1177                 return -ESRCH;
1178
1179         if (!mm || !mmget_not_zero(mm)) {
1180                 ret = -ESRCH;
1181                 goto out_put_task;
1182         }
1183
1184         ret = mmap_read_lock_killable(mm);
1185         if (ret)
1186                 goto out_put_mm;
1187
1188         hold_task_mempolicy(priv);
1189         vma = vma_next(&vmi);
1190
1191         if (unlikely(!vma))
1192                 goto empty_set;
1193
1194         vma_start = vma->vm_start;
1195         do {
1196                 smap_gather_stats(vma, &mss, 0);
1197                 last_vma_end = vma->vm_end;
1198
1199                 /*
1200                  * Release mmap_lock temporarily if someone wants to
1201                  * access it for write request.
1202                  */
1203                 if (mmap_lock_is_contended(mm)) {
1204                         vma_iter_invalidate(&vmi);
1205                         mmap_read_unlock(mm);
1206                         ret = mmap_read_lock_killable(mm);
1207                         if (ret) {
1208                                 release_task_mempolicy(priv);
1209                                 goto out_put_mm;
1210                         }
1211
1212                         /*
1213                          * After dropping the lock, there are four cases to
1214                          * consider. See the following example for explanation.
1215                          *
1216                          *   +------+------+-----------+
1217                          *   | VMA1 | VMA2 | VMA3      |
1218                          *   +------+------+-----------+
1219                          *   |      |      |           |
1220                          *  4k     8k     16k         400k
1221                          *
1222                          * Suppose we drop the lock after reading VMA2 due to
1223                          * contention, then we get:
1224                          *
1225                          *      last_vma_end = 16k
1226                          *
1227                          * 1) VMA2 is freed, but VMA3 exists:
1228                          *
1229                          *    vma_next(vmi) will return VMA3.
1230                          *    In this case, just continue from VMA3.
1231                          *
1232                          * 2) VMA2 still exists:
1233                          *
1234                          *    vma_next(vmi) will return VMA3.
1235                          *    In this case, just continue from VMA3.
1236                          *
1237                          * 3) No more VMAs can be found:
1238                          *
1239                          *    vma_next(vmi) will return NULL.
1240                          *    No more things to do, just break.
1241                          *
1242                          * 4) (last_vma_end - 1) is the middle of a vma (VMA'):
1243                          *
1244                          *    vma_next(vmi) will return VMA' whose range
1245                          *    contains last_vma_end.
1246                          *    Iterate VMA' from last_vma_end.
1247                          */
1248                         vma = vma_next(&vmi);
1249                         /* Case 3 above */
1250                         if (!vma)
1251                                 break;
1252
1253                         /* Case 1 and 2 above */
1254                         if (vma->vm_start >= last_vma_end) {
1255                                 smap_gather_stats(vma, &mss, 0);
1256                                 last_vma_end = vma->vm_end;
1257                                 continue;
1258                         }
1259
1260                         /* Case 4 above */
1261                         if (vma->vm_end > last_vma_end) {
1262                                 smap_gather_stats(vma, &mss, last_vma_end);
1263                                 last_vma_end = vma->vm_end;
1264                         }
1265                 }
1266         } for_each_vma(vmi, vma);
1267
1268 empty_set:
1269         show_vma_header_prefix(m, vma_start, last_vma_end, 0, 0, 0, 0);
1270         seq_pad(m, ' ');
1271         seq_puts(m, "[rollup]\n");
1272
1273         __show_smap(m, &mss, true);
1274
1275         release_task_mempolicy(priv);
1276         mmap_read_unlock(mm);
1277
1278 out_put_mm:
1279         mmput(mm);
1280 out_put_task:
1281         put_task_struct(priv->task);
1282         priv->task = NULL;
1283
1284         return ret;
1285 }
1286 #undef SEQ_PUT_DEC
1287
1288 static const struct seq_operations proc_pid_smaps_op = {
1289         .start  = m_start,
1290         .next   = m_next,
1291         .stop   = m_stop,
1292         .show   = show_smap
1293 };
1294
1295 static int pid_smaps_open(struct inode *inode, struct file *file)
1296 {
1297         return do_maps_open(inode, file, &proc_pid_smaps_op);
1298 }
1299
1300 static int smaps_rollup_open(struct inode *inode, struct file *file)
1301 {
1302         int ret;
1303         struct proc_maps_private *priv;
1304
1305         priv = kzalloc(sizeof(*priv), GFP_KERNEL_ACCOUNT);
1306         if (!priv)
1307                 return -ENOMEM;
1308
1309         ret = single_open(file, show_smaps_rollup, priv);
1310         if (ret)
1311                 goto out_free;
1312
1313         priv->inode = inode;
1314         priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
1315         if (IS_ERR(priv->mm)) {
1316                 ret = PTR_ERR(priv->mm);
1317
1318                 single_release(inode, file);
1319                 goto out_free;
1320         }
1321
1322         return 0;
1323
1324 out_free:
1325         kfree(priv);
1326         return ret;
1327 }
1328
1329 static int smaps_rollup_release(struct inode *inode, struct file *file)
1330 {
1331         struct seq_file *seq = file->private_data;
1332         struct proc_maps_private *priv = seq->private;
1333
1334         if (priv->mm)
1335                 mmdrop(priv->mm);
1336
1337         kfree(priv);
1338         return single_release(inode, file);
1339 }
1340
1341 const struct file_operations proc_pid_smaps_operations = {
1342         .open           = pid_smaps_open,
1343         .read           = seq_read,
1344         .llseek         = seq_lseek,
1345         .release        = proc_map_release,
1346 };
1347
1348 const struct file_operations proc_pid_smaps_rollup_operations = {
1349         .open           = smaps_rollup_open,
1350         .read           = seq_read,
1351         .llseek         = seq_lseek,
1352         .release        = smaps_rollup_release,
1353 };
1354
1355 enum clear_refs_types {
1356         CLEAR_REFS_ALL = 1,
1357         CLEAR_REFS_ANON,
1358         CLEAR_REFS_MAPPED,
1359         CLEAR_REFS_SOFT_DIRTY,
1360         CLEAR_REFS_MM_HIWATER_RSS,
1361         CLEAR_REFS_LAST,
1362 };
1363
1364 struct clear_refs_private {
1365         enum clear_refs_types type;
1366 };
1367
1368 #ifdef CONFIG_MEM_SOFT_DIRTY
1369
1370 static inline bool pte_is_pinned(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
1371 {
1372         struct folio *folio;
1373
1374         if (!pte_write(pte))
1375                 return false;
1376         if (!is_cow_mapping(vma->vm_flags))
1377                 return false;
1378         if (likely(!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags)))
1379                 return false;
1380         folio = vm_normal_folio(vma, addr, pte);
1381         if (!folio)
1382                 return false;
1383         return folio_maybe_dma_pinned(folio);
1384 }
1385
1386 static inline void clear_soft_dirty(struct vm_area_struct *vma,
1387                 unsigned long addr, pte_t *pte)
1388 {
1389         /*
1390          * The soft-dirty tracker uses #PF-s to catch writes
1391          * to pages, so write-protect the pte as well. See the
1392          * Documentation/admin-guide/mm/soft-dirty.rst for full description
1393          * of how soft-dirty works.
1394          */
1395         pte_t ptent = ptep_get(pte);
1396
1397         if (pte_present(ptent)) {
1398                 pte_t old_pte;
1399
1400                 if (pte_is_pinned(vma, addr, ptent))
1401                         return;
1402                 old_pte = ptep_modify_prot_start(vma, addr, pte);
1403                 ptent = pte_wrprotect(old_pte);
1404                 ptent = pte_clear_soft_dirty(ptent);
1405                 ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent);
1406         } else if (is_swap_pte(ptent)) {
1407                 ptent = pte_swp_clear_soft_dirty(ptent);
1408                 set_pte_at(vma->vm_mm, addr, pte, ptent);
1409         }
1410 }
1411 #else
1412 static inline void clear_soft_dirty(struct vm_area_struct *vma,
1413                 unsigned long addr, pte_t *pte)
1414 {
1415 }
1416 #endif
1417
1418 #if defined(CONFIG_MEM_SOFT_DIRTY) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
1419 static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
1420                 unsigned long addr, pmd_t *pmdp)
1421 {
1422         pmd_t old, pmd = *pmdp;
1423
1424         if (pmd_present(pmd)) {
1425                 /* See comment in change_huge_pmd() */
1426                 old = pmdp_invalidate(vma, addr, pmdp);
1427                 if (pmd_dirty(old))
1428                         pmd = pmd_mkdirty(pmd);
1429                 if (pmd_young(old))
1430                         pmd = pmd_mkyoung(pmd);
1431
1432                 pmd = pmd_wrprotect(pmd);
1433                 pmd = pmd_clear_soft_dirty(pmd);
1434
1435                 set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
1436         } else if (is_migration_entry(pmd_to_swp_entry(pmd))) {
1437                 pmd = pmd_swp_clear_soft_dirty(pmd);
1438                 set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
1439         }
1440 }
1441 #else
1442 static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
1443                 unsigned long addr, pmd_t *pmdp)
1444 {
1445 }
1446 #endif
1447
1448 static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
1449                                 unsigned long end, struct mm_walk *walk)
1450 {
1451         struct clear_refs_private *cp = walk->private;
1452         struct vm_area_struct *vma = walk->vma;
1453         pte_t *pte, ptent;
1454         spinlock_t *ptl;
1455         struct folio *folio;
1456
1457         ptl = pmd_trans_huge_lock(pmd, vma);
1458         if (ptl) {
1459                 if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
1460                         clear_soft_dirty_pmd(vma, addr, pmd);
1461                         goto out;
1462                 }
1463
1464                 if (!pmd_present(*pmd))
1465                         goto out;
1466
1467                 folio = pmd_folio(*pmd);
1468
1469                 /* Clear accessed and referenced bits. */
1470                 pmdp_test_and_clear_young(vma, addr, pmd);
1471                 folio_test_clear_young(folio);
1472                 folio_clear_referenced(folio);
1473 out:
1474                 spin_unlock(ptl);
1475                 return 0;
1476         }
1477
1478         pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
1479         if (!pte) {
1480                 walk->action = ACTION_AGAIN;
1481                 return 0;
1482         }
1483         for (; addr != end; pte++, addr += PAGE_SIZE) {
1484                 ptent = ptep_get(pte);
1485
1486                 if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
1487                         clear_soft_dirty(vma, addr, pte);
1488                         continue;
1489                 }
1490
1491                 if (!pte_present(ptent))
1492                         continue;
1493
1494                 folio = vm_normal_folio(vma, addr, ptent);
1495                 if (!folio)
1496                         continue;
1497
1498                 /* Clear accessed and referenced bits. */
1499                 ptep_test_and_clear_young(vma, addr, pte);
1500                 folio_test_clear_young(folio);
1501                 folio_clear_referenced(folio);
1502         }
1503         pte_unmap_unlock(pte - 1, ptl);
1504         cond_resched();
1505         return 0;
1506 }
1507
1508 static int clear_refs_test_walk(unsigned long start, unsigned long end,
1509                                 struct mm_walk *walk)
1510 {
1511         struct clear_refs_private *cp = walk->private;
1512         struct vm_area_struct *vma = walk->vma;
1513
1514         if (vma->vm_flags & VM_PFNMAP)
1515                 return 1;
1516
1517         /*
1518          * Writing 1 to /proc/pid/clear_refs affects all pages.
1519          * Writing 2 to /proc/pid/clear_refs only affects anonymous pages.
1520          * Writing 3 to /proc/pid/clear_refs only affects file mapped pages.
1521          * Writing 4 to /proc/pid/clear_refs affects all pages.
1522          */
1523         if (cp->type == CLEAR_REFS_ANON && vma->vm_file)
1524                 return 1;
1525         if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file)
1526                 return 1;
1527         return 0;
1528 }
1529
1530 static const struct mm_walk_ops clear_refs_walk_ops = {
1531         .pmd_entry              = clear_refs_pte_range,
1532         .test_walk              = clear_refs_test_walk,
1533         .walk_lock              = PGWALK_WRLOCK,
1534 };
1535
1536 static ssize_t clear_refs_write(struct file *file, const char __user *buf,
1537                                 size_t count, loff_t *ppos)
1538 {
1539         struct task_struct *task;
1540         char buffer[PROC_NUMBUF] = {};
1541         struct mm_struct *mm;
1542         struct vm_area_struct *vma;
1543         enum clear_refs_types type;
1544         int itype;
1545         int rv;
1546
1547         if (count > sizeof(buffer) - 1)
1548                 count = sizeof(buffer) - 1;
1549         if (copy_from_user(buffer, buf, count))
1550                 return -EFAULT;
1551         rv = kstrtoint(strstrip(buffer), 10, &itype);
1552         if (rv < 0)
1553                 return rv;
1554         type = (enum clear_refs_types)itype;
1555         if (type < CLEAR_REFS_ALL || type >= CLEAR_REFS_LAST)
1556                 return -EINVAL;
1557
1558         task = get_proc_task(file_inode(file));
1559         if (!task)
1560                 return -ESRCH;
1561         mm = get_task_mm(task);
1562         if (mm) {
1563                 VMA_ITERATOR(vmi, mm, 0);
1564                 struct mmu_notifier_range range;
1565                 struct clear_refs_private cp = {
1566                         .type = type,
1567                 };
1568
1569                 if (mmap_write_lock_killable(mm)) {
1570                         count = -EINTR;
1571                         goto out_mm;
1572                 }
1573                 if (type == CLEAR_REFS_MM_HIWATER_RSS) {
1574                         /*
1575                          * Writing 5 to /proc/pid/clear_refs resets the peak
1576                          * resident set size to this mm's current rss value.
1577                          */
1578                         reset_mm_hiwater_rss(mm);
1579                         goto out_unlock;
1580                 }
1581
1582                 if (type == CLEAR_REFS_SOFT_DIRTY) {
1583                         for_each_vma(vmi, vma) {
1584                                 if (!(vma->vm_flags & VM_SOFTDIRTY))
1585                                         continue;
1586                                 vm_flags_clear(vma, VM_SOFTDIRTY);
1587                                 vma_set_page_prot(vma);
1588                         }
1589
1590                         inc_tlb_flush_pending(mm);
1591                         mmu_notifier_range_init(&range, MMU_NOTIFY_SOFT_DIRTY,
1592                                                 0, mm, 0, -1UL);
1593                         mmu_notifier_invalidate_range_start(&range);
1594                 }
1595                 walk_page_range(mm, 0, -1, &clear_refs_walk_ops, &cp);
1596                 if (type == CLEAR_REFS_SOFT_DIRTY) {
1597                         mmu_notifier_invalidate_range_end(&range);
1598                         flush_tlb_mm(mm);
1599                         dec_tlb_flush_pending(mm);
1600                 }
1601 out_unlock:
1602                 mmap_write_unlock(mm);
1603 out_mm:
1604                 mmput(mm);
1605         }
1606         put_task_struct(task);
1607
1608         return count;
1609 }
1610
1611 const struct file_operations proc_clear_refs_operations = {
1612         .write          = clear_refs_write,
1613         .llseek         = noop_llseek,
1614 };
1615
1616 typedef struct {
1617         u64 pme;
1618 } pagemap_entry_t;
1619
1620 struct pagemapread {
1621         int pos, len;           /* units: PM_ENTRY_BYTES, not bytes */
1622         pagemap_entry_t *buffer;
1623         bool show_pfn;
1624 };
1625
1626 #define PAGEMAP_WALK_SIZE       (PMD_SIZE)
1627 #define PAGEMAP_WALK_MASK       (PMD_MASK)
1628
1629 #define PM_ENTRY_BYTES          sizeof(pagemap_entry_t)
1630 #define PM_PFRAME_BITS          55
1631 #define PM_PFRAME_MASK          GENMASK_ULL(PM_PFRAME_BITS - 1, 0)
1632 #define PM_SOFT_DIRTY           BIT_ULL(55)
1633 #define PM_MMAP_EXCLUSIVE       BIT_ULL(56)
1634 #define PM_UFFD_WP              BIT_ULL(57)
1635 #define PM_FILE                 BIT_ULL(61)
1636 #define PM_SWAP                 BIT_ULL(62)
1637 #define PM_PRESENT              BIT_ULL(63)
1638
1639 #define PM_END_OF_BUFFER    1
1640
1641 static inline pagemap_entry_t make_pme(u64 frame, u64 flags)
1642 {
1643         return (pagemap_entry_t) { .pme = (frame & PM_PFRAME_MASK) | flags };
1644 }
1645
1646 static int add_to_pagemap(pagemap_entry_t *pme, struct pagemapread *pm)
1647 {
1648         pm->buffer[pm->pos++] = *pme;
1649         if (pm->pos >= pm->len)
1650                 return PM_END_OF_BUFFER;
1651         return 0;
1652 }
1653
1654 static int pagemap_pte_hole(unsigned long start, unsigned long end,
1655                             __always_unused int depth, struct mm_walk *walk)
1656 {
1657         struct pagemapread *pm = walk->private;
1658         unsigned long addr = start;
1659         int err = 0;
1660
1661         while (addr < end) {
1662                 struct vm_area_struct *vma = find_vma(walk->mm, addr);
1663                 pagemap_entry_t pme = make_pme(0, 0);
1664                 /* End of address space hole, which we mark as non-present. */
1665                 unsigned long hole_end;
1666
1667                 if (vma)
1668                         hole_end = min(end, vma->vm_start);
1669                 else
1670                         hole_end = end;
1671
1672                 for (; addr < hole_end; addr += PAGE_SIZE) {
1673                         err = add_to_pagemap(&pme, pm);
1674                         if (err)
1675                                 goto out;
1676                 }
1677
1678                 if (!vma)
1679                         break;
1680
1681                 /* Addresses in the VMA. */
1682                 if (vma->vm_flags & VM_SOFTDIRTY)
1683                         pme = make_pme(0, PM_SOFT_DIRTY);
1684                 for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
1685                         err = add_to_pagemap(&pme, pm);
1686                         if (err)
1687                                 goto out;
1688                 }
1689         }
1690 out:
1691         return err;
1692 }
1693
1694 static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
1695                 struct vm_area_struct *vma, unsigned long addr, pte_t pte)
1696 {
1697         u64 frame = 0, flags = 0;
1698         struct page *page = NULL;
1699         struct folio *folio;
1700
1701         if (pte_present(pte)) {
1702                 if (pm->show_pfn)
1703                         frame = pte_pfn(pte);
1704                 flags |= PM_PRESENT;
1705                 page = vm_normal_page(vma, addr, pte);
1706                 if (pte_soft_dirty(pte))
1707                         flags |= PM_SOFT_DIRTY;
1708                 if (pte_uffd_wp(pte))
1709                         flags |= PM_UFFD_WP;
1710         } else if (is_swap_pte(pte)) {
1711                 swp_entry_t entry;
1712                 if (pte_swp_soft_dirty(pte))
1713                         flags |= PM_SOFT_DIRTY;
1714                 if (pte_swp_uffd_wp(pte))
1715                         flags |= PM_UFFD_WP;
1716                 entry = pte_to_swp_entry(pte);
1717                 if (pm->show_pfn) {
1718                         pgoff_t offset;
1719                         /*
1720                          * For PFN swap offsets, keeping the offset field
1721                          * to be PFN only to be compatible with old smaps.
1722                          */
1723                         if (is_pfn_swap_entry(entry))
1724                                 offset = swp_offset_pfn(entry);
1725                         else
1726                                 offset = swp_offset(entry);
1727                         frame = swp_type(entry) |
1728                             (offset << MAX_SWAPFILES_SHIFT);
1729                 }
1730                 flags |= PM_SWAP;
1731                 if (is_pfn_swap_entry(entry))
1732                         page = pfn_swap_entry_to_page(entry);
1733                 if (pte_marker_entry_uffd_wp(entry))
1734                         flags |= PM_UFFD_WP;
1735         }
1736
1737         if (page) {
1738                 folio = page_folio(page);
1739                 if (!folio_test_anon(folio))
1740                         flags |= PM_FILE;
1741                 if ((flags & PM_PRESENT) &&
1742                     folio_precise_page_mapcount(folio, page) == 1)
1743                         flags |= PM_MMAP_EXCLUSIVE;
1744         }
1745         if (vma->vm_flags & VM_SOFTDIRTY)
1746                 flags |= PM_SOFT_DIRTY;
1747
1748         return make_pme(frame, flags);
1749 }
1750
1751 static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
1752                              struct mm_walk *walk)
1753 {
1754         struct vm_area_struct *vma = walk->vma;
1755         struct pagemapread *pm = walk->private;
1756         spinlock_t *ptl;
1757         pte_t *pte, *orig_pte;
1758         int err = 0;
1759 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1760
1761         ptl = pmd_trans_huge_lock(pmdp, vma);
1762         if (ptl) {
1763                 unsigned int idx = (addr & ~PMD_MASK) >> PAGE_SHIFT;
1764                 u64 flags = 0, frame = 0;
1765                 pmd_t pmd = *pmdp;
1766                 struct page *page = NULL;
1767                 struct folio *folio = NULL;
1768
1769                 if (vma->vm_flags & VM_SOFTDIRTY)
1770                         flags |= PM_SOFT_DIRTY;
1771
1772                 if (pmd_present(pmd)) {
1773                         page = pmd_page(pmd);
1774
1775                         flags |= PM_PRESENT;
1776                         if (pmd_soft_dirty(pmd))
1777                                 flags |= PM_SOFT_DIRTY;
1778                         if (pmd_uffd_wp(pmd))
1779                                 flags |= PM_UFFD_WP;
1780                         if (pm->show_pfn)
1781                                 frame = pmd_pfn(pmd) + idx;
1782                 }
1783 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1784                 else if (is_swap_pmd(pmd)) {
1785                         swp_entry_t entry = pmd_to_swp_entry(pmd);
1786                         unsigned long offset;
1787
1788                         if (pm->show_pfn) {
1789                                 if (is_pfn_swap_entry(entry))
1790                                         offset = swp_offset_pfn(entry) + idx;
1791                                 else
1792                                         offset = swp_offset(entry) + idx;
1793                                 frame = swp_type(entry) |
1794                                         (offset << MAX_SWAPFILES_SHIFT);
1795                         }
1796                         flags |= PM_SWAP;
1797                         if (pmd_swp_soft_dirty(pmd))
1798                                 flags |= PM_SOFT_DIRTY;
1799                         if (pmd_swp_uffd_wp(pmd))
1800                                 flags |= PM_UFFD_WP;
1801                         VM_BUG_ON(!is_pmd_migration_entry(pmd));
1802                         page = pfn_swap_entry_to_page(entry);
1803                 }
1804 #endif
1805
1806                 if (page) {
1807                         folio = page_folio(page);
1808                         if (!folio_test_anon(folio))
1809                                 flags |= PM_FILE;
1810                 }
1811
1812                 for (; addr != end; addr += PAGE_SIZE, idx++) {
1813                         u64 cur_flags = flags;
1814                         pagemap_entry_t pme;
1815
1816                         if (folio && (flags & PM_PRESENT) &&
1817                             folio_precise_page_mapcount(folio, page + idx) == 1)
1818                                 cur_flags |= PM_MMAP_EXCLUSIVE;
1819
1820                         pme = make_pme(frame, cur_flags);
1821                         err = add_to_pagemap(&pme, pm);
1822                         if (err)
1823                                 break;
1824                         if (pm->show_pfn) {
1825                                 if (flags & PM_PRESENT)
1826                                         frame++;
1827                                 else if (flags & PM_SWAP)
1828                                         frame += (1 << MAX_SWAPFILES_SHIFT);
1829                         }
1830                 }
1831                 spin_unlock(ptl);
1832                 return err;
1833         }
1834 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1835
1836         /*
1837          * We can assume that @vma always points to a valid one and @end never
1838          * goes beyond vma->vm_end.
1839          */
1840         orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl);
1841         if (!pte) {
1842                 walk->action = ACTION_AGAIN;
1843                 return err;
1844         }
1845         for (; addr < end; pte++, addr += PAGE_SIZE) {
1846                 pagemap_entry_t pme;
1847
1848                 pme = pte_to_pagemap_entry(pm, vma, addr, ptep_get(pte));
1849                 err = add_to_pagemap(&pme, pm);
1850                 if (err)
1851                         break;
1852         }
1853         pte_unmap_unlock(orig_pte, ptl);
1854
1855         cond_resched();
1856
1857         return err;
1858 }
1859
1860 #ifdef CONFIG_HUGETLB_PAGE
1861 /* This function walks within one hugetlb entry in the single call */
1862 static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
1863                                  unsigned long addr, unsigned long end,
1864                                  struct mm_walk *walk)
1865 {
1866         struct pagemapread *pm = walk->private;
1867         struct vm_area_struct *vma = walk->vma;
1868         u64 flags = 0, frame = 0;
1869         int err = 0;
1870         pte_t pte;
1871
1872         if (vma->vm_flags & VM_SOFTDIRTY)
1873                 flags |= PM_SOFT_DIRTY;
1874
1875         pte = huge_ptep_get(walk->mm, addr, ptep);
1876         if (pte_present(pte)) {
1877                 struct folio *folio = page_folio(pte_page(pte));
1878
1879                 if (!folio_test_anon(folio))
1880                         flags |= PM_FILE;
1881
1882                 if (!folio_likely_mapped_shared(folio) &&
1883                     !hugetlb_pmd_shared(ptep))
1884                         flags |= PM_MMAP_EXCLUSIVE;
1885
1886                 if (huge_pte_uffd_wp(pte))
1887                         flags |= PM_UFFD_WP;
1888
1889                 flags |= PM_PRESENT;
1890                 if (pm->show_pfn)
1891                         frame = pte_pfn(pte) +
1892                                 ((addr & ~hmask) >> PAGE_SHIFT);
1893         } else if (pte_swp_uffd_wp_any(pte)) {
1894                 flags |= PM_UFFD_WP;
1895         }
1896
1897         for (; addr != end; addr += PAGE_SIZE) {
1898                 pagemap_entry_t pme = make_pme(frame, flags);
1899
1900                 err = add_to_pagemap(&pme, pm);
1901                 if (err)
1902                         return err;
1903                 if (pm->show_pfn && (flags & PM_PRESENT))
1904                         frame++;
1905         }
1906
1907         cond_resched();
1908
1909         return err;
1910 }
1911 #else
1912 #define pagemap_hugetlb_range   NULL
1913 #endif /* HUGETLB_PAGE */
1914
1915 static const struct mm_walk_ops pagemap_ops = {
1916         .pmd_entry      = pagemap_pmd_range,
1917         .pte_hole       = pagemap_pte_hole,
1918         .hugetlb_entry  = pagemap_hugetlb_range,
1919         .walk_lock      = PGWALK_RDLOCK,
1920 };
1921
1922 /*
1923  * /proc/pid/pagemap - an array mapping virtual pages to pfns
1924  *
1925  * For each page in the address space, this file contains one 64-bit entry
1926  * consisting of the following:
1927  *
1928  * Bits 0-54  page frame number (PFN) if present
1929  * Bits 0-4   swap type if swapped
1930  * Bits 5-54  swap offset if swapped
1931  * Bit  55    pte is soft-dirty (see Documentation/admin-guide/mm/soft-dirty.rst)
1932  * Bit  56    page exclusively mapped
1933  * Bit  57    pte is uffd-wp write-protected
1934  * Bits 58-60 zero
1935  * Bit  61    page is file-page or shared-anon
1936  * Bit  62    page swapped
1937  * Bit  63    page present
1938  *
1939  * If the page is not present but in swap, then the PFN contains an
1940  * encoding of the swap file number and the page's offset into the
1941  * swap. Unmapped pages return a null PFN. This allows determining
1942  * precisely which pages are mapped (or in swap) and comparing mapped
1943  * pages between processes.
1944  *
1945  * Efficient users of this interface will use /proc/pid/maps to
1946  * determine which areas of memory are actually mapped and llseek to
1947  * skip over unmapped regions.
1948  */
1949 static ssize_t pagemap_read(struct file *file, char __user *buf,
1950                             size_t count, loff_t *ppos)
1951 {
1952         struct mm_struct *mm = file->private_data;
1953         struct pagemapread pm;
1954         unsigned long src;
1955         unsigned long svpfn;
1956         unsigned long start_vaddr;
1957         unsigned long end_vaddr;
1958         int ret = 0, copied = 0;
1959
1960         if (!mm || !mmget_not_zero(mm))
1961                 goto out;
1962
1963         ret = -EINVAL;
1964         /* file position must be aligned */
1965         if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
1966                 goto out_mm;
1967
1968         ret = 0;
1969         if (!count)
1970                 goto out_mm;
1971
1972         /* do not disclose physical addresses: attack vector */
1973         pm.show_pfn = file_ns_capable(file, &init_user_ns, CAP_SYS_ADMIN);
1974
1975         pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
1976         pm.buffer = kmalloc_array(pm.len, PM_ENTRY_BYTES, GFP_KERNEL);
1977         ret = -ENOMEM;
1978         if (!pm.buffer)
1979                 goto out_mm;
1980
1981         src = *ppos;
1982         svpfn = src / PM_ENTRY_BYTES;
1983         end_vaddr = mm->task_size;
1984
1985         /* watch out for wraparound */
1986         start_vaddr = end_vaddr;
1987         if (svpfn <= (ULONG_MAX >> PAGE_SHIFT)) {
1988                 unsigned long end;
1989
1990                 ret = mmap_read_lock_killable(mm);
1991                 if (ret)
1992                         goto out_free;
1993                 start_vaddr = untagged_addr_remote(mm, svpfn << PAGE_SHIFT);
1994                 mmap_read_unlock(mm);
1995
1996                 end = start_vaddr + ((count / PM_ENTRY_BYTES) << PAGE_SHIFT);
1997                 if (end >= start_vaddr && end < mm->task_size)
1998                         end_vaddr = end;
1999         }
2000
2001         /* Ensure the address is inside the task */
2002         if (start_vaddr > mm->task_size)
2003                 start_vaddr = end_vaddr;
2004
2005         ret = 0;
2006         while (count && (start_vaddr < end_vaddr)) {
2007                 int len;
2008                 unsigned long end;
2009
2010                 pm.pos = 0;
2011                 end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
2012                 /* overflow ? */
2013                 if (end < start_vaddr || end > end_vaddr)
2014                         end = end_vaddr;
2015                 ret = mmap_read_lock_killable(mm);
2016                 if (ret)
2017                         goto out_free;
2018                 ret = walk_page_range(mm, start_vaddr, end, &pagemap_ops, &pm);
2019                 mmap_read_unlock(mm);
2020                 start_vaddr = end;
2021
2022                 len = min(count, PM_ENTRY_BYTES * pm.pos);
2023                 if (copy_to_user(buf, pm.buffer, len)) {
2024                         ret = -EFAULT;
2025                         goto out_free;
2026                 }
2027                 copied += len;
2028                 buf += len;
2029                 count -= len;
2030         }
2031         *ppos += copied;
2032         if (!ret || ret == PM_END_OF_BUFFER)
2033                 ret = copied;
2034
2035 out_free:
2036         kfree(pm.buffer);
2037 out_mm:
2038         mmput(mm);
2039 out:
2040         return ret;
2041 }
2042
2043 static int pagemap_open(struct inode *inode, struct file *file)
2044 {
2045         struct mm_struct *mm;
2046
2047         mm = proc_mem_open(inode, PTRACE_MODE_READ);
2048         if (IS_ERR(mm))
2049                 return PTR_ERR(mm);
2050         file->private_data = mm;
2051         return 0;
2052 }
2053
2054 static int pagemap_release(struct inode *inode, struct file *file)
2055 {
2056         struct mm_struct *mm = file->private_data;
2057
2058         if (mm)
2059                 mmdrop(mm);
2060         return 0;
2061 }
2062
2063 #define PM_SCAN_CATEGORIES      (PAGE_IS_WPALLOWED | PAGE_IS_WRITTEN |  \
2064                                  PAGE_IS_FILE | PAGE_IS_PRESENT |       \
2065                                  PAGE_IS_SWAPPED | PAGE_IS_PFNZERO |    \
2066                                  PAGE_IS_HUGE | PAGE_IS_SOFT_DIRTY)
2067 #define PM_SCAN_FLAGS           (PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC)
2068
2069 struct pagemap_scan_private {
2070         struct pm_scan_arg arg;
2071         unsigned long masks_of_interest, cur_vma_category;
2072         struct page_region *vec_buf;
2073         unsigned long vec_buf_len, vec_buf_index, found_pages;
2074         struct page_region __user *vec_out;
2075 };
2076
2077 static unsigned long pagemap_page_category(struct pagemap_scan_private *p,
2078                                            struct vm_area_struct *vma,
2079                                            unsigned long addr, pte_t pte)
2080 {
2081         unsigned long categories = 0;
2082
2083         if (pte_present(pte)) {
2084                 struct page *page;
2085
2086                 categories |= PAGE_IS_PRESENT;
2087                 if (!pte_uffd_wp(pte))
2088                         categories |= PAGE_IS_WRITTEN;
2089
2090                 if (p->masks_of_interest & PAGE_IS_FILE) {
2091                         page = vm_normal_page(vma, addr, pte);
2092                         if (page && !PageAnon(page))
2093                                 categories |= PAGE_IS_FILE;
2094                 }
2095
2096                 if (is_zero_pfn(pte_pfn(pte)))
2097                         categories |= PAGE_IS_PFNZERO;
2098                 if (pte_soft_dirty(pte))
2099                         categories |= PAGE_IS_SOFT_DIRTY;
2100         } else if (is_swap_pte(pte)) {
2101                 swp_entry_t swp;
2102
2103                 categories |= PAGE_IS_SWAPPED;
2104                 if (!pte_swp_uffd_wp_any(pte))
2105                         categories |= PAGE_IS_WRITTEN;
2106
2107                 if (p->masks_of_interest & PAGE_IS_FILE) {
2108                         swp = pte_to_swp_entry(pte);
2109                         if (is_pfn_swap_entry(swp) &&
2110                             !folio_test_anon(pfn_swap_entry_folio(swp)))
2111                                 categories |= PAGE_IS_FILE;
2112                 }
2113                 if (pte_swp_soft_dirty(pte))
2114                         categories |= PAGE_IS_SOFT_DIRTY;
2115         }
2116
2117         return categories;
2118 }
2119
2120 static void make_uffd_wp_pte(struct vm_area_struct *vma,
2121                              unsigned long addr, pte_t *pte, pte_t ptent)
2122 {
2123         if (pte_present(ptent)) {
2124                 pte_t old_pte;
2125
2126                 old_pte = ptep_modify_prot_start(vma, addr, pte);
2127                 ptent = pte_mkuffd_wp(old_pte);
2128                 ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent);
2129         } else if (is_swap_pte(ptent)) {
2130                 ptent = pte_swp_mkuffd_wp(ptent);
2131                 set_pte_at(vma->vm_mm, addr, pte, ptent);
2132         } else {
2133                 set_pte_at(vma->vm_mm, addr, pte,
2134                            make_pte_marker(PTE_MARKER_UFFD_WP));
2135         }
2136 }
2137
2138 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2139 static unsigned long pagemap_thp_category(struct pagemap_scan_private *p,
2140                                           struct vm_area_struct *vma,
2141                                           unsigned long addr, pmd_t pmd)
2142 {
2143         unsigned long categories = PAGE_IS_HUGE;
2144
2145         if (pmd_present(pmd)) {
2146                 struct page *page;
2147
2148                 categories |= PAGE_IS_PRESENT;
2149                 if (!pmd_uffd_wp(pmd))
2150                         categories |= PAGE_IS_WRITTEN;
2151
2152                 if (p->masks_of_interest & PAGE_IS_FILE) {
2153                         page = vm_normal_page_pmd(vma, addr, pmd);
2154                         if (page && !PageAnon(page))
2155                                 categories |= PAGE_IS_FILE;
2156                 }
2157
2158                 if (is_zero_pfn(pmd_pfn(pmd)))
2159                         categories |= PAGE_IS_PFNZERO;
2160                 if (pmd_soft_dirty(pmd))
2161                         categories |= PAGE_IS_SOFT_DIRTY;
2162         } else if (is_swap_pmd(pmd)) {
2163                 swp_entry_t swp;
2164
2165                 categories |= PAGE_IS_SWAPPED;
2166                 if (!pmd_swp_uffd_wp(pmd))
2167                         categories |= PAGE_IS_WRITTEN;
2168                 if (pmd_swp_soft_dirty(pmd))
2169                         categories |= PAGE_IS_SOFT_DIRTY;
2170
2171                 if (p->masks_of_interest & PAGE_IS_FILE) {
2172                         swp = pmd_to_swp_entry(pmd);
2173                         if (is_pfn_swap_entry(swp) &&
2174                             !folio_test_anon(pfn_swap_entry_folio(swp)))
2175                                 categories |= PAGE_IS_FILE;
2176                 }
2177         }
2178
2179         return categories;
2180 }
2181
2182 static void make_uffd_wp_pmd(struct vm_area_struct *vma,
2183                              unsigned long addr, pmd_t *pmdp)
2184 {
2185         pmd_t old, pmd = *pmdp;
2186
2187         if (pmd_present(pmd)) {
2188                 old = pmdp_invalidate_ad(vma, addr, pmdp);
2189                 pmd = pmd_mkuffd_wp(old);
2190                 set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
2191         } else if (is_migration_entry(pmd_to_swp_entry(pmd))) {
2192                 pmd = pmd_swp_mkuffd_wp(pmd);
2193                 set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
2194         }
2195 }
2196 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
2197
2198 #ifdef CONFIG_HUGETLB_PAGE
2199 static unsigned long pagemap_hugetlb_category(pte_t pte)
2200 {
2201         unsigned long categories = PAGE_IS_HUGE;
2202
2203         /*
2204          * According to pagemap_hugetlb_range(), file-backed HugeTLB
2205          * page cannot be swapped. So PAGE_IS_FILE is not checked for
2206          * swapped pages.
2207          */
2208         if (pte_present(pte)) {
2209                 categories |= PAGE_IS_PRESENT;
2210                 if (!huge_pte_uffd_wp(pte))
2211                         categories |= PAGE_IS_WRITTEN;
2212                 if (!PageAnon(pte_page(pte)))
2213                         categories |= PAGE_IS_FILE;
2214                 if (is_zero_pfn(pte_pfn(pte)))
2215                         categories |= PAGE_IS_PFNZERO;
2216                 if (pte_soft_dirty(pte))
2217                         categories |= PAGE_IS_SOFT_DIRTY;
2218         } else if (is_swap_pte(pte)) {
2219                 categories |= PAGE_IS_SWAPPED;
2220                 if (!pte_swp_uffd_wp_any(pte))
2221                         categories |= PAGE_IS_WRITTEN;
2222                 if (pte_swp_soft_dirty(pte))
2223                         categories |= PAGE_IS_SOFT_DIRTY;
2224         }
2225
2226         return categories;
2227 }
2228
2229 static void make_uffd_wp_huge_pte(struct vm_area_struct *vma,
2230                                   unsigned long addr, pte_t *ptep,
2231                                   pte_t ptent)
2232 {
2233         unsigned long psize;
2234
2235         if (is_hugetlb_entry_hwpoisoned(ptent) || is_pte_marker(ptent))
2236                 return;
2237
2238         psize = huge_page_size(hstate_vma(vma));
2239
2240         if (is_hugetlb_entry_migration(ptent))
2241                 set_huge_pte_at(vma->vm_mm, addr, ptep,
2242                                 pte_swp_mkuffd_wp(ptent), psize);
2243         else if (!huge_pte_none(ptent))
2244                 huge_ptep_modify_prot_commit(vma, addr, ptep, ptent,
2245                                              huge_pte_mkuffd_wp(ptent));
2246         else
2247                 set_huge_pte_at(vma->vm_mm, addr, ptep,
2248                                 make_pte_marker(PTE_MARKER_UFFD_WP), psize);
2249 }
2250 #endif /* CONFIG_HUGETLB_PAGE */
2251
2252 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
2253 static void pagemap_scan_backout_range(struct pagemap_scan_private *p,
2254                                        unsigned long addr, unsigned long end)
2255 {
2256         struct page_region *cur_buf = &p->vec_buf[p->vec_buf_index];
2257
2258         if (cur_buf->start != addr)
2259                 cur_buf->end = addr;
2260         else
2261                 cur_buf->start = cur_buf->end = 0;
2262
2263         p->found_pages -= (end - addr) / PAGE_SIZE;
2264 }
2265 #endif
2266
2267 static bool pagemap_scan_is_interesting_page(unsigned long categories,
2268                                              const struct pagemap_scan_private *p)
2269 {
2270         categories ^= p->arg.category_inverted;
2271         if ((categories & p->arg.category_mask) != p->arg.category_mask)
2272                 return false;
2273         if (p->arg.category_anyof_mask && !(categories & p->arg.category_anyof_mask))
2274                 return false;
2275
2276         return true;
2277 }
2278
2279 static bool pagemap_scan_is_interesting_vma(unsigned long categories,
2280                                             const struct pagemap_scan_private *p)
2281 {
2282         unsigned long required = p->arg.category_mask & PAGE_IS_WPALLOWED;
2283
2284         categories ^= p->arg.category_inverted;
2285         if ((categories & required) != required)
2286                 return false;
2287
2288         return true;
2289 }
2290
2291 static int pagemap_scan_test_walk(unsigned long start, unsigned long end,
2292                                   struct mm_walk *walk)
2293 {
2294         struct pagemap_scan_private *p = walk->private;
2295         struct vm_area_struct *vma = walk->vma;
2296         unsigned long vma_category = 0;
2297         bool wp_allowed = userfaultfd_wp_async(vma) &&
2298             userfaultfd_wp_use_markers(vma);
2299
2300         if (!wp_allowed) {
2301                 /* User requested explicit failure over wp-async capability */
2302                 if (p->arg.flags & PM_SCAN_CHECK_WPASYNC)
2303                         return -EPERM;
2304                 /*
2305                  * User requires wr-protect, and allows silently skipping
2306                  * unsupported vmas.
2307                  */
2308                 if (p->arg.flags & PM_SCAN_WP_MATCHING)
2309                         return 1;
2310                 /*
2311                  * Then the request doesn't involve wr-protects at all,
2312                  * fall through to the rest checks, and allow vma walk.
2313                  */
2314         }
2315
2316         if (vma->vm_flags & VM_PFNMAP)
2317                 return 1;
2318
2319         if (wp_allowed)
2320                 vma_category |= PAGE_IS_WPALLOWED;
2321
2322         if (vma->vm_flags & VM_SOFTDIRTY)
2323                 vma_category |= PAGE_IS_SOFT_DIRTY;
2324
2325         if (!pagemap_scan_is_interesting_vma(vma_category, p))
2326                 return 1;
2327
2328         p->cur_vma_category = vma_category;
2329
2330         return 0;
2331 }
2332
2333 static bool pagemap_scan_push_range(unsigned long categories,
2334                                     struct pagemap_scan_private *p,
2335                                     unsigned long addr, unsigned long end)
2336 {
2337         struct page_region *cur_buf = &p->vec_buf[p->vec_buf_index];
2338
2339         /*
2340          * When there is no output buffer provided at all, the sentinel values
2341          * won't match here. There is no other way for `cur_buf->end` to be
2342          * non-zero other than it being non-empty.
2343          */
2344         if (addr == cur_buf->end && categories == cur_buf->categories) {
2345                 cur_buf->end = end;
2346                 return true;
2347         }
2348
2349         if (cur_buf->end) {
2350                 if (p->vec_buf_index >= p->vec_buf_len - 1)
2351                         return false;
2352
2353                 cur_buf = &p->vec_buf[++p->vec_buf_index];
2354         }
2355
2356         cur_buf->start = addr;
2357         cur_buf->end = end;
2358         cur_buf->categories = categories;
2359
2360         return true;
2361 }
2362
2363 static int pagemap_scan_output(unsigned long categories,
2364                                struct pagemap_scan_private *p,
2365                                unsigned long addr, unsigned long *end)
2366 {
2367         unsigned long n_pages, total_pages;
2368         int ret = 0;
2369
2370         if (!p->vec_buf)
2371                 return 0;
2372
2373         categories &= p->arg.return_mask;
2374
2375         n_pages = (*end - addr) / PAGE_SIZE;
2376         if (check_add_overflow(p->found_pages, n_pages, &total_pages) ||
2377             total_pages > p->arg.max_pages) {
2378                 size_t n_too_much = total_pages - p->arg.max_pages;
2379                 *end -= n_too_much * PAGE_SIZE;
2380                 n_pages -= n_too_much;
2381                 ret = -ENOSPC;
2382         }
2383
2384         if (!pagemap_scan_push_range(categories, p, addr, *end)) {
2385                 *end = addr;
2386                 n_pages = 0;
2387                 ret = -ENOSPC;
2388         }
2389
2390         p->found_pages += n_pages;
2391         if (ret)
2392                 p->arg.walk_end = *end;
2393
2394         return ret;
2395 }
2396
2397 static int pagemap_scan_thp_entry(pmd_t *pmd, unsigned long start,
2398                                   unsigned long end, struct mm_walk *walk)
2399 {
2400 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2401         struct pagemap_scan_private *p = walk->private;
2402         struct vm_area_struct *vma = walk->vma;
2403         unsigned long categories;
2404         spinlock_t *ptl;
2405         int ret = 0;
2406
2407         ptl = pmd_trans_huge_lock(pmd, vma);
2408         if (!ptl)
2409                 return -ENOENT;
2410
2411         categories = p->cur_vma_category |
2412                      pagemap_thp_category(p, vma, start, *pmd);
2413
2414         if (!pagemap_scan_is_interesting_page(categories, p))
2415                 goto out_unlock;
2416
2417         ret = pagemap_scan_output(categories, p, start, &end);
2418         if (start == end)
2419                 goto out_unlock;
2420
2421         if (~p->arg.flags & PM_SCAN_WP_MATCHING)
2422                 goto out_unlock;
2423         if (~categories & PAGE_IS_WRITTEN)
2424                 goto out_unlock;
2425
2426         /*
2427          * Break huge page into small pages if the WP operation
2428          * needs to be performed on a portion of the huge page.
2429          */
2430         if (end != start + HPAGE_SIZE) {
2431                 spin_unlock(ptl);
2432                 split_huge_pmd(vma, pmd, start);
2433                 pagemap_scan_backout_range(p, start, end);
2434                 /* Report as if there was no THP */
2435                 return -ENOENT;
2436         }
2437
2438         make_uffd_wp_pmd(vma, start, pmd);
2439         flush_tlb_range(vma, start, end);
2440 out_unlock:
2441         spin_unlock(ptl);
2442         return ret;
2443 #else /* !CONFIG_TRANSPARENT_HUGEPAGE */
2444         return -ENOENT;
2445 #endif
2446 }
2447
2448 static int pagemap_scan_pmd_entry(pmd_t *pmd, unsigned long start,
2449                                   unsigned long end, struct mm_walk *walk)
2450 {
2451         struct pagemap_scan_private *p = walk->private;
2452         struct vm_area_struct *vma = walk->vma;
2453         unsigned long addr, flush_end = 0;
2454         pte_t *pte, *start_pte;
2455         spinlock_t *ptl;
2456         int ret;
2457
2458         arch_enter_lazy_mmu_mode();
2459
2460         ret = pagemap_scan_thp_entry(pmd, start, end, walk);
2461         if (ret != -ENOENT) {
2462                 arch_leave_lazy_mmu_mode();
2463                 return ret;
2464         }
2465
2466         ret = 0;
2467         start_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);
2468         if (!pte) {
2469                 arch_leave_lazy_mmu_mode();
2470                 walk->action = ACTION_AGAIN;
2471                 return 0;
2472         }
2473
2474         if ((p->arg.flags & PM_SCAN_WP_MATCHING) && !p->vec_out) {
2475                 /* Fast path for performing exclusive WP */
2476                 for (addr = start; addr != end; pte++, addr += PAGE_SIZE) {
2477                         pte_t ptent = ptep_get(pte);
2478
2479                         if ((pte_present(ptent) && pte_uffd_wp(ptent)) ||
2480                             pte_swp_uffd_wp_any(ptent))
2481                                 continue;
2482                         make_uffd_wp_pte(vma, addr, pte, ptent);
2483                         if (!flush_end)
2484                                 start = addr;
2485                         flush_end = addr + PAGE_SIZE;
2486                 }
2487                 goto flush_and_return;
2488         }
2489
2490         if (!p->arg.category_anyof_mask && !p->arg.category_inverted &&
2491             p->arg.category_mask == PAGE_IS_WRITTEN &&
2492             p->arg.return_mask == PAGE_IS_WRITTEN) {
2493                 for (addr = start; addr < end; pte++, addr += PAGE_SIZE) {
2494                         unsigned long next = addr + PAGE_SIZE;
2495                         pte_t ptent = ptep_get(pte);
2496
2497                         if ((pte_present(ptent) && pte_uffd_wp(ptent)) ||
2498                             pte_swp_uffd_wp_any(ptent))
2499                                 continue;
2500                         ret = pagemap_scan_output(p->cur_vma_category | PAGE_IS_WRITTEN,
2501                                                   p, addr, &next);
2502                         if (next == addr)
2503                                 break;
2504                         if (~p->arg.flags & PM_SCAN_WP_MATCHING)
2505                                 continue;
2506                         make_uffd_wp_pte(vma, addr, pte, ptent);
2507                         if (!flush_end)
2508                                 start = addr;
2509                         flush_end = next;
2510                 }
2511                 goto flush_and_return;
2512         }
2513
2514         for (addr = start; addr != end; pte++, addr += PAGE_SIZE) {
2515                 pte_t ptent = ptep_get(pte);
2516                 unsigned long categories = p->cur_vma_category |
2517                                            pagemap_page_category(p, vma, addr, ptent);
2518                 unsigned long next = addr + PAGE_SIZE;
2519
2520                 if (!pagemap_scan_is_interesting_page(categories, p))
2521                         continue;
2522
2523                 ret = pagemap_scan_output(categories, p, addr, &next);
2524                 if (next == addr)
2525                         break;
2526
2527                 if (~p->arg.flags & PM_SCAN_WP_MATCHING)
2528                         continue;
2529                 if (~categories & PAGE_IS_WRITTEN)
2530                         continue;
2531
2532                 make_uffd_wp_pte(vma, addr, pte, ptent);
2533                 if (!flush_end)
2534                         start = addr;
2535                 flush_end = next;
2536         }
2537
2538 flush_and_return:
2539         if (flush_end)
2540                 flush_tlb_range(vma, start, addr);
2541
2542         pte_unmap_unlock(start_pte, ptl);
2543         arch_leave_lazy_mmu_mode();
2544
2545         cond_resched();
2546         return ret;
2547 }
2548
2549 #ifdef CONFIG_HUGETLB_PAGE
2550 static int pagemap_scan_hugetlb_entry(pte_t *ptep, unsigned long hmask,
2551                                       unsigned long start, unsigned long end,
2552                                       struct mm_walk *walk)
2553 {
2554         struct pagemap_scan_private *p = walk->private;
2555         struct vm_area_struct *vma = walk->vma;
2556         unsigned long categories;
2557         spinlock_t *ptl;
2558         int ret = 0;
2559         pte_t pte;
2560
2561         if (~p->arg.flags & PM_SCAN_WP_MATCHING) {
2562                 /* Go the short route when not write-protecting pages. */
2563
2564                 pte = huge_ptep_get(walk->mm, start, ptep);
2565                 categories = p->cur_vma_category | pagemap_hugetlb_category(pte);
2566
2567                 if (!pagemap_scan_is_interesting_page(categories, p))
2568                         return 0;
2569
2570                 return pagemap_scan_output(categories, p, start, &end);
2571         }
2572
2573         i_mmap_lock_write(vma->vm_file->f_mapping);
2574         ptl = huge_pte_lock(hstate_vma(vma), vma->vm_mm, ptep);
2575
2576         pte = huge_ptep_get(walk->mm, start, ptep);
2577         categories = p->cur_vma_category | pagemap_hugetlb_category(pte);
2578
2579         if (!pagemap_scan_is_interesting_page(categories, p))
2580                 goto out_unlock;
2581
2582         ret = pagemap_scan_output(categories, p, start, &end);
2583         if (start == end)
2584                 goto out_unlock;
2585
2586         if (~categories & PAGE_IS_WRITTEN)
2587                 goto out_unlock;
2588
2589         if (end != start + HPAGE_SIZE) {
2590                 /* Partial HugeTLB page WP isn't possible. */
2591                 pagemap_scan_backout_range(p, start, end);
2592                 p->arg.walk_end = start;
2593                 ret = 0;
2594                 goto out_unlock;
2595         }
2596
2597         make_uffd_wp_huge_pte(vma, start, ptep, pte);
2598         flush_hugetlb_tlb_range(vma, start, end);
2599
2600 out_unlock:
2601         spin_unlock(ptl);
2602         i_mmap_unlock_write(vma->vm_file->f_mapping);
2603
2604         return ret;
2605 }
2606 #else
2607 #define pagemap_scan_hugetlb_entry NULL
2608 #endif
2609
2610 static int pagemap_scan_pte_hole(unsigned long addr, unsigned long end,
2611                                  int depth, struct mm_walk *walk)
2612 {
2613         struct pagemap_scan_private *p = walk->private;
2614         struct vm_area_struct *vma = walk->vma;
2615         int ret, err;
2616
2617         if (!vma || !pagemap_scan_is_interesting_page(p->cur_vma_category, p))
2618                 return 0;
2619
2620         ret = pagemap_scan_output(p->cur_vma_category, p, addr, &end);
2621         if (addr == end)
2622                 return ret;
2623
2624         if (~p->arg.flags & PM_SCAN_WP_MATCHING)
2625                 return ret;
2626
2627         err = uffd_wp_range(vma, addr, end - addr, true);
2628         if (err < 0)
2629                 ret = err;
2630
2631         return ret;
2632 }
2633
2634 static const struct mm_walk_ops pagemap_scan_ops = {
2635         .test_walk = pagemap_scan_test_walk,
2636         .pmd_entry = pagemap_scan_pmd_entry,
2637         .pte_hole = pagemap_scan_pte_hole,
2638         .hugetlb_entry = pagemap_scan_hugetlb_entry,
2639 };
2640
2641 static int pagemap_scan_get_args(struct pm_scan_arg *arg,
2642                                  unsigned long uarg)
2643 {
2644         if (copy_from_user(arg, (void __user *)uarg, sizeof(*arg)))
2645                 return -EFAULT;
2646
2647         if (arg->size != sizeof(struct pm_scan_arg))
2648                 return -EINVAL;
2649
2650         /* Validate requested features */
2651         if (arg->flags & ~PM_SCAN_FLAGS)
2652                 return -EINVAL;
2653         if ((arg->category_inverted | arg->category_mask |
2654              arg->category_anyof_mask | arg->return_mask) & ~PM_SCAN_CATEGORIES)
2655                 return -EINVAL;
2656
2657         arg->start = untagged_addr((unsigned long)arg->start);
2658         arg->end = untagged_addr((unsigned long)arg->end);
2659         arg->vec = untagged_addr((unsigned long)arg->vec);
2660
2661         /* Validate memory pointers */
2662         if (!IS_ALIGNED(arg->start, PAGE_SIZE))
2663                 return -EINVAL;
2664         if (!access_ok((void __user *)(long)arg->start, arg->end - arg->start))
2665                 return -EFAULT;
2666         if (!arg->vec && arg->vec_len)
2667                 return -EINVAL;
2668         if (UINT_MAX == SIZE_MAX && arg->vec_len > SIZE_MAX)
2669                 return -EINVAL;
2670         if (arg->vec && !access_ok((void __user *)(long)arg->vec,
2671                                    size_mul(arg->vec_len, sizeof(struct page_region))))
2672                 return -EFAULT;
2673
2674         /* Fixup default values */
2675         arg->end = ALIGN(arg->end, PAGE_SIZE);
2676         arg->walk_end = 0;
2677         if (!arg->max_pages)
2678                 arg->max_pages = ULONG_MAX;
2679
2680         return 0;
2681 }
2682
2683 static int pagemap_scan_writeback_args(struct pm_scan_arg *arg,
2684                                        unsigned long uargl)
2685 {
2686         struct pm_scan_arg __user *uarg = (void __user *)uargl;
2687
2688         if (copy_to_user(&uarg->walk_end, &arg->walk_end, sizeof(arg->walk_end)))
2689                 return -EFAULT;
2690
2691         return 0;
2692 }
2693
2694 static int pagemap_scan_init_bounce_buffer(struct pagemap_scan_private *p)
2695 {
2696         if (!p->arg.vec_len)
2697                 return 0;
2698
2699         p->vec_buf_len = min_t(size_t, PAGEMAP_WALK_SIZE >> PAGE_SHIFT,
2700                                p->arg.vec_len);
2701         p->vec_buf = kmalloc_array(p->vec_buf_len, sizeof(*p->vec_buf),
2702                                    GFP_KERNEL);
2703         if (!p->vec_buf)
2704                 return -ENOMEM;
2705
2706         p->vec_buf->start = p->vec_buf->end = 0;
2707         p->vec_out = (struct page_region __user *)(long)p->arg.vec;
2708
2709         return 0;
2710 }
2711
2712 static long pagemap_scan_flush_buffer(struct pagemap_scan_private *p)
2713 {
2714         const struct page_region *buf = p->vec_buf;
2715         long n = p->vec_buf_index;
2716
2717         if (!p->vec_buf)
2718                 return 0;
2719
2720         if (buf[n].end != buf[n].start)
2721                 n++;
2722
2723         if (!n)
2724                 return 0;
2725
2726         if (copy_to_user(p->vec_out, buf, n * sizeof(*buf)))
2727                 return -EFAULT;
2728
2729         p->arg.vec_len -= n;
2730         p->vec_out += n;
2731
2732         p->vec_buf_index = 0;
2733         p->vec_buf_len = min_t(size_t, p->vec_buf_len, p->arg.vec_len);
2734         p->vec_buf->start = p->vec_buf->end = 0;
2735
2736         return n;
2737 }
2738
2739 static long do_pagemap_scan(struct mm_struct *mm, unsigned long uarg)
2740 {
2741         struct pagemap_scan_private p = {0};
2742         unsigned long walk_start;
2743         size_t n_ranges_out = 0;
2744         int ret;
2745
2746         ret = pagemap_scan_get_args(&p.arg, uarg);
2747         if (ret)
2748                 return ret;
2749
2750         p.masks_of_interest = p.arg.category_mask | p.arg.category_anyof_mask |
2751                               p.arg.return_mask;
2752         ret = pagemap_scan_init_bounce_buffer(&p);
2753         if (ret)
2754                 return ret;
2755
2756         for (walk_start = p.arg.start; walk_start < p.arg.end;
2757                         walk_start = p.arg.walk_end) {
2758                 struct mmu_notifier_range range;
2759                 long n_out;
2760
2761                 if (fatal_signal_pending(current)) {
2762                         ret = -EINTR;
2763                         break;
2764                 }
2765
2766                 ret = mmap_read_lock_killable(mm);
2767                 if (ret)
2768                         break;
2769
2770                 /* Protection change for the range is going to happen. */
2771                 if (p.arg.flags & PM_SCAN_WP_MATCHING) {
2772                         mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA, 0,
2773                                                 mm, walk_start, p.arg.end);
2774                         mmu_notifier_invalidate_range_start(&range);
2775                 }
2776
2777                 ret = walk_page_range(mm, walk_start, p.arg.end,
2778                                       &pagemap_scan_ops, &p);
2779
2780                 if (p.arg.flags & PM_SCAN_WP_MATCHING)
2781                         mmu_notifier_invalidate_range_end(&range);
2782
2783                 mmap_read_unlock(mm);
2784
2785                 n_out = pagemap_scan_flush_buffer(&p);
2786                 if (n_out < 0)
2787                         ret = n_out;
2788                 else
2789                         n_ranges_out += n_out;
2790
2791                 if (ret != -ENOSPC)
2792                         break;
2793
2794                 if (p.arg.vec_len == 0 || p.found_pages == p.arg.max_pages)
2795                         break;
2796         }
2797
2798         /* ENOSPC signifies early stop (buffer full) from the walk. */
2799         if (!ret || ret == -ENOSPC)
2800                 ret = n_ranges_out;
2801
2802         /* The walk_end isn't set when ret is zero */
2803         if (!p.arg.walk_end)
2804                 p.arg.walk_end = p.arg.end;
2805         if (pagemap_scan_writeback_args(&p.arg, uarg))
2806                 ret = -EFAULT;
2807
2808         kfree(p.vec_buf);
2809         return ret;
2810 }
2811
2812 static long do_pagemap_cmd(struct file *file, unsigned int cmd,
2813                            unsigned long arg)
2814 {
2815         struct mm_struct *mm = file->private_data;
2816
2817         switch (cmd) {
2818         case PAGEMAP_SCAN:
2819                 return do_pagemap_scan(mm, arg);
2820
2821         default:
2822                 return -EINVAL;
2823         }
2824 }
2825
2826 const struct file_operations proc_pagemap_operations = {
2827         .llseek         = mem_lseek, /* borrow this */
2828         .read           = pagemap_read,
2829         .open           = pagemap_open,
2830         .release        = pagemap_release,
2831         .unlocked_ioctl = do_pagemap_cmd,
2832         .compat_ioctl   = do_pagemap_cmd,
2833 };
2834 #endif /* CONFIG_PROC_PAGE_MONITOR */
2835
2836 #ifdef CONFIG_NUMA
2837
2838 struct numa_maps {
2839         unsigned long pages;
2840         unsigned long anon;
2841         unsigned long active;
2842         unsigned long writeback;
2843         unsigned long mapcount_max;
2844         unsigned long dirty;
2845         unsigned long swapcache;
2846         unsigned long node[MAX_NUMNODES];
2847 };
2848
2849 struct numa_maps_private {
2850         struct proc_maps_private proc_maps;
2851         struct numa_maps md;
2852 };
2853
2854 static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
2855                         unsigned long nr_pages)
2856 {
2857         struct folio *folio = page_folio(page);
2858         int count = folio_precise_page_mapcount(folio, page);
2859
2860         md->pages += nr_pages;
2861         if (pte_dirty || folio_test_dirty(folio))
2862                 md->dirty += nr_pages;
2863
2864         if (folio_test_swapcache(folio))
2865                 md->swapcache += nr_pages;
2866
2867         if (folio_test_active(folio) || folio_test_unevictable(folio))
2868                 md->active += nr_pages;
2869
2870         if (folio_test_writeback(folio))
2871                 md->writeback += nr_pages;
2872
2873         if (folio_test_anon(folio))
2874                 md->anon += nr_pages;
2875
2876         if (count > md->mapcount_max)
2877                 md->mapcount_max = count;
2878
2879         md->node[folio_nid(folio)] += nr_pages;
2880 }
2881
2882 static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
2883                 unsigned long addr)
2884 {
2885         struct page *page;
2886         int nid;
2887
2888         if (!pte_present(pte))
2889                 return NULL;
2890
2891         page = vm_normal_page(vma, addr, pte);
2892         if (!page || is_zone_device_page(page))
2893                 return NULL;
2894
2895         if (PageReserved(page))
2896                 return NULL;
2897
2898         nid = page_to_nid(page);
2899         if (!node_isset(nid, node_states[N_MEMORY]))
2900                 return NULL;
2901
2902         return page;
2903 }
2904
2905 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2906 static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
2907                                               struct vm_area_struct *vma,
2908                                               unsigned long addr)
2909 {
2910         struct page *page;
2911         int nid;
2912
2913         if (!pmd_present(pmd))
2914                 return NULL;
2915
2916         page = vm_normal_page_pmd(vma, addr, pmd);
2917         if (!page)
2918                 return NULL;
2919
2920         if (PageReserved(page))
2921                 return NULL;
2922
2923         nid = page_to_nid(page);
2924         if (!node_isset(nid, node_states[N_MEMORY]))
2925                 return NULL;
2926
2927         return page;
2928 }
2929 #endif
2930
2931 static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
2932                 unsigned long end, struct mm_walk *walk)
2933 {
2934         struct numa_maps *md = walk->private;
2935         struct vm_area_struct *vma = walk->vma;
2936         spinlock_t *ptl;
2937         pte_t *orig_pte;
2938         pte_t *pte;
2939
2940 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2941         ptl = pmd_trans_huge_lock(pmd, vma);
2942         if (ptl) {
2943                 struct page *page;
2944
2945                 page = can_gather_numa_stats_pmd(*pmd, vma, addr);
2946                 if (page)
2947                         gather_stats(page, md, pmd_dirty(*pmd),
2948                                      HPAGE_PMD_SIZE/PAGE_SIZE);
2949                 spin_unlock(ptl);
2950                 return 0;
2951         }
2952 #endif
2953         orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
2954         if (!pte) {
2955                 walk->action = ACTION_AGAIN;
2956                 return 0;
2957         }
2958         do {
2959                 pte_t ptent = ptep_get(pte);
2960                 struct page *page = can_gather_numa_stats(ptent, vma, addr);
2961                 if (!page)
2962                         continue;
2963                 gather_stats(page, md, pte_dirty(ptent), 1);
2964
2965         } while (pte++, addr += PAGE_SIZE, addr != end);
2966         pte_unmap_unlock(orig_pte, ptl);
2967         cond_resched();
2968         return 0;
2969 }
2970 #ifdef CONFIG_HUGETLB_PAGE
2971 static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
2972                 unsigned long addr, unsigned long end, struct mm_walk *walk)
2973 {
2974         pte_t huge_pte = huge_ptep_get(walk->mm, addr, pte);
2975         struct numa_maps *md;
2976         struct page *page;
2977
2978         if (!pte_present(huge_pte))
2979                 return 0;
2980
2981         page = pte_page(huge_pte);
2982
2983         md = walk->private;
2984         gather_stats(page, md, pte_dirty(huge_pte), 1);
2985         return 0;
2986 }
2987
2988 #else
2989 static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
2990                 unsigned long addr, unsigned long end, struct mm_walk *walk)
2991 {
2992         return 0;
2993 }
2994 #endif
2995
2996 static const struct mm_walk_ops show_numa_ops = {
2997         .hugetlb_entry = gather_hugetlb_stats,
2998         .pmd_entry = gather_pte_stats,
2999         .walk_lock = PGWALK_RDLOCK,
3000 };
3001
3002 /*
3003  * Display pages allocated per node and memory policy via /proc.
3004  */
3005 static int show_numa_map(struct seq_file *m, void *v)
3006 {
3007         struct numa_maps_private *numa_priv = m->private;
3008         struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
3009         struct vm_area_struct *vma = v;
3010         struct numa_maps *md = &numa_priv->md;
3011         struct file *file = vma->vm_file;
3012         struct mm_struct *mm = vma->vm_mm;
3013         char buffer[64];
3014         struct mempolicy *pol;
3015         pgoff_t ilx;
3016         int nid;
3017
3018         if (!mm)
3019                 return 0;
3020
3021         /* Ensure we start with an empty set of numa_maps statistics. */
3022         memset(md, 0, sizeof(*md));
3023
3024         pol = __get_vma_policy(vma, vma->vm_start, &ilx);
3025         if (pol) {
3026                 mpol_to_str(buffer, sizeof(buffer), pol);
3027                 mpol_cond_put(pol);
3028         } else {
3029                 mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
3030         }
3031
3032         seq_printf(m, "%08lx %s", vma->vm_start, buffer);
3033
3034         if (file) {
3035                 seq_puts(m, " file=");
3036                 seq_path(m, file_user_path(file), "\n\t= ");
3037         } else if (vma_is_initial_heap(vma)) {
3038                 seq_puts(m, " heap");
3039         } else if (vma_is_initial_stack(vma)) {
3040                 seq_puts(m, " stack");
3041         }
3042
3043         if (is_vm_hugetlb_page(vma))
3044                 seq_puts(m, " huge");
3045
3046         /* mmap_lock is held by m_start */
3047         walk_page_vma(vma, &show_numa_ops, md);
3048
3049         if (!md->pages)
3050                 goto out;
3051
3052         if (md->anon)
3053                 seq_printf(m, " anon=%lu", md->anon);
3054
3055         if (md->dirty)
3056                 seq_printf(m, " dirty=%lu", md->dirty);
3057
3058         if (md->pages != md->anon && md->pages != md->dirty)
3059                 seq_printf(m, " mapped=%lu", md->pages);
3060
3061         if (md->mapcount_max > 1)
3062                 seq_printf(m, " mapmax=%lu", md->mapcount_max);
3063
3064         if (md->swapcache)
3065                 seq_printf(m, " swapcache=%lu", md->swapcache);
3066
3067         if (md->active < md->pages && !is_vm_hugetlb_page(vma))
3068                 seq_printf(m, " active=%lu", md->active);
3069
3070         if (md->writeback)
3071                 seq_printf(m, " writeback=%lu", md->writeback);
3072
3073         for_each_node_state(nid, N_MEMORY)
3074                 if (md->node[nid])
3075                         seq_printf(m, " N%d=%lu", nid, md->node[nid]);
3076
3077         seq_printf(m, " kernelpagesize_kB=%lu", vma_kernel_pagesize(vma) >> 10);
3078 out:
3079         seq_putc(m, '\n');
3080         return 0;
3081 }
3082
3083 static const struct seq_operations proc_pid_numa_maps_op = {
3084         .start  = m_start,
3085         .next   = m_next,
3086         .stop   = m_stop,
3087         .show   = show_numa_map,
3088 };
3089
3090 static int pid_numa_maps_open(struct inode *inode, struct file *file)
3091 {
3092         return proc_maps_open(inode, file, &proc_pid_numa_maps_op,
3093                                 sizeof(struct numa_maps_private));
3094 }
3095
3096 const struct file_operations proc_pid_numa_maps_operations = {
3097         .open           = pid_numa_maps_open,
3098         .read           = seq_read,
3099         .llseek         = seq_lseek,
3100         .release        = proc_map_release,
3101 };
3102
3103 #endif /* CONFIG_NUMA */
This page took 0.212427 seconds and 4 git commands to generate.