#include <linux/mm.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs_struct.h>
#include <linux/mount.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include "internal.h"

/*
 * Logic: we've got two memory sums for each process, "shared", and
 * "non-shared".  Shared memory may get counted more than once, for
 * each process that owns it.  Non-shared memory is counted
 * accurately.
 */
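/*
 * For example, a VM_MAYSHARE region mapped by two processes shows up
 * under "Shared" in both of their reports, whereas a private region
 * owned by an mm with a single user is added to "Mem" exactly once
 * (see the mm_count and VM_MAYSHARE checks below).
 */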
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
        struct vm_area_struct *vma;
        struct vm_region *region;
        struct rb_node *p;
        unsigned long bytes = 0, sbytes = 0, slack = 0, size;

        down_read(&mm->mmap_sem);
        for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
                vma = rb_entry(p, struct vm_area_struct, vm_rb);

                bytes += kobjsize(vma);

                region = vma->vm_region;
                if (region) {
                        size = kobjsize(region);
                        size += region->vm_end - region->vm_start;
                } else {
                        size = vma->vm_end - vma->vm_start;
                }

                if (atomic_read(&mm->mm_count) > 1 ||
                    vma->vm_flags & VM_MAYSHARE) {
                        sbytes += size;
                } else {
                        bytes += size;
                        if (region)
                                slack = region->vm_end - vma->vm_end;
                }
        }

        if (atomic_read(&mm->mm_count) > 1)
                sbytes += kobjsize(mm);
        else
                bytes += kobjsize(mm);

        if (current->fs && current->fs->users > 1)
                sbytes += kobjsize(current->fs);
        else
                bytes += kobjsize(current->fs);

        if (current->files && atomic_read(&current->files->count) > 1)
                sbytes += kobjsize(current->files);
        else
                bytes += kobjsize(current->files);

        if (current->sighand && atomic_read(&current->sighand->count) > 1)
                sbytes += kobjsize(current->sighand);
        else
                bytes += kobjsize(current->sighand);

        bytes += kobjsize(current); /* includes kernel stack */

        seq_printf(m,
                "Mem:\t%8lu bytes\n"
                "Slack:\t%8lu bytes\n"
                "Shared:\t%8lu bytes\n",
                bytes, slack, sbytes);

        up_read(&mm->mmap_sem);
}
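
/*
 * Return the total size in bytes of all VMAs in the mm, found by
 * walking the mm_rb tree under a read hold on mmap_sem.
 */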
unsigned long task_vsize(struct mm_struct *mm)
{
        struct vm_area_struct *vma;
        struct rb_node *p;
        unsigned long vsize = 0;

        down_read(&mm->mmap_sem);
        for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
                vma = rb_entry(p, struct vm_area_struct, vm_rb);
                vsize += vma->vm_end - vma->vm_start;
        }
        up_read(&mm->mmap_sem);
        return vsize;
}
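
/*
 * Compute the page counts reported through /proc/<pid>/statm: the
 * return value is the overall size in pages, *text, *data and
 * *resident are filled in here, and *shared is left untouched in the
 * no-MMU case.
 */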
unsigned long task_statm(struct mm_struct *mm,
                         unsigned long *shared, unsigned long *text,
                         unsigned long *data, unsigned long *resident)
{
        struct vm_area_struct *vma;
        struct vm_region *region;
        struct rb_node *p;
        unsigned long size = kobjsize(mm);

        down_read(&mm->mmap_sem);
        for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
                vma = rb_entry(p, struct vm_area_struct, vm_rb);
                size += kobjsize(vma);
                region = vma->vm_region;
                if (region) {
                        size += kobjsize(region);
                        size += region->vm_end - region->vm_start;
                }
        }

        *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
                >> PAGE_SHIFT;
        *data = (PAGE_ALIGN(mm->start_stack) - (mm->start_data & PAGE_MASK))
                >> PAGE_SHIFT;
        up_read(&mm->mmap_sem);
        size >>= PAGE_SHIFT;
        size += *text + *data;
        *resident = size;
        return size;
}
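
/*
 * A VMA is reported as "[stack]" if it spans mm->start_stack.
 */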
static int is_stack(struct proc_maps_private *priv,
                    struct vm_area_struct *vma)
{
        struct mm_struct *mm = vma->vm_mm;

        /*
         * We make no effort to guess what a given thread considers to be
         * its "stack".  It's not even well-defined for programs written
         * in languages like Go.
         */
        return vma->vm_start <= mm->start_stack &&
                vma->vm_end >= mm->start_stack;
}

/*
 * display a single VMA to a sequenced file
 */
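/*
 * The line format matches the MMU /proc/<pid>/maps output; an
 * illustrative example (values are made up):
 *
 *   00008000-00010000 r-xp 00000000 00:0c 1234       /bin/busybox
 */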
static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
                          int is_pid)
{
        struct mm_struct *mm = vma->vm_mm;
        struct proc_maps_private *priv = m->private;
        unsigned long ino = 0;
        struct file *file;
        dev_t dev = 0;
        int flags;
        unsigned long long pgoff = 0;

        flags = vma->vm_flags;
        file = vma->vm_file;

        if (file) {
                struct inode *inode = file_inode(vma->vm_file);
                dev = inode->i_sb->s_dev;
                ino = inode->i_ino;
                pgoff = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
        }

        seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
        seq_printf(m,
                   "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
                   vma->vm_start,
                   vma->vm_end,
                   flags & VM_READ ? 'r' : '-',
                   flags & VM_WRITE ? 'w' : '-',
                   flags & VM_EXEC ? 'x' : '-',
                   flags & VM_MAYSHARE ? flags & VM_SHARED ? 'S' : 's' : 'p',
                   pgoff,
                   MAJOR(dev), MINOR(dev), ino);

        if (file) {
                seq_pad(m, ' ');
                seq_file_path(m, file, "");
        } else if (mm && is_stack(priv, vma)) {
                seq_pad(m, ' ');
                seq_printf(m, "[stack]");
        }

        seq_putc(m, '\n');
        return 0;
}

/*
 * display mapping lines for a particular process's /proc/pid/maps
 */
static int show_map(struct seq_file *m, void *_p, int is_pid)
{
        struct rb_node *p = _p;

        return nommu_vma_show(m, rb_entry(p, struct vm_area_struct, vm_rb),
                              is_pid);
}

static int show_pid_map(struct seq_file *m, void *_p)
{
        return show_map(m, _p, 1);
}

static int show_tid_map(struct seq_file *m, void *_p)
{
        return show_map(m, _p, 0);
}
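
/*
 * seq_file iterator: m_start() pins the task and the mm, takes
 * mmap_sem for reading and returns the Nth rb_node; m_next() steps to
 * the following node; m_stop() drops the lock and the references
 * again.
 */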
static void *m_start(struct seq_file *m, loff_t *pos)
{
        struct proc_maps_private *priv = m->private;
        struct mm_struct *mm;
        struct rb_node *p;
        loff_t n = *pos;

        /* pin the task and mm whilst we play with them */
        priv->task = get_proc_task(priv->inode);
        if (!priv->task)
                return ERR_PTR(-ESRCH);

        mm = priv->mm;
        if (!mm || !atomic_inc_not_zero(&mm->mm_users))
                return NULL;

        down_read(&mm->mmap_sem);
        /* start from the Nth VMA */
        for (p = rb_first(&mm->mm_rb); p; p = rb_next(p))
                if (n-- == 0)
                        return p;

        up_read(&mm->mmap_sem);
        mmput(mm);
        return NULL;
}

static void m_stop(struct seq_file *m, void *_vml)
{
        struct proc_maps_private *priv = m->private;

        if (!IS_ERR_OR_NULL(_vml)) {
                up_read(&priv->mm->mmap_sem);
                mmput(priv->mm);
        }
        if (priv->task) {
                put_task_struct(priv->task);
                priv->task = NULL;
        }
}

static void *m_next(struct seq_file *m, void *_p, loff_t *pos)
{
        struct rb_node *p = _p;

        (*pos)++;
        return p ? rb_next(p) : NULL;
}

static const struct seq_operations proc_pid_maps_ops = {
        .start  = m_start,
        .next   = m_next,
        .stop   = m_stop,
        .show   = show_pid_map
};

static const struct seq_operations proc_tid_maps_ops = {
        .start  = m_start,
        .next   = m_next,
        .stop   = m_stop,
        .show   = show_tid_map
};
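
/*
 * maps_open() allocates the per-open proc_maps_private with
 * __seq_open_private() and takes a reference on the target mm via
 * proc_mem_open(); map_release() drops that reference with mmdrop()
 * when the file is closed.
 */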
static int maps_open(struct inode *inode, struct file *file,
                     const struct seq_operations *ops)
{
        struct proc_maps_private *priv;

        priv = __seq_open_private(file, ops, sizeof(*priv));
        if (!priv)
                return -ENOMEM;

        priv->inode = inode;
        priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
        if (IS_ERR(priv->mm)) {
                int err = PTR_ERR(priv->mm);

                seq_release_private(inode, file);
                return err;
        }

        return 0;
}

static int map_release(struct inode *inode, struct file *file)
{
        struct seq_file *seq = file->private_data;
        struct proc_maps_private *priv = seq->private;

        if (priv->mm)
                mmdrop(priv->mm);

        return seq_release_private(inode, file);
}

static int pid_maps_open(struct inode *inode, struct file *file)
{
        return maps_open(inode, file, &proc_pid_maps_ops);
}

static int tid_maps_open(struct inode *inode, struct file *file)
{
        return maps_open(inode, file, &proc_tid_maps_ops);
}

const struct file_operations proc_pid_maps_operations = {
        .open           = pid_maps_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = map_release,
};

const struct file_operations proc_tid_maps_operations = {
        .open           = tid_maps_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = map_release,
};