/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <[email protected]>
 * BIGMEM support, Andrea Arcangeli <[email protected]>
 * SMP thread shm, Jean-Luc Boyard <[email protected]>
 * HIGHMEM support, Ingo Molnar <[email protected]>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <[email protected]>
 * Shared /dev/zero support, Kanoj Sarcar <[email protected]>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <[email protected]>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <[email protected]>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <[email protected]>
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/mount.h>
#include <linux/ipc_namespace.h>

#include <asm/uaccess.h>

#include "util.h"

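/*
 * Each attach (see do_shmat()) gets its own struct file; that file's
 * ->private_data points at one of these, recording the segment's ipc id
 * and namespace, the underlying shmem/hugetlbfs file, and that file's
 * vm_operations, which the shm_* wrappers below forward to.
 */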
struct shm_file_data {
	int id;
	struct ipc_namespace *ns;
	struct file *file;
	const struct vm_operations_struct *vm_ops;
};

#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))

static const struct file_operations shm_file_operations;
static const struct vm_operations_struct shm_vm_ops;

#define shm_ids(ns)	((ns)->ids[IPC_SHM_IDS])

#define shm_unlock(shp)			\
	ipc_unlock(&(shp)->shm_perm)

static int newseg(struct ipc_namespace *, struct ipc_params *);
static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma);
static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif

void shm_init_ns(struct ipc_namespace *ns)
{
	ns->shm_ctlmax = SHMMAX;
	ns->shm_ctlall = SHMALL;
	ns->shm_ctlmni = SHMMNI;
	ns->shm_tot = 0;
	ipc_init_ids(&shm_ids(ns));
}

/*
 * Called with shm_ids.rw_mutex (writer) and the shp structure locked.
 * Only shm_ids.rw_mutex remains locked on exit.
 */
static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct shmid_kernel *shp;
	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	if (shp->shm_nattch){
		shp->shm_perm.mode |= SHM_DEST;
		/* Do not find it any more */
		shp->shm_perm.key = IPC_PRIVATE;
		shm_unlock(shp);
	} else
		shm_destroy(ns, shp);
}

#ifdef CONFIG_IPC_NS
void shm_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
	idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
}
#endif

void __init shm_init (void)
{
	shm_init_ns(&init_ipc_ns);
	ipc_init_proc_interface("sysvipc/shm",
#if BITS_PER_LONG <= 32
				" key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap\n",
#else
				" key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap\n",
#endif
				IPC_SHM_IDS, sysvipc_shm_proc_show);
}

/*
 * shm_lock_(check_) routines are called in the paths where the rw_mutex
 * is not necessarily held.
 */
static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct shmid_kernel *)ipcp;

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline struct shmid_kernel *shm_lock_check(struct ipc_namespace *ns,
						int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock_check(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct shmid_kernel *)ipcp;

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
{
	ipc_rmid(&shm_ids(ns), &s->shm_perm);
}


/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;

	shp = shm_lock(sfd->ns, sfd->id);
	BUG_ON(IS_ERR(shp));
	shp->shm_atim = get_seconds();
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_nattch++;
	shm_unlock(shp);
}

/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @ns: namespace
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.rw_mutex (writer) locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
	shm_rmid(ns, shp);
	shm_unlock(shp);
	if (!is_file_hugepages(shp->shm_file))
		shmem_lock(shp->shm_file, 0, shp->mlock_user);
	else if (shp->mlock_user)
		user_shm_unlock(shp->shm_file->f_path.dentry->d_inode->i_size,
						shp->mlock_user);
	fput (shp->shm_file);
	security_shm_free(shp);
	ipc_rcu_putref(shp);
}

/*
 * remove the attach descriptor vma.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
	struct file * file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;
	struct ipc_namespace *ns = sfd->ns;

	down_write(&shm_ids(ns).rw_mutex);
	/* remove from the list of attaches of the shm segment */
	shp = shm_lock(ns, sfd->id);
	BUG_ON(IS_ERR(shp));
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_dtim = get_seconds();
	shp->shm_nattch--;
	if(shp->shm_nattch == 0 &&
	   shp->shm_perm.mode & SHM_DEST)
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rw_mutex);
}

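/*
 * Forward page faults on an attached segment to the fault handler of the
 * underlying shmem (or hugetlbfs) file, saved in sfd->vm_ops by shm_mmap().
 */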
static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);

	return sfd->vm_ops->fault(vma, vmf);
}

#ifdef CONFIG_NUMA
static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	int err = 0;
	if (sfd->vm_ops->set_policy)
		err = sfd->vm_ops->set_policy(vma, new);
	return err;
}

static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
					unsigned long addr)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct mempolicy *pol = NULL;

	if (sfd->vm_ops->get_policy)
		pol = sfd->vm_ops->get_policy(vma, addr);
	else if (vma->vm_policy)
		pol = vma->vm_policy;

	return pol;
}
#endif

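/*
 * mmap of an attach file: map the underlying shmem/hugetlbfs file, then
 * interpose shm_vm_ops so open/close/fault can maintain the segment's
 * attach count and timestamps; the original vm_ops are kept in sfd->vm_ops.
 */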
static int shm_mmap(struct file * file, struct vm_area_struct * vma)
{
	struct shm_file_data *sfd = shm_file_data(file);
	int ret;

	ret = sfd->file->f_op->mmap(sfd->file, vma);
	if (ret != 0)
		return ret;
	sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
	BUG_ON(!sfd->vm_ops->fault);
#endif
	vma->vm_ops = &shm_vm_ops;
	shm_open(vma);

	return ret;
}

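/*
 * Last reference to the attach file is gone: drop the namespace reference
 * and free the per-attach shm_file_data.
 */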
static int shm_release(struct inode *ino, struct file *file)
{
	struct shm_file_data *sfd = shm_file_data(file);

	put_ipc_ns(sfd->ns);
	shm_file_data(file) = NULL;
	kfree(sfd);
	return 0;
}

static int shm_fsync(struct file *file, int datasync)
{
	struct shm_file_data *sfd = shm_file_data(file);

	if (!sfd->file->f_op->fsync)
		return -EINVAL;
	return sfd->file->f_op->fsync(sfd->file, datasync);
}

static unsigned long shm_get_unmapped_area(struct file *file,
	unsigned long addr, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	struct shm_file_data *sfd = shm_file_data(file);
	return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
						pgoff, flags);
}

static const struct file_operations shm_file_operations = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
#ifndef CONFIG_MMU
	.get_unmapped_area	= shm_get_unmapped_area,
#endif
	.llseek		= noop_llseek,
};

static const struct file_operations shm_file_operations_huge = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
	.get_unmapped_area	= shm_get_unmapped_area,
	.llseek		= noop_llseek,
};

int is_file_shm_hugepages(struct file *file)
{
	return file->f_op == &shm_file_operations_huge;
}

static const struct vm_operations_struct shm_vm_ops = {
	.open	= shm_open,	/* callback for a new vm-area open */
	.close	= shm_close,	/* callback for when the vm-area is released */
	.fault	= shm_fault,
#if defined(CONFIG_NUMA)
	.set_policy = shm_set_policy,
	.get_policy = shm_get_policy,
#endif
};

/**
 * newseg - Create a new shared memory segment
 * @ns: namespace
 * @params: ptr to the structure that contains key, size and shmflg
 *
 * Called with shm_ids.rw_mutex held as a writer.
 */

static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
{
	key_t key = params->key;
	int shmflg = params->flg;
	size_t size = params->u.size;
	int error;
	struct shmid_kernel *shp;
	int numpages = (size + PAGE_SIZE -1) >> PAGE_SHIFT;
	struct file * file;
	char name[13];
	int id;
	int acctflag = 0;

	if (size < SHMMIN || size > ns->shm_ctlmax)
		return -EINVAL;

	if (ns->shm_tot + numpages > ns->shm_ctlall)
		return -ENOSPC;

	shp = ipc_rcu_alloc(sizeof(*shp));
	if (!shp)
		return -ENOMEM;

	shp->shm_perm.key = key;
	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
	shp->mlock_user = NULL;

	shp->shm_perm.security = NULL;
	error = security_shm_alloc(shp);
	if (error) {
		ipc_rcu_putref(shp);
		return error;
	}

	sprintf (name, "SYSV%08x", key);
	if (shmflg & SHM_HUGETLB) {
		/* hugetlb_file_setup applies strict accounting */
		if (shmflg & SHM_NORESERVE)
			acctflag = VM_NORESERVE;
		file = hugetlb_file_setup(name, size, acctflag,
					&shp->mlock_user, HUGETLB_SHMFS_INODE);
	} else {
		/*
		 * Do not allow no accounting for OVERCOMMIT_NEVER, even
		 * if it's asked for.
		 */
		if ((shmflg & SHM_NORESERVE) &&
				sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			acctflag = VM_NORESERVE;
		file = shmem_file_setup(name, size, acctflag);
	}
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto no_file;

	id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
	if (id < 0) {
		error = id;
		goto no_id;
	}

	shp->shm_cprid = task_tgid_vnr(current);
	shp->shm_lprid = 0;
	shp->shm_atim = shp->shm_dtim = 0;
	shp->shm_ctim = get_seconds();
	shp->shm_segsz = size;
	shp->shm_nattch = 0;
	shp->shm_file = file;
	/*
	 * shmid gets reported as "inode#" in /proc/pid/maps.
	 * proc-ps tools use this. Changing this will break them.
	 */
	file->f_dentry->d_inode->i_ino = shp->shm_perm.id;

	ns->shm_tot += numpages;
	error = shp->shm_perm.id;
	shm_unlock(shp);
	return error;

no_id:
	if (is_file_hugepages(file) && shp->mlock_user)
		user_shm_unlock(size, shp->mlock_user);
	fput(file);
no_file:
	security_shm_free(shp);
	ipc_rcu_putref(shp);
	return error;
}

/*
 * Called with shm_ids.rw_mutex and ipcp locked.
 */
static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	return security_shm_associate(shp, shmflg);
}

/*
 * Called with shm_ids.rw_mutex and ipcp locked.
 */
static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
				struct ipc_params *params)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	if (shp->shm_segsz < params->u.size)
		return -EINVAL;

	return 0;
}

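/*
 * shmget(2) entry point.  ipcget() either finds an existing segment for
 * the key (after shm_security() and shm_more_checks()) or calls newseg()
 * to create a new one.  A typical userspace call, for illustration only:
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 */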
SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
{
	struct ipc_namespace *ns;
	struct ipc_ops shm_ops;
	struct ipc_params shm_params;

	ns = current->nsproxy->ipc_ns;

	shm_ops.getnew = newseg;
	shm_ops.associate = shm_security;
	shm_ops.more_checks = shm_more_checks;

	shm_params.key = key;
	shm_params.flg = shmflg;
	shm_params.u.size = size;

	return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
}

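/*
 * Helpers translating between the kernel's shmid64_ds/shminfo64 and the
 * ABI variant (IPC_64 or IPC_OLD) that userspace requested.
 */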
static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
	switch(version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shmid_ds out;

		memset(&out, 0, sizeof(out));
		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
		out.shm_segsz	= in->shm_segsz;
		out.shm_atime	= in->shm_atime;
		out.shm_dtime	= in->shm_dtime;
		out.shm_ctime	= in->shm_ctime;
		out.shm_cpid	= in->shm_cpid;
		out.shm_lpid	= in->shm_lpid;
		out.shm_nattch	= in->shm_nattch;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

static inline unsigned long
copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
{
	switch(version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	    {
		struct shmid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->shm_perm.uid	= tbuf_old.shm_perm.uid;
		out->shm_perm.gid	= tbuf_old.shm_perm.gid;
		out->shm_perm.mode	= tbuf_old.shm_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}

static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
	switch(version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shminfo out;

		if(in->shmmax > INT_MAX)
			out.shmmax = INT_MAX;
		else
			out.shmmax = (int)in->shmmax;

		out.shmmin	= in->shmmin;
		out.shmmni	= in->shmmni;
		out.shmseg	= in->shmseg;
		out.shmall	= in->shmall;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

/*
 * Calculate and add used RSS and swap pages of a shm.
 * Called with shm_ids.rw_mutex held as a reader
 */
static void shm_add_rss_swap(struct shmid_kernel *shp,
	unsigned long *rss_add, unsigned long *swp_add)
{
	struct inode *inode;

	inode = shp->shm_file->f_path.dentry->d_inode;

	if (is_file_hugepages(shp->shm_file)) {
		struct address_space *mapping = inode->i_mapping;
		struct hstate *h = hstate_file(shp->shm_file);
		*rss_add += pages_per_huge_page(h) * mapping->nrpages;
	} else {
#ifdef CONFIG_SHMEM
		struct shmem_inode_info *info = SHMEM_I(inode);
		spin_lock(&info->lock);
		*rss_add += inode->i_mapping->nrpages;
		*swp_add += info->swapped;
		spin_unlock(&info->lock);
#else
		*rss_add += inode->i_mapping->nrpages;
#endif
	}
}

/*
 * Called with shm_ids.rw_mutex held as a reader
 */
static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
		unsigned long *swp)
{
	int next_id;
	int total, in_use;

	*rss = 0;
	*swp = 0;

	in_use = shm_ids(ns).in_use;

	for (total = 0, next_id = 0; total < in_use; next_id++) {
		struct kern_ipc_perm *ipc;
		struct shmid_kernel *shp;

		ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
		if (ipc == NULL)
			continue;
		shp = container_of(ipc, struct shmid_kernel, shm_perm);

		shm_add_rss_swap(shp, rss, swp);

		total++;
	}
}

/*
 * This function handles some shmctl commands which require the rw_mutex
 * to be held in write mode.
 * NOTE: no locks must be held, the rw_mutex is taken inside this function.
 */
static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
		       struct shmid_ds __user *buf, int version)
{
	struct kern_ipc_perm *ipcp;
	struct shmid64_ds shmid64;
	struct shmid_kernel *shp;
	int err;

	if (cmd == IPC_SET) {
		if (copy_shmid_from_user(&shmid64, buf, version))
			return -EFAULT;
	}

	ipcp = ipcctl_pre_down(&shm_ids(ns), shmid, cmd, &shmid64.shm_perm, 0);
	if (IS_ERR(ipcp))
		return PTR_ERR(ipcp);

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	err = security_shm_shmctl(shp, cmd);
	if (err)
		goto out_unlock;
	switch (cmd) {
	case IPC_RMID:
		do_shm_rmid(ns, ipcp);
		goto out_up;
	case IPC_SET:
		ipc_update_perm(&shmid64.shm_perm, ipcp);
		shp->shm_ctim = get_seconds();
		break;
	default:
		err = -EINVAL;
	}
out_unlock:
	shm_unlock(shp);
out_up:
	up_write(&shm_ids(ns).rw_mutex);
	return err;
}

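/*
 * shmctl(2) entry point.  IPC_INFO, SHM_INFO, SHM_STAT/IPC_STAT and
 * SHM_LOCK/SHM_UNLOCK are handled here under the appropriate locks;
 * IPC_RMID and IPC_SET, which need the rw_mutex as a writer, are handed
 * off to shmctl_down().
 */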
SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
{
	struct shmid_kernel *shp;
	int err, version;
	struct ipc_namespace *ns;

	if (cmd < 0 || shmid < 0) {
		err = -EINVAL;
		goto out;
	}

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) { /* replace with proc interface ? */
	case IPC_INFO:
	{
		struct shminfo64 shminfo;

		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;

		memset(&shminfo, 0, sizeof(shminfo));
		shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
		shminfo.shmmax = ns->shm_ctlmax;
		shminfo.shmall = ns->shm_ctlall;

		shminfo.shmmin = SHMMIN;
		if(copy_shminfo_to_user (buf, &shminfo, version))
			return -EFAULT;

		down_read(&shm_ids(ns).rw_mutex);
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rw_mutex);

		if(err<0)
			err = 0;
		goto out;
	}
	case SHM_INFO:
	{
		struct shm_info shm_info;

		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;

		memset(&shm_info, 0, sizeof(shm_info));
		down_read(&shm_ids(ns).rw_mutex);
		shm_info.used_ids = shm_ids(ns).in_use;
		shm_get_stat (ns, &shm_info.shm_rss, &shm_info.shm_swp);
		shm_info.shm_tot = ns->shm_tot;
		shm_info.swap_attempts = 0;
		shm_info.swap_successes = 0;
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rw_mutex);
		if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
			err = -EFAULT;
			goto out;
		}

		err = err < 0 ? 0 : err;
		goto out;
	}
	case SHM_STAT:
	case IPC_STAT:
	{
		struct shmid64_ds tbuf;
		int result;

		if (cmd == SHM_STAT) {
			shp = shm_lock(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out;
			}
			result = shp->shm_perm.id;
		} else {
			shp = shm_lock_check(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out;
			}
			result = 0;
		}
		err = -EACCES;
		if (ipcperms (&shp->shm_perm, S_IRUGO))
			goto out_unlock;
		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;
		memset(&tbuf, 0, sizeof(tbuf));
		kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
		tbuf.shm_segsz	= shp->shm_segsz;
		tbuf.shm_atime	= shp->shm_atim;
		tbuf.shm_dtime	= shp->shm_dtim;
		tbuf.shm_ctime	= shp->shm_ctim;
		tbuf.shm_cpid	= shp->shm_cprid;
		tbuf.shm_lpid	= shp->shm_lprid;
		tbuf.shm_nattch	= shp->shm_nattch;
		shm_unlock(shp);
		if(copy_shmid_to_user (buf, &tbuf, version))
			err = -EFAULT;
		else
			err = result;
		goto out;
	}
	case SHM_LOCK:
	case SHM_UNLOCK:
	{
		struct file *uninitialized_var(shm_file);

		lru_add_drain_all();  /* drain pagevecs to lru lists */

		shp = shm_lock_check(ns, shmid);
		if (IS_ERR(shp)) {
			err = PTR_ERR(shp);
			goto out;
		}

		audit_ipc_obj(&(shp->shm_perm));

		if (!capable(CAP_IPC_LOCK)) {
			uid_t euid = current_euid();
			err = -EPERM;
			if (euid != shp->shm_perm.uid &&
			    euid != shp->shm_perm.cuid)
				goto out_unlock;
			if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK))
				goto out_unlock;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;

		if(cmd==SHM_LOCK) {
			struct user_struct *user = current_user();
			if (!is_file_hugepages(shp->shm_file)) {
				err = shmem_lock(shp->shm_file, 1, user);
				if (!err && !(shp->shm_perm.mode & SHM_LOCKED)){
					shp->shm_perm.mode |= SHM_LOCKED;
					shp->mlock_user = user;
				}
			}
		} else if (!is_file_hugepages(shp->shm_file)) {
			shmem_lock(shp->shm_file, 0, shp->mlock_user);
			shp->shm_perm.mode &= ~SHM_LOCKED;
			shp->mlock_user = NULL;
		}
		shm_unlock(shp);
		goto out;
	}
	case IPC_RMID:
	case IPC_SET:
		err = shmctl_down(ns, shmid, cmd, buf, version);
		return err;
	default:
		return -EINVAL;
	}

out_unlock:
	shm_unlock(shp);
out:
	return err;
}

/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
{
	struct shmid_kernel *shp;
	unsigned long addr;
	unsigned long size;
	struct file * file;
	int err;
	unsigned long flags;
	unsigned long prot;
	int acc_mode;
	unsigned long user_addr;
	struct ipc_namespace *ns;
	struct shm_file_data *sfd;
	struct path path;
	fmode_t f_mode;

	err = -EINVAL;
	if (shmid < 0)
		goto out;
	else if ((addr = (ulong)shmaddr)) {
		if (addr & (SHMLBA-1)) {
			if (shmflg & SHM_RND)
				addr &= ~(SHMLBA-1);	   /* round down */
			else
#ifndef __ARCH_FORCE_SHMLBA
				if (addr & ~PAGE_MASK)
#endif
					goto out;
		}
		flags = MAP_SHARED | MAP_FIXED;
	} else {
		if ((shmflg & SHM_REMAP))
			goto out;

		flags = MAP_SHARED;
	}

	if (shmflg & SHM_RDONLY) {
		prot = PROT_READ;
		acc_mode = S_IRUGO;
		f_mode = FMODE_READ;
	} else {
		prot = PROT_READ | PROT_WRITE;
		acc_mode = S_IRUGO | S_IWUGO;
		f_mode = FMODE_READ | FMODE_WRITE;
	}
	if (shmflg & SHM_EXEC) {
		prot |= PROT_EXEC;
		acc_mode |= S_IXUGO;
	}

	/*
	 * We cannot rely on the fs check since SYSV IPC does have an
	 * additional creator id...
	 */
	ns = current->nsproxy->ipc_ns;
	shp = shm_lock_check(ns, shmid);
	if (IS_ERR(shp)) {
		err = PTR_ERR(shp);
		goto out;
	}

	err = -EACCES;
	if (ipcperms(&shp->shm_perm, acc_mode))
		goto out_unlock;

	err = security_shm_shmat(shp, shmaddr, shmflg);
	if (err)
		goto out_unlock;

	path = shp->shm_file->f_path;
	path_get(&path);
	shp->shm_nattch++;
	size = i_size_read(path.dentry->d_inode);
	shm_unlock(shp);

	err = -ENOMEM;
	sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
	if (!sfd)
		goto out_put_dentry;

	file = alloc_file(&path, f_mode,
			  is_file_hugepages(shp->shm_file) ?
				&shm_file_operations_huge :
				&shm_file_operations);
	if (!file)
		goto out_free;

	file->private_data = sfd;
	file->f_mapping = shp->shm_file->f_mapping;
	sfd->id = shp->shm_perm.id;
	sfd->ns = get_ipc_ns(ns);
	sfd->file = shp->shm_file;
	sfd->vm_ops = NULL;

	down_write(&current->mm->mmap_sem);
	if (addr && !(shmflg & SHM_REMAP)) {
		err = -EINVAL;
		if (find_vma_intersection(current->mm, addr, addr + size))
			goto invalid;
		/*
		 * If shm segment goes below stack, make sure there is some
		 * space left for the stack to grow (at least 4 pages).
		 */
		if (addr < current->mm->start_stack &&
		    addr > current->mm->start_stack - size - PAGE_SIZE * 5)
			goto invalid;
	}

	user_addr = do_mmap (file, addr, size, prot, flags, 0);
	*raddr = user_addr;
	err = 0;
	if (IS_ERR_VALUE(user_addr))
		err = (long)user_addr;
invalid:
	up_write(&current->mm->mmap_sem);

	fput(file);

out_nattch:
	down_write(&shm_ids(ns).rw_mutex);
	shp = shm_lock(ns, shmid);
	BUG_ON(IS_ERR(shp));
	shp->shm_nattch--;
	if(shp->shm_nattch == 0 &&
	   shp->shm_perm.mode & SHM_DEST)
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rw_mutex);

out:
	return err;

out_unlock:
	shm_unlock(shp);
	goto out;

out_free:
	kfree(sfd);
out_put_dentry:
	path_put(&path);
	goto out_nattch;
}

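/*
 * shmat(2) stub: do_shmat() reports the mapped address through "raddr";
 * this wrapper turns it into the syscall return value and uses
 * force_successful_syscall_return() so a large address is not mistaken
 * for an error code.
 */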
SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
{
	unsigned long ret;
	long err;

	err = do_shmat(shmid, shmaddr, shmflg, &ret);
	if (err)
		return err;
	force_successful_syscall_return();
	return (long)ret;
}

/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr = (unsigned long)shmaddr;
	int retval = -EINVAL;
#ifdef CONFIG_MMU
	loff_t size = 0;
	struct vm_area_struct *next;
#endif

	if (addr & ~PAGE_MASK)
		return retval;

	down_write(&mm->mmap_sem);

	/*
	 * This function tries to be smart and unmap shm segments that
	 * were modified by partial mlock or munmap calls:
	 * - It first determines the size of the shm segment that should be
	 *   unmapped: It searches for a vma that is backed by shm and that
	 *   started at address shmaddr. It records its size and then unmaps
	 *   it.
	 * - Then it unmaps all shm vmas that started at shmaddr and that
	 *   are within the initially determined size.
	 * Errors from do_munmap are ignored: the function only fails if
	 * it's called with invalid parameters or if it's called to unmap
	 * a part of a vma. Both calls in this function are for full vmas,
	 * the parameters are directly copied from the vma itself and always
	 * valid - therefore do_munmap cannot fail. (famous last words?)
	 */
	/*
	 * If it had been mremap()'d, the starting address would not
	 * match the usual checks anyway. So assume all vma's are
	 * above the starting address given.
	 */
	vma = find_vma(mm, addr);

#ifdef CONFIG_MMU
	while (vma) {
		next = vma->vm_next;

		/*
		 * Check if the starting address would match, i.e. it's
		 * a fragment created by mprotect() and/or munmap(), or it
		 * otherwise starts at this address with no hassles.
		 */
		if ((vma->vm_ops == &shm_vm_ops) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {


			size = vma->vm_file->f_path.dentry->d_inode->i_size;
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
			/*
			 * We discovered the size of the shm segment, so
			 * break out of here and fall through to the next
			 * loop that uses the size information to stop
			 * searching for matching vma's.
			 */
			retval = 0;
			vma = next;
			break;
		}
		vma = next;
	}

	/*
	 * We need look no further than the maximum address a fragment
	 * could possibly have landed at. Also cast things to loff_t to
	 * prevent overflows and make comparisons vs. equal-width types.
	 */
	size = PAGE_ALIGN(size);
	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
		next = vma->vm_next;

		/* finding a matching vma now does not alter retval */
		if ((vma->vm_ops == &shm_vm_ops) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff)

			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		vma = next;
	}

#else /* CONFIG_MMU */
	/* under NOMMU conditions, the exact address to be destroyed must be
	 * given */
	retval = -EINVAL;
	if (vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
		do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		retval = 0;
	}

#endif

	up_write(&mm->mmap_sem);
	return retval;
}

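/*
 * Emit one line of /proc/sysvipc/shm per segment.  The column layout must
 * stay in sync with the header string registered in shm_init().
 */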
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
	struct shmid_kernel *shp = it;
	unsigned long rss = 0, swp = 0;

	shm_add_rss_swap(shp, &rss, &swp);

#if BITS_PER_LONG <= 32
#define SIZE_SPEC "%10lu"
#else
#define SIZE_SPEC "%21lu"
#endif

	return seq_printf(s,
			  "%10d %10d %4o " SIZE_SPEC " %5u %5u "
			  "%5lu %5u %5u %5u %5u %10lu %10lu %10lu "
			  SIZE_SPEC " " SIZE_SPEC "\n",
			  shp->shm_perm.key,
			  shp->shm_perm.id,
			  shp->shm_perm.mode,
			  shp->shm_segsz,
			  shp->shm_cprid,
			  shp->shm_lprid,
			  shp->shm_nattch,
			  shp->shm_perm.uid,
			  shp->shm_perm.gid,
			  shp->shm_perm.cuid,
			  shp->shm_perm.cgid,
			  shp->shm_atim,
			  shp->shm_dtim,
			  shp->shm_ctim,
			  rss * PAGE_SIZE,
			  swp * PAGE_SIZE);
}
#endif