/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <[email protected]>
 * BIGMEM support, Andrea Arcangeli <[email protected]>
 * SMP thread shm, Jean-Luc Boyard <[email protected]>
 * HIGHMEM support, Ingo Molnar <[email protected]>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <[email protected]>
 * Shared /dev/zero support, Kanoj Sarcar <[email protected]>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <[email protected]>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <[email protected]>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <[email protected]>
 *
 * Better ipc lock (kern_ipc_perm.lock) handling
 * Davidlohr Bueso <[email protected]>, June 2013.
 */

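/*
 * Illustrative userspace view of the interface implemented here (a sketch,
 * not kernel code):
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	void *p = shmat(id, NULL, 0);
 *	... use p ...
 *	shmdt(p);
 *	shmctl(id, IPC_RMID, NULL);
 *
 * shmget() ends up in newseg(), shmat() in do_shmat(), shmdt() in
 * SYSCALL_DEFINE1(shmdt) and shmctl(IPC_RMID) in shmctl_down() below.
 */
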
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/mount.h>
#include <linux/ipc_namespace.h>

#include <asm/uaccess.h>

#include "util.h"

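/*
 * One shm_file_data is allocated per shmat() mapping and stored in
 * file->private_data of the per-attach file; it remembers the segment id,
 * the ipc namespace and the backing shmem/hugetlbfs file so that the
 * vm/file operations below can forward to the real backing file.
 */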
struct shm_file_data {
	int id;
	struct ipc_namespace *ns;
	struct file *file;
	const struct vm_operations_struct *vm_ops;
};

#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))

static const struct file_operations shm_file_operations;
static const struct vm_operations_struct shm_vm_ops;

#define shm_ids(ns)	((ns)->ids[IPC_SHM_IDS])

#define shm_unlock(shp)			\
	ipc_unlock(&(shp)->shm_perm)

static int newseg(struct ipc_namespace *, struct ipc_params *);
static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma);
static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif

void shm_init_ns(struct ipc_namespace *ns)
{
	ns->shm_ctlmax = SHMMAX;
	ns->shm_ctlall = SHMALL;
	ns->shm_ctlmni = SHMMNI;
	ns->shm_rmid_forced = 0;
	ns->shm_tot = 0;
	ipc_init_ids(&shm_ids(ns));
}

/*
 * Called with shm_ids.rwsem (writer) and the shp structure locked.
 * Only shm_ids.rwsem remains locked on exit.
 */
static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct shmid_kernel *shp;
	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	if (shp->shm_nattch){
		shp->shm_perm.mode |= SHM_DEST;
		/* Do not find it any more */
		shp->shm_perm.key = IPC_PRIVATE;
		shm_unlock(shp);
	} else
		shm_destroy(ns, shp);
}

#ifdef CONFIG_IPC_NS
void shm_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
	idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
}
#endif

static int __init ipc_ns_init(void)
{
	shm_init_ns(&init_ipc_ns);
	return 0;
}

pure_initcall(ipc_ns_init);

void __init shm_init (void)
{
	ipc_init_proc_interface("sysvipc/shm",
#if BITS_PER_LONG <= 32
				"       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime        rss       swap\n",
#else
				"       key      shmid perms                  size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime                   rss                  swap\n",
#endif
				IPC_SHM_IDS, sysvipc_shm_proc_show);
}

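/*
 * shm_obtain_object() and shm_obtain_object_check() look up a segment under
 * RCU only, without taking the ipc object lock; the _check variant also
 * verifies that the id has not been reused since the caller obtained it.
 */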
static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

/*
 * shm_lock_(check_) routines are called in the paths where the rwsem
 * is not necessarily held.
 */
static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct shmid_kernel *)ipcp;

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
{
	rcu_read_lock();
	ipc_lock_object(&ipcp->shm_perm);
}

static inline struct shmid_kernel *shm_lock_check(struct ipc_namespace *ns,
						int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock_check(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct shmid_kernel *)ipcp;

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
{
	ipc_rmid(&shm_ids(ns), &s->shm_perm);
}


/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;

	shp = shm_lock(sfd->ns, sfd->id);
	BUG_ON(IS_ERR(shp));
	shp->shm_atim = get_seconds();
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_nattch++;
	shm_unlock(shp);
}

/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @ns: namespace
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.rwsem (writer) locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
	shm_rmid(ns, shp);
	shm_unlock(shp);
	if (!is_file_hugepages(shp->shm_file))
		shmem_lock(shp->shm_file, 0, shp->mlock_user);
	else if (shp->mlock_user)
		user_shm_unlock(file_inode(shp->shm_file)->i_size,
				shp->mlock_user);
	fput (shp->shm_file);
	security_shm_free(shp);
	ipc_rcu_putref(shp);
}

/*
 * shm_may_destroy - identifies whether shm segment should be destroyed now
 *
 * Returns true if and only if there are no active users of the segment and
 * one of the following is true:
 *
 * 1) shmctl(id, IPC_RMID, NULL) was called for this shp
 *
 * 2) sysctl kernel.shm_rmid_forced is set to 1.
 */
static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	return (shp->shm_nattch == 0) &&
	       (ns->shm_rmid_forced ||
		(shp->shm_perm.mode & SHM_DEST));
}

/*
 * remove the attach descriptor vma.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
	struct file * file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;
	struct ipc_namespace *ns = sfd->ns;

	down_write(&shm_ids(ns).rwsem);
	/* remove from the list of attaches of the shm segment */
	shp = shm_lock(ns, sfd->id);
	BUG_ON(IS_ERR(shp));
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_dtim = get_seconds();
	shp->shm_nattch--;
	if (shm_may_destroy(ns, shp))
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rwsem);
}

/* Called with ns->shm_ids(ns).rwsem locked */
static int shm_try_destroy_current(int id, void *p, void *data)
{
	struct ipc_namespace *ns = data;
	struct kern_ipc_perm *ipcp = p;
	struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	if (shp->shm_creator != current)
		return 0;

	/*
	 * Mark it as orphaned to destroy the segment when
	 * kernel.shm_rmid_forced is changed.
	 * It is a noop if the following shm_may_destroy() returns true.
	 */
	shp->shm_creator = NULL;

	/*
	 * Don't even try to destroy it.  If shm_rmid_forced=0 and IPC_RMID
	 * is not set, it shouldn't be deleted here.
	 */
	if (!ns->shm_rmid_forced)
		return 0;

	if (shm_may_destroy(ns, shp)) {
		shm_lock_by_ptr(shp);
		shm_destroy(ns, shp);
	}
	return 0;
}

/* Called with ns->shm_ids(ns).rwsem locked */
static int shm_try_destroy_orphaned(int id, void *p, void *data)
{
	struct ipc_namespace *ns = data;
	struct kern_ipc_perm *ipcp = p;
	struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	/*
	 * We want to destroy segments without users and with an already
	 * exit'ed originating process.
	 *
	 * As shp->* are changed under rwsem, it's safe to skip shp locking.
	 */
	if (shp->shm_creator != NULL)
		return 0;

	if (shm_may_destroy(ns, shp)) {
		shm_lock_by_ptr(shp);
		shm_destroy(ns, shp);
	}
	return 0;
}

void shm_destroy_orphaned(struct ipc_namespace *ns)
{
	down_write(&shm_ids(ns).rwsem);
	if (shm_ids(ns).in_use)
		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
	up_write(&shm_ids(ns).rwsem);
}


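/*
 * Called on task exit: mark every segment this task created as orphaned
 * and, if kernel.shm_rmid_forced is set, destroy those that are no longer
 * attached anywhere.
 */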
void exit_shm(struct task_struct *task)
{
	struct ipc_namespace *ns = task->nsproxy->ipc_ns;

	if (shm_ids(ns).in_use == 0)
		return;

	/* Destroy all already created segments, but not mapped yet */
	down_write(&shm_ids(ns).rwsem);
	if (shm_ids(ns).in_use)
		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_current, ns);
	up_write(&shm_ids(ns).rwsem);
}

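/*
 * Fault handler for shm mappings: forward to the fault handler of the
 * underlying shmem or hugetlbfs file recorded at mmap time.
 */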
static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);

	return sfd->vm_ops->fault(vma, vmf);
}

#ifdef CONFIG_NUMA
static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	int err = 0;
	if (sfd->vm_ops->set_policy)
		err = sfd->vm_ops->set_policy(vma, new);
	return err;
}

static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
					unsigned long addr)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct mempolicy *pol = NULL;

	if (sfd->vm_ops->get_policy)
		pol = sfd->vm_ops->get_policy(vma, addr);
	else if (vma->vm_policy)
		pol = vma->vm_policy;

	return pol;
}
#endif

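/*
 * mmap the backing file, remember its vm_ops in the shm_file_data and then
 * install shm_vm_ops so that open/close/fault can be intercepted for
 * attach/detach accounting.
 */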
static int shm_mmap(struct file * file, struct vm_area_struct * vma)
{
	struct shm_file_data *sfd = shm_file_data(file);
	int ret;

	ret = sfd->file->f_op->mmap(sfd->file, vma);
	if (ret != 0)
		return ret;
	sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
	BUG_ON(!sfd->vm_ops->fault);
#endif
	vma->vm_ops = &shm_vm_ops;
	shm_open(vma);

	return ret;
}

static int shm_release(struct inode *ino, struct file *file)
{
	struct shm_file_data *sfd = shm_file_data(file);

	put_ipc_ns(sfd->ns);
	shm_file_data(file) = NULL;
	kfree(sfd);
	return 0;
}

static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct shm_file_data *sfd = shm_file_data(file);

	if (!sfd->file->f_op->fsync)
		return -EINVAL;
	return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
}

static long shm_fallocate(struct file *file, int mode, loff_t offset,
			  loff_t len)
{
	struct shm_file_data *sfd = shm_file_data(file);

	if (!sfd->file->f_op->fallocate)
		return -EOPNOTSUPP;
	return sfd->file->f_op->fallocate(file, mode, offset, len);
}

static unsigned long shm_get_unmapped_area(struct file *file,
	unsigned long addr, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	struct shm_file_data *sfd = shm_file_data(file);
	return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
						  pgoff, flags);
}

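/*
 * Two file_operations tables: hugetlb-backed segments always need the
 * get_unmapped_area callback of the backing file, ordinary shmem segments
 * only provide it on !CONFIG_MMU.
 */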
static const struct file_operations shm_file_operations = {
	.mmap = shm_mmap,
	.fsync = shm_fsync,
	.release = shm_release,
#ifndef CONFIG_MMU
	.get_unmapped_area = shm_get_unmapped_area,
#endif
	.llseek = noop_llseek,
	.fallocate = shm_fallocate,
};

static const struct file_operations shm_file_operations_huge = {
	.mmap = shm_mmap,
	.fsync = shm_fsync,
	.release = shm_release,
	.get_unmapped_area = shm_get_unmapped_area,
	.llseek = noop_llseek,
	.fallocate = shm_fallocate,
};

int is_file_shm_hugepages(struct file *file)
{
	return file->f_op == &shm_file_operations_huge;
}

static const struct vm_operations_struct shm_vm_ops = {
	.open = shm_open,	/* callback for a new vm-area open */
	.close = shm_close,	/* callback for when the vm-area is released */
	.fault = shm_fault,
#if defined(CONFIG_NUMA)
	.set_policy = shm_set_policy,
	.get_policy = shm_get_policy,
#endif
};

/**
 * newseg - Create a new shared memory segment
 * @ns: namespace
 * @params: ptr to the structure that contains key, size and shmflg
 *
 * Called with shm_ids.rwsem held as a writer.
 */

static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
{
	key_t key = params->key;
	int shmflg = params->flg;
	size_t size = params->u.size;
	int error;
	struct shmid_kernel *shp;
	size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct file * file;
	char name[13];
	int id;
	vm_flags_t acctflag = 0;

	if (size < SHMMIN || size > ns->shm_ctlmax)
		return -EINVAL;

	if (ns->shm_tot + numpages > ns->shm_ctlall)
		return -ENOSPC;

	shp = ipc_rcu_alloc(sizeof(*shp));
	if (!shp)
		return -ENOMEM;

	shp->shm_perm.key = key;
	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
	shp->mlock_user = NULL;

	shp->shm_perm.security = NULL;
	error = security_shm_alloc(shp);
	if (error) {
		ipc_rcu_putref(shp);
		return error;
	}

	sprintf (name, "SYSV%08x", key);
	if (shmflg & SHM_HUGETLB) {
		struct hstate *hs;
		size_t hugesize;

		hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
		if (!hs) {
			error = -EINVAL;
			goto no_file;
		}
		hugesize = ALIGN(size, huge_page_size(hs));

		/* hugetlb_file_setup applies strict accounting */
		if (shmflg & SHM_NORESERVE)
			acctflag = VM_NORESERVE;
		file = hugetlb_file_setup(name, hugesize, acctflag,
				&shp->mlock_user, HUGETLB_SHMFS_INODE,
				(shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
	} else {
		/*
		 * Do not allow no accounting for OVERCOMMIT_NEVER, even
		 * if it's asked for.
		 */
		if ((shmflg & SHM_NORESERVE) &&
				sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			acctflag = VM_NORESERVE;
		file = shmem_file_setup(name, size, acctflag);
	}
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto no_file;

	id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
	if (id < 0) {
		error = id;
		goto no_id;
	}

	shp->shm_cprid = task_tgid_vnr(current);
	shp->shm_lprid = 0;
	shp->shm_atim = shp->shm_dtim = 0;
	shp->shm_ctim = get_seconds();
	shp->shm_segsz = size;
	shp->shm_nattch = 0;
	shp->shm_file = file;
	shp->shm_creator = current;

	/*
	 * shmid gets reported as "inode#" in /proc/pid/maps.
	 * proc-ps tools use this. Changing this will break them.
	 */
	file_inode(file)->i_ino = shp->shm_perm.id;

	ns->shm_tot += numpages;
	error = shp->shm_perm.id;

	ipc_unlock_object(&shp->shm_perm);
	rcu_read_unlock();
	return error;

no_id:
	if (is_file_hugepages(file) && shp->mlock_user)
		user_shm_unlock(size, shp->mlock_user);
	fput(file);
no_file:
	security_shm_free(shp);
	ipc_rcu_putref(shp);
	return error;
}

/*
 * Called with shm_ids.rwsem and ipcp locked.
 */
static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	return security_shm_associate(shp, shmflg);
}

/*
 * Called with shm_ids.rwsem and ipcp locked.
 */
static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
				struct ipc_params *params)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	if (shp->shm_segsz < params->u.size)
		return -EINVAL;

	return 0;
}

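/*
 * shmget(2): the generic ipcget() helper does the namespace and key
 * handling and calls back into newseg(), shm_security() and
 * shm_more_checks().
 */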
SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
{
	struct ipc_namespace *ns;
	struct ipc_ops shm_ops;
	struct ipc_params shm_params;

	ns = current->nsproxy->ipc_ns;

	shm_ops.getnew = newseg;
	shm_ops.associate = shm_security;
	shm_ops.more_checks = shm_more_checks;

	shm_params.key = key;
	shm_params.flg = shmflg;
	shm_params.u.size = size;

	return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
}

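/*
 * Copy helpers between the kernel's shmid64_ds/shminfo64 and the user ABI;
 * IPC_64 structures are copied directly, IPC_OLD is converted to/from the
 * legacy shmid_ds and shminfo layouts.
 */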
static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
	switch(version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	{
		struct shmid_ds out;

		memset(&out, 0, sizeof(out));
		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
		out.shm_segsz = in->shm_segsz;
		out.shm_atime = in->shm_atime;
		out.shm_dtime = in->shm_dtime;
		out.shm_ctime = in->shm_ctime;
		out.shm_cpid = in->shm_cpid;
		out.shm_lpid = in->shm_lpid;
		out.shm_nattch = in->shm_nattch;

		return copy_to_user(buf, &out, sizeof(out));
	}
	default:
		return -EINVAL;
	}
}

static inline unsigned long
copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
{
	switch(version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	{
		struct shmid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->shm_perm.uid = tbuf_old.shm_perm.uid;
		out->shm_perm.gid = tbuf_old.shm_perm.gid;
		out->shm_perm.mode = tbuf_old.shm_perm.mode;

		return 0;
	}
	default:
		return -EINVAL;
	}
}

static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
	switch(version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	{
		struct shminfo out;

		if(in->shmmax > INT_MAX)
			out.shmmax = INT_MAX;
		else
			out.shmmax = (int)in->shmmax;

		out.shmmin = in->shmmin;
		out.shmmni = in->shmmni;
		out.shmseg = in->shmseg;
		out.shmall = in->shmall;

		return copy_to_user(buf, &out, sizeof(out));
	}
	default:
		return -EINVAL;
	}
}

/*
 * Calculate and add used RSS and swap pages of a shm.
 * Called with shm_ids.rwsem held as a reader
 */
static void shm_add_rss_swap(struct shmid_kernel *shp,
	unsigned long *rss_add, unsigned long *swp_add)
{
	struct inode *inode;

	inode = file_inode(shp->shm_file);

	if (is_file_hugepages(shp->shm_file)) {
		struct address_space *mapping = inode->i_mapping;
		struct hstate *h = hstate_file(shp->shm_file);
		*rss_add += pages_per_huge_page(h) * mapping->nrpages;
	} else {
#ifdef CONFIG_SHMEM
		struct shmem_inode_info *info = SHMEM_I(inode);
		spin_lock(&info->lock);
		*rss_add += inode->i_mapping->nrpages;
		*swp_add += info->swapped;
		spin_unlock(&info->lock);
#else
		*rss_add += inode->i_mapping->nrpages;
#endif
	}
}

/*
 * Called with shm_ids.rwsem held as a reader
 */
static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
		unsigned long *swp)
{
	int next_id;
	int total, in_use;

	*rss = 0;
	*swp = 0;

	in_use = shm_ids(ns).in_use;

	for (total = 0, next_id = 0; total < in_use; next_id++) {
		struct kern_ipc_perm *ipc;
		struct shmid_kernel *shp;

		ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
		if (ipc == NULL)
			continue;
		shp = container_of(ipc, struct shmid_kernel, shm_perm);

		shm_add_rss_swap(shp, rss, swp);

		total++;
	}
}

/*
 * This function handles some shmctl commands which require the rwsem
 * to be held in write mode.
 * NOTE: no locks must be held, the rwsem is taken inside this function.
 */
static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
		       struct shmid_ds __user *buf, int version)
{
	struct kern_ipc_perm *ipcp;
	struct shmid64_ds shmid64;
	struct shmid_kernel *shp;
	int err;

	if (cmd == IPC_SET) {
		if (copy_shmid_from_user(&shmid64, buf, version))
			return -EFAULT;
	}

	down_write(&shm_ids(ns).rwsem);
	rcu_read_lock();

	ipcp = ipcctl_pre_down_nolock(ns, &shm_ids(ns), shmid, cmd,
				      &shmid64.shm_perm, 0);
	if (IS_ERR(ipcp)) {
		err = PTR_ERR(ipcp);
		goto out_unlock1;
	}

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	err = security_shm_shmctl(shp, cmd);
	if (err)
		goto out_unlock1;

	switch (cmd) {
	case IPC_RMID:
		ipc_lock_object(&shp->shm_perm);
		/* do_shm_rmid unlocks the ipc object and rcu */
		do_shm_rmid(ns, ipcp);
		goto out_up;
	case IPC_SET:
		ipc_lock_object(&shp->shm_perm);
		err = ipc_update_perm(&shmid64.shm_perm, ipcp);
		if (err)
			goto out_unlock0;
		shp->shm_ctim = get_seconds();
		break;
	default:
		err = -EINVAL;
		goto out_unlock1;
	}

out_unlock0:
	ipc_unlock_object(&shp->shm_perm);
out_unlock1:
	rcu_read_unlock();
out_up:
	up_write(&shm_ids(ns).rwsem);
	return err;
}

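/*
 * shmctl commands that only read state (IPC_INFO, SHM_INFO, IPC_STAT,
 * SHM_STAT); they run under RCU or a read-locked rwsem, never the writer
 * lock.
 */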
static int shmctl_nolock(struct ipc_namespace *ns, int shmid,
			 int cmd, int version, void __user *buf)
{
	int err;
	struct shmid_kernel *shp;

	/* preliminary security checks for *_INFO */
	if (cmd == IPC_INFO || cmd == SHM_INFO) {
		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;
	}

	switch (cmd) {
	case IPC_INFO:
	{
		struct shminfo64 shminfo;

		memset(&shminfo, 0, sizeof(shminfo));
		shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
		shminfo.shmmax = ns->shm_ctlmax;
		shminfo.shmall = ns->shm_ctlall;

		shminfo.shmmin = SHMMIN;
		if(copy_shminfo_to_user (buf, &shminfo, version))
			return -EFAULT;

		down_read(&shm_ids(ns).rwsem);
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rwsem);

		if(err<0)
			err = 0;
		goto out;
	}
	case SHM_INFO:
	{
		struct shm_info shm_info;

		memset(&shm_info, 0, sizeof(shm_info));
		down_read(&shm_ids(ns).rwsem);
		shm_info.used_ids = shm_ids(ns).in_use;
		shm_get_stat (ns, &shm_info.shm_rss, &shm_info.shm_swp);
		shm_info.shm_tot = ns->shm_tot;
		shm_info.swap_attempts = 0;
		shm_info.swap_successes = 0;
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rwsem);
		if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
			err = -EFAULT;
			goto out;
		}

		err = err < 0 ? 0 : err;
		goto out;
	}
	case SHM_STAT:
	case IPC_STAT:
	{
		struct shmid64_ds tbuf;
		int result;

		rcu_read_lock();
		if (cmd == SHM_STAT) {
			shp = shm_obtain_object(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out_unlock;
			}
			result = shp->shm_perm.id;
		} else {
			shp = shm_obtain_object_check(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out_unlock;
			}
			result = 0;
		}

		err = -EACCES;
		if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
			goto out_unlock;

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;

		memset(&tbuf, 0, sizeof(tbuf));
		kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
		tbuf.shm_segsz = shp->shm_segsz;
		tbuf.shm_atime = shp->shm_atim;
		tbuf.shm_dtime = shp->shm_dtim;
		tbuf.shm_ctime = shp->shm_ctim;
		tbuf.shm_cpid = shp->shm_cprid;
		tbuf.shm_lpid = shp->shm_lprid;
		tbuf.shm_nattch = shp->shm_nattch;
		rcu_read_unlock();

		if (copy_shmid_to_user(buf, &tbuf, version))
			err = -EFAULT;
		else
			err = result;
		goto out;
	}
	default:
		return -EINVAL;
	}

out_unlock:
	rcu_read_unlock();
out:
	return err;
}

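/*
 * shmctl(2) entry point: read-only commands are dispatched to
 * shmctl_nolock(), IPC_RMID/IPC_SET to shmctl_down(), and
 * SHM_LOCK/SHM_UNLOCK are handled inline below.
 */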
SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
{
	struct shmid_kernel *shp;
	int err, version;
	struct ipc_namespace *ns;

	if (cmd < 0 || shmid < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) {
	case IPC_INFO:
	case SHM_INFO:
	case SHM_STAT:
	case IPC_STAT:
		return shmctl_nolock(ns, shmid, cmd, version, buf);
	case IPC_RMID:
	case IPC_SET:
		return shmctl_down(ns, shmid, cmd, buf, version);
	case SHM_LOCK:
	case SHM_UNLOCK:
	{
		struct file *shm_file;

		rcu_read_lock();
		shp = shm_obtain_object_check(ns, shmid);
		if (IS_ERR(shp)) {
			err = PTR_ERR(shp);
			goto out_unlock1;
		}

		audit_ipc_obj(&(shp->shm_perm));
		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock1;

		ipc_lock_object(&shp->shm_perm);
		if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
			kuid_t euid = current_euid();
			err = -EPERM;
			if (!uid_eq(euid, shp->shm_perm.uid) &&
			    !uid_eq(euid, shp->shm_perm.cuid))
				goto out_unlock0;
			if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK))
				goto out_unlock0;
		}

		shm_file = shp->shm_file;
		if (is_file_hugepages(shm_file))
			goto out_unlock0;

		if (cmd == SHM_LOCK) {
			struct user_struct *user = current_user();
			err = shmem_lock(shm_file, 1, user);
			if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
				shp->shm_perm.mode |= SHM_LOCKED;
				shp->mlock_user = user;
			}
			goto out_unlock0;
		}

		/* SHM_UNLOCK */
		if (!(shp->shm_perm.mode & SHM_LOCKED))
			goto out_unlock0;
		shmem_lock(shm_file, 0, shp->mlock_user);
		shp->shm_perm.mode &= ~SHM_LOCKED;
		shp->mlock_user = NULL;
		get_file(shm_file);
		ipc_unlock_object(&shp->shm_perm);
		rcu_read_unlock();
		shmem_unlock_mapping(shm_file->f_mapping);

		fput(shm_file);
		return err;
	}
	default:
		return -EINVAL;
	}

out_unlock0:
	ipc_unlock_object(&shp->shm_perm);
out_unlock1:
	rcu_read_unlock();
	return err;
}

/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
	      unsigned long shmlba)
{
	struct shmid_kernel *shp;
	unsigned long addr;
	unsigned long size;
	struct file * file;
	int err;
	unsigned long flags;
	unsigned long prot;
	int acc_mode;
	struct ipc_namespace *ns;
	struct shm_file_data *sfd;
	struct path path;
	fmode_t f_mode;
	unsigned long populate = 0;

	err = -EINVAL;
	if (shmid < 0)
		goto out;
	else if ((addr = (ulong)shmaddr)) {
		if (addr & (shmlba - 1)) {
			if (shmflg & SHM_RND)
				addr &= ~(shmlba - 1);	   /* round down */
			else
#ifndef __ARCH_FORCE_SHMLBA
				if (addr & ~PAGE_MASK)
#endif
					goto out;
		}
		flags = MAP_SHARED | MAP_FIXED;
	} else {
		if ((shmflg & SHM_REMAP))
			goto out;

		flags = MAP_SHARED;
	}

	if (shmflg & SHM_RDONLY) {
		prot = PROT_READ;
		acc_mode = S_IRUGO;
		f_mode = FMODE_READ;
	} else {
		prot = PROT_READ | PROT_WRITE;
		acc_mode = S_IRUGO | S_IWUGO;
		f_mode = FMODE_READ | FMODE_WRITE;
	}
	if (shmflg & SHM_EXEC) {
		prot |= PROT_EXEC;
		acc_mode |= S_IXUGO;
	}

	/*
	 * We cannot rely on the fs check since SYSV IPC does have an
	 * additional creator id...
	 */
	ns = current->nsproxy->ipc_ns;
	rcu_read_lock();
	shp = shm_obtain_object_check(ns, shmid);
	if (IS_ERR(shp)) {
		err = PTR_ERR(shp);
		goto out_unlock;
	}

	err = -EACCES;
	if (ipcperms(ns, &shp->shm_perm, acc_mode))
		goto out_unlock;

	err = security_shm_shmat(shp, shmaddr, shmflg);
	if (err)
		goto out_unlock;

	ipc_lock_object(&shp->shm_perm);
	path = shp->shm_file->f_path;
	path_get(&path);
	shp->shm_nattch++;
	size = i_size_read(path.dentry->d_inode);
	ipc_unlock_object(&shp->shm_perm);
	rcu_read_unlock();

	err = -ENOMEM;
	sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
	if (!sfd) {
		path_put(&path);
		goto out_nattch;
	}

	file = alloc_file(&path, f_mode,
			  is_file_hugepages(shp->shm_file) ?
				&shm_file_operations_huge :
				&shm_file_operations);
	err = PTR_ERR(file);
	if (IS_ERR(file)) {
		kfree(sfd);
		path_put(&path);
		goto out_nattch;
	}

	file->private_data = sfd;
	file->f_mapping = shp->shm_file->f_mapping;
	sfd->id = shp->shm_perm.id;
	sfd->ns = get_ipc_ns(ns);
	sfd->file = shp->shm_file;
	sfd->vm_ops = NULL;

	err = security_mmap_file(file, prot, flags);
	if (err)
		goto out_fput;

	down_write(&current->mm->mmap_sem);
	if (addr && !(shmflg & SHM_REMAP)) {
		err = -EINVAL;
		if (find_vma_intersection(current->mm, addr, addr + size))
			goto invalid;
		/*
		 * If shm segment goes below stack, make sure there is some
		 * space left for the stack to grow (at least 4 pages).
		 */
		if (addr < current->mm->start_stack &&
		    addr > current->mm->start_stack - size - PAGE_SIZE * 5)
			goto invalid;
	}

	addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate);
	*raddr = addr;
	err = 0;
	if (IS_ERR_VALUE(addr))
		err = (long)addr;
invalid:
	up_write(&current->mm->mmap_sem);
	if (populate)
		mm_populate(addr, populate);

out_fput:
	fput(file);

out_nattch:
	down_write(&shm_ids(ns).rwsem);
	shp = shm_lock(ns, shmid);
	BUG_ON(IS_ERR(shp));
	shp->shm_nattch--;
	if (shm_may_destroy(ns, shp))
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rwsem);
	return err;

out_unlock:
	rcu_read_unlock();
out:
	return err;
}

SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
{
	unsigned long ret;
	long err;

	err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
	if (err)
		return err;
	force_successful_syscall_return();
	return (long)ret;
}

/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr = (unsigned long)shmaddr;
	int retval = -EINVAL;
#ifdef CONFIG_MMU
	loff_t size = 0;
	struct vm_area_struct *next;
#endif

	if (addr & ~PAGE_MASK)
		return retval;

	down_write(&mm->mmap_sem);

	/*
	 * This function tries to be smart and unmap shm segments that
	 * were modified by partial mlock or munmap calls:
	 * - It first determines the size of the shm segment that should be
	 *   unmapped: It searches for a vma that is backed by shm and that
	 *   started at address shmaddr. It records its size and then unmaps
	 *   it.
	 * - Then it unmaps all shm vmas that started at shmaddr and that
	 *   are within the initially determined size.
	 * Errors from do_munmap are ignored: the function only fails if
	 * it's called with invalid parameters or if it's called to unmap
	 * a part of a vma. Both calls in this function are for full vmas,
	 * the parameters are directly copied from the vma itself and always
	 * valid - therefore do_munmap cannot fail. (famous last words?)
	 */
	/*
	 * If it had been mremap()'d, the starting address would not
	 * match the usual checks anyway. So assume all vma's are
	 * above the starting address given.
	 */
	vma = find_vma(mm, addr);

#ifdef CONFIG_MMU
	while (vma) {
		next = vma->vm_next;

		/*
		 * Check if the starting address would match, i.e. it's
		 * a fragment created by mprotect() and/or munmap(), or it
		 * otherwise starts at this address with no hassles.
		 */
		if ((vma->vm_ops == &shm_vm_ops) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {


			size = file_inode(vma->vm_file)->i_size;
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
			/*
			 * We discovered the size of the shm segment, so
			 * break out of here and fall through to the next
			 * loop that uses the size information to stop
			 * searching for matching vma's.
			 */
			retval = 0;
			vma = next;
			break;
		}
		vma = next;
	}

	/*
	 * We need look no further than the maximum address a fragment
	 * could possibly have landed at. Also cast things to loff_t to
	 * prevent overflows and make comparisons vs. equal-width types.
	 */
	size = PAGE_ALIGN(size);
	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
		next = vma->vm_next;

		/* finding a matching vma now does not alter retval */
		if ((vma->vm_ops == &shm_vm_ops) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff)

			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		vma = next;
	}

#else /* CONFIG_MMU */
	/* under NOMMU conditions, the exact address to be destroyed must be
	 * given */
	retval = -EINVAL;
	if (vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
		do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		retval = 0;
	}

#endif

	up_write(&mm->mmap_sem);
	return retval;
}

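/*
 * Formats one line of /proc/sysvipc/shm; the columns match the header
 * string registered in shm_init().
 */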
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
	struct user_namespace *user_ns = seq_user_ns(s);
	struct shmid_kernel *shp = it;
	unsigned long rss = 0, swp = 0;

	shm_add_rss_swap(shp, &rss, &swp);

#if BITS_PER_LONG <= 32
#define SIZE_SPEC "%10lu"
#else
#define SIZE_SPEC "%21lu"
#endif

	return seq_printf(s,
			  "%10d %10d %4o " SIZE_SPEC " %5u %5u "
			  "%5lu %5u %5u %5u %5u %10lu %10lu %10lu "
			  SIZE_SPEC " " SIZE_SPEC "\n",
			  shp->shm_perm.key,
			  shp->shm_perm.id,
			  shp->shm_perm.mode,
			  shp->shm_segsz,
			  shp->shm_cprid,
			  shp->shm_lprid,
			  shp->shm_nattch,
			  from_kuid_munged(user_ns, shp->shm_perm.uid),
			  from_kgid_munged(user_ns, shp->shm_perm.gid),
			  from_kuid_munged(user_ns, shp->shm_perm.cuid),
			  from_kgid_munged(user_ns, shp->shm_perm.cgid),
			  shp->shm_atim,
			  shp->shm_dtim,
			  shp->shm_ctim,
			  rss * PAGE_SIZE,
			  swp * PAGE_SIZE);
}
#endif