/*
 * linux/ipc/util.c
 * Copyright (C) 1992 Krishna Balasubramanian
 *
 * Sep 1997 - Call suser() last after "normal" permission checks so we
 *            get BSD style process accounting right.
 *            Occurs in several places in the IPC code.
 *            Chris Evans, <[email protected]>
 * Nov 1999 - ipc helper functions, unified SMP locking
 *            Manfred Spraul <[email protected]>
 * Oct 2002 - One lock per IPC id. RCU ipc_free for lock-free grow_ary().
 *            Mingming Cao <[email protected]>
 * Mar 2006 - support for audit of ipc object properties
 *            Dustin Kirkland <[email protected]>
 * Jun 2006 - namespaces support
 *            OpenVZ, SWsoft Inc.
 *            Pavel Emelianov <[email protected]>
 */

#include <linux/mm.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/msg.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/highuid.h>
#include <linux/security.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/audit.h>
#include <linux/nsproxy.h>
#include <linux/rwsem.h>
#include <linux/memory.h>
#include <linux/ipc_namespace.h>

#include <asm/unistd.h>

#include "util.h"

struct ipc_proc_iface {
        const char *path;
        const char *header;
        int ids;
        int (*show)(struct seq_file *, void *);
};

struct ipc_namespace init_ipc_ns = {
        .kref = {
                .refcount = ATOMIC_INIT(2),
        },
};

atomic_t nr_ipc_ns = ATOMIC_INIT(1);


#ifdef CONFIG_MEMORY_HOTPLUG

static void ipc_memory_notifier(struct work_struct *work)
{
        ipcns_notify(IPCNS_MEMCHANGED);
}

static DECLARE_WORK(ipc_memory_wq, ipc_memory_notifier);


static int ipc_memory_callback(struct notifier_block *self,
                                unsigned long action, void *arg)
{
        switch (action) {
        case MEM_ONLINE:    /* memory successfully brought online */
        case MEM_OFFLINE:   /* or offline: it's time to recompute msgmni */
                /*
                 * This is done by invoking the ipcns notifier chain with the
                 * IPCNS_MEMCHANGED event.
                 * In order not to keep the lock on the hotplug memory chain
                 * for too long, queue a work item that will, when woken up,
                 * activate the ipcns notification chain.
                 * No need to keep several ipc work items on the queue.
                 */
                if (!work_pending(&ipc_memory_wq))
                        schedule_work(&ipc_memory_wq);
                break;
        case MEM_GOING_ONLINE:
        case MEM_GOING_OFFLINE:
        case MEM_CANCEL_ONLINE:
        case MEM_CANCEL_OFFLINE:
        default:
                break;
        }

        return NOTIFY_OK;
}

#endif /* CONFIG_MEMORY_HOTPLUG */

/**
 * ipc_init - initialise IPC subsystem
 *
 * The various System V IPC resources (semaphores, messages and shared
 * memory) are initialised.
 * A callback routine is registered into the memory hotplug notifier
 * chain: since msgmni scales to lowmem this callback routine will be
 * called upon successful memory add / remove to recompute msgmni.
 */

static int __init ipc_init(void)
{
        sem_init();
        msg_init();
        shm_init();
        hotplug_memory_notifier(ipc_memory_callback, IPC_CALLBACK_PRI);
        register_ipcns_notifier(&init_ipc_ns);
        return 0;
}
__initcall(ipc_init);

/**
 * ipc_init_ids - initialise IPC identifiers
 * @ids: Identifier set
 *
 * Set up the sequence range to use for the ipc identifier range (limited
 * below IPCMNI) then initialise the ids idr.
 */

void ipc_init_ids(struct ipc_ids *ids)
{
        init_rwsem(&ids->rw_mutex);

        ids->in_use = 0;
        ids->seq = 0;
        {
                int seq_limit = INT_MAX/SEQ_MULTIPLIER;
                if (seq_limit > USHRT_MAX)
                        ids->seq_max = USHRT_MAX;
                else
                        ids->seq_max = seq_limit;
        }

        idr_init(&ids->ipcs_idr);
}

#ifdef CONFIG_PROC_FS
static const struct file_operations sysvipc_proc_fops;
/**
 * ipc_init_proc_interface - Create a proc interface for sysvipc types using a seq_file interface.
 * @path: Path in procfs
 * @header: Banner to be printed at the beginning of the file.
 * @ids: ipc id table to iterate.
 * @show: show routine.
 */
void __init ipc_init_proc_interface(const char *path, const char *header,
                int ids, int (*show)(struct seq_file *, void *))
{
        struct proc_dir_entry *pde;
        struct ipc_proc_iface *iface;

        iface = kmalloc(sizeof(*iface), GFP_KERNEL);
        if (!iface)
                return;
        iface->path   = path;
        iface->header = header;
        iface->ids    = ids;
        iface->show   = show;

        pde = create_proc_entry(path,
                                S_IRUGO,        /* world readable */
                                NULL            /* parent dir */);
        if (pde) {
                pde->data = iface;
                pde->proc_fops = &sysvipc_proc_fops;
        } else {
                kfree(iface);
        }
}
#endif

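/*
 * Usage sketch (illustrative, not part of this file): each IPC type is
 * expected to register its /proc/sysvipc/<type> file through
 * ipc_init_proc_interface().  The header string and show routine below
 * are placeholders; the real ones live in the per-type code (e.g. ipc/msg.c).
 *
 *      ipc_init_proc_interface("sysvipc/msg",
 *                              "       key      msqid ...\n",
 *                              IPC_MSG_IDS,
 *                              sysvipc_msg_proc_show);
 */
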
/**
 * ipc_findkey - find a key in an ipc identifier set
 * @ids: Identifier set
 * @key: The key to find
 *
 * Requires ipc_ids.rw_mutex locked.
 * Returns the LOCKED pointer to the ipc structure if found or NULL
 * if not.
 * If key is found ipc points to the owning ipc structure
 */

static struct kern_ipc_perm *ipc_findkey(struct ipc_ids *ids, key_t key)
{
        struct kern_ipc_perm *ipc;
        int next_id;
        int total;

        for (total = 0, next_id = 0; total < ids->in_use; next_id++) {
                ipc = idr_find(&ids->ipcs_idr, next_id);

                if (ipc == NULL)
                        continue;

                if (ipc->key != key) {
                        total++;
                        continue;
                }

                ipc_lock_by_ptr(ipc);
                return ipc;
        }

        return NULL;
}

/**
 * ipc_get_maxid - get the last assigned id
 * @ids: IPC identifier set
 *
 * Called with ipc_ids.rw_mutex held.
 */

int ipc_get_maxid(struct ipc_ids *ids)
{
        struct kern_ipc_perm *ipc;
        int max_id = -1;
        int total, id;

        if (ids->in_use == 0)
                return -1;

        if (ids->in_use == IPCMNI)
                return IPCMNI - 1;

        /* Look for the last assigned id */
        total = 0;
        for (id = 0; id < IPCMNI && total < ids->in_use; id++) {
                ipc = idr_find(&ids->ipcs_idr, id);
                if (ipc != NULL) {
                        max_id = id;
                        total++;
                }
        }
        return max_id;
}

/**
 * ipc_addid - add an IPC identifier
 * @ids: IPC identifier set
 * @new: new IPC permission set
 * @size: limit for the number of used ids
 *
 * Add an entry 'new' to the IPC ids idr. The permissions object is
 * initialised, the first free entry is set up and the assigned id is
 * returned. The 'new' entry is returned in a locked state on success.
 * On failure the entry is not locked and a negative err-code is returned.
 *
 * Called with ipc_ids.rw_mutex held as a writer.
 */

int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int size)
{
        int id, err;

        if (size > IPCMNI)
                size = IPCMNI;

        if (ids->in_use >= size)
                return -ENOSPC;

        err = idr_get_new(&ids->ipcs_idr, new, &id);
        if (err)
                return err;

        ids->in_use++;

        new->cuid = new->uid = current->euid;
        new->gid = new->cgid = current->egid;

        new->seq = ids->seq++;
        if (ids->seq > ids->seq_max)
                ids->seq = 0;

        new->id = ipc_buildid(id, new->seq);
        spin_lock_init(&new->lock);
        new->deleted = 0;
        rcu_read_lock();
        spin_lock(&new->lock);
        return id;
}

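/*
 * Worked example of the id layout produced above (assuming the usual
 * SEQ_MULTIPLIER == IPCMNI == 32768 and the ipc_buildid()/ipcid_to_idx()
 * helpers from util.h):
 *
 *      idr slot = 3, seq = 2
 *      user-visible id = ipc_buildid(3, 2) = 2 * 32768 + 3 = 65539
 *      ipcid_to_idx(65539) = 65539 % 32768 = 3   (back to the idr slot)
 *
 * Recycling a slot bumps ids->seq, so a stale id naming the old object no
 * longer matches (see the ipc_checkid() users further down).
 */
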
/**
 * ipcget_new - create a new ipc object
 * @ns: namespace
 * @ids: IPC identifier set
 * @ops: the actual creation routine to call
 * @params: its parameters
 *
 * This routine is called by sys_msgget(), sys_semget() and sys_shmget()
 * when the key is IPC_PRIVATE.
 */
static int ipcget_new(struct ipc_namespace *ns, struct ipc_ids *ids,
                struct ipc_ops *ops, struct ipc_params *params)
{
        int err;
retry:
        err = idr_pre_get(&ids->ipcs_idr, GFP_KERNEL);

        if (!err)
                return -ENOMEM;

        down_write(&ids->rw_mutex);
        err = ops->getnew(ns, params);
        up_write(&ids->rw_mutex);

        if (err == -EAGAIN)
                goto retry;

        return err;
}

/**
 * ipc_check_perms - check security and permissions for an IPC
 * @ipcp: ipc permission set
 * @ops: the actual security routine to call
 * @params: its parameters
 *
 * This routine is called by sys_msgget(), sys_semget() and sys_shmget()
 * when the key is not IPC_PRIVATE and that key already exists in the
 * ids IDR.
 *
 * On success, the IPC id is returned.
 *
 * It is called with ipc_ids.rw_mutex and ipcp->lock held.
 */
static int ipc_check_perms(struct kern_ipc_perm *ipcp, struct ipc_ops *ops,
                        struct ipc_params *params)
{
        int err;

        if (ipcperms(ipcp, params->flg))
                err = -EACCES;
        else {
                err = ops->associate(ipcp, params->flg);
                if (!err)
                        err = ipcp->id;
        }

        return err;
}

/**
 * ipcget_public - get an ipc object or create a new one
 * @ns: namespace
 * @ids: IPC identifier set
 * @ops: the actual creation routine to call
 * @params: its parameters
 *
 * This routine is called by sys_msgget(), sys_semget() and sys_shmget()
 * when the key is not IPC_PRIVATE.
 * It adds a new entry if the key is not found and does some permission
 * / security checks if the key is found.
 *
 * On success, the ipc id is returned.
 */
static int ipcget_public(struct ipc_namespace *ns, struct ipc_ids *ids,
                struct ipc_ops *ops, struct ipc_params *params)
{
        struct kern_ipc_perm *ipcp;
        int flg = params->flg;
        int err;
retry:
        err = idr_pre_get(&ids->ipcs_idr, GFP_KERNEL);

        /*
         * Take the lock as a writer since we are potentially going to add
         * a new entry + read locks are not "upgradable"
         */
        down_write(&ids->rw_mutex);
        ipcp = ipc_findkey(ids, params->key);
        if (ipcp == NULL) {
                /* key not used */
                if (!(flg & IPC_CREAT))
                        err = -ENOENT;
                else if (!err)
                        err = -ENOMEM;
                else
                        err = ops->getnew(ns, params);
        } else {
                /* ipc object has been locked by ipc_findkey() */

                if (flg & IPC_CREAT && flg & IPC_EXCL)
                        err = -EEXIST;
                else {
                        err = 0;
                        if (ops->more_checks)
                                err = ops->more_checks(ipcp, params);
                        if (!err)
                                /*
                                 * ipc_check_perms returns the IPC id on
                                 * success
                                 */
                                err = ipc_check_perms(ipcp, ops, params);
                }
                ipc_unlock(ipcp);
        }
        up_write(&ids->rw_mutex);

        if (err == -EAGAIN)
                goto retry;

        return err;
}


/**
 * ipc_rmid - remove an IPC identifier
 * @ids: IPC identifier set
 * @ipcp: ipc perm structure containing the identifier to remove
 *
 * ipc_ids.rw_mutex (as a writer) and the spinlock for this ID are held
 * before this function is called, and remain locked on the exit.
 */

void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
{
        int lid = ipcid_to_idx(ipcp->id);

        idr_remove(&ids->ipcs_idr, lid);

        ids->in_use--;

        ipcp->deleted = 1;

        return;
}

/**
 * ipc_alloc - allocate ipc space
 * @size: size desired
 *
 * Allocate memory from the appropriate pools and return a pointer to it.
 * NULL is returned if the allocation fails
 */

void *ipc_alloc(int size)
{
        void *out;
        if (size > PAGE_SIZE)
                out = vmalloc(size);
        else
                out = kmalloc(size, GFP_KERNEL);
        return out;
}

/**
 * ipc_free - free ipc space
 * @ptr: pointer returned by ipc_alloc
 * @size: size of block
 *
 * Free a block created with ipc_alloc(). The caller must know the size
 * used in the allocation call.
 */

void ipc_free(void *ptr, int size)
{
        if (size > PAGE_SIZE)
                vfree(ptr);
        else
                kfree(ptr);
}

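/*
 * Usage sketch: ipc_free() picks vfree() vs. kfree() purely from the size
 * argument, so a caller must free with the same size it allocated with
 * (variable names here are only placeholders):
 *
 *      void *buf = ipc_alloc(len);
 *      ...
 *      ipc_free(buf, len);             - not plain kfree(buf)
 */
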
/*
 * rcu allocations:
 * There are three headers that are prepended to the actual allocation:
 * - during use: ipc_rcu_hdr.
 * - during the rcu grace period: ipc_rcu_grace.
 * - [only if vmalloc]: ipc_rcu_sched.
 * Their lifetime doesn't overlap, thus the headers share the same memory.
 * Unlike a normal union, they are right-aligned, thus some container_of
 * forward/backward casting is necessary:
 */
struct ipc_rcu_hdr {
        int refcount;
        int is_vmalloc;
        void *data[0];
};


struct ipc_rcu_grace {
        struct rcu_head rcu;
        /* "void *" makes sure alignment of following data is sane. */
        void *data[0];
};

struct ipc_rcu_sched {
        struct work_struct work;
        /* "void *" makes sure alignment of following data is sane. */
        void *data[0];
};

#define HDRLEN_KMALLOC          (sizeof(struct ipc_rcu_grace) > sizeof(struct ipc_rcu_hdr) ? \
                                        sizeof(struct ipc_rcu_grace) : sizeof(struct ipc_rcu_hdr))
#define HDRLEN_VMALLOC          (sizeof(struct ipc_rcu_sched) > HDRLEN_KMALLOC ? \
                                        sizeof(struct ipc_rcu_sched) : HDRLEN_KMALLOC)

static inline int rcu_use_vmalloc(int size)
{
        /* Too big for a single page? */
        if (HDRLEN_KMALLOC + size > PAGE_SIZE)
                return 1;
        return 0;
}

/**
 * ipc_rcu_alloc - allocate ipc and rcu space
 * @size: size desired
 *
 * Allocate memory for the rcu header structure + the object.
 * Returns the pointer to the object.
 * NULL is returned if the allocation fails.
 */

void *ipc_rcu_alloc(int size)
{
        void *out;
        /*
         * We prepend the allocation with the rcu struct, and
         * workqueue if necessary (for vmalloc).
         */
        if (rcu_use_vmalloc(size)) {
                out = vmalloc(HDRLEN_VMALLOC + size);
                if (out) {
                        out += HDRLEN_VMALLOC;
                        container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
                        container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
                }
        } else {
                out = kmalloc(HDRLEN_KMALLOC + size, GFP_KERNEL);
                if (out) {
                        out += HDRLEN_KMALLOC;
                        container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
                        container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
                }
        }

        return out;
}

void ipc_rcu_getref(void *ptr)
{
        container_of(ptr, struct ipc_rcu_hdr, data)->refcount++;
}

static void ipc_do_vfree(struct work_struct *work)
{
        vfree(container_of(work, struct ipc_rcu_sched, work));
}

/**
 * ipc_schedule_free - free ipc + rcu space
 * @head: RCU callback structure for queued work
 *
 * Since the RCU callback is run from bh (softirq) context,
 * we need to defer the vfree to schedule_work().
 */
static void ipc_schedule_free(struct rcu_head *head)
{
        struct ipc_rcu_grace *grace;
        struct ipc_rcu_sched *sched;

        grace = container_of(head, struct ipc_rcu_grace, rcu);
        sched = container_of(&(grace->data[0]), struct ipc_rcu_sched,
                                data[0]);

        INIT_WORK(&sched->work, ipc_do_vfree);
        schedule_work(&sched->work);
}

/**
 * ipc_immediate_free - free ipc + rcu space
 * @head: RCU callback structure that contains pointer to be freed
 *
 * Free from the RCU callback context.
 */
static void ipc_immediate_free(struct rcu_head *head)
{
        struct ipc_rcu_grace *free =
                container_of(head, struct ipc_rcu_grace, rcu);
        kfree(free);
}

void ipc_rcu_putref(void *ptr)
{
        if (--container_of(ptr, struct ipc_rcu_hdr, data)->refcount > 0)
                return;

        if (container_of(ptr, struct ipc_rcu_hdr, data)->is_vmalloc) {
                call_rcu(&container_of(ptr, struct ipc_rcu_grace, data)->rcu,
                                ipc_schedule_free);
        } else {
                call_rcu(&container_of(ptr, struct ipc_rcu_grace, data)->rcu,
                                ipc_immediate_free);
        }
}

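/*
 * Usage sketch (illustrative, names are placeholders): the get/putref pair
 * lets a caller drop the per-object spinlock while keeping the object
 * alive, a pattern used by the sem/msg/shm code around sleeping
 * allocations:
 *
 *      ipc_rcu_getref(sma);
 *      ipc_unlock(&sma->sem_perm);
 *      ... sleeping allocation ...
 *      ipc_lock_by_ptr(&sma->sem_perm);
 *      ipc_rcu_putref(sma);
 *
 * When the last reference goes away, ipc_rcu_putref() frees the object
 * after an RCU grace period (kfree, or a deferred vfree via workqueue).
 */
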
/**
 * ipcperms - check IPC permissions
 * @ipcp: IPC permission set
 * @flag: desired permission set.
 *
 * Check user, group, other permissions for access
 * to ipc resources. Return 0 if allowed.
 */

int ipcperms(struct kern_ipc_perm *ipcp, short flag)
{       /* flag will most probably be 0 or S_...UGO from <linux/stat.h> */
        int requested_mode, granted_mode, err;

        if (unlikely((err = audit_ipc_obj(ipcp))))
                return err;
        requested_mode = (flag >> 6) | (flag >> 3) | flag;
        granted_mode = ipcp->mode;
        if (current->euid == ipcp->cuid || current->euid == ipcp->uid)
                granted_mode >>= 6;
        else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid))
                granted_mode >>= 3;
        /* is there some bit set in requested_mode but not in granted_mode? */
        if ((requested_mode & ~granted_mode & 0007) &&
            !capable(CAP_IPC_OWNER))
                return -1;

        return security_ipc_permission(ipcp, flag);
}

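/*
 * Worked example for the bit twiddling above (assuming a non-owner,
 * non-group caller without CAP_IPC_OWNER):
 *
 *      flag           = S_IRUGO = 0444   (caller wants read access)
 *      requested_mode = (0444 >> 6) | (0444 >> 3) | 0444 = 0444
 *      granted_mode   = ipcp->mode = 0640; no shift, the "other" bits
 *                       stay in the low octal digit
 *      requested_mode & ~granted_mode & 0007
 *                     = 0444 & ~0640 & 0007 = 04   (read bit missing)
 *
 * so access is refused (-1) and the security hook is never consulted.
 */
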
/*
 * Functions to convert between the kern_ipc_perm structure and the
 * old/new ipc_perm structures
 */

/**
 * kernel_to_ipc64_perm - convert kernel ipc permissions to user
 * @in: kernel permissions
 * @out: new style IPC permissions
 *
 * Turn the kernel object @in into a set of permissions descriptions
 * for returning to userspace (@out).
 */


void kernel_to_ipc64_perm(struct kern_ipc_perm *in, struct ipc64_perm *out)
{
        out->key  = in->key;
        out->uid  = in->uid;
        out->gid  = in->gid;
        out->cuid = in->cuid;
        out->cgid = in->cgid;
        out->mode = in->mode;
        out->seq  = in->seq;
}

/**
 * ipc64_perm_to_ipc_perm - convert new ipc permissions to old
 * @in: new style IPC permissions
 * @out: old style IPC permissions
 *
 * Turn the new style permissions object @in into a compatibility
 * object and store it into the @out pointer.
 */

void ipc64_perm_to_ipc_perm(struct ipc64_perm *in, struct ipc_perm *out)
{
        out->key = in->key;
        SET_UID(out->uid, in->uid);
        SET_GID(out->gid, in->gid);
        SET_UID(out->cuid, in->cuid);
        SET_GID(out->cgid, in->cgid);
        out->mode = in->mode;
        out->seq = in->seq;
}

/**
 * ipc_lock - Lock an ipc structure without rw_mutex held
 * @ids: IPC identifier set
 * @id: ipc id to look for
 *
 * Look for an id in the ipc ids idr and lock the associated ipc object.
 *
 * The ipc object is locked on exit.
 *
 * This is the routine that should be called when the rw_mutex is not already
 * held, i.e. idr tree not protected: it protects the idr tree in read mode
 * during the idr_find().
 */

struct kern_ipc_perm *ipc_lock(struct ipc_ids *ids, int id)
{
        struct kern_ipc_perm *out;
        int lid = ipcid_to_idx(id);

        down_read(&ids->rw_mutex);

        rcu_read_lock();
        out = idr_find(&ids->ipcs_idr, lid);
        if (out == NULL) {
                rcu_read_unlock();
                up_read(&ids->rw_mutex);
                return ERR_PTR(-EINVAL);
        }

        up_read(&ids->rw_mutex);

        spin_lock(&out->lock);

        /*
         * ipc_rmid() may have already freed the ID while ipc_lock()
         * was spinning: here verify that the structure is still valid.
         */
        if (out->deleted) {
                spin_unlock(&out->lock);
                rcu_read_unlock();
                return ERR_PTR(-EINVAL);
        }

        return out;
}

/**
 * ipc_lock_down - Lock an ipc structure with rw_mutex held
 * @ids: IPC identifier set
 * @id: ipc id to look for
 *
 * Look for an id in the ipc ids idr and lock the associated ipc object.
 *
 * The ipc object is locked on exit.
 *
 * This is the routine that should be called when the rw_mutex is already
 * held, i.e. idr tree protected.
 */

struct kern_ipc_perm *ipc_lock_down(struct ipc_ids *ids, int id)
{
        struct kern_ipc_perm *out;
        int lid = ipcid_to_idx(id);

        rcu_read_lock();
        out = idr_find(&ids->ipcs_idr, lid);
        if (out == NULL) {
                rcu_read_unlock();
                return ERR_PTR(-EINVAL);
        }

        spin_lock(&out->lock);

        /*
         * No need to verify that the structure is still valid since the
         * rw_mutex is held.
         */
        return out;
}

struct kern_ipc_perm *ipc_lock_check_down(struct ipc_ids *ids, int id)
{
        struct kern_ipc_perm *out;

        out = ipc_lock_down(ids, id);
        if (IS_ERR(out))
                return out;

        if (ipc_checkid(out, id)) {
                ipc_unlock(out);
                return ERR_PTR(-EIDRM);
        }

        return out;
}

struct kern_ipc_perm *ipc_lock_check(struct ipc_ids *ids, int id)
{
        struct kern_ipc_perm *out;

        out = ipc_lock(ids, id);
        if (IS_ERR(out))
                return out;

        if (ipc_checkid(out, id)) {
                ipc_unlock(out);
                return ERR_PTR(-EIDRM);
        }

        return out;
}

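/*
 * Choosing a lookup helper (summary of the four routines above):
 *
 *      rw_mutex not held, id from userspace:    ipc_lock_check(ids, id)
 *      rw_mutex not held, id known valid:       ipc_lock(ids, id)
 *      rw_mutex already held, id from user:     ipc_lock_check_down(ids, id)
 *      rw_mutex already held, id known valid:   ipc_lock_down(ids, id)
 *
 * The *_check variants also reject stale ids whose sequence number no
 * longer matches and return -EIDRM in that case.
 */
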
/**
 * ipcget - Common sys_*get() code
 * @ns: namespace
 * @ids: IPC identifier set
 * @ops: operations to be called on ipc object creation, permission checks
 *       and further checks
 * @params: the parameters needed by the previous operations.
 *
 * Common routine called by sys_msgget(), sys_semget() and sys_shmget().
 */
int ipcget(struct ipc_namespace *ns, struct ipc_ids *ids,
                        struct ipc_ops *ops, struct ipc_params *params)
{
        if (params->key == IPC_PRIVATE)
                return ipcget_new(ns, ids, ops, params);
        else
                return ipcget_public(ns, ids, ops, params);
}

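/*
 * Usage sketch (illustrative): a sys_*get() implementation is expected to
 * fill an ipc_ops/ipc_params pair and hand both to ipcget().  The callback
 * names below are placeholders for the per-type routines; ipc/sem.c,
 * ipc/msg.c and ipc/shm.c provide the real ones:
 *
 *      struct ipc_ops ops = {
 *              .getnew      = newary,
 *              .associate   = sem_security,
 *              .more_checks = sem_more_checks,
 *      };
 *      struct ipc_params params = {
 *              .key     = key,
 *              .flg     = semflg,
 *              .u.nsems = nsems,
 *      };
 *
 *      return ipcget(ns, &sem_ids(ns), &ops, &params);
 */
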
/**
 * ipc_update_perm - update the permissions of an IPC.
 * @in:  the permission given as input.
 * @out: the permission of the ipc to set.
 */
void ipc_update_perm(struct ipc64_perm *in, struct kern_ipc_perm *out)
{
        out->uid = in->uid;
        out->gid = in->gid;
        out->mode = (out->mode & ~S_IRWXUGO)
                | (in->mode & S_IRWXUGO);
}

/**
 * ipcctl_pre_down - retrieve an ipc and check permissions for some IPC_XXX cmd
 * @ids:  the table of ids where to look for the ipc
 * @id:   the id of the ipc to retrieve
 * @cmd:  the cmd to check
 * @perm: the permission to set
 * @extra_perm: one extra permission parameter used by msq
 *
 * This function does some common audit and permission checks for some IPC_XXX
 * cmd and is called from semctl_down, shmctl_down and msgctl_down.
 * It must be called without any lock held and
 *  - retrieves the ipc with the given id in the given table.
 *  - performs some audit and permission check, depending on the given cmd
 *  - returns the ipc with both ipc and rw_mutex locks held in case of success
 *    or an err-code without any lock held otherwise.
 */
struct kern_ipc_perm *ipcctl_pre_down(struct ipc_ids *ids, int id, int cmd,
                                      struct ipc64_perm *perm, int extra_perm)
{
        struct kern_ipc_perm *ipcp;
        int err;

        down_write(&ids->rw_mutex);
        ipcp = ipc_lock_check_down(ids, id);
        if (IS_ERR(ipcp)) {
                err = PTR_ERR(ipcp);
                goto out_up;
        }

        err = audit_ipc_obj(ipcp);
        if (err)
                goto out_unlock;

        if (cmd == IPC_SET) {
                err = audit_ipc_set_perm(extra_perm, perm->uid,
                                         perm->gid, perm->mode);
                if (err)
                        goto out_unlock;
        }
        if (current->euid == ipcp->cuid ||
            current->euid == ipcp->uid || capable(CAP_SYS_ADMIN))
                return ipcp;

        err = -EPERM;
out_unlock:
        ipc_unlock(ipcp);
out_up:
        up_write(&ids->rw_mutex);
        return ERR_PTR(err);
}

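/*
 * Usage sketch (illustrative): a *ctl_down() caller is expected to use the
 * returned, locked ipc roughly like this (error handling elided; the exact
 * shape of the real callers in ipc/sem.c, ipc/msg.c and ipc/shm.c differs
 * in detail):
 *
 *      ipcp = ipcctl_pre_down(&ids, id, cmd, &perm, 0);
 *      if (IS_ERR(ipcp))
 *              return PTR_ERR(ipcp);
 *
 *      switch (cmd) {
 *      case IPC_RMID:
 *              ... free the object, dropping both locks ...
 *              return 0;
 *      case IPC_SET:
 *              ipc_update_perm(&perm, ipcp);
 *              break;
 *      }
 *      ipc_unlock(ipcp);
 *      up_write(&ids.rw_mutex);
 */
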
#ifdef __ARCH_WANT_IPC_PARSE_VERSION


/**
 * ipc_parse_version - IPC call version
 * @cmd: pointer to command
 *
 * Return IPC_64 for new style IPC and IPC_OLD for old style IPC.
 * The @cmd value is turned from an encoded command and version into
 * just the command code.
 */

int ipc_parse_version(int *cmd)
{
        if (*cmd & IPC_64) {
                *cmd ^= IPC_64;
                return IPC_64;
        } else {
                return IPC_OLD;
        }
}

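/*
 * Worked example: IPC_64 is 0x0100, so a glibc-style caller issuing
 * semctl() with cmd = IPC_STAT | IPC_64 (0x102) gets
 *
 *      ipc_parse_version(&cmd)  ->  returns IPC_64, cmd becomes IPC_STAT
 *
 * while an old binary passing plain IPC_STAT (0x2) gets IPC_OLD back and
 * cmd is left untouched.
 */
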
#endif /* __ARCH_WANT_IPC_PARSE_VERSION */

#ifdef CONFIG_PROC_FS
struct ipc_proc_iter {
        struct ipc_namespace *ns;
        struct ipc_proc_iface *iface;
};

/*
 * This routine locks the ipc structure found at or after position pos.
 */
static struct kern_ipc_perm *sysvipc_find_ipc(struct ipc_ids *ids, loff_t pos,
                                              loff_t *new_pos)
{
        struct kern_ipc_perm *ipc;
        int total, id;

        total = 0;
        for (id = 0; id < pos && total < ids->in_use; id++) {
                ipc = idr_find(&ids->ipcs_idr, id);
                if (ipc != NULL)
                        total++;
        }

        if (total >= ids->in_use)
                return NULL;

        for ( ; pos < IPCMNI; pos++) {
                ipc = idr_find(&ids->ipcs_idr, pos);
                if (ipc != NULL) {
                        *new_pos = pos + 1;
                        ipc_lock_by_ptr(ipc);
                        return ipc;
                }
        }

        /* Out of range - return NULL to terminate iteration */
        return NULL;
}

static void *sysvipc_proc_next(struct seq_file *s, void *it, loff_t *pos)
{
        struct ipc_proc_iter *iter = s->private;
        struct ipc_proc_iface *iface = iter->iface;
        struct kern_ipc_perm *ipc = it;

        /* If we had an ipc id locked before, unlock it */
        if (ipc && ipc != SEQ_START_TOKEN)
                ipc_unlock(ipc);

        return sysvipc_find_ipc(&iter->ns->ids[iface->ids], *pos, pos);
}

/*
 * File positions: pos 0 -> header, pos n -> ipc id = n - 1.
 * SeqFile iterator: iterator value is a locked ipc pointer or SEQ_START_TOKEN.
 */
static void *sysvipc_proc_start(struct seq_file *s, loff_t *pos)
{
        struct ipc_proc_iter *iter = s->private;
        struct ipc_proc_iface *iface = iter->iface;
        struct ipc_ids *ids;

        ids = &iter->ns->ids[iface->ids];

        /*
         * Take the lock - this will be released by the corresponding
         * call to stop().
         */
        down_read(&ids->rw_mutex);

        /* pos < 0 is invalid */
        if (*pos < 0)
                return NULL;

        /* pos == 0 means header */
        if (*pos == 0)
                return SEQ_START_TOKEN;

        /* Find the (pos-1)th ipc */
        return sysvipc_find_ipc(ids, *pos - 1, pos);
}

static void sysvipc_proc_stop(struct seq_file *s, void *it)
{
        struct kern_ipc_perm *ipc = it;
        struct ipc_proc_iter *iter = s->private;
        struct ipc_proc_iface *iface = iter->iface;
        struct ipc_ids *ids;

        /* If we had a locked structure, release it */
        if (ipc && ipc != SEQ_START_TOKEN)
                ipc_unlock(ipc);

        ids = &iter->ns->ids[iface->ids];
        /* Release the lock we took in start() */
        up_read(&ids->rw_mutex);
}

static int sysvipc_proc_show(struct seq_file *s, void *it)
{
        struct ipc_proc_iter *iter = s->private;
        struct ipc_proc_iface *iface = iter->iface;

        if (it == SEQ_START_TOKEN)
                return seq_puts(s, iface->header);

        return iface->show(s, it);
}

static struct seq_operations sysvipc_proc_seqops = {
        .start = sysvipc_proc_start,
        .stop  = sysvipc_proc_stop,
        .next  = sysvipc_proc_next,
        .show  = sysvipc_proc_show,
};

static int sysvipc_proc_open(struct inode *inode, struct file *file)
{
        int ret;
        struct seq_file *seq;
        struct ipc_proc_iter *iter;

        ret = -ENOMEM;
        iter = kmalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                goto out;

        ret = seq_open(file, &sysvipc_proc_seqops);
        if (ret)
                goto out_kfree;

        seq = file->private_data;
        seq->private = iter;

        iter->iface = PDE(inode)->data;
        iter->ns = get_ipc_ns(current->nsproxy->ipc_ns);
out:
        return ret;
out_kfree:
        kfree(iter);
        goto out;
}

static int sysvipc_proc_release(struct inode *inode, struct file *file)
{
        struct seq_file *seq = file->private_data;
        struct ipc_proc_iter *iter = seq->private;
        put_ipc_ns(iter->ns);
        return seq_release_private(inode, file);
}

static const struct file_operations sysvipc_proc_fops = {
        .open    = sysvipc_proc_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = sysvipc_proc_release,
};
#endif /* CONFIG_PROC_FS */