/*
 * linux/ipc/sem.c
 * Copyright (C) 1992 Krishna Balasubramanian
 * Copyright (C) 1995 Eric Schenk, Bruno Haible
 *
 * IMPLEMENTATION NOTES ON CODE REWRITE (Eric Schenk, January 1995):
 * This code underwent a massive rewrite in order to solve some problems
 * with the original code. In particular the original code failed to
 * wake up processes that were waiting for semval to go to 0 if the
 * value went to 0 and was then incremented rapidly enough. In solving
 * this problem I have also modified the implementation so that it
 * processes pending operations in a FIFO manner, thus giving a guarantee
 * that processes waiting for a lock on the semaphore won't starve
 * unless another locking process fails to unlock.
 * In addition the following two changes in behavior have been introduced:
 * - The original implementation of semop returned the value of the
 *   last semaphore element examined on success. This does not
 *   match the manual page specifications, and effectively
 *   allows the user to read the semaphore even if they do not
 *   have read permissions. The implementation now returns 0
 *   on success as stated in the manual page.
 * - There is some confusion over whether the set of undo adjustments
 *   to be performed at exit should be done in an atomic manner.
 *   That is, if we are attempting to decrement the semval should we queue
 *   up and wait until we can do so legally?
 *   The original implementation attempted to do this.
 *   The current implementation does not do so. This is because I don't
 *   think it is the right thing (TM) to do, and because I couldn't
 *   see a clean way to get the old behavior with the new design.
 *   The POSIX standard and SVID should be consulted to determine
 *   what behavior is mandated.
 *
 * Further notes on refinement (Christoph Rohland, December 1998):
 * - The POSIX standard says that the undo adjustments should simply
 *   be redone. So the current implementation is O.K.
 * - The previous code had two flaws:
 *   1) It actively gave the semaphore to the next waiting process
 *      sleeping on the semaphore. Since this process did not have the
 *      cpu this led to many unnecessary context switches and bad
 *      performance. Now we only check which process should be able to
 *      get the semaphore and if this process wants to reduce some
 *      semaphore value we simply wake it up without doing the
 *      operation. So it has to try to get it later. Thus e.g. the
 *      running process may reacquire the semaphore during the current
 *      time slice. If it only waits for zero or increases the semaphore,
 *      we do the operation in advance and wake it up.
 *   2) It did not wake up all zero waiting processes. We try to do
 *      better but only get the semops right which only wait for zero or
 *      increase. If there are decrement operations in the operations
 *      array we do the same as before.
 *
 * With the incarnation of the O(1) scheduler, it became unnecessary to
 * perform the check/retry algorithm for waking up blocked processes, as
 * the new scheduler is better at handling thread switches than the old one.
 *
 * /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <[email protected]>
 *
 * SMP-threaded, sysctl's added
 * (c) 1999 Manfred Spraul <[email protected]>
 * Enforced range limit on SEM_UNDO
 * (c) 2001 Red Hat Inc <[email protected]>
 * Lockless wakeup
 * (c) 2003 Manfred Spraul <[email protected]>
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/time.h>
#include <linux/smp_lock.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/seq_file.h>
#include <asm/uaccess.h>
#include "util.h"


#define sem_lock(id)    ((struct sem_array*)ipc_lock(&sem_ids,id))
#define sem_unlock(sma) ipc_unlock(&(sma)->sem_perm)
#define sem_rmid(id)    ((struct sem_array*)ipc_rmid(&sem_ids,id))
#define sem_checkid(sma, semid) \
        ipc_checkid(&sem_ids,&sma->sem_perm,semid)
#define sem_buildid(id, seq) \
        ipc_buildid(&sem_ids, id, seq)
static struct ipc_ids sem_ids;

static int newary (key_t, int, int);
static void freeary (struct sem_array *sma, int id);
#ifdef CONFIG_PROC_FS
static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
#endif

#define SEMMSL_FAST     256 /* 512 bytes on stack */
#define SEMOPM_FAST     64  /* ~ 372 bytes on stack */

/*
 * linked list protection:
 *      sem_undo.id_next,
 *      sem_array.sem_pending{,last},
 *      sem_array.sem_undo: sem_lock() for read/write
 *      sem_undo.proc_next: only "current" is allowed to read/write that field.
 */
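
/*
 * Illustrative pattern (added note, not in the original source): access
 * to the lists named above is expected to be bracketed by sem_lock()
 * and sem_unlock() on the owning array, roughly:
 *
 *      struct sem_array *sma = sem_lock(id);
 *      if (sma != NULL) {
 *              ... walk sma->sem_pending or sma->undo ...
 *              sem_unlock(sma);
 *      }
 */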

int sem_ctls[4] = {SEMMSL, SEMMNS, SEMOPM, SEMMNI};
#define sc_semmsl       (sem_ctls[0])
#define sc_semmns       (sem_ctls[1])
#define sc_semopm       (sem_ctls[2])
#define sc_semmni       (sem_ctls[3])

static int used_sems;

void __init sem_init (void)
{
        used_sems = 0;
        ipc_init_ids(&sem_ids,sc_semmni);
        ipc_init_proc_interface("sysvipc/sem",
                                "       key      semid perms      nsems   uid   gid  cuid  cgid      otime      ctime\n",
                                &sem_ids,
                                sysvipc_sem_proc_show);
}

/*
 * Lockless wakeup algorithm:
 * Without the check/retry algorithm a lockless wakeup is possible:
 * - queue.status is initialized to -EINTR before blocking.
 * - wakeup is performed by
 *      * unlinking the queue entry from sma->sem_pending
 *      * setting queue.status to IN_WAKEUP
 *        This is the notification for the blocked thread that a
 *        result value is imminent.
 *      * call wake_up_process
 *      * set queue.status to the final value.
 * - the previously blocked thread checks queue.status:
 *      * if it's IN_WAKEUP, then it must wait until the value changes
 *      * if it's not -EINTR, then the operation was completed by
 *        update_queue. semtimedop can return queue.status without
 *        performing any operation on the semaphore array.
 *      * otherwise it must acquire the spinlock and check what's up.
 *
 * The two-stage algorithm is necessary to protect against the following
 * races:
 * - if queue.status is set after wake_up_process, then the woken up idle
 *   thread could race forward and try (and fail) to acquire sma->lock
 *   before update_queue had a chance to set queue.status
 * - if queue.status is written before wake_up_process and if the
 *   blocked process is woken up by a signal between writing
 *   queue.status and the wake_up_process, then the woken up
 *   process could return from semtimedop and die by calling
 *   sys_exit before wake_up_process is called. Then wake_up_process
 *   will oops, because the task structure is already invalid.
 *   (yes, this happened on s390 with sysv msg).
 *
 */
#define IN_WAKEUP       1
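
/*
 * Waker-side ordering as a sketch (added note, not in the original
 * source; this is the sequence update_queue() and freeary() below
 * actually perform):
 *
 *      remove_from_queue(sma, q);
 *      q->status = IN_WAKEUP;          // "result imminent"
 *      wake_up_process(q->sleeper);
 *      q->status = error;              // final value; q may vanish right after
 */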

static int newary (key_t key, int nsems, int semflg)
{
        int id;
        int retval;
        struct sem_array *sma;
        int size;

        if (!nsems)
                return -EINVAL;
        if (used_sems + nsems > sc_semmns)
                return -ENOSPC;

        size = sizeof (*sma) + nsems * sizeof (struct sem);
        sma = ipc_rcu_alloc(size);
        if (!sma) {
                return -ENOMEM;
        }
        memset (sma, 0, size);

        sma->sem_perm.mode = (semflg & S_IRWXUGO);
        sma->sem_perm.key = key;

        sma->sem_perm.security = NULL;
        retval = security_sem_alloc(sma);
        if (retval) {
                ipc_rcu_putref(sma);
                return retval;
        }

        id = ipc_addid(&sem_ids, &sma->sem_perm, sc_semmni);
        if(id == -1) {
                security_sem_free(sma);
                ipc_rcu_putref(sma);
                return -ENOSPC;
        }
        used_sems += nsems;

        sma->sem_id = sem_buildid(id, sma->sem_perm.seq);
        sma->sem_base = (struct sem *) &sma[1];
        /* sma->sem_pending = NULL; */
        sma->sem_pending_last = &sma->sem_pending;
        /* sma->undo = NULL; */
        sma->sem_nsems = nsems;
        sma->sem_ctime = get_seconds();
        sem_unlock(sma);

        return sma->sem_id;
}

asmlinkage long sys_semget (key_t key, int nsems, int semflg)
{
        int id, err = -EINVAL;
        struct sem_array *sma;

        if (nsems < 0 || nsems > sc_semmsl)
                return -EINVAL;
        down(&sem_ids.sem);

        if (key == IPC_PRIVATE) {
                err = newary(key, nsems, semflg);
        } else if ((id = ipc_findkey(&sem_ids, key)) == -1) {  /* key not used */
                if (!(semflg & IPC_CREAT))
                        err = -ENOENT;
                else
                        err = newary(key, nsems, semflg);
        } else if (semflg & IPC_CREAT && semflg & IPC_EXCL) {
                err = -EEXIST;
        } else {
                sma = sem_lock(id);
                if(sma==NULL)
                        BUG();
                if (nsems > sma->sem_nsems)
                        err = -EINVAL;
                else if (ipcperms(&sma->sem_perm, semflg))
                        err = -EACCES;
                else {
                        int semid = sem_buildid(id, sma->sem_perm.seq);
                        err = security_sem_associate(sma, semflg);
                        if (!err)
                                err = semid;
                }
                sem_unlock(sma);
        }

        up(&sem_ids.sem);
        return err;
}
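
/*
 * Userspace view, as a hedged sketch (added note, not part of this
 * file; assumes <sys/ipc.h> and <sys/sem.h>):
 *
 *      int semid = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
 *      if (semid < 0)
 *              perror("semget");
 *
 * With IPC_PRIVATE this always takes the newary() path above.
 */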

/* Manage the doubly linked list sma->sem_pending as a FIFO:
 * insert new queue elements at the tail sma->sem_pending_last.
 */
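/*
 * Added note (not in the original source): q->prev does not point at
 * the previous queue element but at the previous element's "next"
 * field (or at sma->sem_pending for the head element), which is why
 * unlinking is simply "*(q->prev) = q->next" in remove_from_queue()
 * below.
 */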
static inline void append_to_queue (struct sem_array * sma,
                                    struct sem_queue * q)
{
        *(q->prev = sma->sem_pending_last) = q;
        *(sma->sem_pending_last = &q->next) = NULL;
}

static inline void prepend_to_queue (struct sem_array * sma,
                                     struct sem_queue * q)
{
        q->next = sma->sem_pending;
        *(q->prev = &sma->sem_pending) = q;
        if (q->next)
                q->next->prev = &q->next;
        else /* sma->sem_pending_last == &sma->sem_pending */
                sma->sem_pending_last = &q->next;
}

static inline void remove_from_queue (struct sem_array * sma,
                                      struct sem_queue * q)
{
        *(q->prev) = q->next;
        if (q->next)
                q->next->prev = q->prev;
        else /* sma->sem_pending_last == &q->next */
                sma->sem_pending_last = q->prev;
        q->prev = NULL; /* mark as removed */
}

/*
 * Determine whether a sequence of semaphore operations would succeed
 * all at once. Return 0 if yes, 1 if need to sleep, else return error code.
 */

static int try_atomic_semop (struct sem_array * sma, struct sembuf * sops,
                             int nsops, struct sem_undo *un, int pid)
{
        int result, sem_op;
        struct sembuf *sop;
        struct sem * curr;

        for (sop = sops; sop < sops + nsops; sop++) {
                curr = sma->sem_base + sop->sem_num;
                sem_op = sop->sem_op;
                result = curr->semval;

                if (!sem_op && result)
                        goto would_block;

                result += sem_op;
                if (result < 0)
                        goto would_block;
                if (result > SEMVMX)
                        goto out_of_range;
                if (sop->sem_flg & SEM_UNDO) {
                        int undo = un->semadj[sop->sem_num] - sem_op;
                        /*
                         * Exceeding the undo range is an error.
                         */
                        if (undo < (-SEMAEM - 1) || undo > SEMAEM)
                                goto out_of_range;
                }
                curr->semval = result;
        }

        sop--;
        while (sop >= sops) {
                sma->sem_base[sop->sem_num].sempid = pid;
                if (sop->sem_flg & SEM_UNDO)
                        un->semadj[sop->sem_num] -= sop->sem_op;
                sop--;
        }

        sma->sem_otime = get_seconds();
        return 0;

out_of_range:
        result = -ERANGE;
        goto undo;

would_block:
        if (sop->sem_flg & IPC_NOWAIT)
                result = -EAGAIN;
        else
                result = 1;

undo:
        sop--;
        while (sop >= sops) {
                sma->sem_base[sop->sem_num].semval -= sop->sem_op;
                sop--;
        }

        return result;
}
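
/*
 * What this validates, seen from the caller's side - a hedged
 * userspace sketch (added note, not part of this file; assumes
 * <sys/sem.h> and a semid obtained from semget()):
 *
 *      struct sembuf op = {
 *              .sem_num = 0,
 *              .sem_op  = -1,          // decrement; blocks while semval == 0
 *              .sem_flg = SEM_UNDO,    // adjustment rolled back at exit
 *      };
 *      if (semop(semid, &op, 1) < 0)
 *              perror("semop");
 */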

/* Go through the pending queue for the indicated semaphore
 * looking for tasks that can be completed.
 */
static void update_queue (struct sem_array * sma)
{
        int error;
        struct sem_queue * q;

        q = sma->sem_pending;
        while(q) {
                error = try_atomic_semop(sma, q->sops, q->nsops,
                                         q->undo, q->pid);

                /* Does q->sleeper still need to sleep? */
                if (error <= 0) {
                        struct sem_queue *n;
                        remove_from_queue(sma,q);
                        q->status = IN_WAKEUP;
                        /*
                         * Continue scanning. The next operation
                         * that must be checked depends on the type of the
                         * completed operation:
                         * - if the operation modified the array, then
                         *   restart from the head of the queue and
                         *   check for threads that might be waiting
                         *   for semaphore values to become 0.
                         * - if the operation didn't modify the array,
                         *   then just continue.
                         */
                        if (q->alter)
                                n = sma->sem_pending;
                        else
                                n = q->next;
                        wake_up_process(q->sleeper);
                        /* hands-off: q will disappear immediately after
                         * writing q->status.
                         */
                        q->status = error;
                        q = n;
                } else {
                        q = q->next;
                }
        }
}

/* The following counts are associated to each semaphore:
 *   semncnt        number of tasks waiting on semval being nonzero
 *   semzcnt        number of tasks waiting on semval being zero
 * This model assumes that a task waits on exactly one semaphore.
 * Since semaphore operations are to be performed atomically, tasks actually
 * wait on a whole sequence of semaphores simultaneously.
 * The counts we return here are a rough approximation, but still
 * warrant that semncnt+semzcnt>0 if the task is on the pending queue.
 */
static int count_semncnt (struct sem_array * sma, ushort semnum)
{
        int semncnt;
        struct sem_queue * q;

        semncnt = 0;
        for (q = sma->sem_pending; q; q = q->next) {
                struct sembuf * sops = q->sops;
                int nsops = q->nsops;
                int i;
                for (i = 0; i < nsops; i++)
                        if (sops[i].sem_num == semnum
                            && (sops[i].sem_op < 0)
                            && !(sops[i].sem_flg & IPC_NOWAIT))
                                semncnt++;
        }
        return semncnt;
}

static int count_semzcnt (struct sem_array * sma, ushort semnum)
{
        int semzcnt;
        struct sem_queue * q;

        semzcnt = 0;
        for (q = sma->sem_pending; q; q = q->next) {
                struct sembuf * sops = q->sops;
                int nsops = q->nsops;
                int i;
                for (i = 0; i < nsops; i++)
                        if (sops[i].sem_num == semnum
                            && (sops[i].sem_op == 0)
                            && !(sops[i].sem_flg & IPC_NOWAIT))
                                semzcnt++;
        }
        return semzcnt;
}

/* Free a semaphore set. freeary() is called with sem_ids.sem down and
 * the spinlock for this semaphore set held. sem_ids.sem remains locked
 * on exit.
 */
static void freeary (struct sem_array *sma, int id)
{
        struct sem_undo *un;
        struct sem_queue *q;
        int size;

        /* Invalidate the existing undo structures for this semaphore set.
         * (They will be freed without any further action in exit_sem()
         * or during the next semop.)
         */
        for (un = sma->undo; un; un = un->id_next)
                un->semid = -1;

        /* Wake up all pending processes and let them fail with EIDRM. */
        q = sma->sem_pending;
        while(q) {
                struct sem_queue *n;
                /* lazy remove_from_queue: we are killing the whole queue */
                q->prev = NULL;
                n = q->next;
                q->status = IN_WAKEUP;
                wake_up_process(q->sleeper); /* doesn't sleep */
                q->status = -EIDRM;     /* hands-off q */
                q = n;
        }

        /* Remove the semaphore set from the ID array */
        sma = sem_rmid(id);
        sem_unlock(sma);

        used_sems -= sma->sem_nsems;
        size = sizeof (*sma) + sma->sem_nsems * sizeof (struct sem);
        security_sem_free(sma);
        ipc_rcu_putref(sma);
}

static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
{
        switch(version) {
        case IPC_64:
                return copy_to_user(buf, in, sizeof(*in));
        case IPC_OLD:
        {
                struct semid_ds out;

                ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);

                out.sem_otime   = in->sem_otime;
                out.sem_ctime   = in->sem_ctime;
                out.sem_nsems   = in->sem_nsems;

                return copy_to_user(buf, &out, sizeof(out));
        }
        default:
                return -EINVAL;
        }
}

static int semctl_nolock(int semid, int semnum, int cmd, int version, union semun arg)
{
        int err = -EINVAL;
        struct sem_array *sma;

        switch(cmd) {
        case IPC_INFO:
        case SEM_INFO:
        {
                struct seminfo seminfo;
                int max_id;

                err = security_sem_semctl(NULL, cmd);
                if (err)
                        return err;

                memset(&seminfo,0,sizeof(seminfo));
                seminfo.semmni = sc_semmni;
                seminfo.semmns = sc_semmns;
                seminfo.semmsl = sc_semmsl;
                seminfo.semopm = sc_semopm;
                seminfo.semvmx = SEMVMX;
                seminfo.semmnu = SEMMNU;
                seminfo.semmap = SEMMAP;
                seminfo.semume = SEMUME;
                down(&sem_ids.sem);
                if (cmd == SEM_INFO) {
                        seminfo.semusz = sem_ids.in_use;
                        seminfo.semaem = used_sems;
                } else {
                        seminfo.semusz = SEMUSZ;
                        seminfo.semaem = SEMAEM;
                }
                max_id = sem_ids.max_id;
                up(&sem_ids.sem);
                if (copy_to_user (arg.__buf, &seminfo, sizeof(struct seminfo)))
                        return -EFAULT;
                return (max_id < 0) ? 0 : max_id;
        }
        case SEM_STAT:
        {
                struct semid64_ds tbuf;
                int id;

                if(semid >= sem_ids.entries->size)
                        return -EINVAL;

                memset(&tbuf,0,sizeof(tbuf));

                sma = sem_lock(semid);
                if(sma == NULL)
                        return -EINVAL;

                err = -EACCES;
                if (ipcperms (&sma->sem_perm, S_IRUGO))
                        goto out_unlock;

                err = security_sem_semctl(sma, cmd);
                if (err)
                        goto out_unlock;

                id = sem_buildid(semid, sma->sem_perm.seq);

                kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
                tbuf.sem_otime  = sma->sem_otime;
                tbuf.sem_ctime  = sma->sem_ctime;
                tbuf.sem_nsems  = sma->sem_nsems;
                sem_unlock(sma);
                if (copy_semid_to_user (arg.buf, &tbuf, version))
                        return -EFAULT;
                return id;
        }
        default:
                return -EINVAL;
        }
        return err;
out_unlock:
        sem_unlock(sma);
        return err;
}

static int semctl_main(int semid, int semnum, int cmd, int version, union semun arg)
{
        struct sem_array *sma;
        struct sem* curr;
        int err;
        ushort fast_sem_io[SEMMSL_FAST];
        ushort* sem_io = fast_sem_io;
        int nsems;

        sma = sem_lock(semid);
        if(sma==NULL)
                return -EINVAL;

        nsems = sma->sem_nsems;

        err=-EIDRM;
        if (sem_checkid(sma,semid))
                goto out_unlock;

        err = -EACCES;
        if (ipcperms (&sma->sem_perm, (cmd==SETVAL||cmd==SETALL)?S_IWUGO:S_IRUGO))
                goto out_unlock;

        err = security_sem_semctl(sma, cmd);
        if (err)
                goto out_unlock;

        err = -EACCES;
        switch (cmd) {
        case GETALL:
        {
                ushort __user *array = arg.array;
                int i;

                if(nsems > SEMMSL_FAST) {
                        ipc_rcu_getref(sma);
                        sem_unlock(sma);

                        sem_io = ipc_alloc(sizeof(ushort)*nsems);
                        if(sem_io == NULL) {
                                ipc_lock_by_ptr(&sma->sem_perm);
                                ipc_rcu_putref(sma);
                                sem_unlock(sma);
                                return -ENOMEM;
                        }

                        ipc_lock_by_ptr(&sma->sem_perm);
                        ipc_rcu_putref(sma);
                        if (sma->sem_perm.deleted) {
                                sem_unlock(sma);
                                err = -EIDRM;
                                goto out_free;
                        }
                }

                for (i = 0; i < sma->sem_nsems; i++)
                        sem_io[i] = sma->sem_base[i].semval;
                sem_unlock(sma);
                err = 0;
                if(copy_to_user(array, sem_io, nsems*sizeof(ushort)))
                        err = -EFAULT;
                goto out_free;
        }
        case SETALL:
        {
                int i;
                struct sem_undo *un;

                ipc_rcu_getref(sma);
                sem_unlock(sma);

                if(nsems > SEMMSL_FAST) {
                        sem_io = ipc_alloc(sizeof(ushort)*nsems);
                        if(sem_io == NULL) {
                                ipc_lock_by_ptr(&sma->sem_perm);
                                ipc_rcu_putref(sma);
                                sem_unlock(sma);
                                return -ENOMEM;
                        }
                }

                if (copy_from_user (sem_io, arg.array, nsems*sizeof(ushort))) {
                        ipc_lock_by_ptr(&sma->sem_perm);
                        ipc_rcu_putref(sma);
                        sem_unlock(sma);
                        err = -EFAULT;
                        goto out_free;
                }

                for (i = 0; i < nsems; i++) {
                        if (sem_io[i] > SEMVMX) {
                                ipc_lock_by_ptr(&sma->sem_perm);
                                ipc_rcu_putref(sma);
                                sem_unlock(sma);
                                err = -ERANGE;
                                goto out_free;
                        }
                }
                ipc_lock_by_ptr(&sma->sem_perm);
                ipc_rcu_putref(sma);
                if (sma->sem_perm.deleted) {
                        sem_unlock(sma);
                        err = -EIDRM;
                        goto out_free;
                }

                for (i = 0; i < nsems; i++)
                        sma->sem_base[i].semval = sem_io[i];
                for (un = sma->undo; un; un = un->id_next)
                        for (i = 0; i < nsems; i++)
                                un->semadj[i] = 0;
                sma->sem_ctime = get_seconds();
                /* maybe some queued-up processes were waiting for this */
                update_queue(sma);
                err = 0;
                goto out_unlock;
        }
        case IPC_STAT:
        {
                struct semid64_ds tbuf;
                memset(&tbuf,0,sizeof(tbuf));
                kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
                tbuf.sem_otime  = sma->sem_otime;
                tbuf.sem_ctime  = sma->sem_ctime;
                tbuf.sem_nsems  = sma->sem_nsems;
                sem_unlock(sma);
                if (copy_semid_to_user (arg.buf, &tbuf, version))
                        return -EFAULT;
                return 0;
        }
        /* GETVAL, GETPID, GETNCNT, GETZCNT, SETVAL: fall-through */
        }
        err = -EINVAL;
        if(semnum < 0 || semnum >= nsems)
                goto out_unlock;

        curr = &sma->sem_base[semnum];

        switch (cmd) {
        case GETVAL:
                err = curr->semval;
                goto out_unlock;
        case GETPID:
                err = curr->sempid;
                goto out_unlock;
        case GETNCNT:
                err = count_semncnt(sma,semnum);
                goto out_unlock;
        case GETZCNT:
                err = count_semzcnt(sma,semnum);
                goto out_unlock;
        case SETVAL:
        {
                int val = arg.val;
                struct sem_undo *un;
                err = -ERANGE;
                if (val > SEMVMX || val < 0)
                        goto out_unlock;

                for (un = sma->undo; un; un = un->id_next)
                        un->semadj[semnum] = 0;
                curr->semval = val;
                curr->sempid = current->tgid;
                sma->sem_ctime = get_seconds();
                /* maybe some queued-up processes were waiting for this */
                update_queue(sma);
                err = 0;
                goto out_unlock;
        }
        }
out_unlock:
        sem_unlock(sma);
out_free:
        if(sem_io != fast_sem_io)
                ipc_free(sem_io, sizeof(ushort)*nsems);
        return err;
}

struct sem_setbuf {
        uid_t   uid;
        gid_t   gid;
        mode_t  mode;
};

static inline unsigned long copy_semid_from_user(struct sem_setbuf *out, void __user *buf, int version)
{
        switch(version) {
        case IPC_64:
        {
                struct semid64_ds tbuf;

                if(copy_from_user(&tbuf, buf, sizeof(tbuf)))
                        return -EFAULT;

                out->uid        = tbuf.sem_perm.uid;
                out->gid        = tbuf.sem_perm.gid;
                out->mode       = tbuf.sem_perm.mode;

                return 0;
        }
        case IPC_OLD:
        {
                struct semid_ds tbuf_old;

                if(copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
                        return -EFAULT;

                out->uid        = tbuf_old.sem_perm.uid;
                out->gid        = tbuf_old.sem_perm.gid;
                out->mode       = tbuf_old.sem_perm.mode;

                return 0;
        }
        default:
                return -EINVAL;
        }
}

static int semctl_down(int semid, int semnum, int cmd, int version, union semun arg)
{
        struct sem_array *sma;
        int err;
        struct sem_setbuf setbuf;
        struct kern_ipc_perm *ipcp;

        if(cmd == IPC_SET) {
                if(copy_semid_from_user (&setbuf, arg.buf, version))
                        return -EFAULT;
                if ((err = audit_ipc_perms(0, setbuf.uid, setbuf.gid, setbuf.mode)))
                        return err;
        }
        sma = sem_lock(semid);
        if(sma==NULL)
                return -EINVAL;

        if (sem_checkid(sma,semid)) {
                err=-EIDRM;
                goto out_unlock;
        }
        ipcp = &sma->sem_perm;

        if (current->euid != ipcp->cuid &&
            current->euid != ipcp->uid && !capable(CAP_SYS_ADMIN)) {
                err=-EPERM;
                goto out_unlock;
        }

        err = security_sem_semctl(sma, cmd);
        if (err)
                goto out_unlock;

        switch(cmd){
        case IPC_RMID:
                freeary(sma, semid);
                err = 0;
                break;
        case IPC_SET:
                ipcp->uid = setbuf.uid;
                ipcp->gid = setbuf.gid;
                ipcp->mode = (ipcp->mode & ~S_IRWXUGO)
                                | (setbuf.mode & S_IRWXUGO);
                sma->sem_ctime = get_seconds();
                sem_unlock(sma);
                err = 0;
                break;
        default:
                sem_unlock(sma);
                err = -EINVAL;
                break;
        }
        return err;

out_unlock:
        sem_unlock(sma);
        return err;
}

asmlinkage long sys_semctl (int semid, int semnum, int cmd, union semun arg)
{
        int err = -EINVAL;
        int version;

        if (semid < 0)
                return -EINVAL;

        version = ipc_parse_version(&cmd);

        switch(cmd) {
        case IPC_INFO:
        case SEM_INFO:
        case SEM_STAT:
                err = semctl_nolock(semid,semnum,cmd,version,arg);
                return err;
        case GETALL:
        case GETVAL:
        case GETPID:
        case GETNCNT:
        case GETZCNT:
        case IPC_STAT:
        case SETVAL:
        case SETALL:
                err = semctl_main(semid,semnum,cmd,version,arg);
                return err;
        case IPC_RMID:
        case IPC_SET:
                down(&sem_ids.sem);
                err = semctl_down(semid,semnum,cmd,version,arg);
                up(&sem_ids.sem);
                return err;
        default:
                return -EINVAL;
        }
}
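
/*
 * Userspace view, as a hedged sketch (added note, not part of this
 * file). Per semctl(2), on Linux the caller must define union semun
 * itself:
 *
 *      union semun {
 *              int val;
 *              struct semid_ds *buf;
 *              unsigned short *array;
 *      };
 *
 *      union semun arg;
 *      arg.val = 1;
 *      if (semctl(semid, 0, SETVAL, arg) < 0)  // SETVAL path above
 *              perror("semctl");
 */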

static inline void lock_semundo(void)
{
        struct sem_undo_list *undo_list;

        undo_list = current->sysvsem.undo_list;
        if (undo_list)
                spin_lock(&undo_list->lock);
}

/* This code has an interaction with copy_semundo().
 * Consider: two tasks are sharing the undo_list. task1
 * acquires the undo_list lock in lock_semundo(). If task2 now
 * exits before task1 releases the lock (by calling
 * unlock_semundo()), then task1 will never call spin_unlock().
 * This leaves the sem_undo_list in a locked state. If task1 now creates task3
 * and once again shares the sem_undo_list, the sem_undo_list will still be
 * locked, and future SEM_UNDO operations will deadlock. This case is
 * dealt with by initializing the spin lock in get_undo_list() when the
 * undo_list is first allocated.
 */
static inline void unlock_semundo(void)
{
        struct sem_undo_list *undo_list;

        undo_list = current->sysvsem.undo_list;
        if (undo_list)
                spin_unlock(&undo_list->lock);
}

/* If the task doesn't already have an undo_list, then allocate one
 * here. We guarantee there is only one thread using this undo list,
 * and current is THE ONE.
 *
 * If this allocation and assignment succeeds, but later
 * portions of this code fail, there is no need to free the sem_undo_list.
 * Just let it stay associated with the task, and it'll be freed later
 * at exit time.
 *
 * This can block, so callers must hold no locks.
 */
static inline int get_undo_list(struct sem_undo_list **undo_listp)
{
        struct sem_undo_list *undo_list;
        int size;

        undo_list = current->sysvsem.undo_list;
        if (!undo_list) {
                size = sizeof(struct sem_undo_list);
                undo_list = (struct sem_undo_list *) kmalloc(size, GFP_KERNEL);
                if (undo_list == NULL)
                        return -ENOMEM;
                memset(undo_list, 0, size);
                spin_lock_init(&undo_list->lock);
                atomic_set(&undo_list->refcnt, 1);
                current->sysvsem.undo_list = undo_list;
        }
        *undo_listp = undo_list;
        return 0;
}
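
/*
 * Added note (not in the original source): besides searching for semid,
 * lookup_undo() also unlinks and frees any entry whose semid was set to
 * -1 by freeary(), so the per-process list is pruned lazily as a side
 * effect of every lookup.
 */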
static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
{
        struct sem_undo **last, *un;

        last = &ulp->proc_list;
        un = *last;
        while(un != NULL) {
                if(un->semid==semid)
                        break;
                if(un->semid==-1) {
                        *last=un->proc_next;
                        kfree(un);
                } else {
                        last=&un->proc_next;
                }
                un=*last;
        }
        return un;
}

static struct sem_undo *find_undo(int semid)
{
        struct sem_array *sma;
        struct sem_undo_list *ulp;
        struct sem_undo *un, *new;
        int nsems;
        int error;

        error = get_undo_list(&ulp);
        if (error)
                return ERR_PTR(error);

        lock_semundo();
        un = lookup_undo(ulp, semid);
        unlock_semundo();
        if (likely(un!=NULL))
                goto out;

        /* no undo structure around - allocate one. */
        sma = sem_lock(semid);
        un = ERR_PTR(-EINVAL);
        if(sma==NULL)
                goto out;
        un = ERR_PTR(-EIDRM);
        if (sem_checkid(sma,semid)) {
                sem_unlock(sma);
                goto out;
        }
        nsems = sma->sem_nsems;
        ipc_rcu_getref(sma);
        sem_unlock(sma);

        new = (struct sem_undo *) kmalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
        if (!new) {
                ipc_lock_by_ptr(&sma->sem_perm);
                ipc_rcu_putref(sma);
                sem_unlock(sma);
                return ERR_PTR(-ENOMEM);
        }
        memset(new, 0, sizeof(struct sem_undo) + sizeof(short)*nsems);
        new->semadj = (short *) &new[1];
        new->semid = semid;

        lock_semundo();
        un = lookup_undo(ulp, semid);
        if (un) {
                unlock_semundo();
                kfree(new);
                ipc_lock_by_ptr(&sma->sem_perm);
                ipc_rcu_putref(sma);
                sem_unlock(sma);
                goto out;
        }
        ipc_lock_by_ptr(&sma->sem_perm);
        ipc_rcu_putref(sma);
        if (sma->sem_perm.deleted) {
                sem_unlock(sma);
                unlock_semundo();
                kfree(new);
                un = ERR_PTR(-EIDRM);
                goto out;
        }
        new->proc_next = ulp->proc_list;
        ulp->proc_list = new;
        new->id_next = sma->undo;
        sma->undo = new;
        sem_unlock(sma);
        un = new;
        unlock_semundo();
out:
        return un;
}

asmlinkage long sys_semtimedop(int semid, struct sembuf __user *tsops,
                        unsigned nsops, const struct timespec __user *timeout)
{
        int error = -EINVAL;
        struct sem_array *sma;
        struct sembuf fast_sops[SEMOPM_FAST];
        struct sembuf* sops = fast_sops, *sop;
        struct sem_undo *un;
        int undos = 0, alter = 0, max;
        struct sem_queue queue;
        unsigned long jiffies_left = 0;

        if (nsops < 1 || semid < 0)
                return -EINVAL;
        if (nsops > sc_semopm)
                return -E2BIG;
        if(nsops > SEMOPM_FAST) {
                sops = kmalloc(sizeof(*sops)*nsops,GFP_KERNEL);
                if(sops==NULL)
                        return -ENOMEM;
        }
        if (copy_from_user (sops, tsops, nsops * sizeof(*tsops))) {
                error=-EFAULT;
                goto out_free;
        }
        if (timeout) {
                struct timespec _timeout;
                if (copy_from_user(&_timeout, timeout, sizeof(*timeout))) {
                        error = -EFAULT;
                        goto out_free;
                }
                if (_timeout.tv_sec < 0 || _timeout.tv_nsec < 0 ||
                        _timeout.tv_nsec >= 1000000000L) {
                        error = -EINVAL;
                        goto out_free;
                }
                jiffies_left = timespec_to_jiffies(&_timeout);
        }
        max = 0;
        for (sop = sops; sop < sops + nsops; sop++) {
                if (sop->sem_num >= max)
                        max = sop->sem_num;
                if (sop->sem_flg & SEM_UNDO)
                        undos = 1;
                if (sop->sem_op != 0)
                        alter = 1;
        }

retry_undos:
        if (undos) {
                un = find_undo(semid);
                if (IS_ERR(un)) {
                        error = PTR_ERR(un);
                        goto out_free;
                }
        } else
                un = NULL;

        sma = sem_lock(semid);
        error=-EINVAL;
        if(sma==NULL)
                goto out_free;
        error = -EIDRM;
        if (sem_checkid(sma,semid))
                goto out_unlock_free;
        /*
         * semid identifiers are not unique - find_undo may have
         * allocated an undo structure, it was invalidated by an RMID
         * and now a new array has received the same id. Check and retry.
         */
        if (un && un->semid == -1) {
                sem_unlock(sma);
                goto retry_undos;
        }
        error = -EFBIG;
        if (max >= sma->sem_nsems)
                goto out_unlock_free;

        error = -EACCES;
        if (ipcperms(&sma->sem_perm, alter ? S_IWUGO : S_IRUGO))
                goto out_unlock_free;

        error = security_sem_semop(sma, sops, nsops, alter);
        if (error)
                goto out_unlock_free;

        error = try_atomic_semop (sma, sops, nsops, un, current->tgid);
        if (error <= 0) {
                if (alter && error == 0)
                        update_queue (sma);
                goto out_unlock_free;
        }

        /* We need to sleep on this operation, so we put the current
         * task into the pending queue and go to sleep.
         */

        queue.sma = sma;
        queue.sops = sops;
        queue.nsops = nsops;
        queue.undo = un;
        queue.pid = current->tgid;
        queue.id = semid;
        queue.alter = alter;
        if (alter)
                append_to_queue(sma, &queue);
        else
                prepend_to_queue(sma, &queue);

        queue.status = -EINTR;
        queue.sleeper = current;
        current->state = TASK_INTERRUPTIBLE;
        sem_unlock(sma);

        if (timeout)
                jiffies_left = schedule_timeout(jiffies_left);
        else
                schedule();

        error = queue.status;
        while(unlikely(error == IN_WAKEUP)) {
                cpu_relax();
                error = queue.status;
        }

        if (error != -EINTR) {
                /* fast path: update_queue already obtained all requested
                 * resources */
                goto out_free;
        }

        sma = sem_lock(semid);
        if(sma==NULL) {
                if(queue.prev != NULL)
                        BUG();
                error = -EIDRM;
                goto out_free;
        }

        /*
         * If queue.status != -EINTR we are woken up by another process
         */
        error = queue.status;
        if (error != -EINTR) {
                goto out_unlock_free;
        }

        /*
         * If an interrupt occurred we have to clean up the queue
         */
        if (timeout && jiffies_left == 0)
                error = -EAGAIN;
        remove_from_queue(sma,&queue);
        goto out_unlock_free;

out_unlock_free:
        sem_unlock(sma);
out_free:
        if(sops != fast_sops)
                kfree(sops);
        return error;
}

asmlinkage long sys_semop (int semid, struct sembuf __user *tsops, unsigned nsops)
{
        return sys_semtimedop(semid, tsops, nsops, NULL);
}
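
/*
 * Userspace view, as a hedged sketch (added note, not part of this
 * file; assumes _GNU_SOURCE, <sys/sem.h>, <time.h> and <errno.h>):
 *
 *      struct sembuf op = { .sem_num = 0, .sem_op = -1, .sem_flg = 0 };
 *      struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *      if (semtimedop(semid, &op, 1, &ts) < 0 && errno == EAGAIN)
 *              ;       // timed out: jiffies_left reached 0 above
 */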

/* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between
 * parent and child tasks.
 *
 * See the notes above unlock_semundo() regarding the deadlock that a
 * shared, still-locked undo_list could cause; undo_list->lock is
 * initialized in get_undo_list() when the list is first allocated.
 */

int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
{
        struct sem_undo_list *undo_list;
        int error;

        if (clone_flags & CLONE_SYSVSEM) {
                error = get_undo_list(&undo_list);
                if (error)
                        return error;
                atomic_inc(&undo_list->refcnt);
                tsk->sysvsem.undo_list = undo_list;
        } else
                tsk->sysvsem.undo_list = NULL;

        return 0;
}

/*
 * add semadj values to semaphores, free undo structures.
 * undo structures are not freed when semaphore arrays are destroyed
 * so some of them may be out of date.
 * IMPLEMENTATION NOTE: There is some confusion over whether the
 * set of adjustments that needs to be done should be done in an atomic
 * manner or not. That is, if we are attempting to decrement the semval
 * should we queue up and wait until we can do so legally?
 * The original implementation attempted to do this (queue and wait).
 * The current implementation does not do so. The POSIX standard
 * and SVID should be consulted to determine what behavior is mandated.
 */
void exit_sem(struct task_struct *tsk)
{
        struct sem_undo_list *undo_list;
        struct sem_undo *u, **up;

        undo_list = tsk->sysvsem.undo_list;
        if (!undo_list)
                return;

        if (!atomic_dec_and_test(&undo_list->refcnt))
                return;

        /* There's no need to hold the semundo list lock, as current
         * is the last task exiting for this undo list.
         */
        for (up = &undo_list->proc_list; (u = *up); *up = u->proc_next, kfree(u)) {
                struct sem_array *sma;
                int nsems, i;
                struct sem_undo *un, **unp;
                int semid;

                semid = u->semid;

                if(semid == -1)
                        continue;
                sma = sem_lock(semid);
                if (sma == NULL)
                        continue;

                if (u->semid == -1)
                        goto next_entry;

                BUG_ON(sem_checkid(sma,u->semid));

                /* remove u from the sma->undo list */
                for (unp = &sma->undo; (un = *unp); unp = &un->id_next) {
                        if (u == un)
                                goto found;
                }
                printk ("exit_sem undo list error id=%d\n", u->semid);
                goto next_entry;
found:
                *unp = un->id_next;
                /* perform adjustments registered in u */
                nsems = sma->sem_nsems;
                for (i = 0; i < nsems; i++) {
                        struct sem * sem = &sma->sem_base[i];
                        if (u->semadj[i]) {
                                sem->semval += u->semadj[i];
                                /*
                                 * Range checks of the new semaphore value,
                                 * not defined by sus:
                                 * - Some unices ignore the undo entirely
                                 *   (e.g. HP UX 11i 11.22, Tru64 V5.1)
                                 * - some cap the value (e.g. FreeBSD caps
                                 *   at 0, but doesn't enforce SEMVMX)
                                 *
                                 * Linux caps the semaphore value, both at 0
                                 * and at SEMVMX.
                                 *
                                 *      Manfred <[email protected]>
                                 */
                                if (sem->semval < 0)
                                        sem->semval = 0;
                                if (sem->semval > SEMVMX)
                                        sem->semval = SEMVMX;
                                sem->sempid = current->tgid;
                        }
                }
                sma->sem_otime = get_seconds();
                /* maybe some queued-up processes were waiting for this */
                update_queue(sma);
next_entry:
                sem_unlock(sma);
        }
        kfree(undo_list);
}

#ifdef CONFIG_PROC_FS
static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
{
        struct sem_array *sma = it;

        return seq_printf(s,
                          "%10d %10d  %4o %10lu %5u %5u %5u %5u %10lu %10lu\n",
                          sma->sem_perm.key,
                          sma->sem_id,
                          sma->sem_perm.mode,
                          sma->sem_nsems,
                          sma->sem_perm.uid,
                          sma->sem_perm.gid,
                          sma->sem_perm.cuid,
                          sma->sem_perm.cgid,
                          sma->sem_otime,
                          sma->sem_ctime);
}
#endif