/*
 * linux/fs/fcntl.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 */

#include <linux/syscalls.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/capability.h>
#include <linux/dnotify.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/rcupdate.h>
#include <linux/pid_namespace.h>
#include <linux/smp_lock.h>

#include <asm/poll.h>
#include <asm/siginfo.h>
#include <asm/uaccess.h>

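/*
 * Set or clear the FD_CLOEXEC bit for fd in the calling task's file table.
 * The close_on_exec bitmap lives in the fdtable, so it is updated under
 * files->file_lock.
 */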
void set_close_on_exec(unsigned int fd, int flag)
{
        struct files_struct *files = current->files;
        struct fdtable *fdt;
        spin_lock(&files->file_lock);
        fdt = files_fdtable(files);
        if (flag)
                FD_SET(fd, fdt->close_on_exec);
        else
                FD_CLR(fd, fdt->close_on_exec);
        spin_unlock(&files->file_lock);
}

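/*
 * Read the FD_CLOEXEC bit for fd.  A plain read of the bitmap is enough,
 * so RCU protection of the fdtable suffices here instead of file_lock.
 */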
static int get_close_on_exec(unsigned int fd)
{
        struct files_struct *files = current->files;
        struct fdtable *fdt;
        int res;
        rcu_read_lock();
        fdt = files_fdtable(files);
        res = FD_ISSET(fd, fdt->close_on_exec);
        rcu_read_unlock();
        return res;
}

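/*
 * dup3(oldfd, newfd, flags): duplicate oldfd onto newfd, closing whatever
 * newfd referred to.  The only flag accepted is O_CLOEXEC, which marks the
 * new descriptor close-on-exec; oldfd == newfd is rejected with -EINVAL.
 * The descriptor table is updated under files->file_lock, and any displaced
 * file is closed only after the lock is dropped.
 */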
SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
{
        int err = -EBADF;
        struct file * file, *tofree;
        struct files_struct * files = current->files;
        struct fdtable *fdt;

        if ((flags & ~O_CLOEXEC) != 0)
                return -EINVAL;

        if (unlikely(oldfd == newfd))
                return -EINVAL;

        spin_lock(&files->file_lock);
        err = expand_files(files, newfd);
        file = fcheck(oldfd);
        if (unlikely(!file))
                goto Ebadf;
        if (unlikely(err < 0)) {
                if (err == -EMFILE)
                        goto Ebadf;
                goto out_unlock;
        }
        /*
         * We need to detect attempts to do dup2() over allocated but still
         * not finished descriptor.  NB: OpenBSD avoids that at the price of
         * extra work in their equivalent of fget() - they insert struct
         * file immediately after grabbing descriptor, mark it larval if
         * more work (e.g. actual opening) is needed and make sure that
         * fget() treats larval files as absent.  Potentially interesting,
         * but while extra work in fget() is trivial, locking implications
         * and amount of surgery on open()-related paths in VFS are not.
         * FreeBSD fails with -EBADF in the same situation, NetBSD "solution"
         * deadlocks in rather amusing ways, AFAICS.  All of that is out of
         * scope of POSIX or SUS, since neither considers shared descriptor
         * tables and this condition does not arise without those.
         */
        err = -EBUSY;
        fdt = files_fdtable(files);
        tofree = fdt->fd[newfd];
        if (!tofree && FD_ISSET(newfd, fdt->open_fds))
                goto out_unlock;
        get_file(file);
        rcu_assign_pointer(fdt->fd[newfd], file);
        FD_SET(newfd, fdt->open_fds);
        if (flags & O_CLOEXEC)
                FD_SET(newfd, fdt->close_on_exec);
        else
                FD_CLR(newfd, fdt->close_on_exec);
        spin_unlock(&files->file_lock);

        if (tofree)
                filp_close(tofree, files);

        return newfd;

Ebadf:
        err = -EBADF;
out_unlock:
        spin_unlock(&files->file_lock);
        return err;
}

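/*
 * dup2() differs from dup3() only in the oldfd == newfd corner case:
 * if oldfd is valid it is returned unchanged, otherwise -EBADF.
 * Everything else is delegated to sys_dup3() with flags == 0.
 */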
SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
{
        if (unlikely(newfd == oldfd)) { /* corner case */
                struct files_struct *files = current->files;
                int retval = oldfd;

                rcu_read_lock();
                if (!fcheck_files(files, oldfd))
                        retval = -EBADF;
                rcu_read_unlock();
                return retval;
        }
        return sys_dup3(oldfd, newfd, 0);
}

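/*
 * dup(): duplicate fildes into the lowest-numbered free descriptor,
 * or return -EBADF if fildes is not an open file.
 */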
SYSCALL_DEFINE1(dup, unsigned int, fildes)
{
        int ret = -EBADF;
        struct file *file = fget(fildes);

        if (file) {
                ret = get_unused_fd();
                if (ret >= 0)
                        fd_install(ret, file);
                else
                        fput(file);
        }
        return ret;
}

#define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | O_DIRECT | O_NOATIME)

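/*
 * F_SETFL handler.  Only the bits in SETFL_MASK are copied into f_flags
 * (under f_lock).  The FASYNC bit is handled separately: the file's
 * ->fasync() operation is called and is responsible for setting it.
 */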
static int setfl(int fd, struct file * filp, unsigned long arg)
{
        struct inode * inode = filp->f_path.dentry->d_inode;
        int error = 0;

        /*
         * O_APPEND cannot be cleared if the file is marked as append-only
         * and the file is open for write.
         */
        if (((arg ^ filp->f_flags) & O_APPEND) && IS_APPEND(inode))
                return -EPERM;

        /* O_NOATIME can only be set by the owner or superuser */
        if ((arg & O_NOATIME) && !(filp->f_flags & O_NOATIME))
                if (!is_owner_or_cap(inode))
                        return -EPERM;

        /* required for strict SunOS emulation */
        if (O_NONBLOCK != O_NDELAY)
                if (arg & O_NDELAY)
                        arg |= O_NONBLOCK;

        if (arg & O_DIRECT) {
                if (!filp->f_mapping || !filp->f_mapping->a_ops ||
                        !filp->f_mapping->a_ops->direct_IO)
                                return -EINVAL;
        }

        if (filp->f_op && filp->f_op->check_flags)
                error = filp->f_op->check_flags(arg);
        if (error)
                return error;

        /*
         * ->fasync() is responsible for setting the FASYNC bit.
         */
        if (((arg ^ filp->f_flags) & FASYNC) && filp->f_op &&
                        filp->f_op->fasync) {
                error = filp->f_op->fasync(fd, filp, (arg & FASYNC) != 0);
                if (error < 0)
                        goto out;
                if (error > 0)
                        error = 0;
        }
        spin_lock(&filp->f_lock);
        filp->f_flags = (arg & SETFL_MASK) | (filp->f_flags & ~SETFL_MASK);
        spin_unlock(&filp->f_lock);

 out:
        return error;
}

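/*
 * Install pid/type as the owner of filp for SIGIO/SIGURG delivery.
 * With force the old owner is always replaced; otherwise only an unset
 * owner is filled in.  The owner's uid/euid are recorded so that
 * sigio_perm() can check permissions at signal-delivery time.
 */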
static void f_modown(struct file *filp, struct pid *pid, enum pid_type type,
                        int force)
{
        write_lock_irq(&filp->f_owner.lock);
        if (force || !filp->f_owner.pid) {
                put_pid(filp->f_owner.pid);
                filp->f_owner.pid = get_pid(pid);
                filp->f_owner.pid_type = type;

                if (pid) {
                        const struct cred *cred = current_cred();
                        filp->f_owner.uid = cred->uid;
                        filp->f_owner.euid = cred->euid;
                }
        }
        write_unlock_irq(&filp->f_owner.lock);
}

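/*
 * Set the owner after asking the security module for permission; this is
 * the variant used by callers that already hold a struct pid reference.
 */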
int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
                int force)
{
        int err;

        err = security_file_set_fowner(filp);
        if (err)
                return err;

        f_modown(filp, pid, type, force);
        return 0;
}
EXPORT_SYMBOL(__f_setown);

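/*
 * F_SETOWN semantics: a positive argument names a process, a negative one
 * names a process group.  The value is looked up in the caller's pid
 * namespace under RCU before being handed to __f_setown().
 */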
int f_setown(struct file *filp, unsigned long arg, int force)
{
        enum pid_type type;
        struct pid *pid;
        int who = arg;
        int result;
        type = PIDTYPE_PID;
        if (who < 0) {
                type = PIDTYPE_PGID;
                who = -who;
        }
        rcu_read_lock();
        pid = find_vpid(who);
        result = __f_setown(filp, pid, type, force);
        rcu_read_unlock();
        return result;
}
EXPORT_SYMBOL(f_setown);

void f_delown(struct file *filp)
{
        f_modown(filp, NULL, PIDTYPE_PID, 1);
}

pid_t f_getown(struct file *filp)
{
        pid_t pid;
        read_lock(&filp->f_owner.lock);
        pid = pid_vnr(filp->f_owner.pid);
        if (filp->f_owner.pid_type == PIDTYPE_PGID)
                pid = -pid;
        read_unlock(&filp->f_owner.lock);
        return pid;
}

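/*
 * Command dispatcher shared by fcntl() and fcntl64().  F_DUPFD is bounded
 * by RLIMIT_NOFILE; unknown commands leave the default -EINVAL in place.
 */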
static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
                struct file *filp)
{
        long err = -EINVAL;

        switch (cmd) {
        case F_DUPFD:
        case F_DUPFD_CLOEXEC:
                if (arg >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
                        break;
                err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
                if (err >= 0) {
                        get_file(filp);
                        fd_install(err, filp);
                }
                break;
        case F_GETFD:
                err = get_close_on_exec(fd) ? FD_CLOEXEC : 0;
                break;
        case F_SETFD:
                err = 0;
                set_close_on_exec(fd, arg & FD_CLOEXEC);
                break;
        case F_GETFL:
                err = filp->f_flags;
                break;
        case F_SETFL:
                err = setfl(fd, filp, arg);
                break;
        case F_GETLK:
                err = fcntl_getlk(filp, (struct flock __user *) arg);
                break;
        case F_SETLK:
        case F_SETLKW:
                err = fcntl_setlk(fd, filp, cmd, (struct flock __user *) arg);
                break;
        case F_GETOWN:
                /*
                 * XXX If f_owner is a process group, the
                 * negative return value will get converted
                 * into an error.  Oops.  If we keep the
                 * current syscall conventions, the only way
                 * to fix this will be in libc.
                 */
                err = f_getown(filp);
                force_successful_syscall_return();
                break;
        case F_SETOWN:
                err = f_setown(filp, arg, 1);
                break;
        case F_GETSIG:
                err = filp->f_owner.signum;
                break;
        case F_SETSIG:
                /* arg == 0 restores default behaviour. */
                if (!valid_signal(arg)) {
                        break;
                }
                err = 0;
                filp->f_owner.signum = arg;
                break;
        case F_GETLEASE:
                err = fcntl_getlease(filp);
                break;
        case F_SETLEASE:
                err = fcntl_setlease(fd, filp, arg);
                break;
        case F_NOTIFY:
                err = fcntl_dirnotify(fd, filp, arg);
                break;
        default:
                break;
        }
        return err;
}

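/*
 * fcntl() entry point: pin the struct file with fget(), run the LSM check,
 * then hand off to do_fcntl().
 */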
SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd, unsigned long, arg)
{
        struct file *filp;
        long err = -EBADF;

        filp = fget(fd);
        if (!filp)
                goto out;

        err = security_file_fcntl(filp, cmd, arg);
        if (err) {
                fput(filp);
                return err;
        }

        err = do_fcntl(fd, cmd, arg, filp);

        fput(filp);
out:
        return err;
}

#if BITS_PER_LONG == 32
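/*
 * On 32-bit kernels fcntl64() additionally understands the 64-bit file
 * locking commands (F_GETLK64/F_SETLK64/F_SETLKW64); everything else is
 * passed straight to do_fcntl().
 */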
SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
                unsigned long, arg)
{
        struct file * filp;
        long err;

        err = -EBADF;
        filp = fget(fd);
        if (!filp)
                goto out;

        err = security_file_fcntl(filp, cmd, arg);
        if (err) {
                fput(filp);
                return err;
        }
        err = -EBADF;

        switch (cmd) {
        case F_GETLK64:
                err = fcntl_getlk64(filp, (struct flock64 __user *) arg);
                break;
        case F_SETLK64:
        case F_SETLKW64:
                err = fcntl_setlk64(fd, filp, cmd,
                                (struct flock64 __user *) arg);
                break;
        default:
                err = do_fcntl(fd, cmd, arg, filp);
                break;
        }
        fput(filp);
out:
        return err;
}
#endif

/* Table to convert sigio signal codes into poll band bitmaps */

static const long band_table[NSIGPOLL] = {
        POLLIN | POLLRDNORM,                    /* POLL_IN */
        POLLOUT | POLLWRNORM | POLLWRBAND,      /* POLL_OUT */
        POLLIN | POLLRDNORM | POLLMSG,          /* POLL_MSG */
        POLLERR,                                /* POLL_ERR */
        POLLPRI | POLLRDBAND,                   /* POLL_PRI */
        POLLHUP | POLLERR                       /* POLL_HUP */
};

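/*
 * May the owner identified by fown signal task p with sig?  Compare the
 * uid/euid recorded at F_SETOWN time against the target's credentials
 * (a root owner always passes), then let the security module veto delivery.
 */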
static inline int sigio_perm(struct task_struct *p,
                             struct fown_struct *fown, int sig)
{
        const struct cred *cred;
        int ret;

        rcu_read_lock();
        cred = __task_cred(p);
        ret = ((fown->euid == 0 ||
                fown->euid == cred->suid || fown->euid == cred->uid ||
                fown->uid == cred->suid || fown->uid == cred->uid) &&
               !security_file_send_sigiotask(p, fown, sig));
        rcu_read_unlock();
        return ret;
}

static void send_sigio_to_task(struct task_struct *p,
                               struct fown_struct *fown,
                               int fd,
                               int reason)
{
        /*
         * F_SETSIG can change ->signum lockless in parallel, make
         * sure we read it once and use the same value throughout.
         */
        int signum = ACCESS_ONCE(fown->signum);

        if (!sigio_perm(p, fown, signum))
                return;

        switch (signum) {
                siginfo_t si;
                default:
                        /* Queue a rt signal with the appropriate fd as its
                           value.  We use SI_SIGIO as the source, not
                           SI_KERNEL, since kernel signals always get
                           delivered even if we can't queue.  Failure to
                           queue in this case _should_ be reported; we fall
                           back to SIGIO in that case. --sct */
                        si.si_signo = signum;
                        si.si_errno = 0;
                        si.si_code  = reason;
                        /* Make sure we are called with one of the POLL_*
                           reasons, otherwise we could leak kernel stack into
                           userspace. */
                        BUG_ON((reason & __SI_MASK) != __SI_POLL);
                        if (reason - POLL_IN >= NSIGPOLL)
                                si.si_band = ~0L;
                        else
                                si.si_band = band_table[reason - POLL_IN];
                        si.si_fd = fd;
                        if (!group_send_sig_info(signum, &si, p))
                                break;
                /* fall-through: fall back on the old plain SIGIO signal */
                case 0:
                        group_send_sig_info(SIGIO, SEND_SIG_PRIV, p);
        }
}

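/*
 * Deliver SIGIO for fd/band to every task attached to the owner pid,
 * honouring the pid type (process vs. process group) set via F_SETOWN.
 */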
void send_sigio(struct fown_struct *fown, int fd, int band)
{
        struct task_struct *p;
        enum pid_type type;
        struct pid *pid;

        read_lock(&fown->lock);
        type = fown->pid_type;
        pid = fown->pid;
        if (!pid)
                goto out_unlock_fown;

        read_lock(&tasklist_lock);
        do_each_pid_task(pid, type, p) {
                send_sigio_to_task(p, fown, fd, band);
        } while_each_pid_task(pid, type, p);
        read_unlock(&tasklist_lock);
 out_unlock_fown:
        read_unlock(&fown->lock);
}

static void send_sigurg_to_task(struct task_struct *p,
                                struct fown_struct *fown)
{
        if (sigio_perm(p, fown, SIGURG))
                group_send_sig_info(SIGURG, SEND_SIG_PRIV, p);
}

int send_sigurg(struct fown_struct *fown)
{
        struct task_struct *p;
        enum pid_type type;
        struct pid *pid;
        int ret = 0;

        read_lock(&fown->lock);
        type = fown->pid_type;
        pid = fown->pid;
        if (!pid)
                goto out_unlock_fown;

        ret = 1;

        read_lock(&tasklist_lock);
        do_each_pid_task(pid, type, p) {
                send_sigurg_to_task(p, fown);
        } while_each_pid_task(pid, type, p);
        read_unlock(&tasklist_lock);
 out_unlock_fown:
        read_unlock(&fown->lock);
        return ret;
}

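/*
 * Asynchronous-notification bookkeeping: each file's fasync entries form a
 * singly linked list (struct fasync_struct) guarded by fasync_lock, and the
 * entries themselves come from a dedicated slab cache.
 */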
static DEFINE_RWLOCK(fasync_lock);
static struct kmem_cache *fasync_cache __read_mostly;

/*
 * fasync_helper() is used by almost all character device drivers
 * to set up the fasync queue.  It returns negative on error, 0 if it did
 * no changes and positive if it added/deleted the entry.
 */
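/*
 * A typical ->fasync() file operation is just a thin wrapper (sketch only;
 * "my_dev" and "async_queue" are illustrative names, not part of this file):
 *
 *      static int my_dev_fasync(int fd, struct file *filp, int on)
 *      {
 *              struct my_dev *dev = filp->private_data;
 *              return fasync_helper(fd, filp, on, &dev->async_queue);
 *      }
 */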
int fasync_helper(int fd, struct file * filp, int on, struct fasync_struct **fapp)
{
        struct fasync_struct *fa, **fp;
        struct fasync_struct *new = NULL;
        int result = 0;

        if (on) {
                new = kmem_cache_alloc(fasync_cache, GFP_KERNEL);
                if (!new)
                        return -ENOMEM;
        }

        /*
         * We need to take f_lock first since it's not an IRQ-safe
         * lock.
         */
        spin_lock(&filp->f_lock);
        write_lock_irq(&fasync_lock);
        for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
                if (fa->fa_file == filp) {
                        if (on) {
                                fa->fa_fd = fd;
                                kmem_cache_free(fasync_cache, new);
                        } else {
                                *fp = fa->fa_next;
                                kmem_cache_free(fasync_cache, fa);
                                result = 1;
                        }
                        goto out;
                }
        }

        if (on) {
                new->magic = FASYNC_MAGIC;
                new->fa_file = filp;
                new->fa_fd = fd;
                new->fa_next = *fapp;
                *fapp = new;
                result = 1;
        }
out:
        if (on)
                filp->f_flags |= FASYNC;
        else
                filp->f_flags &= ~FASYNC;
        write_unlock_irq(&fasync_lock);
        spin_unlock(&filp->f_lock);
        return result;
}

EXPORT_SYMBOL(fasync_helper);

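/*
 * Walk a fasync list and notify each registered file's owner via
 * send_sigio().  kill_fasync() is the locked front end; this helper leaves
 * locking of the list to its caller.
 */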
void __kill_fasync(struct fasync_struct *fa, int sig, int band)
{
        while (fa) {
                struct fown_struct * fown;
                if (fa->magic != FASYNC_MAGIC) {
                        printk(KERN_ERR "kill_fasync: bad magic number in "
                               "fasync_struct!\n");
                        return;
                }
                fown = &fa->fa_file->f_owner;
                /* Don't send SIGURG to processes which have not set a
                   queued signum: SIGURG has its own default signalling
                   mechanism. */
                if (!(sig == SIGURG && fown->signum == 0))
                        send_sigio(fown, fa->fa_fd, band);
                fa = fa->fa_next;
        }
}

EXPORT_SYMBOL(__kill_fasync);

void kill_fasync(struct fasync_struct **fp, int sig, int band)
{
        /* First a quick test without locking: usually
         * the list is empty.
         */
        if (*fp) {
                read_lock(&fasync_lock);
                /* reread *fp after obtaining the lock */
                __kill_fasync(*fp, sig, band);
                read_unlock(&fasync_lock);
        }
}
EXPORT_SYMBOL(kill_fasync);

static int __init fasync_init(void)
{
        fasync_cache = kmem_cache_create("fasync_cache",
                sizeof(struct fasync_struct), 0, SLAB_PANIC, NULL);
        return 0;
}

module_init(fasync_init)