// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 */
#include <linux/syscalls.h>
#include <linux/init.h>
#include <linux/sched/task.h>
#include <linux/filelock.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/capability.h>
#include <linux/dnotify.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pipe_fs_i.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/rcupdate.h>
#include <linux/pid_namespace.h>
#include <linux/user_namespace.h>
#include <linux/memfd.h>
#include <linux/compat.h>
#include <linux/mount.h>
#include <linux/rw_hint.h>

#include <linux/poll.h>
#include <asm/siginfo.h>
#include <linux/uaccess.h>

#define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | O_DIRECT | O_NOATIME)

static int setfl(int fd, struct file *filp, unsigned int arg)
{
	struct inode *inode = file_inode(filp);
	int error = 0;

	/*
	 * O_APPEND cannot be cleared if the file is marked as append-only
	 * and the file is open for write.
	 */
	if (((arg ^ filp->f_flags) & O_APPEND) && IS_APPEND(inode))
		return -EPERM;

	/* O_NOATIME can only be set by the owner or superuser */
	if ((arg & O_NOATIME) && !(filp->f_flags & O_NOATIME))
		if (!inode_owner_or_capable(file_mnt_idmap(filp), inode))
			return -EPERM;

	/* required for strict SunOS emulation */
	if (O_NONBLOCK != O_NDELAY)
		if (arg & O_NDELAY)
			arg |= O_NONBLOCK;

	/* Pipe packetized mode is controlled by O_DIRECT flag */
	if (!S_ISFIFO(inode->i_mode) &&
	    (arg & O_DIRECT) &&
	    !(filp->f_mode & FMODE_CAN_ODIRECT))
		return -EINVAL;

	if (filp->f_op->check_flags)
		error = filp->f_op->check_flags(arg);
	if (error)
		return error;

	/*
	 * ->fasync() is responsible for setting the FASYNC bit.
	 */
	if (((arg ^ filp->f_flags) & FASYNC) && filp->f_op->fasync) {
		error = filp->f_op->fasync(fd, filp, (arg & FASYNC) != 0);
		if (error < 0)
			goto out;
		if (error > 0)
			error = 0;
	}
	spin_lock(&filp->f_lock);
	filp->f_flags = (arg & SETFL_MASK) | (filp->f_flags & ~SETFL_MASK);
	filp->f_iocb_flags = iocb_flags(filp);
	spin_unlock(&filp->f_lock);

out:
	return error;
}
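
/*
 * Illustrative userspace sketch (not part of this file): a typical
 * F_SETFL round-trip that lands in setfl() above. Only the bits in
 * SETFL_MASK can be changed; everything else in f_flags is preserved.
 *
 *	int flags = fcntl(fd, F_GETFL);
 *	if (flags >= 0)
 *		fcntl(fd, F_SETFL, flags | O_NONBLOCK);
 */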

/*
 * Allocate a file->f_owner struct if it doesn't exist, handling racing
 * allocations correctly.
 */
int file_f_owner_allocate(struct file *file)
{
	struct fown_struct *f_owner;

	f_owner = file_f_owner(file);
	if (f_owner)
		return 0;

	f_owner = kzalloc(sizeof(struct fown_struct), GFP_KERNEL);
	if (!f_owner)
		return -ENOMEM;

	rwlock_init(&f_owner->lock);
	f_owner->file = file;
	/* If someone else raced us, drop our allocation. */
	if (unlikely(cmpxchg(&file->f_owner, NULL, f_owner)))
		kfree(f_owner);
	return 0;
}
EXPORT_SYMBOL(file_f_owner_allocate);

void file_f_owner_release(struct file *file)
{
	struct fown_struct *f_owner;

	f_owner = file_f_owner(file);
	if (f_owner) {
		put_pid(f_owner->pid);
		kfree(f_owner);
	}
}

void __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
		int force)
{
	struct fown_struct *f_owner;

	f_owner = file_f_owner(filp);
	if (WARN_ON_ONCE(!f_owner))
		return;

	write_lock_irq(&f_owner->lock);
	if (force || !f_owner->pid) {
		put_pid(f_owner->pid);
		f_owner->pid = get_pid(pid);
		f_owner->pid_type = type;

		if (pid) {
			const struct cred *cred = current_cred();
			security_file_set_fowner(filp);
			f_owner->uid = cred->uid;
			f_owner->euid = cred->euid;
		}
	}
	write_unlock_irq(&f_owner->lock);
}
EXPORT_SYMBOL(__f_setown);

int f_setown(struct file *filp, int who, int force)
{
	enum pid_type type;
	struct pid *pid = NULL;
	int ret = 0;

	might_sleep();

	type = PIDTYPE_TGID;
	if (who < 0) {
		/* avoid overflow below */
		if (who == INT_MIN)
			return -EINVAL;

		type = PIDTYPE_PGID;
		who = -who;
	}

	ret = file_f_owner_allocate(filp);
	if (ret)
		return ret;

	rcu_read_lock();
	if (who) {
		pid = find_vpid(who);
		if (!pid)
			ret = -ESRCH;
	}

	if (!ret)
		__f_setown(filp, pid, type, force);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(f_setown);
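
/*
 * Illustrative userspace sketch (not part of this file): the classic
 * SIGIO setup that ends up in f_setown(). A positive "who" selects a
 * thread group (PIDTYPE_TGID); a negative one selects a process group
 * (PIDTYPE_PGID).
 *
 *	fcntl(fd, F_SETOWN, getpid());
 *	int flags = fcntl(fd, F_GETFL);
 *	fcntl(fd, F_SETFL, flags | O_ASYNC);	// invokes ->fasync()
 */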

void f_delown(struct file *filp)
{
	__f_setown(filp, NULL, PIDTYPE_TGID, 1);
}

pid_t f_getown(struct file *filp)
{
	pid_t pid = 0;
	struct fown_struct *f_owner;

	f_owner = file_f_owner(filp);
	if (!f_owner)
		return pid;

	read_lock_irq(&f_owner->lock);
	rcu_read_lock();
	if (pid_task(f_owner->pid, f_owner->pid_type)) {
		pid = pid_vnr(f_owner->pid);
		if (f_owner->pid_type == PIDTYPE_PGID)
			pid = -pid;
	}
	rcu_read_unlock();
	read_unlock_irq(&f_owner->lock);
	return pid;
}

static int f_setown_ex(struct file *filp, unsigned long arg)
{
	struct f_owner_ex __user *owner_p = (void __user *)arg;
	struct f_owner_ex owner;
	struct pid *pid;
	int type;
	int ret;

	ret = copy_from_user(&owner, owner_p, sizeof(owner));
	if (ret)
		return -EFAULT;

	switch (owner.type) {
	case F_OWNER_TID:
		type = PIDTYPE_PID;
		break;
	case F_OWNER_PID:
		type = PIDTYPE_TGID;
		break;
	case F_OWNER_PGRP:
		type = PIDTYPE_PGID;
		break;
	default:
		return -EINVAL;
	}

	ret = file_f_owner_allocate(filp);
	if (ret)
		return ret;

	rcu_read_lock();
	pid = find_vpid(owner.pid);
	if (owner.pid && !pid)
		ret = -ESRCH;
	else
		__f_setown(filp, pid, type, 1);
	rcu_read_unlock();

	return ret;
}
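
/*
 * Illustrative userspace sketch (not part of this file): directing the
 * I/O signals at one specific thread via f_setown_ex(), which maps
 * F_OWNER_TID to PIDTYPE_PID:
 *
 *	struct f_owner_ex owner = {
 *		.type = F_OWNER_TID,
 *		.pid  = gettid(),
 *	};
 *	fcntl(fd, F_SETOWN_EX, &owner);
 */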

static int f_getown_ex(struct file *filp, unsigned long arg)
{
	struct f_owner_ex __user *owner_p = (void __user *)arg;
	struct f_owner_ex owner = {};
	int ret = 0;
	struct fown_struct *f_owner;
	enum pid_type pid_type = PIDTYPE_PID;

	f_owner = file_f_owner(filp);
	if (f_owner) {
		read_lock_irq(&f_owner->lock);
		rcu_read_lock();
		if (pid_task(f_owner->pid, f_owner->pid_type))
			owner.pid = pid_vnr(f_owner->pid);
		rcu_read_unlock();
		pid_type = f_owner->pid_type;
	}

	switch (pid_type) {
	case PIDTYPE_PID:
		owner.type = F_OWNER_TID;
		break;
	case PIDTYPE_TGID:
		owner.type = F_OWNER_PID;
		break;
	case PIDTYPE_PGID:
		owner.type = F_OWNER_PGRP;
		break;
	default:
		WARN_ON(1);
		ret = -EINVAL;
		break;
	}
	if (f_owner)
		read_unlock_irq(&f_owner->lock);

	if (!ret) {
		ret = copy_to_user(owner_p, &owner, sizeof(owner));
		if (ret)
			ret = -EFAULT;
	}
	return ret;
}

#ifdef CONFIG_CHECKPOINT_RESTORE
static int f_getowner_uids(struct file *filp, unsigned long arg)
{
	struct user_namespace *user_ns = current_user_ns();
	struct fown_struct *f_owner;
	uid_t __user *dst = (void __user *)arg;
	uid_t src[2] = {0, 0};
	int err;

	f_owner = file_f_owner(filp);
	if (f_owner) {
		read_lock_irq(&f_owner->lock);
		src[0] = from_kuid(user_ns, f_owner->uid);
		src[1] = from_kuid(user_ns, f_owner->euid);
		read_unlock_irq(&f_owner->lock);
	}

	err  = put_user(src[0], &dst[0]);
	err |= put_user(src[1], &dst[1]);

	return err;
}
#else
static int f_getowner_uids(struct file *filp, unsigned long arg)
{
	return -EINVAL;
}
#endif

static bool rw_hint_valid(u64 hint)
{
	BUILD_BUG_ON(WRITE_LIFE_NOT_SET != RWH_WRITE_LIFE_NOT_SET);
	BUILD_BUG_ON(WRITE_LIFE_NONE != RWH_WRITE_LIFE_NONE);
	BUILD_BUG_ON(WRITE_LIFE_SHORT != RWH_WRITE_LIFE_SHORT);
	BUILD_BUG_ON(WRITE_LIFE_MEDIUM != RWH_WRITE_LIFE_MEDIUM);
	BUILD_BUG_ON(WRITE_LIFE_LONG != RWH_WRITE_LIFE_LONG);
	BUILD_BUG_ON(WRITE_LIFE_EXTREME != RWH_WRITE_LIFE_EXTREME);

	switch (hint) {
	case RWH_WRITE_LIFE_NOT_SET:
	case RWH_WRITE_LIFE_NONE:
	case RWH_WRITE_LIFE_SHORT:
	case RWH_WRITE_LIFE_MEDIUM:
	case RWH_WRITE_LIFE_LONG:
	case RWH_WRITE_LIFE_EXTREME:
		return true;
	default:
		return false;
	}
}

static long fcntl_get_rw_hint(struct file *file, unsigned int cmd,
			      unsigned long arg)
{
	struct inode *inode = file_inode(file);
	u64 __user *argp = (u64 __user *)arg;
	u64 hint = READ_ONCE(inode->i_write_hint);

	if (copy_to_user(argp, &hint, sizeof(*argp)))
		return -EFAULT;
	return 0;
}

static long fcntl_set_rw_hint(struct file *file, unsigned int cmd,
			      unsigned long arg)
{
	struct inode *inode = file_inode(file);
	u64 __user *argp = (u64 __user *)arg;
	u64 hint;

	if (copy_from_user(&hint, argp, sizeof(hint)))
		return -EFAULT;
	if (!rw_hint_valid(hint))
		return -EINVAL;

	WRITE_ONCE(inode->i_write_hint, hint);

	/*
	 * file->f_mapping->host may differ from inode. As an example,
	 * blkdev_open() modifies file->f_mapping.
	 */
	if (file->f_mapping->host != inode)
		WRITE_ONCE(file->f_mapping->host->i_write_hint, hint);

	return 0;
}
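
/*
 * Illustrative userspace sketch (not part of this file): the argument
 * to F_SET_RW_HINT is a pointer to a u64 holding one of the
 * RWH_WRITE_LIFE_* values validated by rw_hint_valid() above:
 *
 *	uint64_t hint = RWH_WRITE_LIFE_SHORT;
 *	fcntl(fd, F_SET_RW_HINT, &hint);
 */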

/* Is the file descriptor a dup of the file? */
static long f_dupfd_query(int fd, struct file *filp)
{
	CLASS(fd_raw, f)(fd);

	if (fd_empty(f))
		return -EBADF;

	/*
	 * We can do the 'fdput()' immediately, as the only thing that
	 * matters is the pointer value which isn't changed by the fdput.
	 *
	 * Technically we didn't need a ref at all, and 'fdget()' was
	 * overkill, but given our lockless file pointer lookup, the
	 * alternatives are complicated.
	 */
	return fd_file(f) == filp;
}
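
/*
 * Illustrative userspace sketch (not part of this file): F_DUPFD_QUERY
 * reports whether two descriptors share the same struct file:
 *
 *	int fd2 = dup(fd);
 *	fcntl(fd, F_DUPFD_QUERY, fd2);	// returns 1 here
 */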

/* Let the caller figure out whether a given file was just created. */
static long f_created_query(const struct file *filp)
{
	return !!(filp->f_mode & FMODE_CREATED);
}
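
/*
 * Illustrative userspace sketch (not part of this file): after an
 * open(2) with O_CREAT, the caller can ask whether the file was
 * actually created rather than merely opened:
 *
 *	int fd = open("somefile", O_CREAT | O_RDWR, 0600);
 *	int created = fcntl(fd, F_CREATED_QUERY, 0);
 */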

static int f_owner_sig(struct file *filp, int signum, bool setsig)
{
	int ret = 0;
	struct fown_struct *f_owner;

	might_sleep();

	if (setsig) {
		if (!valid_signal(signum))
			return -EINVAL;

		ret = file_f_owner_allocate(filp);
		if (ret)
			return ret;
	}

	f_owner = file_f_owner(filp);
	if (setsig)
		f_owner->signum = signum;
	else if (f_owner)
		ret = f_owner->signum;
	return ret;
}
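
/*
 * Illustrative userspace sketch (not part of this file): with F_SETSIG
 * set to a realtime signal, the handler receives the descriptor and
 * poll band in siginfo (filled in by send_sigio_to_task() below):
 *
 *	struct sigaction sa = { .sa_sigaction = on_io,
 *				.sa_flags = SA_SIGINFO };
 *	sigaction(SIGRTMIN, &sa, NULL);
 *	fcntl(fd, F_SETSIG, SIGRTMIN);
 *	// in on_io(): si->si_fd and si->si_band identify the event
 */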

static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
		struct file *filp)
{
	void __user *argp = (void __user *)arg;
	int argi = (int)arg;
	struct flock flock;
	long err = -EINVAL;

	switch (cmd) {
	case F_CREATED_QUERY:
		err = f_created_query(filp);
		break;
	case F_DUPFD:
		err = f_dupfd(argi, filp, 0);
		break;
	case F_DUPFD_CLOEXEC:
		err = f_dupfd(argi, filp, O_CLOEXEC);
		break;
	case F_DUPFD_QUERY:
		err = f_dupfd_query(argi, filp);
		break;
	case F_GETFD:
		err = get_close_on_exec(fd) ? FD_CLOEXEC : 0;
		break;
	case F_SETFD:
		err = 0;
		set_close_on_exec(fd, argi & FD_CLOEXEC);
		break;
	case F_GETFL:
		err = filp->f_flags;
		break;
	case F_SETFL:
		err = setfl(fd, filp, argi);
		break;
#if BITS_PER_LONG != 32
	/* 32-bit arches must use fcntl64() */
	case F_OFD_GETLK:
#endif
	case F_GETLK:
		if (copy_from_user(&flock, argp, sizeof(flock)))
			return -EFAULT;
		err = fcntl_getlk(filp, cmd, &flock);
		if (!err && copy_to_user(argp, &flock, sizeof(flock)))
			return -EFAULT;
		break;
#if BITS_PER_LONG != 32
	/* 32-bit arches must use fcntl64() */
	case F_OFD_SETLK:
	case F_OFD_SETLKW:
		fallthrough;
#endif
	case F_SETLK:
	case F_SETLKW:
		if (copy_from_user(&flock, argp, sizeof(flock)))
			return -EFAULT;
		err = fcntl_setlk(fd, filp, cmd, &flock);
		break;
	case F_GETOWN:
		/*
		 * XXX If f_owner is a process group, the
		 * negative return value will get converted
		 * into an error.  Oops.  If we keep the
		 * current syscall conventions, the only way
		 * to fix this will be in libc.
		 */
		err = f_getown(filp);
		force_successful_syscall_return();
		break;
	case F_SETOWN:
		err = f_setown(filp, argi, 1);
		break;
	case F_GETOWN_EX:
		err = f_getown_ex(filp, arg);
		break;
	case F_SETOWN_EX:
		err = f_setown_ex(filp, arg);
		break;
	case F_GETOWNER_UIDS:
		err = f_getowner_uids(filp, arg);
		break;
	case F_GETSIG:
		err = f_owner_sig(filp, 0, false);
		break;
	case F_SETSIG:
		err = f_owner_sig(filp, argi, true);
		break;
	case F_GETLEASE:
		err = fcntl_getlease(filp);
		break;
	case F_SETLEASE:
		err = fcntl_setlease(fd, filp, argi);
		break;
	case F_NOTIFY:
		err = fcntl_dirnotify(fd, filp, argi);
		break;
	case F_SETPIPE_SZ:
	case F_GETPIPE_SZ:
		err = pipe_fcntl(filp, cmd, argi);
		break;
	case F_ADD_SEALS:
	case F_GET_SEALS:
		err = memfd_fcntl(filp, cmd, argi);
		break;
	case F_GET_RW_HINT:
		err = fcntl_get_rw_hint(filp, cmd, arg);
		break;
	case F_SET_RW_HINT:
		err = fcntl_set_rw_hint(filp, cmd, arg);
		break;
	default:
		break;
	}
	return err;
}

static int check_fcntl_cmd(unsigned cmd)
{
	switch (cmd) {
	case F_CREATED_QUERY:
	case F_DUPFD:
	case F_DUPFD_CLOEXEC:
	case F_DUPFD_QUERY:
	case F_GETFD:
	case F_SETFD:
	case F_GETFL:
		return 1;
	}
	return 0;
}

SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd, unsigned long, arg)
{
	struct fd f = fdget_raw(fd);
	long err = -EBADF;

	if (!fd_file(f))
		goto out;

	if (unlikely(fd_file(f)->f_mode & FMODE_PATH)) {
		if (!check_fcntl_cmd(cmd))
			goto out1;
	}

	err = security_file_fcntl(fd_file(f), cmd, arg);
	if (!err)
		err = do_fcntl(fd, cmd, arg, fd_file(f));

out1:
	fdput(f);
out:
	return err;
}

#if BITS_PER_LONG == 32
SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
		unsigned long, arg)
{
	void __user *argp = (void __user *)arg;
	struct fd f = fdget_raw(fd);
	struct flock64 flock;
	long err = -EBADF;

	if (!fd_file(f))
		goto out;

	if (unlikely(fd_file(f)->f_mode & FMODE_PATH)) {
		if (!check_fcntl_cmd(cmd))
			goto out1;
	}

	err = security_file_fcntl(fd_file(f), cmd, arg);
	if (err)
		goto out1;

	switch (cmd) {
	case F_GETLK64:
	case F_OFD_GETLK:
		err = -EFAULT;
		if (copy_from_user(&flock, argp, sizeof(flock)))
			break;
		err = fcntl_getlk64(fd_file(f), cmd, &flock);
		if (!err && copy_to_user(argp, &flock, sizeof(flock)))
			err = -EFAULT;
		break;
	case F_SETLK64:
	case F_SETLKW64:
	case F_OFD_SETLK:
	case F_OFD_SETLKW:
		err = -EFAULT;
		if (copy_from_user(&flock, argp, sizeof(flock)))
			break;
		err = fcntl_setlk64(fd, fd_file(f), cmd, &flock);
		break;
	default:
		err = do_fcntl(fd, cmd, arg, fd_file(f));
		break;
	}
out1:
	fdput(f);
out:
	return err;
}
#endif

#ifdef CONFIG_COMPAT
/* careful - don't use anywhere else */
#define copy_flock_fields(dst, src)		\
	(dst)->l_type = (src)->l_type;		\
	(dst)->l_whence = (src)->l_whence;	\
	(dst)->l_start = (src)->l_start;	\
	(dst)->l_len = (src)->l_len;		\
	(dst)->l_pid = (src)->l_pid;

static int get_compat_flock(struct flock *kfl, const struct compat_flock __user *ufl)
{
	struct compat_flock fl;

	if (copy_from_user(&fl, ufl, sizeof(struct compat_flock)))
		return -EFAULT;
	copy_flock_fields(kfl, &fl);
	return 0;
}

static int get_compat_flock64(struct flock *kfl, const struct compat_flock64 __user *ufl)
{
	struct compat_flock64 fl;

	if (copy_from_user(&fl, ufl, sizeof(struct compat_flock64)))
		return -EFAULT;
	copy_flock_fields(kfl, &fl);
	return 0;
}

static int put_compat_flock(const struct flock *kfl, struct compat_flock __user *ufl)
{
	struct compat_flock fl;

	memset(&fl, 0, sizeof(struct compat_flock));
	copy_flock_fields(&fl, kfl);
	if (copy_to_user(ufl, &fl, sizeof(struct compat_flock)))
		return -EFAULT;
	return 0;
}

static int put_compat_flock64(const struct flock *kfl, struct compat_flock64 __user *ufl)
{
	struct compat_flock64 fl;

	BUILD_BUG_ON(sizeof(kfl->l_start) > sizeof(ufl->l_start));
	BUILD_BUG_ON(sizeof(kfl->l_len) > sizeof(ufl->l_len));

	memset(&fl, 0, sizeof(struct compat_flock64));
	copy_flock_fields(&fl, kfl);
	if (copy_to_user(ufl, &fl, sizeof(struct compat_flock64)))
		return -EFAULT;
	return 0;
}
#undef copy_flock_fields

static unsigned int
convert_fcntl_cmd(unsigned int cmd)
{
	switch (cmd) {
	case F_GETLK64:
		return F_GETLK;
	case F_SETLK64:
		return F_SETLK;
	case F_SETLKW64:
		return F_SETLKW;
	}

	return cmd;
}

/*
 * GETLK was successful and we need to return the data, but it needs to fit in
 * the compat structure.
 * l_start shouldn't be too big, unless the original start + end is greater than
 * COMPAT_OFF_T_MAX, in which case the app was asking for trouble, so we return
 * -EOVERFLOW in that case.  l_len could be too big, in which case we just
 * truncate it, and only allow the app to see that part of the conflicting lock
 * that might make sense to it anyway.
 */
static int fixup_compat_flock(struct flock *flock)
{
	if (flock->l_start > COMPAT_OFF_T_MAX)
		return -EOVERFLOW;
	if (flock->l_len > COMPAT_OFF_T_MAX)
		flock->l_len = COMPAT_OFF_T_MAX;
	return 0;
}

static long do_compat_fcntl64(unsigned int fd, unsigned int cmd,
			      compat_ulong_t arg)
{
	struct fd f = fdget_raw(fd);
	struct flock flock;
	long err = -EBADF;

	if (!fd_file(f))
		return err;

	if (unlikely(fd_file(f)->f_mode & FMODE_PATH)) {
		if (!check_fcntl_cmd(cmd))
			goto out_put;
	}

	err = security_file_fcntl(fd_file(f), cmd, arg);
	if (err)
		goto out_put;

	switch (cmd) {
	case F_GETLK:
		err = get_compat_flock(&flock, compat_ptr(arg));
		if (err)
			break;
		err = fcntl_getlk(fd_file(f), convert_fcntl_cmd(cmd), &flock);
		if (err)
			break;
		err = fixup_compat_flock(&flock);
		if (!err)
			err = put_compat_flock(&flock, compat_ptr(arg));
		break;
	case F_GETLK64:
	case F_OFD_GETLK:
		err = get_compat_flock64(&flock, compat_ptr(arg));
		if (err)
			break;
		err = fcntl_getlk(fd_file(f), convert_fcntl_cmd(cmd), &flock);
		if (!err)
			err = put_compat_flock64(&flock, compat_ptr(arg));
		break;
	case F_SETLK:
	case F_SETLKW:
		err = get_compat_flock(&flock, compat_ptr(arg));
		if (err)
			break;
		err = fcntl_setlk(fd, fd_file(f), convert_fcntl_cmd(cmd), &flock);
		break;
	case F_SETLK64:
	case F_SETLKW64:
	case F_OFD_SETLK:
	case F_OFD_SETLKW:
		err = get_compat_flock64(&flock, compat_ptr(arg));
		if (err)
			break;
		err = fcntl_setlk(fd, fd_file(f), convert_fcntl_cmd(cmd), &flock);
		break;
	default:
		err = do_fcntl(fd, cmd, arg, fd_file(f));
		break;
	}
out_put:
	fdput(f);
	return err;
}

COMPAT_SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
		       compat_ulong_t, arg)
{
	return do_compat_fcntl64(fd, cmd, arg);
}

COMPAT_SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd,
		       compat_ulong_t, arg)
{
	switch (cmd) {
	case F_GETLK64:
	case F_SETLK64:
	case F_SETLKW64:
	case F_OFD_GETLK:
	case F_OFD_SETLK:
	case F_OFD_SETLKW:
		return -EINVAL;
	}
	return do_compat_fcntl64(fd, cmd, arg);
}
#endif

/* Table to convert sigio signal codes into poll band bitmaps */

static const __poll_t band_table[NSIGPOLL] = {
	EPOLLIN | EPOLLRDNORM,			/* POLL_IN */
	EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND,	/* POLL_OUT */
	EPOLLIN | EPOLLRDNORM | EPOLLMSG,	/* POLL_MSG */
	EPOLLERR,				/* POLL_ERR */
	EPOLLPRI | EPOLLRDBAND,			/* POLL_PRI */
	EPOLLHUP | EPOLLERR			/* POLL_HUP */
};
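
/*
 * Example: a POLL_IN wakeup indexes slot 0 above, so a handler
 * installed via F_SETSIG sees si_band == mangle_poll(EPOLLIN |
 * EPOLLRDNORM), i.e. POLLIN | POLLRDNORM in userspace terms.
 */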

static inline int sigio_perm(struct task_struct *p,
			     struct fown_struct *fown, int sig)
{
	const struct cred *cred;
	int ret;

	rcu_read_lock();
	cred = __task_cred(p);
	ret = ((uid_eq(fown->euid, GLOBAL_ROOT_UID) ||
		uid_eq(fown->euid, cred->suid) || uid_eq(fown->euid, cred->uid) ||
		uid_eq(fown->uid,  cred->suid) || uid_eq(fown->uid,  cred->uid)) &&
	       !security_file_send_sigiotask(p, fown, sig));
	rcu_read_unlock();
	return ret;
}

static void send_sigio_to_task(struct task_struct *p,
			       struct fown_struct *fown,
			       int fd, int reason, enum pid_type type)
{
	/*
	 * F_SETSIG can change ->signum lockless in parallel, make
	 * sure we read it once and use the same value throughout.
	 */
	int signum = READ_ONCE(fown->signum);

	if (!sigio_perm(p, fown, signum))
		return;

	switch (signum) {
		default: {
			kernel_siginfo_t si;

			/* Queue a rt signal with the appropriate fd as its
			   value.  We use SI_SIGIO as the source, not
			   SI_KERNEL, since kernel signals always get
			   delivered even if we can't queue.  Failure to
			   queue in this case _should_ be reported; we fall
			   back to SIGIO in that case. --sct */
			clear_siginfo(&si);
			si.si_signo = signum;
			si.si_errno = 0;
			si.si_code  = reason;
			/*
			 * Posix defines POLL_IN and friends to be signal
			 * specific si_codes for SIG_POLL.  Linux extended
			 * these si_codes to other signals in a way that is
			 * ambiguous if other signals also have signal
			 * specific si_codes.  In that case use SI_SIGIO instead
			 * to remove the ambiguity.
			 */
			if ((signum != SIGPOLL) && sig_specific_sicodes(signum))
				si.si_code = SI_SIGIO;

			/* Make sure we are called with one of the POLL_*
			   reasons, otherwise we could leak kernel stack into
			   userspace.  */
			BUG_ON((reason < POLL_IN) || ((reason - POLL_IN) >= NSIGPOLL));
			if (reason - POLL_IN >= NSIGPOLL)
				si.si_band = ~0L;
			else
				si.si_band = mangle_poll(band_table[reason - POLL_IN]);
			si.si_fd = fd;
			if (!do_send_sig_info(signum, &si, p, type))
				break;
		}
			fallthrough;	/* fall back on the old plain SIGIO signal */
		case 0:
			do_send_sig_info(SIGIO, SEND_SIG_PRIV, p, type);
	}
}

void send_sigio(struct fown_struct *fown, int fd, int band)
{
	struct task_struct *p;
	enum pid_type type;
	unsigned long flags;
	struct pid *pid;

	read_lock_irqsave(&fown->lock, flags);

	type = fown->pid_type;
	pid = fown->pid;
	if (!pid)
		goto out_unlock_fown;

	if (type <= PIDTYPE_TGID) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			send_sigio_to_task(p, fown, fd, band, type);
		rcu_read_unlock();
	} else {
		read_lock(&tasklist_lock);
		do_each_pid_task(pid, type, p) {
			send_sigio_to_task(p, fown, fd, band, type);
		} while_each_pid_task(pid, type, p);
		read_unlock(&tasklist_lock);
	}
 out_unlock_fown:
	read_unlock_irqrestore(&fown->lock, flags);
}

static void send_sigurg_to_task(struct task_struct *p,
				struct fown_struct *fown, enum pid_type type)
{
	if (sigio_perm(p, fown, SIGURG))
		do_send_sig_info(SIGURG, SEND_SIG_PRIV, p, type);
}

int send_sigurg(struct file *file)
{
	struct fown_struct *fown;
	struct task_struct *p;
	enum pid_type type;
	struct pid *pid;
	unsigned long flags;
	int ret = 0;

	fown = file_f_owner(file);
	if (!fown)
		return 0;

	read_lock_irqsave(&fown->lock, flags);

	type = fown->pid_type;
	pid = fown->pid;
	if (!pid)
		goto out_unlock_fown;

	ret = 1;

	if (type <= PIDTYPE_TGID) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			send_sigurg_to_task(p, fown, type);
		rcu_read_unlock();
	} else {
		read_lock(&tasklist_lock);
		do_each_pid_task(pid, type, p) {
			send_sigurg_to_task(p, fown, type);
		} while_each_pid_task(pid, type, p);
		read_unlock(&tasklist_lock);
	}
 out_unlock_fown:
	read_unlock_irqrestore(&fown->lock, flags);
	return ret;
}

static DEFINE_SPINLOCK(fasync_lock);
static struct kmem_cache *fasync_cache __ro_after_init;

/*
 * Remove a fasync entry. If successfully removed, return
 * positive and clear the FASYNC flag. If no entry exists,
 * do nothing and return 0.
 *
 * NOTE! It is very important that the FASYNC flag always
 * match the state "is the filp on a fasync list".
 */
int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp)
{
	struct fasync_struct *fa, **fp;
	int result = 0;

	spin_lock(&filp->f_lock);
	spin_lock(&fasync_lock);
	for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
		if (fa->fa_file != filp)
			continue;

		write_lock_irq(&fa->fa_lock);
		fa->fa_file = NULL;
		write_unlock_irq(&fa->fa_lock);

		*fp = fa->fa_next;
		kfree_rcu(fa, fa_rcu);
		filp->f_flags &= ~FASYNC;
		result = 1;
		break;
	}
	spin_unlock(&fasync_lock);
	spin_unlock(&filp->f_lock);
	return result;
}

struct fasync_struct *fasync_alloc(void)
{
	return kmem_cache_alloc(fasync_cache, GFP_KERNEL);
}

/*
 * NOTE! This can be used only for unused fasync entries:
 * entries that actually got inserted on the fasync list
 * need to be released by rcu - see fasync_remove_entry.
 */
void fasync_free(struct fasync_struct *new)
{
	kmem_cache_free(fasync_cache, new);
}

/*
 * Insert a new entry into the fasync list.  Return the pointer to the
 * old one if we didn't use the new one.
 *
 * NOTE! It is very important that the FASYNC flag always
 * match the state "is the filp on a fasync list".
 */
struct fasync_struct *fasync_insert_entry(int fd, struct file *filp, struct fasync_struct **fapp, struct fasync_struct *new)
{
	struct fasync_struct *fa, **fp;

	spin_lock(&filp->f_lock);
	spin_lock(&fasync_lock);
	for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
		if (fa->fa_file != filp)
			continue;

		write_lock_irq(&fa->fa_lock);
		fa->fa_fd = fd;
		write_unlock_irq(&fa->fa_lock);
		goto out;
	}

	rwlock_init(&new->fa_lock);
	new->magic = FASYNC_MAGIC;
	new->fa_file = filp;
	new->fa_fd = fd;
	new->fa_next = *fapp;
	rcu_assign_pointer(*fapp, new);
	filp->f_flags |= FASYNC;

out:
	spin_unlock(&fasync_lock);
	spin_unlock(&filp->f_lock);
	return fa;
}

/*
 * Add a fasync entry. Return negative on error, positive if
 * added, and zero if did nothing but change an existing one.
 */
static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fapp)
{
	struct fasync_struct *new;

	new = fasync_alloc();
	if (!new)
		return -ENOMEM;

	/*
	 * fasync_insert_entry() returns the old (update) entry if
	 * it existed.
	 *
	 * So free the (unused) new entry and return 0 to let the
	 * caller know that we didn't add any new fasync entries.
	 */
	if (fasync_insert_entry(fd, filp, fapp, new)) {
		fasync_free(new);
		return 0;
	}

	return 1;
}

/*
 * fasync_helper() is used by almost all character device drivers
 * to set up the fasync queue, and for regular files by the file
 * lease code. It returns negative on error, 0 if it did no changes
 * and positive if it added/deleted the entry.
 */
int fasync_helper(int fd, struct file *filp, int on, struct fasync_struct **fapp)
{
	if (!on)
		return fasync_remove_entry(filp, fapp);
	return fasync_add_entry(fd, filp, fapp);
}
EXPORT_SYMBOL(fasync_helper);
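
/*
 * Illustrative driver sketch (hypothetical names, not part of this
 * file): a character device usually wires FASYNC support through
 * fasync_helper() from its ->fasync() file operation, keeping the list
 * head in its device state:
 *
 *	static struct fasync_struct *mydev_async_queue;
 *
 *	static int mydev_fasync(int fd, struct file *filp, int on)
 *	{
 *		return fasync_helper(fd, filp, on, &mydev_async_queue);
 *	}
 */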

/*
 * rcu_read_lock() is held
 */
static void kill_fasync_rcu(struct fasync_struct *fa, int sig, int band)
{
	while (fa) {
		struct fown_struct *fown;
		unsigned long flags;

		if (fa->magic != FASYNC_MAGIC) {
			printk(KERN_ERR "kill_fasync: bad magic number in "
			       "fasync_struct!\n");
			return;
		}
		read_lock_irqsave(&fa->fa_lock, flags);
		if (fa->fa_file) {
			fown = file_f_owner(fa->fa_file);
			if (!fown)
				goto next;
			/* Don't send SIGURG to processes which have not set a
			   queued signum: SIGURG has its own default signalling
			   mechanism. */
			if (!(sig == SIGURG && fown->signum == 0))
				send_sigio(fown, fa->fa_fd, band);
		}
next:
		read_unlock_irqrestore(&fa->fa_lock, flags);
		fa = rcu_dereference(fa->fa_next);
	}
}

void kill_fasync(struct fasync_struct **fp, int sig, int band)
{
	/* First a quick test without locking: usually
	 * the list is empty.
	 */
	if (*fp) {
		rcu_read_lock();
		kill_fasync_rcu(rcu_dereference(*fp), sig, band);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(kill_fasync);
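
/*
 * Illustrative driver sketch (hypothetical names, continuing the
 * fasync_helper() example above): when new data arrives, the driver
 * notifies every registered listener:
 *
 *	kill_fasync(&mydev_async_queue, SIGIO, POLL_IN);
 */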

static int __init fcntl_init(void)
{
	/*
	 * Please add new bits here to ensure allocation uniqueness.
	 * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
	 * is defined as O_NONBLOCK on some platforms and not on others.
	 */
	BUILD_BUG_ON(21 - 1 /* for O_RDONLY being 0 */ !=
		HWEIGHT32(
			(VALID_OPEN_FLAGS & ~(O_NONBLOCK | O_NDELAY)) |
			__FMODE_EXEC | __FMODE_NONOTIFY));

	fasync_cache = kmem_cache_create("fasync_cache",
					 sizeof(struct fasync_struct), 0,
					 SLAB_PANIC | SLAB_ACCOUNT, NULL);
	return 0;
}

module_init(fcntl_init)