// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/file.c
 *
 *  Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
 *
 *  Manage the dynamic fd arrays in the process files_struct.
 */

#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/close_range.h>
#include <net/sock.h>

#include "internal.h"
unsigned int sysctl_nr_open __read_mostly = 1024*1024;
unsigned int sysctl_nr_open_min = BITS_PER_LONG;
/* our min() is unusable in constant expressions ;-/ */
#define __const_min(x, y) ((x) < (y) ? (x) : (y))
unsigned int sysctl_nr_open_max =
	__const_min(INT_MAX, ~(size_t)0/sizeof(void *)) & -BITS_PER_LONG;
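/*
 * For instance, on a 64-bit build ~(size_t)0/sizeof(void *) is about 2^61,
 * far above INT_MAX, so the effective cap is INT_MAX (0x7fffffff) rounded
 * down to a multiple of BITS_PER_LONG, i.e. 0x7fffffc0.
 */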
static void __free_fdtable(struct fdtable *fdt)
{
	kvfree(fdt->fd);
	kvfree(fdt->open_fds);
	kfree(fdt);
}

static void free_fdtable_rcu(struct rcu_head *rcu)
{
	__free_fdtable(container_of(rcu, struct fdtable, rcu));
}
#define BITBIT_NR(nr)	BITS_TO_LONGS(BITS_TO_LONGS(nr))
#define BITBIT_SIZE(nr)	(BITBIT_NR(nr) * sizeof(long))
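/*
 * Example: with BITS_PER_LONG == 64, a 1024-fd table needs
 * BITS_TO_LONGS(1024) == 16 bitmap words, so the second-level "bitmap of
 * full bitmap words" needs BITS_TO_LONGS(16) == 1 long: BITBIT_SIZE(1024)
 * is just 8 bytes.
 */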
/*
 * Copy 'count' fd bits from the old table to the new table and clear the extra
 * space if any.  This does not copy the file pointers.  Called with the files
 * spinlock held for write.
 */
static void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt,
			    unsigned int count)
{
	unsigned int cpy, set;

	cpy = count / BITS_PER_BYTE;
	set = (nfdt->max_fds - count) / BITS_PER_BYTE;
	memcpy(nfdt->open_fds, ofdt->open_fds, cpy);
	memset((char *)nfdt->open_fds + cpy, 0, set);
	memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy);
	memset((char *)nfdt->close_on_exec + cpy, 0, set);

	cpy = BITBIT_SIZE(count);
	set = BITBIT_SIZE(nfdt->max_fds) - cpy;
	memcpy(nfdt->full_fds_bits, ofdt->full_fds_bits, cpy);
	memset((char *)nfdt->full_fds_bits + cpy, 0, set);
}
/*
 * Copy all file descriptors from the old table to the new, expanded table and
 * clear the extra space.  Called with the files spinlock held for write.
 */
static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
{
	size_t cpy, set;

	BUG_ON(nfdt->max_fds < ofdt->max_fds);

	cpy = ofdt->max_fds * sizeof(struct file *);
	set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *);
	memcpy(nfdt->fd, ofdt->fd, cpy);
	memset((char *)nfdt->fd + cpy, 0, set);

	copy_fd_bitmaps(nfdt, ofdt, ofdt->max_fds);
}
/*
 * Note how the fdtable bitmap allocations very much have to be a multiple of
 * BITS_PER_LONG. This is not only because we walk those things in chunks of
 * 'unsigned long' in some places, but simply because that is how the Linux
 * kernel bitmaps are defined to work: they are not "bits in an array of bytes",
 * they are very much "bits in an array of unsigned long".
 *
 * The ALIGN(nr, BITS_PER_LONG) here is for clarity: since we just multiplied
 * by that "1024/sizeof(ptr)" before, we already know there are sufficient
 * clear low bits. Clang seems to realize that, gcc ends up being confused.
 *
 * On a 128-bit machine, the ALIGN() would actually matter. In the meantime,
 * let's consider it documentation (and maybe a test-case for gcc to improve
 * its code generation ;)
 */
static struct fdtable * alloc_fdtable(unsigned int nr)
{
	struct fdtable *fdt;
	void *data;

	/*
	 * Figure out how many fds we actually want to support in this fdtable.
	 * Allocation steps are keyed to the size of the fdarray, since it
	 * grows far faster than any of the other dynamic data. We try to fit
	 * the fdarray into comfortable page-tuned chunks: starting at 1024B
	 * and growing in powers of two from there on.
	 */
	nr /= (1024 / sizeof(struct file *));
	nr = roundup_pow_of_two(nr + 1);
	nr *= (1024 / sizeof(struct file *));
	nr = ALIGN(nr, BITS_PER_LONG);
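	/*
	 * Worked example (64-bit, so 1024 / sizeof(struct file *) == 128):
	 * a request for nr == 300 gives 300/128 == 2, rounded up to the next
	 * power of two (2 + 1 -> 4), times 128 == 512 slots, i.e. a
	 * 4096-byte fdarray - exactly one page.
	 */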
	/*
	 * Note that this can drive nr *below* what we had passed if sysctl_nr_open
	 * had been set lower between the check in expand_files() and here.  Deal
	 * with that in caller, it's cheaper that way.
	 *
	 * We make sure that nr remains a multiple of BITS_PER_LONG - otherwise
	 * bitmaps handling below becomes unpleasant, to put it mildly...
	 */
	if (unlikely(nr > sysctl_nr_open))
		nr = ((sysctl_nr_open - 1) | (BITS_PER_LONG - 1)) + 1;
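	/*
	 * E.g. with sysctl_nr_open == 1000 and BITS_PER_LONG == 64 this
	 * clamp yields ((999 | 63) + 1) == 1024: the limit rounded up to
	 * the next multiple of BITS_PER_LONG.
	 */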
	fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL_ACCOUNT);
	if (!fdt)
		goto out;
	fdt->max_fds = nr;
	data = kvmalloc_array(nr, sizeof(struct file *), GFP_KERNEL_ACCOUNT);
	if (!data)
		goto out_fdt;
	fdt->fd = data;

	data = kvmalloc(max_t(size_t,
				 2 * nr / BITS_PER_BYTE + BITBIT_SIZE(nr), L1_CACHE_BYTES),
				 GFP_KERNEL_ACCOUNT);
	if (!data)
		goto out_arr;
	fdt->open_fds = data;
	data += nr / BITS_PER_BYTE;
	fdt->close_on_exec = data;
	data += nr / BITS_PER_BYTE;
	fdt->full_fds_bits = data;

	return fdt;

out_arr:
	kvfree(fdt->fd);
out_fdt:
	kfree(fdt);
out:
	return NULL;
}
/*
 * Expand the file descriptor table.
 * This function will allocate a new fdtable and both fd array and fdset, of
 * the given size.
 * Return <0 error code on error; 1 on successful completion.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_fdtable(struct files_struct *files, unsigned int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *new_fdt, *cur_fdt;

	spin_unlock(&files->file_lock);
	new_fdt = alloc_fdtable(nr);

	/* make sure all fd_install() have seen resize_in_progress
	 * or have finished their rcu_read_lock_sched() section.
	 */
	if (atomic_read(&files->count) > 1)
		synchronize_rcu();

	spin_lock(&files->file_lock);
	if (!new_fdt)
		return -ENOMEM;
	/*
	 * extremely unlikely race - sysctl_nr_open decreased between the check in
	 * caller and alloc_fdtable().  Cheaper to catch it here...
	 */
	if (unlikely(new_fdt->max_fds <= nr)) {
		__free_fdtable(new_fdt);
		return -EMFILE;
	}
	cur_fdt = files_fdtable(files);
	BUG_ON(nr < cur_fdt->max_fds);
	copy_fdtable(new_fdt, cur_fdt);
	rcu_assign_pointer(files->fdt, new_fdt);
	if (cur_fdt != &files->fdtab)
		call_rcu(&cur_fdt->rcu, free_fdtable_rcu);
	/* coupled with smp_rmb() in fd_install() */
	smp_wmb();
	return 1;
}
/*
 * Expand files.
 * This function will expand the file structures, if the requested size exceeds
 * the current capacity and there is room for expansion.
 * Return <0 error code on error; 0 when nothing done; 1 when files were
 * expanded and execution may have blocked.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_files(struct files_struct *files, unsigned int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *fdt;
	int expanded = 0;

repeat:
	fdt = files_fdtable(files);

	/* Do we need to expand? */
	if (nr < fdt->max_fds)
		return expanded;

	/* Can we expand? */
	if (nr >= sysctl_nr_open)
		return -EMFILE;

	if (unlikely(files->resize_in_progress)) {
		spin_unlock(&files->file_lock);
		expanded = 1;
		wait_event(files->resize_wait, !files->resize_in_progress);
		spin_lock(&files->file_lock);
		goto repeat;
	}

	/* All good, so we try */
	files->resize_in_progress = true;
	expanded = expand_fdtable(files, nr);
	files->resize_in_progress = false;

	wake_up_all(&files->resize_wait);
	return expanded;
}
static inline void __set_close_on_exec(unsigned int fd, struct fdtable *fdt)
{
	__set_bit(fd, fdt->close_on_exec);
}

static inline void __clear_close_on_exec(unsigned int fd, struct fdtable *fdt)
{
	if (test_bit(fd, fdt->close_on_exec))
		__clear_bit(fd, fdt->close_on_exec);
}

static inline void __set_open_fd(unsigned int fd, struct fdtable *fdt)
{
	__set_bit(fd, fdt->open_fds);
	fd /= BITS_PER_LONG;
	if (!~fdt->open_fds[fd])
		__set_bit(fd, fdt->full_fds_bits);
}
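/*
 * Example of the full_fds_bits bookkeeping above: with BITS_PER_LONG == 64,
 * once fds 0-63 are all open, open_fds[0] == ~0UL, so bit 0 of
 * full_fds_bits gets set and find_next_fd() can skip that whole word.
 */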
static inline void __clear_open_fd(unsigned int fd, struct fdtable *fdt)
{
	__clear_bit(fd, fdt->open_fds);
	__clear_bit(fd / BITS_PER_LONG, fdt->full_fds_bits);
}
static unsigned int count_open_files(struct fdtable *fdt)
{
	unsigned int size = fdt->max_fds;
	unsigned int i;

	/* Find the last open fd */
	for (i = size / BITS_PER_LONG; i > 0; ) {
		if (fdt->open_fds[--i])
			break;
	}
	i = (i + 1) * BITS_PER_LONG;
	return i;
}
/*
 * Note that a sane fdtable size always has to be a multiple of
 * BITS_PER_LONG, since we have bitmaps that are sized by this.
 *
 * 'max_fds' will normally already be properly aligned, but it
 * turns out that in the close_range() -> __close_range() ->
 * unshare_fd() -> dup_fd() -> sane_fdtable_size() we can end
 * up having a 'max_fds' value that isn't already aligned.
 *
 * Rather than make close_range() have to worry about this,
 * just make that BITS_PER_LONG alignment be part of a sane
 * fdtable size. Because that's really what it is.
 */
static unsigned int sane_fdtable_size(struct fdtable *fdt, unsigned int max_fds)
{
	unsigned int count;

	count = count_open_files(fdt);
	if (max_fds < NR_OPEN_DEFAULT)
		max_fds = NR_OPEN_DEFAULT;
	return ALIGN(min(count, max_fds), BITS_PER_LONG);
}
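/*
 * Example: if close_range() hands dup_fd() max_fds == 100 while
 * count_open_files() reports 256, the result is ALIGN(min(256, 100), 64)
 * == 128 on a 64-bit kernel, keeping the bitmaps whole words wide.
 */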
/*
 * Allocate a new files structure and copy contents from the
 * passed in files structure.
 * errorp will be valid only when the returned files_struct is NULL.
 */
struct files_struct *dup_fd(struct files_struct *oldf, unsigned int max_fds, int *errorp)
{
	struct files_struct *newf;
	struct file **old_fds, **new_fds;
	unsigned int open_files, i;
	struct fdtable *old_fdt, *new_fdt;

	*errorp = -ENOMEM;
	newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
	if (!newf)
		goto out;

	atomic_set(&newf->count, 1);

	spin_lock_init(&newf->file_lock);
	newf->resize_in_progress = false;
	init_waitqueue_head(&newf->resize_wait);
	newf->next_fd = 0;
	new_fdt = &newf->fdtab;
	new_fdt->max_fds = NR_OPEN_DEFAULT;
	new_fdt->close_on_exec = newf->close_on_exec_init;
	new_fdt->open_fds = newf->open_fds_init;
	new_fdt->full_fds_bits = newf->full_fds_bits_init;
	new_fdt->fd = &newf->fd_array[0];

	spin_lock(&oldf->file_lock);
	old_fdt = files_fdtable(oldf);
	open_files = sane_fdtable_size(old_fdt, max_fds);
	/*
	 * Check whether we need to allocate a larger fd array and fd set.
	 */
	while (unlikely(open_files > new_fdt->max_fds)) {
		spin_unlock(&oldf->file_lock);

		if (new_fdt != &newf->fdtab)
			__free_fdtable(new_fdt);

		new_fdt = alloc_fdtable(open_files - 1);
		if (!new_fdt) {
			*errorp = -ENOMEM;
			goto out_release;
		}

		/* beyond sysctl_nr_open; nothing to do */
		if (unlikely(new_fdt->max_fds < open_files)) {
			__free_fdtable(new_fdt);
			*errorp = -EMFILE;
			goto out_release;
		}

		/*
		 * Reacquire the oldf lock and a pointer to its fd table;
		 * it may have grown a new, bigger fd table in the meantime,
		 * and we need the latest pointer.
		 */
		spin_lock(&oldf->file_lock);
		old_fdt = files_fdtable(oldf);
		open_files = sane_fdtable_size(old_fdt, max_fds);
	}
	copy_fd_bitmaps(new_fdt, old_fdt, open_files);

	old_fds = old_fdt->fd;
	new_fds = new_fdt->fd;

	for (i = open_files; i != 0; i--) {
		struct file *f = *old_fds++;
		if (f) {
			get_file(f);
		} else {
			/*
			 * The fd may be claimed in the fd bitmap but not yet
			 * instantiated in the files array if a sibling thread
			 * is partway through open().  So make sure that this
			 * fd is available to the new process.
			 */
			__clear_open_fd(open_files - i, new_fdt);
		}
		rcu_assign_pointer(*new_fds++, f);
	}
	spin_unlock(&oldf->file_lock);

	/* clear the remainder */
	memset(new_fds, 0, (new_fdt->max_fds - open_files) * sizeof(struct file *));

	rcu_assign_pointer(newf->fdt, new_fdt);

	return newf;

out_release:
	kmem_cache_free(files_cachep, newf);
out:
	return NULL;
}
static struct fdtable *close_files(struct files_struct * files)
{
	/*
	 * It is safe to dereference the fd table without RCU or
	 * ->file_lock because this is the last reference to the
	 * files structure.
	 */
	struct fdtable *fdt = rcu_dereference_raw(files->fdt);
	unsigned int i, j = 0;

	for (;;) {
		unsigned long set;
		i = j * BITS_PER_LONG;
		if (i >= fdt->max_fds)
			break;
		set = fdt->open_fds[j++];
		while (set) {
			if (set & 1) {
				struct file * file = xchg(&fdt->fd[i], NULL);
				if (file) {
					filp_close(file, files);
					cond_resched();
				}
			}
			i++;
			set >>= 1;
		}
	}

	return fdt;
}
void put_files_struct(struct files_struct *files)
{
	if (atomic_dec_and_test(&files->count)) {
		struct fdtable *fdt = close_files(files);

		/* free the arrays if they are not embedded */
		if (fdt != &files->fdtab)
			__free_fdtable(fdt);
		kmem_cache_free(files_cachep, files);
	}
}
void exit_files(struct task_struct *tsk)
{
	struct files_struct * files = tsk->files;

	if (files) {
		task_lock(tsk);
		tsk->files = NULL;
		task_unlock(tsk);
		put_files_struct(files);
	}
}
struct files_struct init_files = {
	.count		= ATOMIC_INIT(1),
	.fdt		= &init_files.fdtab,
	.fdtab		= {
		.max_fds	= NR_OPEN_DEFAULT,
		.fd		= &init_files.fd_array[0],
		.close_on_exec	= init_files.close_on_exec_init,
		.open_fds	= init_files.open_fds_init,
		.full_fds_bits	= init_files.full_fds_bits_init,
	},
	.file_lock	= __SPIN_LOCK_UNLOCKED(init_files.file_lock),
	.resize_wait	= __WAIT_QUEUE_HEAD_INITIALIZER(init_files.resize_wait),
};
static unsigned int find_next_fd(struct fdtable *fdt, unsigned int start)
{
	unsigned int maxfd = fdt->max_fds; /* always multiple of BITS_PER_LONG */
	unsigned int maxbit = maxfd / BITS_PER_LONG;
	unsigned int bitbit = start / BITS_PER_LONG;

	bitbit = find_next_zero_bit(fdt->full_fds_bits, maxbit, bitbit) * BITS_PER_LONG;
	if (bitbit >= maxfd)
		return maxfd;
	if (bitbit > start)
		start = bitbit;
	return find_next_zero_bit(fdt->open_fds, maxfd, start);
}
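/*
 * Example of the two-level search above: if fds 0-129 are open and fd 130
 * is free, full_fds_bits has bits 0 and 1 set, so the first
 * find_next_zero_bit() lands on word 2 (fd 128) and the scan of open_fds
 * only has to look at that one word before returning 130.
 */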
/*
 * allocate a file descriptor, mark it busy.
 */
static int alloc_fd(unsigned start, unsigned end, unsigned flags)
{
	struct files_struct *files = current->files;
	unsigned int fd;
	int error;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
repeat:
	fdt = files_fdtable(files);
	fd = start;
	if (fd < files->next_fd)
		fd = files->next_fd;

	if (fd < fdt->max_fds)
		fd = find_next_fd(fdt, fd);

	/*
	 * N.B. For clone tasks sharing a files structure, this test
	 * will limit the total number of files that can be opened.
	 */
	error = -EMFILE;
	if (fd >= end)
		goto out;
	error = expand_files(files, fd);
	if (error < 0)
		goto out;

	/*
	 * If we needed to expand the fs array we
	 * might have blocked - try again.
	 */
	if (error)
		goto repeat;

	if (start <= files->next_fd)
		files->next_fd = fd + 1;

	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	error = fd;
#if 1
	/* Sanity check */
	if (rcu_access_pointer(fdt->fd[fd]) != NULL) {
		printk(KERN_WARNING "alloc_fd: slot %d not NULL!\n", fd);
		rcu_assign_pointer(fdt->fd[fd], NULL);
	}
#endif

out:
	spin_unlock(&files->file_lock);
	return error;
}
int __get_unused_fd_flags(unsigned flags, unsigned long nofile)
{
	return alloc_fd(0, nofile, flags);
}

int get_unused_fd_flags(unsigned flags)
{
	return __get_unused_fd_flags(flags, rlimit(RLIMIT_NOFILE));
}
EXPORT_SYMBOL(get_unused_fd_flags);
static void __put_unused_fd(struct files_struct *files, unsigned int fd)
{
	struct fdtable *fdt = files_fdtable(files);
	__clear_open_fd(fd, fdt);
	if (fd < files->next_fd)
		files->next_fd = fd;
}

void put_unused_fd(unsigned int fd)
{
	struct files_struct *files = current->files;
	spin_lock(&files->file_lock);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
}

EXPORT_SYMBOL(put_unused_fd);
/*
 * Install a file pointer in the fd array.
 *
 * The VFS is full of places where we drop the files lock between
 * setting the open_fds bitmap and installing the file in the file
 * array.  At any such point, we are vulnerable to a dup2() race
 * installing a file in the array before us.  We need to detect this and
 * fput() the struct file we are about to overwrite in this case.
 *
 * It should never happen - if we allow dup2() do it, _really_ bad things
 * will follow.
 *
 * This consumes the "file" refcount, so callers should treat it
 * as if they had called fput(file).
 */
void fd_install(unsigned int fd, struct file *file)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;

	if (WARN_ON_ONCE(unlikely(file->f_mode & FMODE_BACKING)))
		return;

	rcu_read_lock_sched();

	if (unlikely(files->resize_in_progress)) {
		rcu_read_unlock_sched();
		spin_lock(&files->file_lock);
		fdt = files_fdtable(files);
		BUG_ON(fdt->fd[fd] != NULL);
		rcu_assign_pointer(fdt->fd[fd], file);
		spin_unlock(&files->file_lock);
		return;
	}
	/* coupled with smp_wmb() in expand_fdtable() */
	smp_rmb();
	fdt = rcu_dereference_sched(files->fdt);
	BUG_ON(fdt->fd[fd] != NULL);
	rcu_assign_pointer(fdt->fd[fd], file);
	rcu_read_unlock_sched();
}

EXPORT_SYMBOL(fd_install);
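/*
 * Sketch of the usual calling pattern (as seen in open(2)-style paths
 * elsewhere in the kernel, not a helper defined here): reserve the
 * descriptor first, then publish the file, since fd_install() consumes
 * the file reference and cannot fail:
 *
 *	fd = get_unused_fd_flags(O_CLOEXEC);
 *	if (fd < 0)
 *		return fd;
 *	file = dentry_open(&path, O_RDONLY, current_cred());
 *	if (IS_ERR(file)) {
 *		put_unused_fd(fd);
 *		return PTR_ERR(file);
 *	}
 *	fd_install(fd, file);
 *	return fd;
 */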
/**
 * file_close_fd_locked - return file associated with fd
 * @files: file struct to retrieve file from
 * @fd: file descriptor to retrieve file for
 *
 * Doesn't take a separate reference count.
 *
 * Context: files_lock must be held.
 *
 * Returns: The file associated with @fd (NULL if @fd is not open)
 */
struct file *file_close_fd_locked(struct files_struct *files, unsigned fd)
{
	struct fdtable *fdt = files_fdtable(files);
	struct file *file;

	lockdep_assert_held(&files->file_lock);

	if (fd >= fdt->max_fds)
		return NULL;

	fd = array_index_nospec(fd, fdt->max_fds);
	file = fdt->fd[fd];
	if (file) {
		rcu_assign_pointer(fdt->fd[fd], NULL);
		__put_unused_fd(files, fd);
	}
	return file;
}
int close_fd(unsigned fd)
{
	struct files_struct *files = current->files;
	struct file *file;

	spin_lock(&files->file_lock);
	file = file_close_fd_locked(files, fd);
	spin_unlock(&files->file_lock);
	if (!file)
		return -EBADF;

	return filp_close(file, files);
}
EXPORT_SYMBOL(close_fd); /* for ksys_close() */
/**
 * last_fd - return last valid index into fd table
 * @fdt: File descriptor table.
 *
 * Context: Either rcu read lock or files_lock must be held.
 *
 * Returns: Last valid index into fdtable.
 */
static inline unsigned last_fd(struct fdtable *fdt)
{
	return fdt->max_fds - 1;
}
static inline void __range_cloexec(struct files_struct *cur_fds,
				   unsigned int fd, unsigned int max_fd)
{
	struct fdtable *fdt;

	/* make sure we're using the correct maximum value */
	spin_lock(&cur_fds->file_lock);
	fdt = files_fdtable(cur_fds);
	max_fd = min(last_fd(fdt), max_fd);
	if (fd <= max_fd)
		bitmap_set(fdt->close_on_exec, fd, max_fd - fd + 1);
	spin_unlock(&cur_fds->file_lock);
}
static inline void __range_close(struct files_struct *files, unsigned int fd,
				 unsigned int max_fd)
{
	struct file *file;
	unsigned n;

	spin_lock(&files->file_lock);
	n = last_fd(files_fdtable(files));
	max_fd = min(max_fd, n);

	for (; fd <= max_fd; fd++) {
		file = file_close_fd_locked(files, fd);
		if (file) {
			spin_unlock(&files->file_lock);
			filp_close(file, files);
			cond_resched();
			spin_lock(&files->file_lock);
		} else if (need_resched()) {
			spin_unlock(&files->file_lock);
			cond_resched();
			spin_lock(&files->file_lock);
		}
	}
	spin_unlock(&files->file_lock);
}
/**
 * __close_range() - Close all file descriptors in a given range.
 *
 * @fd:     starting file descriptor to close
 * @max_fd: last file descriptor to close
 * @flags:  CLOSE_RANGE flags.
 *
 * This closes a range of file descriptors. All file descriptors
 * from @fd up to and including @max_fd are closed.
 */
int __close_range(unsigned fd, unsigned max_fd, unsigned int flags)
{
	struct task_struct *me = current;
	struct files_struct *cur_fds = me->files, *fds = NULL;

	if (flags & ~(CLOSE_RANGE_UNSHARE | CLOSE_RANGE_CLOEXEC))
		return -EINVAL;

	if (fd > max_fd)
		return -EINVAL;

	if (flags & CLOSE_RANGE_UNSHARE) {
		int ret;
		unsigned int max_unshare_fds = NR_OPEN_MAX;

		/*
		 * If the caller requested all fds to be made cloexec we always
		 * copy all of the file descriptors since they still want to
		 * use them.
		 */
		if (!(flags & CLOSE_RANGE_CLOEXEC)) {
			/*
			 * If the requested range is greater than the current
			 * maximum, we're closing everything so only copy all
			 * file descriptors beneath the lowest file descriptor.
			 */
			rcu_read_lock();
			if (max_fd >= last_fd(files_fdtable(cur_fds)))
				max_unshare_fds = fd;
			rcu_read_unlock();
		}
		ret = unshare_fd(CLONE_FILES, max_unshare_fds, &fds);
		if (ret)
			return ret;

		/*
		 * We used to share our file descriptor table, and have now
		 * created a private one, make sure we're using it below.
		 */
		if (fds)
			swap(cur_fds, fds);
	}

	if (flags & CLOSE_RANGE_CLOEXEC)
		__range_cloexec(cur_fds, fd, max_fd);
	else
		__range_close(cur_fds, fd, max_fd);

	if (fds) {
		/*
		 * We're done closing the files we were supposed to. Time to install
		 * the new file descriptor table and drop the old one.
		 */
		task_lock(me);
		me->files = cur_fds;
		task_unlock(me);
		put_files_struct(fds);
	}

	return 0;
}
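/*
 * From userspace this is reached via close_range(2). For example, a
 * process about to exec an untrusted helper might keep stdio open and
 * drop everything else:
 *
 *	close_range(3, ~0U, CLOSE_RANGE_UNSHARE);
 *
 * which, per the flag handling above, unshares the descriptor table
 * before closing every fd from 3 upward.
 */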
/**
 * file_close_fd - return file associated with fd
 * @fd: file descriptor to retrieve file for
 *
 * Doesn't take a separate reference count.
 *
 * Returns: The file associated with @fd (NULL if @fd is not open)
 */
struct file *file_close_fd(unsigned int fd)
{
	struct files_struct *files = current->files;
	struct file *file;

	spin_lock(&files->file_lock);
	file = file_close_fd_locked(files, fd);
	spin_unlock(&files->file_lock);

	return file;
}
void do_close_on_exec(struct files_struct *files)
{
	unsigned i;
	struct fdtable *fdt;

	/* exec unshares first */
	spin_lock(&files->file_lock);
	for (i = 0; ; i++) {
		unsigned long set;
		unsigned fd = i * BITS_PER_LONG;
		fdt = files_fdtable(files);
		if (fd >= fdt->max_fds)
			break;
		set = fdt->close_on_exec[i];
		if (!set)
			continue;
		fdt->close_on_exec[i] = 0;
		for ( ; set ; fd++, set >>= 1) {
			struct file *file;
			if (!(set & 1))
				continue;
			file = fdt->fd[fd];
			if (!file)
				continue;
			rcu_assign_pointer(fdt->fd[fd], NULL);
			__put_unused_fd(files, fd);
			spin_unlock(&files->file_lock);
			filp_close(file, files);
			cond_resched();
			spin_lock(&files->file_lock);
		}
	}
	spin_unlock(&files->file_lock);
}
static struct file *__get_file_rcu(struct file __rcu **f)
{
	struct file __rcu *file;
	struct file __rcu *file_reloaded;
	struct file __rcu *file_reloaded_cmp;

	file = rcu_dereference_raw(*f);
	if (!file)
		return NULL;

	if (unlikely(!atomic_long_inc_not_zero(&file->f_count)))
		return ERR_PTR(-EAGAIN);

	file_reloaded = rcu_dereference_raw(*f);

	/*
	 * Ensure that all accesses have a dependency on the load from
	 * rcu_dereference_raw() above so we get correct ordering
	 * between reuse/allocation and the pointer check below.
	 */
	file_reloaded_cmp = file_reloaded;
	OPTIMIZER_HIDE_VAR(file_reloaded_cmp);

	/*
	 * atomic_long_inc_not_zero() above provided a full memory
	 * barrier when we acquired a reference.
	 *
	 * This is paired with the write barrier from assigning to the
	 * __rcu protected file pointer so that if that pointer still
	 * matches the current file, we know we have successfully
	 * acquired a reference to the right file.
	 *
	 * If the pointers don't match the file has been reallocated by
	 * SLAB_TYPESAFE_BY_RCU.
	 */
	if (file == file_reloaded_cmp)
		return file_reloaded;

	fput(file);
	return ERR_PTR(-EAGAIN);
}
/**
 * get_file_rcu - try to get a reference to a file under rcu
 * @f: the file to get a reference on
 *
 * This function tries to get a reference on @f carefully verifying that
 * @f hasn't been reused.
 *
 * This function should rarely have to be used and only by users who
 * understand the implications of SLAB_TYPESAFE_BY_RCU. Try to avoid it.
 *
 * Return: Returns @f with the reference count increased or NULL.
 */
struct file *get_file_rcu(struct file __rcu **f)
{
	for (;;) {
		struct file __rcu *file;

		file = __get_file_rcu(f);
		if (unlikely(!file))
			return NULL;

		if (unlikely(IS_ERR(file)))
			continue;

		return file;
	}
}
EXPORT_SYMBOL_GPL(get_file_rcu);
/**
 * get_file_active - try to get a reference to a file
 * @f: the file to get a reference on
 *
 * In contrast to get_file_rcu() the pointer itself isn't part of the
 * reference counting.
 *
 * This function should rarely have to be used and only by users who
 * understand the implications of SLAB_TYPESAFE_BY_RCU. Try to avoid it.
 *
 * Return: Returns @f with the reference count increased or NULL.
 */
struct file *get_file_active(struct file **f)
{
	struct file __rcu *file;

	rcu_read_lock();
	file = __get_file_rcu(f);
	rcu_read_unlock();
	if (IS_ERR(file))
		file = NULL;
	return file;
}
EXPORT_SYMBOL_GPL(get_file_active);
static inline struct file *__fget_files_rcu(struct files_struct *files,
	unsigned int fd, fmode_t mask)
{
	for (;;) {
		struct file *file;
		struct fdtable *fdt = rcu_dereference_raw(files->fdt);
		struct file __rcu **fdentry;
		unsigned long nospec_mask;

		/* Mask is a 0 for invalid fd's, ~0 for valid ones */
		nospec_mask = array_index_mask_nospec(fd, fdt->max_fds);

		/*
		 * fdentry points to the 'fd' offset, or fdt->fd[0].
		 * Loading from fdt->fd[0] is always safe, because the
		 * array always exists.
		 */
		fdentry = fdt->fd + (fd & nospec_mask);

		/* Do the load, then mask any invalid result */
		file = rcu_dereference_raw(*fdentry);
		file = (void *)(nospec_mask & (unsigned long)file);
		if (unlikely(!file))
			return NULL;
		/*
		 * Ok, we have a file pointer that was valid at
		 * some point, but it might have become stale since.
		 *
		 * We need to confirm it by incrementing the refcount
		 * and then check the lookup again.
		 *
		 * atomic_long_inc_not_zero() gives us a full memory
		 * barrier. We only really need an 'acquire' one to
		 * protect the loads below, but we don't have that.
		 */
		if (unlikely(!atomic_long_inc_not_zero(&file->f_count)))
			continue;

		/*
		 * Such a race can take two forms:
		 *
		 *  (a) the file ref already went down to zero and the
		 *      file hasn't been reused yet or the file count
		 *      isn't zero but the file has already been reused.
		 *
		 *  (b) the file table entry has changed under us.
		 *      Note that we don't need to re-check the 'fdt->fd'
		 *      pointer having changed, because it always goes
		 *      hand-in-hand with 'fdt'.
		 *
		 * If so, we need to put our ref and try again.
		 */
		if (unlikely(file != rcu_dereference_raw(*fdentry)) ||
		    unlikely(rcu_dereference_raw(files->fdt) != fdt)) {
			fput(file);
			continue;
		}

		/*
		 * This isn't the file we're looking for or we're not
		 * allowed to get a reference to it.
		 */
		if (unlikely(file->f_mode & mask)) {
			fput(file);
			return NULL;
		}

		/*
		 * Ok, we have a ref to the file, and checked that it
		 * still exists.
		 */
		return file;
	}
}
static struct file *__fget_files(struct files_struct *files, unsigned int fd,
				 fmode_t mask)
{
	struct file *file;

	rcu_read_lock();
	file = __fget_files_rcu(files, fd, mask);
	rcu_read_unlock();

	return file;
}

static inline struct file *__fget(unsigned int fd, fmode_t mask)
{
	return __fget_files(current->files, fd, mask);
}

struct file *fget(unsigned int fd)
{
	return __fget(fd, FMODE_PATH);
}
EXPORT_SYMBOL(fget);

struct file *fget_raw(unsigned int fd)
{
	return __fget(fd, 0);
}
EXPORT_SYMBOL(fget_raw);
struct file *fget_task(struct task_struct *task, unsigned int fd)
{
	struct file *file = NULL;

	task_lock(task);
	if (task->files)
		file = __fget_files(task->files, fd, 0);
	task_unlock(task);

	return file;
}

struct file *lookup_fdget_rcu(unsigned int fd)
{
	return __fget_files_rcu(current->files, fd, 0);
}
EXPORT_SYMBOL_GPL(lookup_fdget_rcu);
struct file *task_lookup_fdget_rcu(struct task_struct *task, unsigned int fd)
{
	/* Must be called with rcu_read_lock held */
	struct files_struct *files;
	struct file *file = NULL;

	task_lock(task);
	files = task->files;
	if (files)
		file = __fget_files_rcu(files, fd, 0);
	task_unlock(task);

	return file;
}

struct file *task_lookup_next_fdget_rcu(struct task_struct *task, unsigned int *ret_fd)
{
	/* Must be called with rcu_read_lock held */
	struct files_struct *files;
	unsigned int fd = *ret_fd;
	struct file *file = NULL;

	task_lock(task);
	files = task->files;
	if (files) {
		for (; fd < files_fdtable(files)->max_fds; fd++) {
			file = __fget_files_rcu(files, fd, 0);
			if (file)
				break;
		}
	}
	task_unlock(task);
	*ret_fd = fd;
	return file;
}
EXPORT_SYMBOL(task_lookup_next_fdget_rcu);
/*
 * Lightweight file lookup - no refcnt increment if fd table isn't shared.
 *
 * You can use this instead of fget if you satisfy all of the following
 * conditions:
 * 1) You must call fput_light before exiting the syscall and returning control
 *    to userspace (i.e. you cannot remember the returned struct file * after
 *    returning to userspace).
 * 2) You must not call filp_close on the returned struct file * in between
 *    calls to fget_light and fput_light.
 * 3) You must not clone the current task in between the calls to fget_light
 *    and fput_light.
 *
 * The fput_needed flag returned by fget_light should be passed to the
 * corresponding fput_light.
 */
static unsigned long __fget_light(unsigned int fd, fmode_t mask)
{
	struct files_struct *files = current->files;
	struct file *file;

	/*
	 * If another thread is concurrently calling close_fd() followed
	 * by put_files_struct(), we must not observe the old table
	 * entry combined with the new refcount - otherwise we could
	 * return a file that is concurrently being freed.
	 *
	 * atomic_read_acquire() pairs with atomic_dec_and_test() in
	 * put_files_struct().
	 */
	if (likely(atomic_read_acquire(&files->count) == 1)) {
		file = files_lookup_fd_raw(files, fd);
		if (!file || unlikely(file->f_mode & mask))
			return 0;
		return (unsigned long)file;
	} else {
		file = __fget_files(files, fd, mask);
		if (!file)
			return 0;
		return FDPUT_FPUT | (unsigned long)file;
	}
}
unsigned long __fdget(unsigned int fd)
{
	return __fget_light(fd, FMODE_PATH);
}
EXPORT_SYMBOL(__fdget);

unsigned long __fdget_raw(unsigned int fd)
{
	return __fget_light(fd, 0);
}
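/*
 * Callers normally reach this through the fdget()/fdput() helpers in
 * <linux/file.h>, which wrap the word returned above in a struct fd.
 * A typical (sketched) syscall body looks like:
 *
 *	struct fd f = fdget(fd);
 *	if (!f.file)
 *		return -EBADF;
 *	ret = vfs_fsync(f.file, 0);
 *	fdput(f);
 *	return ret;
 *
 * fdput() only calls fput() when the FDPUT_FPUT bit was set above.
 */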
/*
 * Try to avoid f_pos locking. We only need it if the
 * file is marked for FMODE_ATOMIC_POS, and it can be
 * accessed multiple ways.
 *
 * Always do it for directories, because pidfd_getfd()
 * can make a file accessible even if it otherwise would
 * not be, and for directories this is a correctness
 * issue, not a "POSIX requirement".
 */
static inline bool file_needs_f_pos_lock(struct file *file)
{
	return (file->f_mode & FMODE_ATOMIC_POS) &&
		(file_count(file) > 1 || file->f_op->iterate_shared);
}
unsigned long __fdget_pos(unsigned int fd)
{
	unsigned long v = __fdget(fd);
	struct file *file = (struct file *)(v & ~3);

	if (file && file_needs_f_pos_lock(file)) {
		v |= FDPUT_POS_UNLOCK;
		mutex_lock(&file->f_pos_lock);
	}
	return v;
}

void __f_unlock_pos(struct file *f)
{
	mutex_unlock(&f->f_pos_lock);
}
/*
 * We only lock f_pos if we have threads or if the file might be
 * shared with another process. In both cases we'll have an elevated
 * file count (done either by fdget() or by fork()).
 */
void set_close_on_exec(unsigned int fd, int flag)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (flag)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	spin_unlock(&files->file_lock);
}
bool get_close_on_exec(unsigned int fd)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	bool res;
	rcu_read_lock();
	fdt = files_fdtable(files);
	res = close_on_exec(fd, fdt);
	rcu_read_unlock();
	return res;
}
static int do_dup2(struct files_struct *files,
	struct file *file, unsigned fd, unsigned flags)
__releases(&files->file_lock)
{
	struct file *tofree;
	struct fdtable *fdt;

	/*
	 * We need to detect attempts to do dup2() over allocated but still
	 * not finished descriptor.  NB: OpenBSD avoids that at the price of
	 * extra work in their equivalent of fget() - they insert struct
	 * file immediately after grabbing descriptor, mark it larval if
	 * more work (e.g. actual opening) is needed and make sure that
	 * fget() treats larval files as absent.  Potentially interesting,
	 * but while extra work in fget() is trivial, locking implications
	 * and amount of surgery on open()-related paths in VFS are not.
	 * FreeBSD fails with -EBADF in the same situation, NetBSD "solution"
	 * deadlocks in rather amusing ways, AFAICS.  All of that is out of
	 * scope of POSIX or SUS, since neither considers shared descriptor
	 * tables and this condition does not arise without those.
	 */
	fdt = files_fdtable(files);
	tofree = fdt->fd[fd];
	if (!tofree && fd_is_open(fd, fdt))
		goto Ebusy;
	get_file(file);
	rcu_assign_pointer(fdt->fd[fd], file);
	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	spin_unlock(&files->file_lock);

	if (tofree)
		filp_close(tofree, files);

	return fd;

Ebusy:
	spin_unlock(&files->file_lock);
	return -EBUSY;
}
int replace_fd(unsigned fd, struct file *file, unsigned flags)
{
	int err;
	struct files_struct *files = current->files;

	if (!file)
		return close_fd(fd);

	if (fd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;

	spin_lock(&files->file_lock);
	err = expand_files(files, fd);
	if (unlikely(err < 0))
		goto out_unlock;
	return do_dup2(files, file, fd, flags);

out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}
/**
 * receive_fd() - Install received file into file descriptor table
 * @file: struct file that was received from another process
 * @ufd: __user pointer to write new fd number to
 * @o_flags: the O_* flags to apply to the new fd entry
 *
 * Installs a received file into the file descriptor table, with appropriate
 * checks and count updates. Optionally writes the fd number to userspace, if
 * @ufd is non-NULL.
 *
 * This helper handles its own reference counting of the incoming
 * struct file.
 *
 * Returns newly installed fd or -ve on error.
 */
int receive_fd(struct file *file, int __user *ufd, unsigned int o_flags)
{
	int new_fd;
	int error;

	error = security_file_receive(file);
	if (error)
		return error;

	new_fd = get_unused_fd_flags(o_flags);
	if (new_fd < 0)
		return new_fd;

	if (ufd) {
		error = put_user(new_fd, ufd);
		if (error) {
			put_unused_fd(new_fd);
			return error;
		}
	}

	fd_install(new_fd, get_file(file));
	__receive_sock(file);
	return new_fd;
}
EXPORT_SYMBOL_GPL(receive_fd);
int receive_fd_replace(int new_fd, struct file *file, unsigned int o_flags)
{
	int error;

	error = security_file_receive(file);
	if (error)
		return error;
	error = replace_fd(new_fd, file, o_flags);
	if (error)
		return error;
	__receive_sock(file);
	return new_fd;
}
static int ksys_dup3(unsigned int oldfd, unsigned int newfd, int flags)
{
	int err = -EBADF;
	struct file *file;
	struct files_struct *files = current->files;

	if ((flags & ~O_CLOEXEC) != 0)
		return -EINVAL;

	if (unlikely(oldfd == newfd))
		return -EINVAL;

	if (newfd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;

	spin_lock(&files->file_lock);
	err = expand_files(files, newfd);
	file = files_lookup_fd_locked(files, oldfd);
	if (unlikely(!file))
		goto Ebadf;
	if (unlikely(err < 0)) {
		if (err == -EMFILE)
			goto Ebadf;
		goto out_unlock;
	}
	return do_dup2(files, file, newfd, flags);

Ebadf:
	err = -EBADF;
out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}
SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
{
	return ksys_dup3(oldfd, newfd, flags);
}

SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
{
	if (unlikely(newfd == oldfd)) { /* corner case */
		struct files_struct *files = current->files;
		struct file *f;
		int retval = oldfd;

		rcu_read_lock();
		f = __fget_files_rcu(files, oldfd, 0);
		rcu_read_unlock();
		if (f)
			fput(f);
		else
			retval = -EBADF;
		return retval;
	}
	return ksys_dup3(oldfd, newfd, 0);
}
SYSCALL_DEFINE1(dup, unsigned int, fildes)
{
	int ret = -EBADF;
	struct file *file = fget_raw(fildes);

	if (file) {
		ret = get_unused_fd_flags(0);
		if (ret >= 0)
			fd_install(ret, file);
		else
			fput(file);
	}
	return ret;
}

int f_dupfd(unsigned int from, struct file *file, unsigned flags)
{
	unsigned long nofile = rlimit(RLIMIT_NOFILE);
	int err;
	if (from >= nofile)
		return -EINVAL;
	err = alloc_fd(from, nofile, flags);
	if (err >= 0) {
		get_file(file);
		fd_install(err, file);
	}
	return err;
}
int iterate_fd(struct files_struct *files, unsigned n,
		int (*f)(const void *, struct file *, unsigned),
		const void *p)
{
	struct fdtable *fdt;
	int res = 0;
	if (!files)
		return 0;
	spin_lock(&files->file_lock);
	for (fdt = files_fdtable(files); n < fdt->max_fds; n++) {
		struct file *file;
		file = rcu_dereference_check_fdtable(files, fdt->fd[n]);
		if (!file)
			continue;
		res = f(p, file, n);
		if (res)
			break;
	}
	spin_unlock(&files->file_lock);
	return res;
}
EXPORT_SYMBOL(iterate_fd);
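/*
 * Example iterate_fd() usage (a sketch, not a helper defined here): a
 * callback returning non-zero stops the walk and becomes iterate_fd()'s
 * return value, so a caller can search for the first matching fd:
 *
 *	static int match_sock(const void *p, struct file *file, unsigned fd)
 *	{
 *		return sock_from_file(file) ? fd + 1 : 0;
 *	}
 *	...
 *	res = iterate_fd(files, 0, match_sock, NULL);
 *
 * (fd + 1, so that fd 0 is distinguishable from "not found".)
 */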