/*
 * linux/fs/file_table.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1997 David S. Miller ([email protected])
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/security.h>
#include <linux/eventpoll.h>
#include <linux/rcupdate.h>
#include <linux/mount.h>
#include <linux/capability.h>
#include <linux/cdev.h>
#include <linux/fsnotify.h>
#include <linux/sysctl.h>
#include <linux/lglock.h>
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/task_work.h>
#include <linux/ima.h>

#include <linux/atomic.h>

#include "internal.h"

/* sysctl tunables... */
struct files_stat_struct files_stat = {
	.max_files = NR_FILE
};

/* SLAB cache for file structures */
static struct kmem_cache *filp_cachep __read_mostly;

static struct percpu_counter nr_files __cacheline_aligned_in_smp;

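/*
 * struct file is freed through RCU (see file_free() below) so that
 * lock-free readers of the fd table (e.g. fcheck_files() under
 * rcu_read_lock()) never dereference a struct file that has already
 * been recycled.
 */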
static void file_free_rcu(struct rcu_head *head)
{
	struct file *f = container_of(head, struct file, f_u.fu_rcuhead);

	put_cred(f->f_cred);
	kmem_cache_free(filp_cachep, f);
}

static inline void file_free(struct file *f)
{
	percpu_counter_dec(&nr_files);
	call_rcu(&f->f_u.fu_rcuhead, file_free_rcu);
}

/*
 * Return the total number of open files in the system
 */
static long get_nr_files(void)
{
	return percpu_counter_read_positive(&nr_files);
}

/*
 * Return the maximum number of open files in the system
 */
unsigned long get_max_files(void)
{
	return files_stat.max_files;
}
EXPORT_SYMBOL_GPL(get_max_files);

/*
 * Handle nr_files sysctl
 */
#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
int proc_nr_files(struct ctl_table *table, int write,
		  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	files_stat.nr_files = get_nr_files();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#else
int proc_nr_files(struct ctl_table *table, int write,
		  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return -ENOSYS;
}
#endif
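
/*
 * For reference, a sketch of how the handler above is typically wired
 * up from the fs_table in kernel/sysctl.c (field values assumed from
 * kernels of this vintage, not compiled here):
 *
 *	{
 *		.procname	= "file-nr",
 *		.data		= &files_stat,
 *		.maxlen		= sizeof(files_stat),
 *		.mode		= 0444,
 *		.proc_handler	= proc_nr_files,
 *	},
 */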

/* Find an unused file structure and return a pointer to it.
 * Returns an error pointer if some error happened, e.g. we are over the
 * file structures limit, ran out of memory or the operation is not
 * permitted.
 *
 * Be very careful using this.  You are responsible for
 * getting write access to any mount that you might assign
 * to this filp, if it is opened for write.  If this is not
 * done, you will imbalance the mount's writer count
 * and get a warning at __fput() time.
 */
struct file *get_empty_filp(void)
{
	const struct cred *cred = current_cred();
	static long old_max;
	struct file *f;
	int error;

	/*
	 * Privileged users can go above max_files
	 */
	if (get_nr_files() >= files_stat.max_files && !capable(CAP_SYS_ADMIN)) {
		/*
		 * percpu_counters are inaccurate.  Do an expensive check before
		 * we go and fail.
		 */
		if (percpu_counter_sum_positive(&nr_files) >= files_stat.max_files)
			goto over;
	}

	f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL);
	if (unlikely(!f))
		return ERR_PTR(-ENOMEM);

	percpu_counter_inc(&nr_files);
	f->f_cred = get_cred(cred);
	error = security_file_alloc(f);
	if (unlikely(error)) {
		file_free(f);
		return ERR_PTR(error);
	}

	atomic_long_set(&f->f_count, 1);
	rwlock_init(&f->f_owner.lock);
	spin_lock_init(&f->f_lock);
	mutex_init(&f->f_pos_lock);
	eventpoll_init_file(f);
	/* f->f_version: 0 */
	return f;

over:
	/* Ran out of filps - report that */
	if (get_nr_files() > old_max) {
		pr_info("VFS: file-max limit %lu reached\n", get_max_files());
		old_max = get_nr_files();
	}
	return ERR_PTR(-ENFILE);
}
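
/*
 * Sketch of the rule from the comment above (illustrative, not a real
 * caller): an open for write pairs inode and mount write access with
 * FMODE_WRITER, so that __fput()'s put_write_access() and
 * __mnt_drop_write() balance out:
 *
 *	error = __mnt_want_write(path->mnt);
 *	if (!error) {
 *		file = get_empty_filp();
 *		if (!IS_ERR(file))
 *			file->f_mode |= FMODE_WRITER;
 *		else
 *			__mnt_drop_write(path->mnt);
 *	}
 */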

/**
 * alloc_file - allocate and initialize a 'struct file'
 *
 * @path: the (dentry, vfsmount) pair for the new file
 * @mode: the mode with which the new file will be opened
 * @fop: the 'struct file_operations' for the new file
 */
struct file *alloc_file(struct path *path, fmode_t mode,
		const struct file_operations *fop)
{
	struct file *file;

	file = get_empty_filp();
	if (IS_ERR(file))
		return file;

	file->f_path = *path;
	file->f_inode = path->dentry->d_inode;
	file->f_mapping = path->dentry->d_inode->i_mapping;
	if ((mode & FMODE_READ) &&
	    likely(fop->read || fop->read_iter))
		mode |= FMODE_CAN_READ;
	if ((mode & FMODE_WRITE) &&
	    likely(fop->write || fop->write_iter))
		mode |= FMODE_CAN_WRITE;
	file->f_mode = mode;
	file->f_op = fop;
	if ((mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
		i_readcount_inc(path->dentry->d_inode);
	return file;
}
EXPORT_SYMBOL(alloc_file);
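
/*
 * Illustrative sketch (hypothetical helper, not part of this file's
 * API): a typical alloc_file() caller pins a (dentry, vfsmount) pair,
 * supplies its own file_operations, and stashes object state in
 * private_data, roughly as the pipe and socket code do.
 */
static struct file *__maybe_unused example_alloc_file(struct path *path,
		const struct file_operations *fops, void *state)
{
	struct file *file = alloc_file(path, FMODE_READ | FMODE_WRITE, fops);

	if (!IS_ERR(file))
		file->private_data = state;	/* hypothetical object state */
	return file;
}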

/* the real guts of fput() - releasing the last reference to the file */
static void __fput(struct file *file)
{
	struct dentry *dentry = file->f_path.dentry;
	struct vfsmount *mnt = file->f_path.mnt;
	struct inode *inode = file->f_inode;

	might_sleep();

	fsnotify_close(file);
	/*
	 * eventpoll_release() must be called first in the file
	 * cleanup chain.
	 */
	eventpoll_release(file);
	locks_remove_file(file);

	if (unlikely(file->f_flags & FASYNC)) {
		if (file->f_op->fasync)
			file->f_op->fasync(-1, file, 0);
	}
	ima_file_free(file);
	if (file->f_op->release)
		file->f_op->release(inode, file);
	security_file_free(file);
	if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL &&
		     !(file->f_mode & FMODE_PATH))) {
		cdev_put(inode->i_cdev);
	}
	fops_put(file->f_op);
	put_pid(file->f_owner.pid);
	if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
		i_readcount_dec(inode);
	if (file->f_mode & FMODE_WRITER) {
		put_write_access(inode);
		__mnt_drop_write(mnt);
	}
	file->f_path.dentry = NULL;
	file->f_path.mnt = NULL;
	file->f_inode = NULL;
	file_free(file);
	dput(dentry);
	mntput(mnt);
}

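/*
 * The final __fput() must run in process context and may sleep, so
 * fput() defers it: through task_work for ordinary tasks, or through
 * this llist plus delayed work for kernel threads and interrupt
 * context.
 */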
static LLIST_HEAD(delayed_fput_list);
static void delayed_fput(struct work_struct *unused)
{
	struct llist_node *node = llist_del_all(&delayed_fput_list);
	struct llist_node *next;

	for (; node; node = next) {
		next = llist_next(node);
		__fput(llist_entry(node, struct file, f_u.fu_llist));
	}
}

static void ____fput(struct callback_head *work)
{
	__fput(container_of(work, struct file, f_u.fu_rcuhead));
}

/*
 * If a kernel thread really needs the final fput() it has done to
 * complete, call this.  The only user right now is the boot - we
 * *do* need to make sure our writes to binaries on initramfs have
 * not left us with opened struct files waiting for __fput() - execve()
 * won't work without that.  Please, don't add more callers without
 * very good reasons; in particular, never call this with locks
 * held and never call it from a thread that might need to do
 * some work on any kind of umount.
 */
void flush_delayed_fput(void)
{
	delayed_fput(NULL);
}

static DECLARE_DELAYED_WORK(delayed_fput_work, delayed_fput);

void fput(struct file *file)
{
	if (atomic_long_dec_and_test(&file->f_count)) {
		struct task_struct *task = current;

		if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) {
			init_task_work(&file->f_u.fu_rcuhead, ____fput);
			if (!task_work_add(task, &file->f_u.fu_rcuhead, true))
				return;
			/*
			 * After this task has run exit_task_work(),
			 * task_work_add() will fail.  Fall through to delayed
			 * fput to avoid leaking *file.
			 */
		}

		if (llist_add(&file->f_u.fu_llist, &delayed_fput_list))
			schedule_delayed_work(&delayed_fput_work, 1);
	}
}
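
/*
 * Illustrative sketch (hypothetical caller, not part of this file): the
 * usual pairing - fget() takes a reference by descriptor, fput() may
 * drop the final one and defer __fput() as above.
 */
static int __maybe_unused example_use_fd(unsigned int fd)
{
	struct file *file = fget(fd);

	if (!file)
		return -EBADF;
	/* ... operate on file ... */
	fput(file);
	return 0;
}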

/*
 * Synchronous analog of fput(); for kernel threads that might be needed
 * in some umount() (and thus can't use flush_delayed_fput() without
 * risking deadlocks), that need to wait for completion of __fput() and
 * know that for this specific struct file it won't involve anything that
 * would need them.  Use only if you really need it - at the very least,
 * don't blindly convert an fput() by a kernel thread to this.
 */
void __fput_sync(struct file *file)
{
	if (atomic_long_dec_and_test(&file->f_count)) {
		struct task_struct *task = current;
		BUG_ON(!(task->flags & PF_KTHREAD));
		__fput(file);
	}
}

EXPORT_SYMBOL(fput);

void put_filp(struct file *file)
{
	if (atomic_long_dec_and_test(&file->f_count)) {
		security_file_free(file);
		file_free(file);
	}
}

void __init files_init(unsigned long mempages)
{
	unsigned long n;

	filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
			SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);

	/*
	 * One file with associated inode and dcache is very roughly 1K.
	 * Per default don't use more than 10% of our memory for files.
	 */

	n = (mempages * (PAGE_SIZE / 1024)) / 10;
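	/*
	 * Worked example: with 4KB pages and 1GB of memory (mempages =
	 * 262144), n = (262144 * 4) / 10 = 104857, so the default limit
	 * becomes max(104857, NR_FILE).
	 */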
	files_stat.max_files = max_t(unsigned long, n, NR_FILE);
	percpu_counter_init(&nr_files, 0, GFP_KERNEL);
}