// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/fs/file_table.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 1997 David S. Miller ([email protected])
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/filelock.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/eventpoll.h>
#include <linux/rcupdate.h>
#include <linux/mount.h>
#include <linux/capability.h>
#include <linux/cdev.h>
#include <linux/fsnotify.h>
#include <linux/sysctl.h>
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/task_work.h>
#include <linux/ima.h>
#include <linux/swap.h>
#include <linux/kmemleak.h>

#include <linux/atomic.h>

#include "internal.h"

/* sysctl tunables... */
static struct files_stat_struct files_stat = {
        .max_files = NR_FILE
};

/* SLAB cache for file structures */
static struct kmem_cache *filp_cachep __ro_after_init;

static struct percpu_counter nr_files __cacheline_aligned_in_smp;

/* Container for backing file with optional user path */
struct backing_file {
        struct file file;
        struct path user_path;
};

static inline struct backing_file *backing_file(struct file *f)
{
        return container_of(f, struct backing_file, file);
}

struct path *backing_file_user_path(struct file *f)
{
        return &backing_file(f)->user_path;
}
EXPORT_SYMBOL_GPL(backing_file_user_path);

static inline void file_free(struct file *f)
{
        security_file_free(f);
        if (likely(!(f->f_mode & FMODE_NOACCOUNT)))
                percpu_counter_dec(&nr_files);
        put_cred(f->f_cred);
        if (unlikely(f->f_mode & FMODE_BACKING)) {
                path_put(backing_file_user_path(f));
                kfree(backing_file(f));
        } else {
                kmem_cache_free(filp_cachep, f);
        }
}

/*
 * Return the total number of open files in the system
 */
static long get_nr_files(void)
{
        return percpu_counter_read_positive(&nr_files);
}

/*
 * Return the maximum number of open files in the system
 */
unsigned long get_max_files(void)
{
        return files_stat.max_files;
}
EXPORT_SYMBOL_GPL(get_max_files);

#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)

/*
 * Handle nr_files sysctl
 */
static int proc_nr_files(struct ctl_table *table, int write, void *buffer,
                         size_t *lenp, loff_t *ppos)
{
        files_stat.nr_files = get_nr_files();
        return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}

static struct ctl_table fs_stat_sysctls[] = {
        {
                .procname       = "file-nr",
                .data           = &files_stat,
                .maxlen         = sizeof(files_stat),
                .mode           = 0444,
                .proc_handler   = proc_nr_files,
        },
        {
                .procname       = "file-max",
                .data           = &files_stat.max_files,
                .maxlen         = sizeof(files_stat.max_files),
                .mode           = 0644,
                .proc_handler   = proc_doulongvec_minmax,
                .extra1         = SYSCTL_LONG_ZERO,
                .extra2         = SYSCTL_LONG_MAX,
        },
        {
                .procname       = "nr_open",
                .data           = &sysctl_nr_open,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = &sysctl_nr_open_min,
                .extra2         = &sysctl_nr_open_max,
        },
        { }
};

static int __init init_fs_stat_sysctls(void)
{
        register_sysctl_init("fs", fs_stat_sysctls);
        if (IS_ENABLED(CONFIG_BINFMT_MISC)) {
                struct ctl_table_header *hdr;

                hdr = register_sysctl_mount_point("fs/binfmt_misc");
                kmemleak_not_leak(hdr);
        }
        return 0;
}
fs_initcall(init_fs_stat_sysctls);
#endif

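/*
 * A minimal userspace sketch (not part of this file) of how the sysctls
 * registered above are typically consumed: "file-nr" exposes the whole
 * files_stat structure (current count, a legacy always-zero field, and the
 * limit), while "file-max" exposes only files_stat.max_files:
 *
 *      #include <stdio.h>
 *
 *      int main(void)
 *      {
 *              unsigned long nr, unused, max;
 *              FILE *fp = fopen("/proc/sys/fs/file-nr", "r");
 *
 *              if (fp && fscanf(fp, "%lu %lu %lu", &nr, &unused, &max) == 3)
 *                      printf("open files: %lu, limit: %lu\n", nr, max);
 *              if (fp)
 *                      fclose(fp);
 *              return 0;
 *      }
 */
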
static int init_file(struct file *f, int flags, const struct cred *cred)
{
        int error;

        f->f_cred = get_cred(cred);
        error = security_file_alloc(f);
        if (unlikely(error)) {
                put_cred(f->f_cred);
                return error;
        }

        rwlock_init(&f->f_owner.lock);
        spin_lock_init(&f->f_lock);
        mutex_init(&f->f_pos_lock);
        f->f_flags = flags;
        f->f_mode = OPEN_FMODE(flags);
        /* f->f_version: 0 */

        /*
         * We're SLAB_TYPESAFE_BY_RCU so initialize f_count last. While
         * fget-rcu pattern users need to be able to handle spurious
         * refcount bumps, we should reinitialize the reused file first.
         */
        atomic_long_set(&f->f_count, 1);
        return 0;
}

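/*
 * The ordering above matters because of the fget-rcu pattern used by
 * lockless fd-table lookups. A simplified sketch of that pattern (assumed
 * here for illustration; the real lookup lives in fs/file.c) is:
 *
 *      rcu_read_lock();
 *      file = rcu_dereference_raw(fdt->fd[fd]);
 *      if (file && !atomic_long_inc_not_zero(&file->f_count))
 *              file = NULL;    // raced with the last fput()
 *      // With SLAB_TYPESAFE_BY_RCU the object may have been freed and
 *      // reused, so the caller must re-check that fdt->fd[fd] still
 *      // points to the same file before trusting the reference.
 *      rcu_read_unlock();
 *
 * Setting f_count last means a lookup that wins the inc_not_zero race can
 * only ever see a fully reinitialized file.
 */
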
/* Find an unused file structure and return a pointer to it.
 * Returns an error pointer if some error happened, e.g. we are over the
 * file structures limit, run out of memory or the operation is not permitted.
 *
 * Be very careful using this.  You are responsible for
 * getting write access to any mount that you might assign
 * to this filp, if it is opened for write.  If this is not
 * done, you will imbalance the mount's writer count
 * and get a warning at __fput() time.
 */
struct file *alloc_empty_file(int flags, const struct cred *cred)
{
        static long old_max;
        struct file *f;
        int error;

        /*
         * Privileged users can go above max_files
         */
        if (get_nr_files() >= files_stat.max_files && !capable(CAP_SYS_ADMIN)) {
                /*
                 * percpu_counters are inaccurate.  Do an expensive check before
                 * we go and fail.
                 */
                if (percpu_counter_sum_positive(&nr_files) >= files_stat.max_files)
                        goto over;
        }

        f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL);
        if (unlikely(!f))
                return ERR_PTR(-ENOMEM);

        error = init_file(f, flags, cred);
        if (unlikely(error)) {
                kmem_cache_free(filp_cachep, f);
                return ERR_PTR(error);
        }

        percpu_counter_inc(&nr_files);

        return f;

over:
        /* Ran out of filps - report that */
        if (get_nr_files() > old_max) {
                pr_info("VFS: file-max limit %lu reached\n", get_max_files());
                old_max = get_nr_files();
        }
        return ERR_PTR(-ENFILE);
}

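/*
 * A minimal caller sketch (illustrative, not from this file): if setup fails
 * after allocation, dropping the only reference with fput() is enough, since
 * fput() below detects that the file was never marked FMODE_OPENED and simply
 * frees it. Callers that will open the file for write are additionally
 * responsible for taking write access on the mount, as the comment above warns.
 *
 *      struct file *f = alloc_empty_file(O_RDWR, current_cred());
 *
 *      if (IS_ERR(f))
 *              return PTR_ERR(f);
 *      if (setup_fails) {              // hypothetical failure condition
 *              fput(f);                // never opened: just frees it
 *              return -EINVAL;
 *      }
 */
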
/*
 * Variant of alloc_empty_file() that doesn't check and modify nr_files.
 *
 * This is only for kernel internal use, and the allocated file must not be
 * installed into file tables or such.
 */
struct file *alloc_empty_file_noaccount(int flags, const struct cred *cred)
{
        struct file *f;
        int error;

        f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL);
        if (unlikely(!f))
                return ERR_PTR(-ENOMEM);

        error = init_file(f, flags, cred);
        if (unlikely(error)) {
                kmem_cache_free(filp_cachep, f);
                return ERR_PTR(error);
        }

        f->f_mode |= FMODE_NOACCOUNT;

        return f;
}

/*
 * Variant of alloc_empty_file() that allocates a backing_file container
 * and doesn't check and modify nr_files.
 *
 * This is only for kernel internal use, and the allocated file must not be
 * installed into file tables or such.
 */
struct file *alloc_empty_backing_file(int flags, const struct cred *cred)
{
        struct backing_file *ff;
        int error;

        ff = kzalloc(sizeof(struct backing_file), GFP_KERNEL);
        if (unlikely(!ff))
                return ERR_PTR(-ENOMEM);

        error = init_file(&ff->file, flags, cred);
        if (unlikely(error)) {
                kfree(ff);
                return ERR_PTR(error);
        }

        ff->file.f_mode |= FMODE_BACKING | FMODE_NOACCOUNT;
        return &ff->file;
}

/**
 * alloc_file - allocate and initialize a 'struct file'
 *
 * @path: the (dentry, vfsmount) pair for the new file
 * @flags: O_... flags with which the new file will be opened
 * @fop: the 'struct file_operations' for the new file
 */
static struct file *alloc_file(const struct path *path, int flags,
                const struct file_operations *fop)
{
        struct file *file;

        file = alloc_empty_file(flags, current_cred());
        if (IS_ERR(file))
                return file;

        file->f_path = *path;
        file->f_inode = path->dentry->d_inode;
        file->f_mapping = path->dentry->d_inode->i_mapping;
        file->f_wb_err = filemap_sample_wb_err(file->f_mapping);
        file->f_sb_err = file_sample_sb_err(file);
        if (fop->llseek)
                file->f_mode |= FMODE_LSEEK;
        if ((file->f_mode & FMODE_READ) &&
             likely(fop->read || fop->read_iter))
                file->f_mode |= FMODE_CAN_READ;
        if ((file->f_mode & FMODE_WRITE) &&
             likely(fop->write || fop->write_iter))
                file->f_mode |= FMODE_CAN_WRITE;
        file->f_iocb_flags = iocb_flags(file);
        file->f_mode |= FMODE_OPENED;
        file->f_op = fop;
        if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
                i_readcount_inc(path->dentry->d_inode);
        return file;
}

struct file *alloc_file_pseudo(struct inode *inode, struct vfsmount *mnt,
                                const char *name, int flags,
                                const struct file_operations *fops)
{
        static const struct dentry_operations anon_ops = {
                .d_dname = simple_dname
        };
        struct qstr this = QSTR_INIT(name, strlen(name));
        struct path path;
        struct file *file;

        path.dentry = d_alloc_pseudo(mnt->mnt_sb, &this);
        if (!path.dentry)
                return ERR_PTR(-ENOMEM);
        if (!mnt->mnt_sb->s_d_op)
                d_set_d_op(path.dentry, &anon_ops);
        path.mnt = mntget(mnt);
        d_instantiate(path.dentry, inode);
        file = alloc_file(&path, flags, fops);
        if (IS_ERR(file)) {
                ihold(inode);
                path_put(&path);
        }
        return file;
}
EXPORT_SYMBOL(alloc_file_pseudo);

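/*
 * A hedged usage sketch (not from this file, names are illustrative):
 * callers such as the anonymous-inode and shmem code pass an inode that
 * lives on a kernel-internal mount plus a cosmetic name (shown e.g. in
 * /proc/<pid>/fd):
 *
 *      struct file *file;
 *
 *      file = alloc_file_pseudo(inode, my_internal_mnt, "[my-pseudo]",
 *                               O_RDWR, &my_pseudo_fops);
 *      if (IS_ERR(file))
 *              goto err;       // real callers typically iput() the inode
 *                              // they passed in on this error path
 */
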
struct file *alloc_file_clone(struct file *base, int flags,
                                const struct file_operations *fops)
{
        struct file *f = alloc_file(&base->f_path, flags, fops);
        if (!IS_ERR(f)) {
                path_get(&f->f_path);
                f->f_mapping = base->f_mapping;
        }
        return f;
}

/* the real guts of fput() - releasing the last reference to file
 */
static void __fput(struct file *file)
{
        struct dentry *dentry = file->f_path.dentry;
        struct vfsmount *mnt = file->f_path.mnt;
        struct inode *inode = file->f_inode;
        fmode_t mode = file->f_mode;

        if (unlikely(!(file->f_mode & FMODE_OPENED)))
                goto out;

        might_sleep();

        fsnotify_close(file);
        /*
         * The function eventpoll_release() should be the first called
         * in the file cleanup chain.
         */
        eventpoll_release(file);
        locks_remove_file(file);

        ima_file_free(file);
        if (unlikely(file->f_flags & FASYNC)) {
                if (file->f_op->fasync)
                        file->f_op->fasync(-1, file, 0);
        }
        if (file->f_op->release)
                file->f_op->release(inode, file);
        if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL &&
                     !(mode & FMODE_PATH))) {
                cdev_put(inode->i_cdev);
        }
        fops_put(file->f_op);
        put_pid(file->f_owner.pid);
        put_file_access(file);
        dput(dentry);
        if (unlikely(mode & FMODE_NEED_UNMOUNT))
                dissolve_on_fput(mnt);
        mntput(mnt);
out:
        file_free(file);
}

static LLIST_HEAD(delayed_fput_list);
static void delayed_fput(struct work_struct *unused)
{
        struct llist_node *node = llist_del_all(&delayed_fput_list);
        struct file *f, *t;

        llist_for_each_entry_safe(f, t, node, f_llist)
                __fput(f);
}

static void ____fput(struct callback_head *work)
{
        __fput(container_of(work, struct file, f_task_work));
}

/*
 * If a kernel thread really needs to have the final fput() it has done
 * to complete, call this.  The only user right now is the boot - we
 * *do* need to make sure our writes to binaries on initramfs have
 * not left us with opened struct files waiting for __fput() - execve()
 * won't work without that.  Please, don't add more callers without
 * very good reasons; in particular, never call that with locks
 * held and never call that from a thread that might need to do
 * some work on any kind of umount.
 */
void flush_delayed_fput(void)
{
        delayed_fput(NULL);
}
EXPORT_SYMBOL_GPL(flush_delayed_fput);

static DECLARE_DELAYED_WORK(delayed_fput_work, delayed_fput);

void fput(struct file *file)
{
        if (atomic_long_dec_and_test(&file->f_count)) {
                struct task_struct *task = current;

                if (unlikely(!(file->f_mode & (FMODE_BACKING | FMODE_OPENED)))) {
                        file_free(file);
                        return;
                }
                if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) {
                        init_task_work(&file->f_task_work, ____fput);
                        if (!task_work_add(task, &file->f_task_work, TWA_RESUME))
                                return;
                        /*
                         * After this task has run exit_task_work(),
                         * task_work_add() will fail.  Fall through to delayed
                         * fput to avoid leaking *file.
                         */
                }

                if (llist_add(&file->f_llist, &delayed_fput_list))
                        schedule_delayed_work(&delayed_fput_work, 1);
        }
}

/*
 * synchronous analog of fput(); for kernel threads that might be needed
 * in some umount() (and thus can't use flush_delayed_fput() without
 * risking deadlocks) and that need to wait for completion of __fput(),
 * knowing that for this specific struct file it won't involve anything
 * that would need them.  Use only if you really need it - at the very
 * least, don't blindly convert fput() by kernel thread to that.
 */
void __fput_sync(struct file *file)
{
        if (atomic_long_dec_and_test(&file->f_count))
                __fput(file);
}

EXPORT_SYMBOL(fput);
EXPORT_SYMBOL(__fput_sync);

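/*
 * A short illustrative contrast (assumed example, not from this file):
 * ordinary code drops references with fput() and lets task work or the
 * delayed workqueue run __fput() later, while the rare kernel-thread
 * callers that must know the final release has completed before they
 * proceed use __fput_sync() instead:
 *
 *      fput(file);             // deferred: __fput() runs later
 *
 *      __fput_sync(file);      // immediate: __fput() may run right here,
 *                              // so the caller must not hold locks that
 *                              // the release path could also take
 */
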
void __init files_init(void)
{
        filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
                                SLAB_TYPESAFE_BY_RCU | SLAB_HWCACHE_ALIGN |
                                SLAB_PANIC | SLAB_ACCOUNT, NULL);
        percpu_counter_init(&nr_files, 0, GFP_KERNEL);
}

/*
 * One file with associated inode and dcache is very roughly 1K. Per default
 * do not use more than 10% of our memory for files.
 */
void __init files_maxfiles_init(void)
{
        unsigned long n;
        unsigned long nr_pages = totalram_pages();
        unsigned long memreserve = (nr_pages - nr_free_pages()) * 3/2;

        memreserve = min(memreserve, nr_pages - 1);
        n = ((nr_pages - memreserve) * (PAGE_SIZE / 1024)) / 10;

        files_stat.max_files = max_t(unsigned long, n, NR_FILE);
}
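
/*
 * A worked example of that default (assumed numbers, for illustration only):
 * with 4 GiB of 4 KiB pages, nr_pages is about 1,048,576; ignoring the early
 * reservation, n = (1048576 * (4096 / 1024)) / 10 = 419,430, so file-max
 * defaults to roughly 400k files, and is never set below NR_FILE.
 */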