/*
 *  linux/fs/file.c
 *
 *  Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
 *
 *  Manage the dynamic fd arrays in the process files_struct.
 */

#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/time.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>

struct fdtable_defer {
        spinlock_t lock;
        struct work_struct wq;
        struct fdtable *next;
};

int sysctl_nr_open __read_mostly = 1024*1024;
int sysctl_nr_open_min = BITS_PER_LONG;
int sysctl_nr_open_max = 1024 * 1024; /* raised later */

/*
 * We use this list to defer freeing fdtables that have vmalloc'd
 * sets/arrays. By keeping a per-cpu list, we avoid having to embed
 * the work_struct in fdtable itself, which avoids a 64 byte (i386)
 * increase in this per-task structure.
 */
static DEFINE_PER_CPU(struct fdtable_defer, fdtable_defer_list);

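/*
 * Deferral is needed because free_fdtable_rcu() runs from the RCU
 * softirq, and vfree() must not be called from interrupt context.
 * Tables whose arrays came from vmalloc() are therefore queued here
 * and released later by free_fdtable_work() in process context.
 */
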
static void *alloc_fdmem(size_t size)
{
        /*
         * Very large allocations can stress page reclaim, so fall back to
         * vmalloc() if the allocation size will be considered "large" by the VM.
         */
        if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
                void *data = kmalloc(size, GFP_KERNEL|__GFP_NOWARN);
                if (data != NULL)
                        return data;
        }
        return vmalloc(size);
}

static void free_fdmem(void *ptr)
{
        is_vmalloc_addr(ptr) ? vfree(ptr) : kfree(ptr);
}

static void __free_fdtable(struct fdtable *fdt)
{
        free_fdmem(fdt->fd);
        free_fdmem(fdt->open_fds);
        kfree(fdt);
}

static void free_fdtable_work(struct work_struct *work)
{
        struct fdtable_defer *f =
                container_of(work, struct fdtable_defer, wq);
        struct fdtable *fdt;

        spin_lock_bh(&f->lock);
        fdt = f->next;
        f->next = NULL;
        spin_unlock_bh(&f->lock);
        while (fdt) {
                struct fdtable *next = fdt->next;

                __free_fdtable(fdt);
                fdt = next;
        }
}

void free_fdtable_rcu(struct rcu_head *rcu)
{
        struct fdtable *fdt = container_of(rcu, struct fdtable, rcu);
        struct fdtable_defer *fddef;

        BUG_ON(!fdt);

        if (fdt->max_fds <= NR_OPEN_DEFAULT) {
                /*
                 * This fdtable is embedded in the files structure and that
                 * structure itself is getting destroyed.
                 */
                kmem_cache_free(files_cachep,
                                container_of(fdt, struct files_struct, fdtab));
                return;
        }
        if (!is_vmalloc_addr(fdt->fd) && !is_vmalloc_addr(fdt->open_fds)) {
                kfree(fdt->fd);
                kfree(fdt->open_fds);
                kfree(fdt);
        } else {
                fddef = &get_cpu_var(fdtable_defer_list);
                spin_lock(&fddef->lock);
                fdt->next = fddef->next;
                fddef->next = fdt;
                /* vmallocs are handled from the workqueue context */
                schedule_work(&fddef->wq);
                spin_unlock(&fddef->lock);
                put_cpu_var(fdtable_defer_list);
        }
}

/*
 * Expand the fdset in the files_struct.  Called with the files spinlock
 * held for write.
 */
static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
{
        unsigned int cpy, set;

        BUG_ON(nfdt->max_fds < ofdt->max_fds);

        cpy = ofdt->max_fds * sizeof(struct file *);
        set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *);
        memcpy(nfdt->fd, ofdt->fd, cpy);
        memset((char *)(nfdt->fd) + cpy, 0, set);

        cpy = ofdt->max_fds / BITS_PER_BYTE;
        set = (nfdt->max_fds - ofdt->max_fds) / BITS_PER_BYTE;
        memcpy(nfdt->open_fds, ofdt->open_fds, cpy);
        memset((char *)(nfdt->open_fds) + cpy, 0, set);
        memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy);
        memset((char *)(nfdt->close_on_exec) + cpy, 0, set);
}

static struct fdtable *alloc_fdtable(unsigned int nr)
{
        struct fdtable *fdt;
        void *data;

        /*
         * Figure out how many fds we actually want to support in this fdtable.
         * Allocation steps are keyed to the size of the fdarray, since it
         * grows far faster than any of the other dynamic data. We try to fit
         * the fdarray into comfortable page-tuned chunks: starting at 1024B
         * and growing in powers of two from there on.
         */
        nr /= (1024 / sizeof(struct file *));
        nr = roundup_pow_of_two(nr + 1);
        nr *= (1024 / sizeof(struct file *));
        /*
         * Note that this can drive nr *below* what we had passed if sysctl_nr_open
         * had been set lower between the check in expand_files() and here.  Deal
         * with that in caller, it's cheaper that way.
         *
         * We make sure that nr remains a multiple of BITS_PER_LONG - otherwise
         * bitmaps handling below becomes unpleasant, to put it mildly...
         */
        if (unlikely(nr > sysctl_nr_open))
                nr = ((sysctl_nr_open - 1) | (BITS_PER_LONG - 1)) + 1;

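        /*
         * Worked example of the sizing above, assuming a 64-bit kernel
         * (sizeof(struct file *) == 8, so 128 pointers per 1024B chunk):
         * nr = 300 -> 300/128 = 2 -> roundup_pow_of_two(3) = 4 -> 4*128 =
         * 512 slots, i.e. a 4KB fd array.  The clamp rounds sysctl_nr_open
         * up to a BITS_PER_LONG multiple: with sysctl_nr_open = 1000 it
         * yields ((999 | 63) + 1) = 1024.
         */
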
        fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL);
        if (!fdt)
                goto out;
        fdt->max_fds = nr;
        data = alloc_fdmem(nr * sizeof(struct file *));
        if (!data)
                goto out_fdt;
        fdt->fd = data;

        data = alloc_fdmem(max_t(size_t,
                                 2 * nr / BITS_PER_BYTE, L1_CACHE_BYTES));
        if (!data)
                goto out_arr;
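        /*
         * A single buffer backs both bitmaps: open_fds occupies the first
         * nr / BITS_PER_BYTE bytes and close_on_exec the second, which is
         * why 2 * nr / BITS_PER_BYTE bytes were requested above.
         */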
        fdt->open_fds = data;
        data += nr / BITS_PER_BYTE;
        fdt->close_on_exec = data;
        fdt->next = NULL;

        return fdt;

out_arr:
        free_fdmem(fdt->fd);
out_fdt:
        kfree(fdt);
out:
        return NULL;
}

/*
 * Expand the file descriptor table.
 * This function will allocate a new fdtable and both fd array and fdset, of
 * the given size.
 * Return <0 error code on error; 1 on successful completion.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_fdtable(struct files_struct *files, int nr)
        __releases(files->file_lock)
        __acquires(files->file_lock)
{
        struct fdtable *new_fdt, *cur_fdt;

        spin_unlock(&files->file_lock);
        new_fdt = alloc_fdtable(nr);
        spin_lock(&files->file_lock);
        if (!new_fdt)
                return -ENOMEM;
        /*
         * extremely unlikely race - sysctl_nr_open decreased between the check
         * in caller and alloc_fdtable().  Cheaper to catch it here...
         */
        if (unlikely(new_fdt->max_fds <= nr)) {
                __free_fdtable(new_fdt);
                return -EMFILE;
        }
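        /*
         * Note the <= above: a table of max_fds slots covers fds
         * 0..max_fds-1, so fitting fd nr requires max_fds > nr.
         */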
        /*
         * Check again since another task may have expanded the fd table while
         * we dropped the lock
         */
        cur_fdt = files_fdtable(files);
        if (nr >= cur_fdt->max_fds) {
                /* Continue as planned */
                copy_fdtable(new_fdt, cur_fdt);
                rcu_assign_pointer(files->fdt, new_fdt);
                if (cur_fdt->max_fds > NR_OPEN_DEFAULT)
                        free_fdtable(cur_fdt);
        } else {
                /* Somebody else expanded, so undo our attempt */
                __free_fdtable(new_fdt);
        }
        return 1;
}

/*
 * Expand files.
 * This function will expand the file structures, if the requested size exceeds
 * the current capacity and there is room for expansion.
 * Return <0 error code on error; 0 when nothing done; 1 when files were
 * expanded and execution may have blocked.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
int expand_files(struct files_struct *files, int nr)
{
        struct fdtable *fdt;

        fdt = files_fdtable(files);

        /*
         * N.B. For clone tasks sharing a files structure, this test
         * will limit the total number of files that can be opened.
         */
        if (nr >= rlimit(RLIMIT_NOFILE))
                return -EMFILE;

        /* Do we need to expand? */
        if (nr < fdt->max_fds)
                return 0;

        /* Can we expand? */
        if (nr >= sysctl_nr_open)
                return -EMFILE;

        /* All good, so we try */
        return expand_fdtable(files, nr);
}

static int count_open_files(struct fdtable *fdt)
{
        int size = fdt->max_fds;
        int i;

        /* Find the last open fd */
        for (i = size / BITS_PER_LONG; i > 0; ) {
                if (fdt->open_fds[--i])
                        break;
        }
        i = (i + 1) * BITS_PER_LONG;
        return i;
}

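/*
 * count_open_files() scans the open_fds bitmap backwards one word at a
 * time, so its result is always a multiple of BITS_PER_LONG.  E.g. on a
 * 64-bit kernel, if the highest open fd is 70 (word 1 of the bitmap) it
 * returns 128.  That guarantee is what lets dup_fd() below copy the
 * bitmaps with a byte-sized memcpy of open_files / 8.
 */
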
/*
 * Allocate a new files structure and copy contents from the
 * passed in files structure.
 * errorp will be valid only when the returned files_struct is NULL.
 */
struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
{
        struct files_struct *newf;
        struct file **old_fds, **new_fds;
        int open_files, size, i;
        struct fdtable *old_fdt, *new_fdt;

        *errorp = -ENOMEM;
        newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
        if (!newf)
                goto out;

        atomic_set(&newf->count, 1);

        spin_lock_init(&newf->file_lock);
        newf->next_fd = 0;
        new_fdt = &newf->fdtab;
        new_fdt->max_fds = NR_OPEN_DEFAULT;
        new_fdt->close_on_exec = newf->close_on_exec_init;
        new_fdt->open_fds = newf->open_fds_init;
        new_fdt->fd = &newf->fd_array[0];
        new_fdt->next = NULL;

        spin_lock(&oldf->file_lock);
        old_fdt = files_fdtable(oldf);
        open_files = count_open_files(old_fdt);

        /*
         * Check whether we need to allocate a larger fd array and fd set.
         */
        while (unlikely(open_files > new_fdt->max_fds)) {
                spin_unlock(&oldf->file_lock);

                if (new_fdt != &newf->fdtab)
                        __free_fdtable(new_fdt);

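                /*
                 * alloc_fdtable(nr) sizes its table to hold fd nr, i.e. at
                 * least nr + 1 slots, hence the open_files - 1 argument.
                 */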
                new_fdt = alloc_fdtable(open_files - 1);
                if (!new_fdt) {
                        *errorp = -ENOMEM;
                        goto out_release;
                }

                /* beyond sysctl_nr_open; nothing to do */
                if (unlikely(new_fdt->max_fds < open_files)) {
                        __free_fdtable(new_fdt);
                        *errorp = -EMFILE;
                        goto out_release;
                }

                /*
                 * Reacquire the oldf lock and a pointer to its fd table:
                 * it may have grown a new, bigger fd table while we
                 * dropped the lock, and we need the latest pointer.
                 */
                spin_lock(&oldf->file_lock);
                old_fdt = files_fdtable(oldf);
                open_files = count_open_files(old_fdt);
        }

        old_fds = old_fdt->fd;
        new_fds = new_fdt->fd;

        memcpy(new_fdt->open_fds, old_fdt->open_fds, open_files / 8);
        memcpy(new_fdt->close_on_exec, old_fdt->close_on_exec, open_files / 8);

        for (i = open_files; i != 0; i--) {
                struct file *f = *old_fds++;
                if (f) {
                        get_file(f);
                } else {
                        /*
                         * The fd may be claimed in the fd bitmap but not yet
                         * instantiated in the files array if a sibling thread
                         * is partway through open().  So make sure that this
                         * fd is available to the new process.
                         */
                        __clear_open_fd(open_files - i, new_fdt);
                }
                rcu_assign_pointer(*new_fds++, f);
        }
        spin_unlock(&oldf->file_lock);

        /* compute the remainder to be cleared */
        size = (new_fdt->max_fds - open_files) * sizeof(struct file *);

        /* This is long word aligned thus could use an optimized version */
        memset(new_fds, 0, size);

        if (new_fdt->max_fds > open_files) {
                int left = (new_fdt->max_fds - open_files) / 8;
                int start = open_files / BITS_PER_LONG;

                memset(&new_fdt->open_fds[start], 0, left);
                memset(&new_fdt->close_on_exec[start], 0, left);
        }

        rcu_assign_pointer(newf->fdt, new_fdt);

        return newf;

out_release:
        kmem_cache_free(files_cachep, newf);
out:
        return NULL;
}

static void __devinit fdtable_defer_list_init(int cpu)
{
        struct fdtable_defer *fddef = &per_cpu(fdtable_defer_list, cpu);
        spin_lock_init(&fddef->lock);
        INIT_WORK(&fddef->wq, free_fdtable_work);
        fddef->next = NULL;
}

void __init files_defer_init(void)
{
        int i;
        for_each_possible_cpu(i)
                fdtable_defer_list_init(i);
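        /*
         * Cap nr_open at INT_MAX and at the largest count whose fd array
         * still fits in size_t, rounded down to a BITS_PER_LONG multiple;
         * on a 64-bit kernel this works out to 0x7FFFFFC0 (2147483584).
         */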
        sysctl_nr_open_max = min((size_t)INT_MAX, ~(size_t)0/sizeof(void *)) &
                             -BITS_PER_LONG;
}

struct files_struct init_files = {
        .count          = ATOMIC_INIT(1),
        .fdt            = &init_files.fdtab,
        .fdtab          = {
                .max_fds        = NR_OPEN_DEFAULT,
                .fd             = &init_files.fd_array[0],
                .close_on_exec  = init_files.close_on_exec_init,
                .open_fds       = init_files.open_fds_init,
        },
        .file_lock      = __SPIN_LOCK_UNLOCKED(init_task.file_lock),
};

/*
 * allocate a file descriptor, mark it busy.
 */
int alloc_fd(unsigned start, unsigned flags)
{
        struct files_struct *files = current->files;
        unsigned int fd;
        int error;
        struct fdtable *fdt;

        spin_lock(&files->file_lock);
repeat:
        fdt = files_fdtable(files);
        fd = start;
        if (fd < files->next_fd)
                fd = files->next_fd;

        if (fd < fdt->max_fds)
                fd = find_next_zero_bit(fdt->open_fds, fdt->max_fds, fd);

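        /*
         * If the bitmap had no free slot, find_next_zero_bit() returned
         * fdt->max_fds, so expand_files() below will grow the table (or
         * report why it cannot).
         */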
        error = expand_files(files, fd);
        if (error < 0)
                goto out;

        /*
         * If we needed to expand the fd array we
         * might have blocked - try again.
         */
        if (error)
                goto repeat;

        if (start <= files->next_fd)
                files->next_fd = fd + 1;

        __set_open_fd(fd, fdt);
        if (flags & O_CLOEXEC)
                __set_close_on_exec(fd, fdt);
        else
                __clear_close_on_exec(fd, fdt);
        error = fd;
#if 1
        /* Sanity check */
        if (rcu_dereference_raw(fdt->fd[fd]) != NULL) {
                printk(KERN_WARNING "alloc_fd: slot %d not NULL!\n", fd);
                rcu_assign_pointer(fdt->fd[fd], NULL);
        }
#endif

out:
        spin_unlock(&files->file_lock);
        return error;
}

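/*
 * Callers of get_unused_fd() normally pair it with fd_install() once the
 * struct file is ready, or put_unused_fd() on the failure path, so the
 * reserved slot never leaks.
 */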
int get_unused_fd(void)
{
        return alloc_fd(0, 0);
}
EXPORT_SYMBOL(get_unused_fd);