/*
 * linux/fs/file.c
 *
 * Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
 *
 * Manage the dynamic fd arrays in the process files_struct.
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/time.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/file.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>

struct fdtable_defer {
        spinlock_t lock;
        struct work_struct wq;
        struct timer_list timer;
        struct fdtable *next;
};

/*
 * We use this list to defer freeing fdtables that have vmalloc'ed
 * sets/arrays. By keeping a per-cpu list, we avoid having to embed
 * the work_struct in the fdtable itself, which would add 64 bytes
 * (on i386) to this per-task structure.
 */
static DEFINE_PER_CPU(struct fdtable_defer, fdtable_defer_list);


/*
 * Allocate an fd array, using kmalloc or vmalloc.
 * Note: the array isn't cleared at allocation time.
 */
struct file ** alloc_fd_array(int num)
{
        struct file **new_fds;
        int size = num * sizeof(struct file *);

        if (size <= PAGE_SIZE)
                new_fds = (struct file **) kmalloc(size, GFP_KERNEL);
        else
                new_fds = (struct file **) vmalloc(size);
        return new_fds;
}

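/*
 * Free an fd array allocated by alloc_fd_array(). An array small
 * enough to be the embedded NR_OPEN_DEFAULT one is never freed here.
 */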
void free_fd_array(struct file **array, int num)
{
        int size = num * sizeof(struct file *);

        if (!array) {
                printk(KERN_ERR "free_fd_array: array = 0 (num = %d)\n", num);
                return;
        }

        if (num <= NR_OPEN_DEFAULT) /* Don't free the embedded fd array! */
                return;
        else if (size <= PAGE_SIZE)
                kfree(array);
        else
                vfree(array);
}

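/*
 * Free both fdsets, the fd array and the fdtable itself. Only for
 * tables that are no longer (or never were) visible to RCU readers.
 */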
static void __free_fdtable(struct fdtable *fdt)
{
        free_fdset(fdt->open_fds, fdt->max_fdset);
        free_fdset(fdt->close_on_exec, fdt->max_fdset);
        free_fd_array(fdt->fd, fdt->max_fds);
        kfree(fdt);
}

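/*
 * Timer callback armed when the deferred-free work could not be queued
 * because it was already pending: retry scheduling the work, re-arming
 * the timer again if necessary, until the defer queue is drained.
 */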
static void fdtable_timer(unsigned long data)
{
        struct fdtable_defer *fddef = (struct fdtable_defer *)data;

        spin_lock(&fddef->lock);
        /*
         * If someone already emptied the queue, return.
         */
        if (!fddef->next)
                goto out;
        if (!schedule_work(&fddef->wq))
                mod_timer(&fddef->timer, 5);
out:
        spin_unlock(&fddef->lock);
}

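/*
 * Workqueue handler: detach the whole deferred list under the lock,
 * then free each fdtable from process context (the tables queued here
 * have vmalloc'ed parts, which cannot be freed from the RCU callback's
 * softirq context).
 */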
static void free_fdtable_work(struct fdtable_defer *f)
{
        struct fdtable *fdt;

        spin_lock_bh(&f->lock);
        fdt = f->next;
        f->next = NULL;
        spin_unlock_bh(&f->lock);
        while (fdt) {
                struct fdtable *next = fdt->next;
                __free_fdtable(fdt);
                fdt = next;
        }
}

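/*
 * RCU callback that disposes of an fdtable after the grace period.
 * Embedded tables need nothing (or just the enclosing files_struct
 * freed); kmalloc'ed pieces are freed inline; vmalloc'ed ones are
 * pushed onto the per-cpu defer list for the workqueue.
 */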
static void free_fdtable_rcu(struct rcu_head *rcu)
{
        struct fdtable *fdt = container_of(rcu, struct fdtable, rcu);
        int fdset_size, fdarray_size;
        struct fdtable_defer *fddef;

        BUG_ON(!fdt);
        fdset_size = fdt->max_fdset / 8;
        fdarray_size = fdt->max_fds * sizeof(struct file *);

        if (fdt->free_files) {
                /*
                 * This fdtable was embedded in the files structure
                 * and the files structure itself was getting destroyed.
                 * It is now safe to free the files structure.
                 */
                kmem_cache_free(files_cachep, fdt->free_files);
                return;
        }
        if (fdt->max_fdset <= EMBEDDED_FD_SET_SIZE &&
                fdt->max_fds <= NR_OPEN_DEFAULT) {
                /*
                 * The fdtable was embedded
                 */
                return;
        }
        if (fdset_size <= PAGE_SIZE && fdarray_size <= PAGE_SIZE) {
                kfree(fdt->open_fds);
                kfree(fdt->close_on_exec);
                kfree(fdt->fd);
                kfree(fdt);
        } else {
                fddef = &get_cpu_var(fdtable_defer_list);
                spin_lock(&fddef->lock);
                fdt->next = fddef->next;
                fddef->next = fdt;
                /*
                 * vmallocs are handled from the workqueue context.
                 * If the per-cpu workqueue is running, then we
                 * defer work scheduling through a timer.
                 */
                if (!schedule_work(&fddef->wq))
                        mod_timer(&fddef->timer, 5);
                spin_unlock(&fddef->lock);
                put_cpu_var(fdtable_defer_list);
        }
}

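/*
 * Defer freeing of an fdtable until after the RCU grace period. A
 * table that is entirely embedded in files_struct (and does not carry
 * ->free_files) needs no freeing at all.
 */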
void free_fdtable(struct fdtable *fdt)
{
        if (fdt->free_files ||
                        fdt->max_fdset > EMBEDDED_FD_SET_SIZE ||
                        fdt->max_fds > NR_OPEN_DEFAULT)
                call_rcu(&fdt->rcu, free_fdtable_rcu);
}

/*
 * Copy the existing fdsets and fd array into the newly allocated
 * fdtable, clearing the slots beyond the old size. Called with
 * files->file_lock held.
 */
static void copy_fdtable(struct fdtable *nfdt, struct fdtable *fdt)
{
        int i;
        int count;

        BUG_ON(nfdt->max_fdset < fdt->max_fdset);
        BUG_ON(nfdt->max_fds < fdt->max_fds);
        /* Copy the existing tables and install the new pointers */

        i = fdt->max_fdset / (sizeof(unsigned long) * 8);
        count = (nfdt->max_fdset - fdt->max_fdset) / 8;

        /*
         * Don't copy the entire array if the current fdset is
         * not yet initialised.
         */
        if (i) {
                memcpy(nfdt->open_fds, fdt->open_fds,
                                                fdt->max_fdset/8);
                memcpy(nfdt->close_on_exec, fdt->close_on_exec,
                                                fdt->max_fdset/8);
                memset(&nfdt->open_fds->fds_bits[i], 0, count);
                memset(&nfdt->close_on_exec->fds_bits[i], 0, count);
        }

        /* Don't copy/clear the array if we are creating a new
           fd array for fork() */
        if (fdt->max_fds) {
                memcpy(nfdt->fd, fdt->fd,
                        fdt->max_fds * sizeof(struct file *));
                /* clear the remainder of the array */
                memset(&nfdt->fd[fdt->max_fds], 0,
                       (nfdt->max_fds - fdt->max_fds) *
                                        sizeof(struct file *));
        }
}

/*
 * Allocate an fdset array, using kmalloc or vmalloc.
 * Note: the array isn't cleared at allocation time.
 */
fd_set * alloc_fdset(int num)
{
        fd_set *new_fdset;
        int size = num / 8;

        if (size <= PAGE_SIZE)
                new_fdset = (fd_set *) kmalloc(size, GFP_KERNEL);
        else
                new_fdset = (fd_set *) vmalloc(size);
        return new_fdset;
}

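/*
 * Free an fdset allocated by alloc_fdset(). num is the size in bits;
 * an fdset small enough to be the embedded one is never freed here.
 */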
void free_fdset(fd_set *array, int num)
{
        if (num <= EMBEDDED_FD_SET_SIZE) /* Don't free an embedded fdset */
                return;
        else if (num <= 8 * PAGE_SIZE)
                kfree(array);
        else
                vfree(array);
}

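/*
 * Allocate a new fdtable for at least nr descriptors. The fdsets are
 * sized in one go (at least 8 * L1_CACHE_BYTES bits, or the next power
 * of two above nr, whichever is larger); the fd array is grown in
 * steps - 256 entries, one page worth, then doubling - until it covers
 * nr, all capped at NR_OPEN. Returns NULL if any allocation fails.
 */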
static struct fdtable *alloc_fdtable(int nr)
{
        struct fdtable *fdt = NULL;
        int nfds = 0;
        fd_set *new_openset = NULL, *new_execset = NULL;
        struct file **new_fds;

        fdt = kzalloc(sizeof(*fdt), GFP_KERNEL);
        if (!fdt)
                goto out;

        nfds = max_t(int, 8 * L1_CACHE_BYTES, roundup_pow_of_two(nr + 1));
        if (nfds > NR_OPEN)
                nfds = NR_OPEN;

        new_openset = alloc_fdset(nfds);
        new_execset = alloc_fdset(nfds);
        if (!new_openset || !new_execset)
                goto out;
        fdt->open_fds = new_openset;
        fdt->close_on_exec = new_execset;
        fdt->max_fdset = nfds;

        nfds = NR_OPEN_DEFAULT;
        /*
         * Expand to the max in easy steps, and keep expanding it until
         * we have enough for the requested fd array size.
         */
        do {
#if NR_OPEN_DEFAULT < 256
                if (nfds < 256)
                        nfds = 256;
                else
#endif
                if (nfds < (PAGE_SIZE / sizeof(struct file *)))
                        nfds = PAGE_SIZE / sizeof(struct file *);
                else {
                        nfds = nfds * 2;
                        if (nfds > NR_OPEN)
                                nfds = NR_OPEN;
                }
        } while (nfds <= nr);
        new_fds = alloc_fd_array(nfds);
        if (!new_fds)
                goto out2;
        fdt->fd = new_fds;
        fdt->max_fds = nfds;
        fdt->free_files = NULL;
        return fdt;
out2:
        nfds = fdt->max_fdset;
out:
        free_fdset(new_openset, nfds);
        free_fdset(new_execset, nfds);
        kfree(fdt);
        return NULL;
}

/*
 * Expand the file descriptor table.
 * This function will allocate a new fdtable and both fd array and fdset, of
 * the given size.
 * Return <0 error code on error; 1 on successful completion.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_fdtable(struct files_struct *files, int nr)
        __releases(files->file_lock)
        __acquires(files->file_lock)
{
        struct fdtable *new_fdt, *cur_fdt;

        spin_unlock(&files->file_lock);
        new_fdt = alloc_fdtable(nr);
        spin_lock(&files->file_lock);
        if (!new_fdt)
                return -ENOMEM;
        /*
         * Check again since another task may have expanded the fd table while
         * we dropped the lock
         */
        cur_fdt = files_fdtable(files);
        if (nr >= cur_fdt->max_fds || nr >= cur_fdt->max_fdset) {
                /* Continue as planned */
                copy_fdtable(new_fdt, cur_fdt);
                rcu_assign_pointer(files->fdt, new_fdt);
                free_fdtable(cur_fdt);
        } else {
                /* Somebody else expanded, so undo our attempt */
                __free_fdtable(new_fdt);
        }
        return 1;
}

/*
 * Expand files.
 * This function will expand the file structures, if the requested size exceeds
 * the current capacity and there is room for expansion.
 * Return <0 error code on error; 0 when nothing done; 1 when files were
 * expanded and execution may have blocked.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
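/*
 * Usage sketch (illustrative only, paraphrased from the contract above
 * rather than copied from a particular caller such as get_unused_fd()):
 * with files->file_lock held,
 *
 *      error = expand_files(files, fd);
 *      if (error < 0)
 *              goto out;       (hit -EMFILE or -ENOMEM)
 *      if (error)
 *              goto repeat;    (the fdtable may have been reallocated while
 *                               the lock was dropped, so re-read it with
 *                               files_fdtable() before touching it)
 */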
int expand_files(struct files_struct *files, int nr)
{
        struct fdtable *fdt;

        fdt = files_fdtable(files);
        /* Do we need to expand? */
        if (nr < fdt->max_fdset && nr < fdt->max_fds)
                return 0;
        /* Can we expand? */
        if (fdt->max_fdset >= NR_OPEN || fdt->max_fds >= NR_OPEN ||
            nr >= NR_OPEN)
                return -EMFILE;

        /* All good, so we try */
        return expand_fdtable(files, nr);
}
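/*
 * Per-cpu setup for the deferred-free machinery: initialise the lock,
 * the work item that runs free_fdtable_work() and the backup timer.
 */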
static void __devinit fdtable_defer_list_init(int cpu)
{
        struct fdtable_defer *fddef = &per_cpu(fdtable_defer_list, cpu);
        spin_lock_init(&fddef->lock);
        INIT_WORK(&fddef->wq, (void (*)(void *))free_fdtable_work, fddef);
        init_timer(&fddef->timer);
        fddef->timer.data = (unsigned long)fddef;
        fddef->timer.function = fdtable_timer;
        fddef->next = NULL;
}

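/*
 * Boot-time hook: initialise the defer list slot of every possible CPU.
 */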
void __init files_defer_init(void)
{
        int i;
        for_each_possible_cpu(i)
                fdtable_defer_list_init(i);
}