/*
 *  linux/fs/file.c
 *
 *  Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
 *
 *  Manage the dynamic fd arrays in the process files_struct.
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/time.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/file.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>

struct fdtable_defer {
	spinlock_t lock;
	struct work_struct wq;
	struct fdtable *next;
};

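/*
 * Hard upper bound on the size of an fdtable, enforced below in
 * alloc_fdtable() and expand_files(); runtime-tunable via the nr_open
 * sysctl.
 */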
int sysctl_nr_open __read_mostly = 1024*1024;

/*
 * We use this list to defer freeing fdtables that have vmalloced
 * sets/arrays. By keeping a per-cpu list, we avoid having to embed
 * the work_struct in the fdtable itself, which avoids a 64 byte (i386)
 * increase in this per-task structure.
 */
static DEFINE_PER_CPU(struct fdtable_defer, fdtable_defer_list);

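/*
 * Allocate memory for the fd array or the fdset bitmaps: anything that
 * fits in a page comes from the slab allocator, larger requests fall
 * back to vmalloc.
 */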
static inline void *alloc_fdmem(unsigned int size)
{
	if (size <= PAGE_SIZE)
		return kmalloc(size, GFP_KERNEL);
	else
		return vmalloc(size);
}

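/*
 * Free the fd array, matching the allocator alloc_fdmem() chose for it:
 * arrays of up to PAGE_SIZE / sizeof(struct file *) entries were
 * kmalloced, anything bigger was vmalloced.
 */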
static inline void free_fdarr(struct fdtable *fdt)
{
	if (fdt->max_fds <= (PAGE_SIZE / sizeof(struct file *)))
		kfree(fdt->fd);
	else
		vfree(fdt->fd);
}

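/*
 * Free the fdset buffer. One allocation backs both bitmaps (open_fds
 * followed by close_on_exec), so the kmalloc/vmalloc cutoff sits at
 * PAGE_SIZE * BITS_PER_BYTE / 2 fds: two bitmaps of that many bits
 * fill exactly one page.
 */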
static inline void free_fdset(struct fdtable *fdt)
{
	if (fdt->max_fds <= (PAGE_SIZE * BITS_PER_BYTE / 2))
		kfree(fdt->open_fds);
	else
		vfree(fdt->open_fds);
}

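/*
 * Workqueue callback: drain this CPU's deferred list and free each
 * vmalloced fdtable from process context, where vfree() is safe.
 */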
static void free_fdtable_work(struct work_struct *work)
{
	struct fdtable_defer *f =
		container_of(work, struct fdtable_defer, wq);
	struct fdtable *fdt;

	spin_lock_bh(&f->lock);
	fdt = f->next;
	f->next = NULL;
	spin_unlock_bh(&f->lock);
	while (fdt) {
		struct fdtable *next = fdt->next;
		vfree(fdt->fd);
		free_fdset(fdt);
		kfree(fdt);
		fdt = next;
	}
}

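/*
 * RCU callback that actually releases an fdtable once pre-existing RCU
 * readers are done with it. Three cases: the fdtable embedded in a
 * files_struct dies with that structure; a fully kmalloced fdtable can
 * be freed directly, even from softirq context; a vmalloced one must be
 * punted to the per-cpu workqueue, since vfree() cannot be called from
 * interrupt context.
 */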
void free_fdtable_rcu(struct rcu_head *rcu)
{
	struct fdtable *fdt = container_of(rcu, struct fdtable, rcu);
	struct fdtable_defer *fddef;

	BUG_ON(!fdt);

	if (fdt->max_fds <= NR_OPEN_DEFAULT) {
		/*
		 * This fdtable is embedded in the files structure and that
		 * structure itself is getting destroyed.
		 */
		kmem_cache_free(files_cachep,
				container_of(fdt, struct files_struct, fdtab));
		return;
	}
	if (fdt->max_fds <= (PAGE_SIZE / sizeof(struct file *))) {
		kfree(fdt->fd);
		kfree(fdt->open_fds);
		kfree(fdt);
	} else {
		fddef = &get_cpu_var(fdtable_defer_list);
		spin_lock(&fddef->lock);
		fdt->next = fddef->next;
		fddef->next = fdt;
		/* vmallocs are handled from the workqueue context */
		schedule_work(&fddef->wq);
		spin_unlock(&fddef->lock);
		put_cpu_var(fdtable_defer_list);
	}
}

/*
 * Copy the old fdtable's fd array and fdsets into the new, larger
 * fdtable, zeroing the newly added tail of each. Called with the files
 * spinlock held.
 */
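/*
 * Worked example (assuming 8-byte struct file pointers and
 * BITS_PER_BYTE == 8): growing from 256 to 512 slots copies
 * 256 * 8 = 2048 bytes of fd pointers and 256 / 8 = 32 bytes of each
 * bitmap, then zeroes the remaining 2048 bytes of pointer slots and
 * 32 bytes of each bitmap.
 */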
static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
{
	unsigned int cpy, set;

	BUG_ON(nfdt->max_fds < ofdt->max_fds);
	if (ofdt->max_fds == 0)
		return;

	cpy = ofdt->max_fds * sizeof(struct file *);
	set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *);
	memcpy(nfdt->fd, ofdt->fd, cpy);
	memset((char *)(nfdt->fd) + cpy, 0, set);

	cpy = ofdt->max_fds / BITS_PER_BYTE;
	set = (nfdt->max_fds - ofdt->max_fds) / BITS_PER_BYTE;
	memcpy(nfdt->open_fds, ofdt->open_fds, cpy);
	memset((char *)(nfdt->open_fds) + cpy, 0, set);
	memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy);
	memset((char *)(nfdt->close_on_exec) + cpy, 0, set);
}

static struct fdtable *alloc_fdtable(unsigned int nr)
{
	struct fdtable *fdt;
	char *data;

	/*
	 * Figure out how many fds we actually want to support in this fdtable.
	 * Allocation steps are keyed to the size of the fdarray, since it
	 * grows far faster than any of the other dynamic data. We try to fit
	 * the fdarray into comfortable page-tuned chunks: starting at 1024B
	 * and growing in powers of two from there on.
	 */
	nr /= (1024 / sizeof(struct file *));
	nr = roundup_pow_of_two(nr + 1);
	nr *= (1024 / sizeof(struct file *));
	if (nr > sysctl_nr_open)
		nr = sysctl_nr_open;
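
	/*
	 * Worked example (assuming 8-byte struct file pointers, so
	 * 1024 / sizeof(struct file *) == 128): a request for nr = 300
	 * gives 300 / 128 = 2, roundup_pow_of_two(3) = 4, 4 * 128 = 512
	 * slots -- a 4096-byte fdarray, exactly one 4K page.
	 */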
	fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL);
	if (!fdt)
		goto out;
	fdt->max_fds = nr;
	data = alloc_fdmem(nr * sizeof(struct file *));
	if (!data)
		goto out_fdt;
	fdt->fd = (struct file **)data;
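	/*
	 * A single buffer holds both bitmaps, open_fds followed by
	 * close_on_exec, 2 * nr bits in total; the L1_CACHE_BYTES floor
	 * rounds tiny allocations up to at least one cache line.
	 */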
	data = alloc_fdmem(max_t(unsigned int,
				 2 * nr / BITS_PER_BYTE, L1_CACHE_BYTES));
	if (!data)
		goto out_arr;
	fdt->open_fds = (fd_set *)data;
	data += nr / BITS_PER_BYTE;
	fdt->close_on_exec = (fd_set *)data;
	INIT_RCU_HEAD(&fdt->rcu);
	fdt->next = NULL;

	return fdt;

out_arr:
	free_fdarr(fdt);
out_fdt:
	kfree(fdt);
out:
	return NULL;
}

/*
 * Expand the file descriptor table.
 * This function will allocate a new fdtable and both fd array and fdset, of
 * the given size.
 * Return <0 error code on error; 1 on successful completion.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_fdtable(struct files_struct *files, int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *new_fdt, *cur_fdt;

	spin_unlock(&files->file_lock);
	new_fdt = alloc_fdtable(nr);
	spin_lock(&files->file_lock);
	if (!new_fdt)
		return -ENOMEM;
	/*
	 * Check again since another task may have expanded the fd table while
	 * we dropped the lock
	 */
	cur_fdt = files_fdtable(files);
	if (nr >= cur_fdt->max_fds) {
		/* Continue as planned */
		copy_fdtable(new_fdt, cur_fdt);
		rcu_assign_pointer(files->fdt, new_fdt);
		if (cur_fdt->max_fds > NR_OPEN_DEFAULT)
			free_fdtable(cur_fdt);
	} else {
		/* Somebody else expanded, so undo our attempt */
		free_fdarr(new_fdt);
		free_fdset(new_fdt);
		kfree(new_fdt);
	}
	return 1;
}

/*
 * Expand files.
 * This function will expand the file structures, if the requested size exceeds
 * the current capacity and there is room for expansion.
 * Return <0 error code on error; 0 when nothing done; 1 when files were
 * expanded and execution may have blocked.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
int expand_files(struct files_struct *files, int nr)
{
	struct fdtable *fdt;

	fdt = files_fdtable(files);
	/* Do we need to expand? */
	if (nr < fdt->max_fds)
		return 0;
	/* Can we expand? */
	if (nr >= sysctl_nr_open)
		return -EMFILE;

	/* All good, so we try */
	return expand_fdtable(files, nr);
}
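
/*
 * Note for callers: a return of 1 means expand_fdtable() dropped and
 * retook files->file_lock while the table grew, so any fdtable pointer
 * fetched before the call is stale and must be re-read via
 * files_fdtable().
 */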
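/*
 * Boot-time setup for the deferred-free machinery: give each possible
 * CPU its own lock, work item, and (initially empty) list of fdtables
 * to free.
 */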
static void __devinit fdtable_defer_list_init(int cpu)
{
	struct fdtable_defer *fddef = &per_cpu(fdtable_defer_list, cpu);
	spin_lock_init(&fddef->lock);
	INIT_WORK(&fddef->wq, free_fdtable_work);
	fddef->next = NULL;
}

void __init files_defer_init(void)
{
	int i;
	for_each_possible_cpu(i)
		fdtable_defer_list_init(i);
}