/*
 * Implementation of the diskquota system for the LINUX operating system. QUOTA
 * is implemented using the BSD system call interface as the means of
 * communication with the user level. This file contains the generic routines
 * called by the different filesystems on allocation of an inode or block.
 * These routines take care of the administration needed to have a consistent
 * diskquota tracking system. The ideas of both user and group quotas are based
 * on the Melbourne quota system as used on BSD derived systems. The internal
 * implementation is based on one of the several variants of the LINUX
 * inode-subsystem with added complexity of the diskquota system.
 *
 * Version: $Id: dquot.c,v 6.3 1996/11/17 18:35:34 mvw Exp mvw $
 *
 * Author:      Marco van Wieringen <[email protected]>
 *
 * Fixes:       Dmitry Gorodchanin <[email protected]>, 11 Feb 96
 *
 *              Revised list management to avoid races
 *              -- Bill Hawes, <[email protected]>, 9/98
 *
 *              Fixed races in dquot_transfer(), dqget() and dquot_alloc_...().
 *              As the consequence the locking was moved from dquot_decr_...(),
 *              dquot_incr_...() to calling functions.
 *              invalidate_dquots() now writes modified dquots.
 *              Serialized quota_off() and quota_on() for mount point.
 *              Fixed a few bugs in grow_dquots().
 *              Fixed deadlock in write_dquot() - we no longer account quotas on
 *              quota files
 *              remove_dquot_ref() moved to inode.c - it now traverses through inodes
 *              add_dquot_ref() restarts after blocking
 *              Added check for bogus uid and fixed check for group in quotactl.
 *              Jan Kara, <[email protected]>, sponsored by SuSE CR, 10-11/99
 *
 *              Used struct list_head instead of own list struct
 *              Invalidation of referenced dquots is no longer possible
 *              Improved free_dquots list management
 *              Quota and i_blocks are now updated in one place to avoid races
 *              Warnings are now delayed so we won't block in critical section
 *              Write updated not to require dquot lock
 *              Jan Kara, <[email protected]>, 9/2000
 *
 *              Added dynamic quota structure allocation
 *              Jan Kara <[email protected]> 12/2000
 *
 *              Rewritten quota interface. Implemented new quota format and
 *              formats registering.
 *              Jan Kara, <[email protected]>, 2001,2002
 *
 *              New SMP locking.
 *              Jan Kara, <[email protected]>, 10/2002
 *
 *              Added journalled quota support, fix lock inversion problems
 *              Jan Kara, <[email protected]>, 2003,2004
 *
 * (C) Copyright 1994 - 1997 Marco van Wieringen
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/mm.h>
#include <linux/time.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/tty.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/security.h>
#include <linux/kmod.h>
#include <linux/namei.h>
#include <linux/buffer_head.h>
#include <linux/quotaops.h>

#include <asm/uaccess.h>

#define __DQUOT_PARANOIA

/*
 * There are two quota SMP locks. dq_list_lock protects all lists with quotas
 * and quota formats and also the dqstats structure containing statistics about
 * the lists. dq_data_lock protects data from dq_dqb and also mem_dqinfo
 * structures and also guards consistency of dquot->dq_dqb with inode->i_blocks
 * and i_bytes. The i_blocks and i_bytes updates themselves are guarded by
 * i_lock acquired directly in inode_add_bytes() and inode_sub_bytes().
 *
 * The spinlock ordering is hence: dq_data_lock > dq_list_lock > i_lock
 *
 * Note that some things (eg. sb pointer, type, id) don't change during
 * the life of the dquot structure and so need not be protected by a lock
 *
 * Any operation working on dquots via inode pointers must hold dqptr_sem. If
 * the operation is just reading pointers from the inode (or not using them at
 * all) the read lock is enough. If pointers are altered the function must hold
 * the write lock (these locking rules also apply for the S_NOQUOTA flag in the
 * inode - note that for altering the flag i_sem is also needed). If an
 * operation holds a reference to a dquot in another way (e.g. quotactl ops)
 * it must be guarded by dqonoff_sem.
 * This locking assures that:
 *   a) update/access to dquot pointers in inode is serialized
 *   b) everyone is guarded against invalidate_dquots()
 *
 * Each dquot has its dq_lock semaphore. Locked dquots might not be referenced
 * from inodes (dquot_alloc_space() and such don't check the dq_lock).
 * Currently a dquot is locked only when it is being read into memory (or space
 * for it is being allocated) on the first dqget() and when it is being
 * released on the last dqput(). The allocation and release operations are
 * serialized by the dq_lock and by checking the use count in dquot_release().
 * Write operations on dquots don't hold dq_lock as they copy data under the
 * dq_data_lock spinlock to internal buffers before writing.
 *
 * Lock ordering (including related VFS locks) is the following:
 *   i_sem > dqonoff_sem > iprune_sem > journal_lock > dqptr_sem >
 *   > dquot->dq_lock > dqio_sem
 * i_sem on quota files is special (it's below dqio_sem)
 */
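
/*
 * Illustrative sketch of the ordering above (not a code path copied from a
 * particular filesystem): a typical allocation that charges quota is expected
 * to nest the locks like
 *
 *      down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
 *      spin_lock(&dq_data_lock);
 *      ... check limits and update dquot->dq_dqb and i_blocks/i_bytes ...
 *      spin_unlock(&dq_data_lock);
 *      ... mark dquots dirty (committing them later may take dqio_sem) ...
 *      up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
 *
 * dquot_alloc_space() below follows exactly this pattern.
 */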

static DEFINE_SPINLOCK(dq_list_lock);
DEFINE_SPINLOCK(dq_data_lock);

static char *quotatypes[] = INITQFNAMES;
static struct quota_format_type *quota_formats; /* List of registered formats */
static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES;

/* SLAB cache for dquot structures */
static kmem_cache_t *dquot_cachep;

int register_quota_format(struct quota_format_type *fmt)
{
        spin_lock(&dq_list_lock);
        fmt->qf_next = quota_formats;
        quota_formats = fmt;
        spin_unlock(&dq_list_lock);
        return 0;
}

void unregister_quota_format(struct quota_format_type *fmt)
{
        struct quota_format_type **actqf;

        spin_lock(&dq_list_lock);
        for (actqf = &quota_formats; *actqf && *actqf != fmt; actqf = &(*actqf)->qf_next);
        if (*actqf)
                *actqf = (*actqf)->qf_next;
        spin_unlock(&dq_list_lock);
}

static struct quota_format_type *find_quota_format(int id)
{
        struct quota_format_type *actqf;

        spin_lock(&dq_list_lock);
        for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id; actqf = actqf->qf_next);
        if (!actqf || !try_module_get(actqf->qf_owner)) {
                int qm;

                spin_unlock(&dq_list_lock);

                for (qm = 0; module_names[qm].qm_fmt_id && module_names[qm].qm_fmt_id != id; qm++);
                if (!module_names[qm].qm_fmt_id || request_module(module_names[qm].qm_mod_name))
                        return NULL;

                spin_lock(&dq_list_lock);
                for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id; actqf = actqf->qf_next);
                if (actqf && !try_module_get(actqf->qf_owner))
                        actqf = NULL;
        }
        spin_unlock(&dq_list_lock);
        return actqf;
}

static void put_quota_format(struct quota_format_type *fmt)
{
        module_put(fmt->qf_owner);
}
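
/*
 * Hedged usage sketch (the "myfmt_*" identifiers below are hypothetical and
 * not part of this file): a quota format driver registers itself by filling in
 * a struct quota_format_type and calling register_quota_format() from its
 * module init, and unregisters it on exit:
 *
 *      static struct quota_format_type myfmt_quota_format = {
 *              .qf_fmt_id      = QFMT_VFS_V0,
 *              .qf_ops         = &myfmt_quota_ops,
 *              .qf_owner       = THIS_MODULE,
 *      };
 *
 *      static int __init init_myfmt_quota_format(void)
 *      {
 *              return register_quota_format(&myfmt_quota_format);
 *      }
 *
 *      static void __exit exit_myfmt_quota_format(void)
 *      {
 *              unregister_quota_format(&myfmt_quota_format);
 *      }
 *
 * find_quota_format() above can then resolve the format id at quotaon time,
 * loading the module via request_module() when necessary.
 */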

/*
 * Dquot List Management:
 * The quota code uses three lists for dquot management: the inuse_list,
 * free_dquots, and dquot_hash[] array. A single dquot structure may be
 * on all three lists, depending on its current state.
 *
 * All dquots are placed at the end of inuse_list when first created, and this
 * list is used for the invalidate operation, which must look at every dquot.
 *
 * Unused dquots (dq_count == 0) are added to the free_dquots list when freed,
 * and this list is searched whenever we need an available dquot. Dquots are
 * removed from the list as soon as they are used again, and
 * dqstats.free_dquots gives the number of dquots on the list. When a
 * dquot is invalidated it's completely released from memory.
 *
 * Dquots with a specific identity (device, type and id) are placed on
 * one of the dquot_hash[] hash chains. This provides an efficient search
 * mechanism to locate a specific dquot.
 */

static LIST_HEAD(inuse_list);
static LIST_HEAD(free_dquots);
static unsigned int dq_hash_bits, dq_hash_mask;
static struct hlist_head *dquot_hash;

struct dqstats dqstats;

static void dqput(struct dquot *dquot);

static inline unsigned int
hashfn(const struct super_block *sb, unsigned int id, int type)
{
        unsigned long tmp;

        tmp = (((unsigned long)sb>>L1_CACHE_SHIFT) ^ id) * (MAXQUOTAS - type);
        return (tmp + (tmp >> dq_hash_bits)) & dq_hash_mask;
}

/*
 * Following list functions expect dq_list_lock to be held
 */
static inline void insert_dquot_hash(struct dquot *dquot)
{
        struct hlist_head *head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id, dquot->dq_type);
        hlist_add_head(&dquot->dq_hash, head);
}

static inline void remove_dquot_hash(struct dquot *dquot)
{
        hlist_del_init(&dquot->dq_hash);
}

static inline struct dquot *find_dquot(unsigned int hashent, struct super_block *sb, unsigned int id, int type)
{
        struct hlist_node *node;
        struct dquot *dquot;

        hlist_for_each (node, dquot_hash+hashent) {
                dquot = hlist_entry(node, struct dquot, dq_hash);
                if (dquot->dq_sb == sb && dquot->dq_id == id && dquot->dq_type == type)
                        return dquot;
        }
        return NODQUOT;
}

/* Add a dquot to the tail of the free list */
static inline void put_dquot_last(struct dquot *dquot)
{
        list_add(&dquot->dq_free, free_dquots.prev);
        dqstats.free_dquots++;
}

static inline void remove_free_dquot(struct dquot *dquot)
{
        if (list_empty(&dquot->dq_free))
                return;
        list_del_init(&dquot->dq_free);
        dqstats.free_dquots--;
}

static inline void put_inuse(struct dquot *dquot)
{
        /* We add to the back of inuse list so we don't have to restart
         * when traversing this list and we block */
        list_add(&dquot->dq_inuse, inuse_list.prev);
        dqstats.allocated_dquots++;
}

static inline void remove_inuse(struct dquot *dquot)
{
        dqstats.allocated_dquots--;
        list_del(&dquot->dq_inuse);
}
/*
 * End of list functions needing dq_list_lock
 */

static void wait_on_dquot(struct dquot *dquot)
{
        down(&dquot->dq_lock);
        up(&dquot->dq_lock);
}

#define mark_dquot_dirty(dquot) ((dquot)->dq_sb->dq_op->mark_dirty(dquot))

int dquot_mark_dquot_dirty(struct dquot *dquot)
{
        spin_lock(&dq_list_lock);
        if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags))
                list_add(&dquot->dq_dirty, &sb_dqopt(dquot->dq_sb)->
                                info[dquot->dq_type].dqi_dirty_list);
        spin_unlock(&dq_list_lock);
        return 0;
}

/* This function needs dq_list_lock */
static inline int clear_dquot_dirty(struct dquot *dquot)
{
        if (!test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags))
                return 0;
        list_del_init(&dquot->dq_dirty);
        return 1;
}

void mark_info_dirty(struct super_block *sb, int type)
{
        set_bit(DQF_INFO_DIRTY_B, &sb_dqopt(sb)->info[type].dqi_flags);
}
EXPORT_SYMBOL(mark_info_dirty);

/*
 *      Read dquot from disk and alloc space for it
 */

int dquot_acquire(struct dquot *dquot)
{
        int ret = 0, ret2 = 0;
        struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

        down(&dquot->dq_lock);
        down(&dqopt->dqio_sem);
        if (!test_bit(DQ_READ_B, &dquot->dq_flags))
                ret = dqopt->ops[dquot->dq_type]->read_dqblk(dquot);
        if (ret < 0)
                goto out_iolock;
        set_bit(DQ_READ_B, &dquot->dq_flags);
        /* Instantiate dquot if needed */
        if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && !dquot->dq_off) {
                ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot);
                /* Write the info if needed */
                if (info_dirty(&dqopt->info[dquot->dq_type]))
                        ret2 = dqopt->ops[dquot->dq_type]->write_file_info(dquot->dq_sb, dquot->dq_type);
                if (ret < 0)
                        goto out_iolock;
                if (ret2 < 0) {
                        ret = ret2;
                        goto out_iolock;
                }
        }
        set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out_iolock:
        up(&dqopt->dqio_sem);
        up(&dquot->dq_lock);
        return ret;
}

/*
 *      Write dquot to disk
 */
int dquot_commit(struct dquot *dquot)
{
        int ret = 0, ret2 = 0;
        struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

        down(&dqopt->dqio_sem);
        spin_lock(&dq_list_lock);
        if (!clear_dquot_dirty(dquot)) {
                spin_unlock(&dq_list_lock);
                goto out_sem;
        }
        spin_unlock(&dq_list_lock);
        /* A dquot can be inactive here only if there was an error during its
         * read/init => better not write it */
        if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
                ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot);
                if (info_dirty(&dqopt->info[dquot->dq_type]))
                        ret2 = dqopt->ops[dquot->dq_type]->write_file_info(dquot->dq_sb, dquot->dq_type);
                if (ret >= 0)
                        ret = ret2;
        }
out_sem:
        up(&dqopt->dqio_sem);
        return ret;
}

/*
 *      Release dquot
 */
int dquot_release(struct dquot *dquot)
{
        int ret = 0, ret2 = 0;
        struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

        down(&dquot->dq_lock);
        /* Check whether we are not racing with some other dqget() */
        if (atomic_read(&dquot->dq_count) > 1)
                goto out_dqlock;
        down(&dqopt->dqio_sem);
        if (dqopt->ops[dquot->dq_type]->release_dqblk) {
                ret = dqopt->ops[dquot->dq_type]->release_dqblk(dquot);
                /* Write the info */
                if (info_dirty(&dqopt->info[dquot->dq_type]))
                        ret2 = dqopt->ops[dquot->dq_type]->write_file_info(dquot->dq_sb, dquot->dq_type);
                if (ret >= 0)
                        ret = ret2;
        }
        clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
        up(&dqopt->dqio_sem);
out_dqlock:
        up(&dquot->dq_lock);
        return ret;
}

/* Invalidate all dquots on the list. Note that this function is called after
 * quota is disabled and pointers from inodes removed so there cannot be new
 * quota users. Also because we hold dqonoff_sem there can be no quota users
 * for this sb+type at all. */
static void invalidate_dquots(struct super_block *sb, int type)
{
        struct dquot *dquot, *tmp;

        spin_lock(&dq_list_lock);
        list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) {
                if (dquot->dq_sb != sb)
                        continue;
                if (dquot->dq_type != type)
                        continue;
#ifdef __DQUOT_PARANOIA
                if (atomic_read(&dquot->dq_count))
                        BUG();
#endif
                /* Quota now has no users and it has been written on last dqput() */
                remove_dquot_hash(dquot);
                remove_free_dquot(dquot);
                remove_inuse(dquot);
                kmem_cache_free(dquot_cachep, dquot);
        }
        spin_unlock(&dq_list_lock);
}

int vfs_quota_sync(struct super_block *sb, int type)
{
        struct list_head *dirty;
        struct dquot *dquot;
        struct quota_info *dqopt = sb_dqopt(sb);
        int cnt;

        down(&dqopt->dqonoff_sem);
        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
                if (type != -1 && cnt != type)
                        continue;
                if (!sb_has_quota_enabled(sb, cnt))
                        continue;
                spin_lock(&dq_list_lock);
                dirty = &dqopt->info[cnt].dqi_dirty_list;
                while (!list_empty(dirty)) {
                        dquot = list_entry(dirty->next, struct dquot, dq_dirty);
                        /* Dirty and inactive can be only bad dquot... */
                        if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
                                clear_dquot_dirty(dquot);
                                continue;
                        }
                        /* Now we have active dquot from which someone is
                         * holding reference so we can safely just increase
                         * use count */
                        atomic_inc(&dquot->dq_count);
                        dqstats.lookups++;
                        spin_unlock(&dq_list_lock);
                        sb->dq_op->write_dquot(dquot);
                        dqput(dquot);
                        spin_lock(&dq_list_lock);
                }
                spin_unlock(&dq_list_lock);
        }

        for (cnt = 0; cnt < MAXQUOTAS; cnt++)
                if ((cnt == type || type == -1) && sb_has_quota_enabled(sb, cnt)
                        && info_dirty(&dqopt->info[cnt]))
                        sb->dq_op->write_info(sb, cnt);
        spin_lock(&dq_list_lock);
        dqstats.syncs++;
        spin_unlock(&dq_list_lock);
        up(&dqopt->dqonoff_sem);

        return 0;
}

/* Free unused dquots from cache */
static void prune_dqcache(int count)
{
        struct list_head *head;
        struct dquot *dquot;

        head = free_dquots.prev;
        while (head != &free_dquots && count) {
                dquot = list_entry(head, struct dquot, dq_free);
                remove_dquot_hash(dquot);
                remove_free_dquot(dquot);
                remove_inuse(dquot);
                kmem_cache_free(dquot_cachep, dquot);
                count--;
                head = free_dquots.prev;
        }
}

/*
 * This is called from kswapd when we think we need some
 * more memory
 */

static int shrink_dqcache_memory(int nr, gfp_t gfp_mask)
{
        if (nr) {
                spin_lock(&dq_list_lock);
                prune_dqcache(nr);
                spin_unlock(&dq_list_lock);
        }
        return (dqstats.free_dquots / 100) * sysctl_vfs_cache_pressure;
}
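
/*
 * Note (assumption - the registration call lives outside this excerpt):
 * a callback of this shape is normally hooked into the VM by the cache's init
 * code via set_shrinker(DEFAULT_SEEKS, shrink_dqcache_memory), so that memory
 * pressure first trims free_dquots and the return value tells the VM how much
 * reclaimable cache remains, scaled by vfs_cache_pressure.
 */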

/*
 * Put reference to dquot
 * NOTE: If you change this function please check whether dqput_blocks() works right...
 * MUST be called with either dqptr_sem or dqonoff_sem held
 */
static void dqput(struct dquot *dquot)
{
        if (!dquot)
                return;
#ifdef __DQUOT_PARANOIA
        if (!atomic_read(&dquot->dq_count)) {
                printk("VFS: dqput: trying to free free dquot\n");
                printk("VFS: device %s, dquot of %s %d\n",
                        dquot->dq_sb->s_id,
                        quotatypes[dquot->dq_type],
                        dquot->dq_id);
                BUG();
        }
#endif

        spin_lock(&dq_list_lock);
        dqstats.drops++;
        spin_unlock(&dq_list_lock);
we_slept:
        spin_lock(&dq_list_lock);
        if (atomic_read(&dquot->dq_count) > 1) {
                /* We have more than one user... nothing to do */
                atomic_dec(&dquot->dq_count);
                spin_unlock(&dq_list_lock);
                return;
        }
        /* Need to release dquot? */
        if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && dquot_dirty(dquot)) {
                spin_unlock(&dq_list_lock);
                /* Commit dquot before releasing */
                dquot->dq_sb->dq_op->write_dquot(dquot);
                goto we_slept;
        }
        /* Clear flag in case dquot was inactive (something bad happened) */
        clear_dquot_dirty(dquot);
        if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
                spin_unlock(&dq_list_lock);
                dquot->dq_sb->dq_op->release_dquot(dquot);
                goto we_slept;
        }
        atomic_dec(&dquot->dq_count);
#ifdef __DQUOT_PARANOIA
        /* sanity check */
        if (!list_empty(&dquot->dq_free))
                BUG();
#endif
        put_dquot_last(dquot);
        spin_unlock(&dq_list_lock);
}

static struct dquot *get_empty_dquot(struct super_block *sb, int type)
{
        struct dquot *dquot;

        dquot = kmem_cache_alloc(dquot_cachep, SLAB_NOFS);
        if (!dquot)
                return NODQUOT;

        memset((caddr_t)dquot, 0, sizeof(struct dquot));
        sema_init(&dquot->dq_lock, 1);
        INIT_LIST_HEAD(&dquot->dq_free);
        INIT_LIST_HEAD(&dquot->dq_inuse);
        INIT_HLIST_NODE(&dquot->dq_hash);
        INIT_LIST_HEAD(&dquot->dq_dirty);
        dquot->dq_sb = sb;
        dquot->dq_type = type;
        atomic_set(&dquot->dq_count, 1);

        return dquot;
}

/*
 * Get reference to dquot
 * MUST be called with either dqptr_sem or dqonoff_sem held
 */
static struct dquot *dqget(struct super_block *sb, unsigned int id, int type)
{
        unsigned int hashent = hashfn(sb, id, type);
        struct dquot *dquot, *empty = NODQUOT;

        if (!sb_has_quota_enabled(sb, type))
                return NODQUOT;
we_slept:
        spin_lock(&dq_list_lock);
        if ((dquot = find_dquot(hashent, sb, id, type)) == NODQUOT) {
                if (empty == NODQUOT) {
                        spin_unlock(&dq_list_lock);
                        if ((empty = get_empty_dquot(sb, type)) == NODQUOT)
                                schedule();     /* Try to wait for a moment... */
                        goto we_slept;
                }
                dquot = empty;
                dquot->dq_id = id;
                /* all dquots go on the inuse_list */
                put_inuse(dquot);
                /* hash it first so it can be found */
                insert_dquot_hash(dquot);
                dqstats.lookups++;
                spin_unlock(&dq_list_lock);
        } else {
                if (!atomic_read(&dquot->dq_count))
                        remove_free_dquot(dquot);
                atomic_inc(&dquot->dq_count);
                dqstats.cache_hits++;
                dqstats.lookups++;
                spin_unlock(&dq_list_lock);
                if (empty)
                        kmem_cache_free(dquot_cachep, empty);
        }
        /* Wait for dq_lock - after this we know that either dquot_release() is already
         * finished or it will be canceled due to dq_count > 1 test */
        wait_on_dquot(dquot);
        /* Read the dquot and instantiate it (everything done only if needed) */
        if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && sb->dq_op->acquire_dquot(dquot) < 0) {
                dqput(dquot);
                return NODQUOT;
        }
#ifdef __DQUOT_PARANOIA
        if (!dquot->dq_sb)      /* Has somebody invalidated entry under us? */
                BUG();
#endif

        return dquot;
}

static int dqinit_needed(struct inode *inode, int type)
{
        int cnt;

        if (IS_NOQUOTA(inode))
                return 0;
        if (type != -1)
                return inode->i_dquot[type] == NODQUOT;
        for (cnt = 0; cnt < MAXQUOTAS; cnt++)
                if (inode->i_dquot[cnt] == NODQUOT)
                        return 1;
        return 0;
}

/* This routine is guarded by dqonoff_sem semaphore */
static void add_dquot_ref(struct super_block *sb, int type)
{
        struct list_head *p;

restart:
        file_list_lock();
        list_for_each(p, &sb->s_files) {
                struct file *filp = list_entry(p, struct file, f_u.fu_list);
                struct inode *inode = filp->f_dentry->d_inode;
                if (filp->f_mode & FMODE_WRITE && dqinit_needed(inode, type)) {
                        struct dentry *dentry = dget(filp->f_dentry);
                        file_list_unlock();
                        sb->dq_op->initialize(inode, type);
                        dput(dentry);
                        /* As we may have blocked we had better restart... */
                        goto restart;
                }
        }
        file_list_unlock();
}

/* Return 0 if dqput() won't block (note that 1 doesn't necessarily mean blocking) */
static inline int dqput_blocks(struct dquot *dquot)
{
        if (atomic_read(&dquot->dq_count) <= 1)
                return 1;
        return 0;
}

/* Remove references to dquots from inode - add dquot to list for freeing if needed */
/* We can't race with anybody because we hold dqptr_sem for writing... */
int remove_inode_dquot_ref(struct inode *inode, int type, struct list_head *tofree_head)
{
        struct dquot *dquot = inode->i_dquot[type];

        inode->i_dquot[type] = NODQUOT;
        if (dquot != NODQUOT) {
                if (dqput_blocks(dquot)) {
#ifdef __DQUOT_PARANOIA
                        if (atomic_read(&dquot->dq_count) != 1)
                                printk(KERN_WARNING "VFS: Adding dquot with dq_count %d to dispose list.\n", atomic_read(&dquot->dq_count));
#endif
                        spin_lock(&dq_list_lock);
                        list_add(&dquot->dq_free, tofree_head); /* As the dquot currently has users it can't be on the free list... */
                        spin_unlock(&dq_list_lock);
                        return 1;
                }
                else
                        dqput(dquot);   /* We have guaranteed we won't block */
        }
        return 0;
}

/* Free list of dquots - called from inode.c */
/* dquots are removed from inodes, no new references can be got so we are the only ones holding reference */
static void put_dquot_list(struct list_head *tofree_head)
{
        struct list_head *act_head;
        struct dquot *dquot;

        act_head = tofree_head->next;
        /* So now we have dquots on the list... Just free them */
        while (act_head != tofree_head) {
                dquot = list_entry(act_head, struct dquot, dq_free);
                act_head = act_head->next;
                list_del_init(&dquot->dq_free); /* Remove dquot from the list so we won't have problems... */
                dqput(dquot);
        }
}

/* Gather all references from inodes and drop them */
static void drop_dquot_ref(struct super_block *sb, int type)
{
        LIST_HEAD(tofree_head);

        /* We need to be guarded against prune_icache to reach all the
         * inodes - otherwise some can be on the local list of prune_icache */
        down(&iprune_sem);
        down_write(&sb_dqopt(sb)->dqptr_sem);
        remove_dquot_ref(sb, type, &tofree_head);
        up_write(&sb_dqopt(sb)->dqptr_sem);
        up(&iprune_sem);
        put_dquot_list(&tofree_head);
}

static inline void dquot_incr_inodes(struct dquot *dquot, unsigned long number)
{
        dquot->dq_dqb.dqb_curinodes += number;
}

static inline void dquot_incr_space(struct dquot *dquot, qsize_t number)
{
        dquot->dq_dqb.dqb_curspace += number;
}

static inline void dquot_decr_inodes(struct dquot *dquot, unsigned long number)
{
        if (dquot->dq_dqb.dqb_curinodes > number)
                dquot->dq_dqb.dqb_curinodes -= number;
        else
                dquot->dq_dqb.dqb_curinodes = 0;
        if (dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit)
                dquot->dq_dqb.dqb_itime = (time_t) 0;
        clear_bit(DQ_INODES_B, &dquot->dq_flags);
}

static inline void dquot_decr_space(struct dquot *dquot, qsize_t number)
{
        if (dquot->dq_dqb.dqb_curspace > number)
                dquot->dq_dqb.dqb_curspace -= number;
        else
                dquot->dq_dqb.dqb_curspace = 0;
        if (toqb(dquot->dq_dqb.dqb_curspace) <= dquot->dq_dqb.dqb_bsoftlimit)
                dquot->dq_dqb.dqb_btime = (time_t) 0;
        clear_bit(DQ_BLKS_B, &dquot->dq_flags);
}

static int flag_print_warnings = 1;

static inline int need_print_warning(struct dquot *dquot)
{
        if (!flag_print_warnings)
                return 0;

        switch (dquot->dq_type) {
                case USRQUOTA:
                        return current->fsuid == dquot->dq_id;
                case GRPQUOTA:
                        return in_group_p(dquot->dq_id);
        }
        return 0;
}

/* Values of warnings */
#define NOWARN 0
#define IHARDWARN 1
#define ISOFTLONGWARN 2
#define ISOFTWARN 3
#define BHARDWARN 4
#define BSOFTLONGWARN 5
#define BSOFTWARN 6

/* Print warning to user which exceeded quota */
static void print_warning(struct dquot *dquot, const char warntype)
{
        char *msg = NULL;
        int flag = (warntype == BHARDWARN || warntype == BSOFTLONGWARN) ? DQ_BLKS_B :
          ((warntype == IHARDWARN || warntype == ISOFTLONGWARN) ? DQ_INODES_B : 0);

        if (!need_print_warning(dquot) || (flag && test_and_set_bit(flag, &dquot->dq_flags)))
                return;

        tty_write_message(current->signal->tty, dquot->dq_sb->s_id);
        if (warntype == ISOFTWARN || warntype == BSOFTWARN)
                tty_write_message(current->signal->tty, ": warning, ");
        else
                tty_write_message(current->signal->tty, ": write failed, ");
        tty_write_message(current->signal->tty, quotatypes[dquot->dq_type]);
        switch (warntype) {
                case IHARDWARN:
                        msg = " file limit reached.\r\n";
                        break;
                case ISOFTLONGWARN:
                        msg = " file quota exceeded too long.\r\n";
                        break;
                case ISOFTWARN:
                        msg = " file quota exceeded.\r\n";
                        break;
                case BHARDWARN:
                        msg = " block limit reached.\r\n";
                        break;
                case BSOFTLONGWARN:
                        msg = " block quota exceeded too long.\r\n";
                        break;
                case BSOFTWARN:
                        msg = " block quota exceeded.\r\n";
                        break;
        }
        tty_write_message(current->signal->tty, msg);
}

static inline void flush_warnings(struct dquot **dquots, char *warntype)
{
        int i;

        for (i = 0; i < MAXQUOTAS; i++)
                if (dquots[i] != NODQUOT && warntype[i] != NOWARN)
                        print_warning(dquots[i], warntype[i]);
}

static inline char ignore_hardlimit(struct dquot *dquot)
{
        struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type];

        return capable(CAP_SYS_RESOURCE) &&
            (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD || !(info->dqi_flags & V1_DQF_RSQUASH));
}

/* needs dq_data_lock */
static int check_idq(struct dquot *dquot, ulong inodes, char *warntype)
{
        *warntype = NOWARN;
        if (inodes <= 0 || test_bit(DQ_FAKE_B, &dquot->dq_flags))
                return QUOTA_OK;

        if (dquot->dq_dqb.dqb_ihardlimit &&
           (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_ihardlimit &&
            !ignore_hardlimit(dquot)) {
                *warntype = IHARDWARN;
                return NO_QUOTA;
        }

        if (dquot->dq_dqb.dqb_isoftlimit &&
           (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_isoftlimit &&
            dquot->dq_dqb.dqb_itime && get_seconds() >= dquot->dq_dqb.dqb_itime &&
            !ignore_hardlimit(dquot)) {
                *warntype = ISOFTLONGWARN;
                return NO_QUOTA;
        }

        if (dquot->dq_dqb.dqb_isoftlimit &&
           (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_isoftlimit &&
            dquot->dq_dqb.dqb_itime == 0) {
                *warntype = ISOFTWARN;
                dquot->dq_dqb.dqb_itime = get_seconds() + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_igrace;
        }

        return QUOTA_OK;
}

/* needs dq_data_lock */
static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *warntype)
{
        *warntype = NOWARN;
        if (space <= 0 || test_bit(DQ_FAKE_B, &dquot->dq_flags))
                return QUOTA_OK;

        if (dquot->dq_dqb.dqb_bhardlimit &&
           toqb(dquot->dq_dqb.dqb_curspace + space) > dquot->dq_dqb.dqb_bhardlimit &&
            !ignore_hardlimit(dquot)) {
                if (!prealloc)
                        *warntype = BHARDWARN;
                return NO_QUOTA;
        }

        if (dquot->dq_dqb.dqb_bsoftlimit &&
           toqb(dquot->dq_dqb.dqb_curspace + space) > dquot->dq_dqb.dqb_bsoftlimit &&
            dquot->dq_dqb.dqb_btime && get_seconds() >= dquot->dq_dqb.dqb_btime &&
            !ignore_hardlimit(dquot)) {
                if (!prealloc)
                        *warntype = BSOFTLONGWARN;
                return NO_QUOTA;
        }

        if (dquot->dq_dqb.dqb_bsoftlimit &&
           toqb(dquot->dq_dqb.dqb_curspace + space) > dquot->dq_dqb.dqb_bsoftlimit &&
            dquot->dq_dqb.dqb_btime == 0) {
                if (!prealloc) {
                        *warntype = BSOFTWARN;
                        dquot->dq_dqb.dqb_btime = get_seconds() + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_bgrace;
                }
                else
                        /*
                         * We don't allow preallocation to exceed the softlimit,
                         * so exceeding it is always reported
                         */
                        return NO_QUOTA;
        }

        return QUOTA_OK;
}
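
/*
 * Worked example of the grace-time logic above (the numbers are illustrative
 * only): with dqb_bsoftlimit = 100 blocks, dqb_bhardlimit = 0 (unlimited) and
 * dqi_bgrace = 7 days, the first allocation that pushes usage past 100 blocks
 * succeeds, sets dqb_btime = now + 7 days and emits BSOFTWARN. Further
 * allocations keep succeeding until get_seconds() reaches dqb_btime; from then
 * on check_bdq() returns NO_QUOTA with BSOFTLONGWARN. dquot_decr_space()
 * clears dqb_btime again once usage drops back to the soft limit.
 */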

/*
 *      Initialize quota pointers in inode
 *      Transaction must be started at entry
 */
int dquot_initialize(struct inode *inode, int type)
{
        unsigned int id = 0;
        int cnt, ret = 0;

        /* First test before acquiring semaphore - solves deadlocks when we
         * re-enter the quota code and are already holding the semaphore */
        if (IS_NOQUOTA(inode))
                return 0;
        down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
        /* Having dqptr_sem we know NOQUOTA flags can't be altered... */
        if (IS_NOQUOTA(inode))
                goto out_err;
        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
                if (type != -1 && cnt != type)
                        continue;
                if (inode->i_dquot[cnt] == NODQUOT) {
                        switch (cnt) {
                                case USRQUOTA:
                                        id = inode->i_uid;
                                        break;
                                case GRPQUOTA:
                                        id = inode->i_gid;
                                        break;
                        }
                        inode->i_dquot[cnt] = dqget(inode->i_sb, id, cnt);
                }
        }
out_err:
        up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
        return ret;
}

/*
 *      Release all quotas referenced by inode
 *      Transaction must be started at entry
 */
int dquot_drop(struct inode *inode)
{
        int cnt;

        down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
                if (inode->i_dquot[cnt] != NODQUOT) {
                        dqput(inode->i_dquot[cnt]);
                        inode->i_dquot[cnt] = NODQUOT;
                }
        }
        up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
        return 0;
}

/*
 * The following four functions update i_blocks+i_bytes fields and
 * quota information (together with appropriate checks)
 * NOTE: We absolutely rely on the fact that caller dirties
 * the inode (usually macros in quotaops.h care about this) and
 * holds a handle for the current transaction so that dquot write and
 * inode write go into the same transaction.
 */
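
/*
 * Illustrative caller sketch (hedged: the journal_start()/journal_stop() pair
 * stands for whatever transaction mechanism the filesystem uses; real callers
 * normally go through the DQUOT_* wrappers in <linux/quotaops.h>):
 *
 *      handle = journal_start(...);            - open a transaction
 *      DQUOT_INIT(inode);                      - attach dquots via ->initialize
 *      if (DQUOT_ALLOC_SPACE(inode, nbytes))   - wraps dquot_alloc_space()
 *              goto out_journal;               - over quota, typically -EDQUOT
 *      mark_inode_dirty(inode);
 *      journal_stop(handle);
 *
 * This keeps the quota update and the i_blocks/i_bytes update inside one
 * transaction, as the note above requires.
 */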

/*
 * This operation can block, but only after everything is updated
 */
int dquot_alloc_space(struct inode *inode, qsize_t number, int warn)
{
        int cnt, ret = NO_QUOTA;
        char warntype[MAXQUOTAS];

        /* First test before acquiring semaphore - solves deadlocks when we
         * re-enter the quota code and are already holding the semaphore */
        if (IS_NOQUOTA(inode)) {
out_add:
                inode_add_bytes(inode, number);
                return QUOTA_OK;
        }
        for (cnt = 0; cnt < MAXQUOTAS; cnt++)
                warntype[cnt] = NOWARN;

        down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
        if (IS_NOQUOTA(inode)) {        /* Now we can do reliable test... */
                up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
                goto out_add;
        }
        spin_lock(&dq_data_lock);
        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
                if (inode->i_dquot[cnt] == NODQUOT)
                        continue;
                if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt) == NO_QUOTA)
                        goto warn_put_all;
        }
        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
                if (inode->i_dquot[cnt] == NODQUOT)
                        continue;
                dquot_incr_space(inode->i_dquot[cnt], number);
        }
        inode_add_bytes(inode, number);
        ret = QUOTA_OK;
warn_put_all:
        spin_unlock(&dq_data_lock);
        if (ret == QUOTA_OK)
                /* Dirtify all the dquots - this can block when journalling */
                for (cnt = 0; cnt < MAXQUOTAS; cnt++)
                        if (inode->i_dquot[cnt])
                                mark_dquot_dirty(inode->i_dquot[cnt]);
        flush_warnings(inode->i_dquot, warntype);
        up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
        return ret;
}

/*
 * This operation can block, but only after everything is updated
 */
int dquot_alloc_inode(const struct inode *inode, unsigned long number)
{
        int cnt, ret = NO_QUOTA;
        char warntype[MAXQUOTAS];

        /* First test before acquiring semaphore - solves deadlocks when we
         * re-enter the quota code and are already holding the semaphore */
        if (IS_NOQUOTA(inode))
                return QUOTA_OK;
        for (cnt = 0; cnt < MAXQUOTAS; cnt++)
                warntype[cnt] = NOWARN;
        down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
        if (IS_NOQUOTA(inode)) {
                up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
                return QUOTA_OK;
        }
        spin_lock(&dq_data_lock);
        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
                if (inode->i_dquot[cnt] == NODQUOT)
                        continue;
                if (check_idq(inode->i_dquot[cnt], number, warntype+cnt) == NO_QUOTA)
                        goto warn_put_all;
        }

        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
                if (inode->i_dquot[cnt] == NODQUOT)
                        continue;
                dquot_incr_inodes(inode->i_dquot[cnt], number);
        }
        ret = QUOTA_OK;
warn_put_all:
        spin_unlock(&dq_data_lock);
        if (ret == QUOTA_OK)
                /* Dirtify all the dquots - this can block when journalling */
                for (cnt = 0; cnt < MAXQUOTAS; cnt++)
                        if (inode->i_dquot[cnt])
                                mark_dquot_dirty(inode->i_dquot[cnt]);
        flush_warnings((struct dquot **)inode->i_dquot, warntype);
        up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
        return ret;
}

/*
 * This operation can block, but only after everything is updated
 */
int dquot_free_space(struct inode *inode, qsize_t number)
{
        unsigned int cnt;

        /* First test before acquiring semaphore - solves deadlocks when we
         * re-enter the quota code and are already holding the semaphore */
        if (IS_NOQUOTA(inode)) {
out_sub:
                inode_sub_bytes(inode, number);
                return QUOTA_OK;
        }
        down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
        /* Now recheck reliably when holding dqptr_sem */
        if (IS_NOQUOTA(inode)) {
                up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
                goto out_sub;
        }
        spin_lock(&dq_data_lock);
        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
                if (inode->i_dquot[cnt] == NODQUOT)
                        continue;
                dquot_decr_space(inode->i_dquot[cnt], number);
        }
        inode_sub_bytes(inode, number);
        spin_unlock(&dq_data_lock);
        /* Dirtify all the dquots - this can block when journalling */
        for (cnt = 0; cnt < MAXQUOTAS; cnt++)
                if (inode->i_dquot[cnt])
                        mark_dquot_dirty(inode->i_dquot[cnt]);
        up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
        return QUOTA_OK;
}

/*
 * This operation can block, but only after everything is updated
 */
int dquot_free_inode(const struct inode *inode, unsigned long number)
{
        unsigned int cnt;

        /* First test before acquiring semaphore - solves deadlocks when we
         * re-enter the quota code and are already holding the semaphore */
        if (IS_NOQUOTA(inode))
                return QUOTA_OK;
        down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
        /* Now recheck reliably when holding dqptr_sem */
        if (IS_NOQUOTA(inode)) {
                up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
                return QUOTA_OK;
        }
        spin_lock(&dq_data_lock);
        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
                if (inode->i_dquot[cnt] == NODQUOT)
                        continue;
                dquot_decr_inodes(inode->i_dquot[cnt], number);
        }
        spin_unlock(&dq_data_lock);
        /* Dirtify all the dquots - this can block when journalling */
        for (cnt = 0; cnt < MAXQUOTAS; cnt++)
                if (inode->i_dquot[cnt])
                        mark_dquot_dirty(inode->i_dquot[cnt]);
        up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
        return QUOTA_OK;
}

/*
 * Transfer the number of inodes and blocks from one diskquota to another.
 *
 * This operation can block, but only after everything is updated
 * A transaction must be started when entering this function.
 */
int dquot_transfer(struct inode *inode, struct iattr *iattr)
{
        qsize_t space;
        struct dquot *transfer_from[MAXQUOTAS];
        struct dquot *transfer_to[MAXQUOTAS];
        int cnt, ret = NO_QUOTA, chuid = (iattr->ia_valid & ATTR_UID) && inode->i_uid != iattr->ia_uid,
            chgid = (iattr->ia_valid & ATTR_GID) && inode->i_gid != iattr->ia_gid;
        char warntype[MAXQUOTAS];

        /* First test before acquiring semaphore - solves deadlocks when we
         * re-enter the quota code and are already holding the semaphore */
        if (IS_NOQUOTA(inode))
                return QUOTA_OK;
        /* Clear the arrays */
        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
                transfer_to[cnt] = transfer_from[cnt] = NODQUOT;
                warntype[cnt] = NOWARN;
        }
        down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
        /* Now recheck reliably when holding dqptr_sem */
        if (IS_NOQUOTA(inode)) {        /* File without quota accounting? */
                up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
                return QUOTA_OK;
        }
        /* First build the transfer_to list - here we can block on
         * reading/instantiating of dquots. We know that the transaction for
         * us was already started so we don't violate lock ranking here */
        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
                switch (cnt) {
                        case USRQUOTA:
                                if (!chuid)
                                        continue;
                                transfer_to[cnt] = dqget(inode->i_sb, iattr->ia_uid, cnt);
                                break;
                        case GRPQUOTA:
                                if (!chgid)
                                        continue;
                                transfer_to[cnt] = dqget(inode->i_sb, iattr->ia_gid, cnt);
                                break;
                }
        }
        spin_lock(&dq_data_lock);
        space = inode_get_bytes(inode);
        /* Build the transfer_from list and check the limits */
        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
                if (transfer_to[cnt] == NODQUOT)
                        continue;
                transfer_from[cnt] = inode->i_dquot[cnt];
                if (check_idq(transfer_to[cnt], 1, warntype+cnt) == NO_QUOTA ||
                    check_bdq(transfer_to[cnt], space, 0, warntype+cnt) == NO_QUOTA)
                        goto warn_put_all;
        }

        /*
         * Finally perform the needed transfer from transfer_from to transfer_to
         */
        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
                /*
                 * Skip changes for same uid or gid or for turned off quota-type.
                 */
                if (transfer_to[cnt] == NODQUOT)
                        continue;

                /* Due to IO error we might not have transfer_from[] structure */
                if (transfer_from[cnt]) {
                        dquot_decr_inodes(transfer_from[cnt], 1);
                        dquot_decr_space(transfer_from[cnt], space);
                }

                dquot_incr_inodes(transfer_to[cnt], 1);
                dquot_incr_space(transfer_to[cnt], space);

                inode->i_dquot[cnt] = transfer_to[cnt];
        }
        ret = QUOTA_OK;
warn_put_all:
        spin_unlock(&dq_data_lock);
        /* Dirtify all the dquots - this can block when journalling */
        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
                if (transfer_from[cnt])
                        mark_dquot_dirty(transfer_from[cnt]);
                if (transfer_to[cnt])
                        mark_dquot_dirty(transfer_to[cnt]);
        }
        flush_warnings(transfer_to, warntype);

        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
                if (ret == QUOTA_OK && transfer_from[cnt] != NODQUOT)
                        dqput(transfer_from[cnt]);
                if (ret == NO_QUOTA && transfer_to[cnt] != NODQUOT)
                        dqput(transfer_to[cnt]);
        }
        up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
        return ret;
}

/*
 * Write info of quota file to disk
 */
int dquot_commit_info(struct super_block *sb, int type)
{
        int ret;
        struct quota_info *dqopt = sb_dqopt(sb);

        down(&dqopt->dqio_sem);
        ret = dqopt->ops[type]->write_file_info(sb, type);
        up(&dqopt->dqio_sem);
        return ret;
}

/*
 * Definitions of diskquota operations.
 */
struct dquot_operations dquot_operations = {
        .initialize     = dquot_initialize,
        .drop           = dquot_drop,
        .alloc_space    = dquot_alloc_space,
        .alloc_inode    = dquot_alloc_inode,
        .free_space     = dquot_free_space,
        .free_inode     = dquot_free_inode,
        .transfer       = dquot_transfer,
        .write_dquot    = dquot_commit,
        .acquire_dquot  = dquot_acquire,
        .release_dquot  = dquot_release,
        .mark_dirty     = dquot_mark_dquot_dirty,
        .write_info     = dquot_commit_info
};
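
/*
 * Usage sketch (hedged: "myfs" names a made-up filesystem, not one defined in
 * this file): a filesystem that is happy with the generic behaviour simply
 * points its superblock at this table during mount,
 *
 *      sb->dq_op = &dquot_operations;
 *
 * while a journalled filesystem typically starts from a copy, overrides
 * .write_dquot / .acquire_dquot / .release_dquot with wrappers that open a
 * transaction first, and installs the copy instead, e.g. in its fill_super:
 *
 *      myfs_quota_operations = dquot_operations;
 *      myfs_quota_operations.write_dquot = myfs_write_dquot;
 *      sb->dq_op = &myfs_quota_operations;
 */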

static inline void set_enable_flags(struct quota_info *dqopt, int type)
{
        switch (type) {
                case USRQUOTA:
                        dqopt->flags |= DQUOT_USR_ENABLED;
                        break;
                case GRPQUOTA:
                        dqopt->flags |= DQUOT_GRP_ENABLED;
                        break;
        }
}

static inline void reset_enable_flags(struct quota_info *dqopt, int type)
{
        switch (type) {
                case USRQUOTA:
                        dqopt->flags &= ~DQUOT_USR_ENABLED;
                        break;
                case GRPQUOTA:
                        dqopt->flags &= ~DQUOT_GRP_ENABLED;
                        break;
        }
}

/*
 * Turn quota off on a device. type == -1 ==> quotaoff for all types (umount)
 */
int vfs_quota_off(struct super_block *sb, int type)
{
        int cnt;
        struct quota_info *dqopt = sb_dqopt(sb);
        struct inode *toputinode[MAXQUOTAS];

        /* We need to serialize quota_off() for device */
        down(&dqopt->dqonoff_sem);
        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
                toputinode[cnt] = NULL;
                if (type != -1 && cnt != type)
                        continue;
                if (!sb_has_quota_enabled(sb, cnt))
                        continue;
                reset_enable_flags(dqopt, cnt);

                /* Note: these are blocking operations */
                drop_dquot_ref(sb, cnt);
                invalidate_dquots(sb, cnt);
                /*
                 * Now all dquots should be invalidated, all writes done so we should be only
                 * users of the info. No locks needed.
                 */
                if (info_dirty(&dqopt->info[cnt]))
                        sb->dq_op->write_info(sb, cnt);
                if (dqopt->ops[cnt]->free_file_info)
                        dqopt->ops[cnt]->free_file_info(sb, cnt);
                put_quota_format(dqopt->info[cnt].dqi_format);

                toputinode[cnt] = dqopt->files[cnt];
                dqopt->files[cnt] = NULL;
                dqopt->info[cnt].dqi_flags = 0;
                dqopt->info[cnt].dqi_igrace = 0;
                dqopt->info[cnt].dqi_bgrace = 0;
                dqopt->ops[cnt] = NULL;
        }
        up(&dqopt->dqonoff_sem);
        /* Sync the superblock so that buffers with quota data are written to
         * disk (and so userspace sees correct data afterwards). */
        if (sb->s_op->sync_fs)
                sb->s_op->sync_fs(sb, 1);
        sync_blockdev(sb->s_bdev);
        /* Now the quota files are just ordinary files and we can set the
         * inode flags back. Moreover we discard the pagecache so that
         * userspace sees the writes we did bypassing the pagecache. We
         * must also discard the blockdev buffers so that we see the
         * changes done by userspace on the next quotaon() */
        for (cnt = 0; cnt < MAXQUOTAS; cnt++)
                if (toputinode[cnt]) {
                        down(&dqopt->dqonoff_sem);
                        /* If quota was reenabled in the meantime, we have
                         * nothing to do */
                        if (!sb_has_quota_enabled(sb, cnt)) {
                                down(&toputinode[cnt]->i_sem);
                                toputinode[cnt]->i_flags &= ~(S_IMMUTABLE |
                                  S_NOATIME | S_NOQUOTA);
                                truncate_inode_pages(&toputinode[cnt]->i_data, 0);
                                up(&toputinode[cnt]->i_sem);
                                mark_inode_dirty(toputinode[cnt]);
                                iput(toputinode[cnt]);
                        }
                        up(&dqopt->dqonoff_sem);
                }
        if (sb->s_bdev)
                invalidate_bdev(sb->s_bdev, 0);
        return 0;
}
1386 | ||
1387 | /* | |
1388 | * Turn quotas on on a device | |
1389 | */ | |
1390 | ||
1391 | /* Helper function when we already have the inode */ | |
1392 | static int vfs_quota_on_inode(struct inode *inode, int type, int format_id) | |
1393 | { | |
	struct quota_format_type *fmt = find_quota_format(format_id);
	struct super_block *sb = inode->i_sb;
	struct quota_info *dqopt = sb_dqopt(sb);
	int error;
	int oldflags = -1;

	if (!fmt)
		return -ESRCH;
	if (!S_ISREG(inode->i_mode)) {
		error = -EACCES;
		goto out_fmt;
	}
	if (IS_RDONLY(inode)) {
		error = -EROFS;
		goto out_fmt;
	}
	if (!sb->s_op->quota_write || !sb->s_op->quota_read) {
		error = -EINVAL;
		goto out_fmt;
	}

	/* As we bypass the pagecache we must now flush the inode so that
	 * we see all the changes from userspace... */
	write_inode_now(inode, 1);
	/* And now flush the block cache so that kernel sees the changes */
	invalidate_bdev(sb->s_bdev, 0);
	down(&inode->i_sem);
	down(&dqopt->dqonoff_sem);
	if (sb_has_quota_enabled(sb, type)) {
		error = -EBUSY;
		goto out_lock;
	}
	/* We don't want quota and atime on quota files (deadlocks possible)
	 * Also nobody should write to the file - we use special IO operations
	 * which ignore the immutable bit. */
	down_write(&dqopt->dqptr_sem);
	oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE | S_NOQUOTA);
	inode->i_flags |= S_NOQUOTA | S_NOATIME | S_IMMUTABLE;
	up_write(&dqopt->dqptr_sem);
	sb->dq_op->drop(inode);

	error = -EIO;
	dqopt->files[type] = igrab(inode);
	if (!dqopt->files[type])
		goto out_lock;
	error = -EINVAL;
	if (!fmt->qf_ops->check_quota_file(sb, type))
		goto out_file_init;

	dqopt->ops[type] = fmt->qf_ops;
	dqopt->info[type].dqi_format = fmt;
	INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list);
	down(&dqopt->dqio_sem);
	if ((error = dqopt->ops[type]->read_file_info(sb, type)) < 0) {
		up(&dqopt->dqio_sem);
		goto out_file_init;
	}
	up(&dqopt->dqio_sem);
	up(&inode->i_sem);
	set_enable_flags(dqopt, type);

	add_dquot_ref(sb, type);
	up(&dqopt->dqonoff_sem);

	return 0;

out_file_init:
	dqopt->files[type] = NULL;
	iput(inode);
out_lock:
	up(&dqopt->dqonoff_sem);
	if (oldflags != -1) {
		down_write(&dqopt->dqptr_sem);
		/* Set the flags back (in the case of accidental quotaon()
		 * on a wrong file we don't want to mess up the flags) */
		inode->i_flags &= ~(S_NOATIME | S_NOQUOTA | S_IMMUTABLE);
		inode->i_flags |= oldflags;
		up_write(&dqopt->dqptr_sem);
	}
	up(&inode->i_sem);
out_fmt:
	put_quota_format(fmt);

	return error;
}

/* Actual function called from quotactl() */
int vfs_quota_on(struct super_block *sb, int type, int format_id, char *path)
{
	struct nameidata nd;
	int error;

	error = path_lookup(path, LOOKUP_FOLLOW, &nd);
	if (error < 0)
		return error;
	error = security_quota_on(nd.dentry);
	if (error)
		goto out_path;
	/* Quota file not on the same filesystem? */
	if (nd.mnt->mnt_sb != sb)
		error = -EXDEV;
	else
		error = vfs_quota_on_inode(nd.dentry->d_inode, type, format_id);
out_path:
	path_release(&nd);
	return error;
}
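
/*
 * Illustrative sketch (not part of the kernel build): turning quotas on from
 * userspace reaches vfs_quota_on() via the quotactl() syscall.  The device
 * path, quota-file path and format id below are assumptions for the example;
 * the quota file itself is normally created by quotacheck(8).
 *
 *	#include <sys/quota.h>		// QCMD, Q_QUOTAON, USRQUOTA
 *					// (QFMT_VFS_V0 may need <linux/quota.h>
 *					//  on older libcs)
 *
 *	// Enable user quotas on the filesystem backed by /dev/sda1, using
 *	// the vfsv0-format file /mnt/aquota.user (hypothetical paths).
 *	if (quotactl(QCMD(Q_QUOTAON, USRQUOTA), "/dev/sda1",
 *		     QFMT_VFS_V0, (caddr_t)"/mnt/aquota.user") < 0)
 *		perror("quotactl(Q_QUOTAON)");
 */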

/*
 * This function is used when a filesystem needs to initialize quotas at
 * mount time (the quota file is looked up by name relative to the
 * filesystem root).
 */
int vfs_quota_on_mount(struct super_block *sb, char *qf_name,
		int format_id, int type)
{
	struct dentry *dentry;
	int error;

	dentry = lookup_one_len(qf_name, sb->s_root, strlen(qf_name));
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	if (!dentry->d_inode) {
		error = -ENOENT;
		goto out;
	}

	error = security_quota_on(dentry);
	if (!error)
		error = vfs_quota_on_inode(dentry->d_inode, type, format_id);

out:
	dput(dentry);
	return error;
}
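
/*
 * Illustrative sketch (assumption, not taken from this file): a filesystem
 * that carries quota-file names in its mount options might enable journalled
 * quota from its own mount/remount path roughly like this.  EXAMPLE_SB,
 * s_qf_names and s_jquota_fmt are hypothetical placeholders for whatever the
 * filesystem records from its mount options.
 *
 *	static int example_quota_on_mount(struct super_block *sb, int type)
 *	{
 *		return vfs_quota_on_mount(sb, EXAMPLE_SB(sb)->s_qf_names[type],
 *					  EXAMPLE_SB(sb)->s_jquota_fmt, type);
 *	}
 */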

/* Generic routine for getting common part of quota structure */
static void do_get_dqblk(struct dquot *dquot, struct if_dqblk *di)
{
	struct mem_dqblk *dm = &dquot->dq_dqb;

	spin_lock(&dq_data_lock);
	di->dqb_bhardlimit = dm->dqb_bhardlimit;
	di->dqb_bsoftlimit = dm->dqb_bsoftlimit;
	di->dqb_curspace = dm->dqb_curspace;
	di->dqb_ihardlimit = dm->dqb_ihardlimit;
	di->dqb_isoftlimit = dm->dqb_isoftlimit;
	di->dqb_curinodes = dm->dqb_curinodes;
	di->dqb_btime = dm->dqb_btime;
	di->dqb_itime = dm->dqb_itime;
	di->dqb_valid = QIF_ALL;
	spin_unlock(&dq_data_lock);
}

int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di)
{
	struct dquot *dquot;

	down(&sb_dqopt(sb)->dqonoff_sem);
	if (!(dquot = dqget(sb, id, type))) {
		up(&sb_dqopt(sb)->dqonoff_sem);
		return -ESRCH;
	}
	do_get_dqblk(dquot, di);
	dqput(dquot);
	up(&sb_dqopt(sb)->dqonoff_sem);
	return 0;
}
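
/*
 * Illustrative sketch (not part of the kernel build): reading a user's usage
 * and limits from userspace reaches vfs_get_dqblk() via quotactl().  Device
 * path and uid are assumptions for the example; with a recent libc the
 * userspace struct dqblk has the same layout as struct if_dqblk, so
 * dqb_curspace is in bytes and the block limits are in 1 KiB units.
 *
 *	#include <stdio.h>
 *	#include <sys/quota.h>
 *
 *	struct dqblk dq;
 *
 *	if (quotactl(QCMD(Q_GETQUOTA, USRQUOTA), "/dev/sda1",
 *		     1000, (caddr_t)&dq) == 0)
 *		printf("uid 1000: %llu bytes used, soft limit %llu KiB\n",
 *		       (unsigned long long)dq.dqb_curspace,
 *		       (unsigned long long)dq.dqb_bsoftlimit);
 */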

/* Generic routine for setting common part of quota structure */
static void do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
{
	struct mem_dqblk *dm = &dquot->dq_dqb;
	int check_blim = 0, check_ilim = 0;

	spin_lock(&dq_data_lock);
	if (di->dqb_valid & QIF_SPACE) {
		dm->dqb_curspace = di->dqb_curspace;
		check_blim = 1;
	}
	if (di->dqb_valid & QIF_BLIMITS) {
		dm->dqb_bsoftlimit = di->dqb_bsoftlimit;
		dm->dqb_bhardlimit = di->dqb_bhardlimit;
		check_blim = 1;
	}
	if (di->dqb_valid & QIF_INODES) {
		dm->dqb_curinodes = di->dqb_curinodes;
		check_ilim = 1;
	}
	if (di->dqb_valid & QIF_ILIMITS) {
		dm->dqb_isoftlimit = di->dqb_isoftlimit;
		dm->dqb_ihardlimit = di->dqb_ihardlimit;
		check_ilim = 1;
	}
	if (di->dqb_valid & QIF_BTIME)
		dm->dqb_btime = di->dqb_btime;
	if (di->dqb_valid & QIF_ITIME)
		dm->dqb_itime = di->dqb_itime;

	if (check_blim) {
		if (!dm->dqb_bsoftlimit || toqb(dm->dqb_curspace) < dm->dqb_bsoftlimit) {
			dm->dqb_btime = 0;
			clear_bit(DQ_BLKS_B, &dquot->dq_flags);
		}
		else if (!(di->dqb_valid & QIF_BTIME))	/* Set grace only if user hasn't provided his own... */
			dm->dqb_btime = get_seconds() + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_bgrace;
	}
	if (check_ilim) {
		if (!dm->dqb_isoftlimit || dm->dqb_curinodes < dm->dqb_isoftlimit) {
			dm->dqb_itime = 0;
			clear_bit(DQ_INODES_B, &dquot->dq_flags);
		}
		else if (!(di->dqb_valid & QIF_ITIME))	/* Set grace only if user hasn't provided his own... */
			dm->dqb_itime = get_seconds() + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_igrace;
	}
	if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit || dm->dqb_isoftlimit)
		clear_bit(DQ_FAKE_B, &dquot->dq_flags);
	else
		set_bit(DQ_FAKE_B, &dquot->dq_flags);
	spin_unlock(&dq_data_lock);
	mark_dquot_dirty(dquot);
}

int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di)
{
	struct dquot *dquot;

	down(&sb_dqopt(sb)->dqonoff_sem);
	if (!(dquot = dqget(sb, id, type))) {
		up(&sb_dqopt(sb)->dqonoff_sem);
		return -ESRCH;
	}
	do_set_dqblk(dquot, di);
	dqput(dquot);
	up(&sb_dqopt(sb)->dqonoff_sem);
	return 0;
}
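
/*
 * Illustrative sketch (not part of the kernel build): setting limits from
 * userspace.  Only the fields flagged in dqb_valid are applied by
 * do_set_dqblk(); block limits are given in 1 KiB units.  Device path, uid
 * and limit values are assumptions for the example.
 *
 *	#include <string.h>
 *	#include <sys/quota.h>
 *
 *	struct dqblk dq;
 *
 *	memset(&dq, 0, sizeof(dq));
 *	dq.dqb_bsoftlimit = 1024 * 1024;	// 1 GiB soft limit
 *	dq.dqb_bhardlimit = 2 * 1024 * 1024;	// 2 GiB hard limit
 *	dq.dqb_valid = QIF_BLIMITS;		// change only the block limits
 *	if (quotactl(QCMD(Q_SETQUOTA, USRQUOTA), "/dev/sda1",
 *		     1000, (caddr_t)&dq) < 0)
 *		perror("quotactl(Q_SETQUOTA)");
 */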

/* Generic routine for getting common part of quota file information */
int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
{
	struct mem_dqinfo *mi;

	down(&sb_dqopt(sb)->dqonoff_sem);
	if (!sb_has_quota_enabled(sb, type)) {
		up(&sb_dqopt(sb)->dqonoff_sem);
		return -ESRCH;
	}
	mi = sb_dqopt(sb)->info + type;
	spin_lock(&dq_data_lock);
	ii->dqi_bgrace = mi->dqi_bgrace;
	ii->dqi_igrace = mi->dqi_igrace;
	ii->dqi_flags = mi->dqi_flags & DQF_MASK;
	ii->dqi_valid = IIF_ALL;
	spin_unlock(&dq_data_lock);
	up(&sb_dqopt(sb)->dqonoff_sem);
	return 0;
}

/* Generic routine for setting common part of quota file information */
int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
{
	struct mem_dqinfo *mi;

	down(&sb_dqopt(sb)->dqonoff_sem);
	if (!sb_has_quota_enabled(sb, type)) {
		up(&sb_dqopt(sb)->dqonoff_sem);
		return -ESRCH;
	}
	mi = sb_dqopt(sb)->info + type;
	spin_lock(&dq_data_lock);
	if (ii->dqi_valid & IIF_BGRACE)
		mi->dqi_bgrace = ii->dqi_bgrace;
	if (ii->dqi_valid & IIF_IGRACE)
		mi->dqi_igrace = ii->dqi_igrace;
	if (ii->dqi_valid & IIF_FLAGS)
		mi->dqi_flags = (mi->dqi_flags & ~DQF_MASK) | (ii->dqi_flags & DQF_MASK);
	spin_unlock(&dq_data_lock);
	mark_info_dirty(sb, type);
	/* Force write to disk */
	sb->dq_op->write_info(sb, type);
	up(&sb_dqopt(sb)->dqonoff_sem);
	return 0;
}

struct quotactl_ops vfs_quotactl_ops = {
	.quota_on	= vfs_quota_on,
	.quota_off	= vfs_quota_off,
	.quota_sync	= vfs_quota_sync,
	.get_info	= vfs_get_dqinfo,
	.set_info	= vfs_set_dqinfo,
	.get_dqblk	= vfs_get_dqblk,
	.set_dqblk	= vfs_set_dqblk
};
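
/*
 * Illustrative sketch (assumption, not taken from this file): a filesystem
 * that wants this generic quotactl implementation points its superblock's
 * s_qcop at the table above, typically while filling in the superblock.
 * example_fill_super is a hypothetical name.
 *
 *	static int example_fill_super(struct super_block *sb, void *data, int silent)
 *	{
 *		sb->s_qcop = &vfs_quotactl_ops;	// plus the usual fill_super work
 *		return 0;
 *	}
 */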

static ctl_table fs_dqstats_table[] = {
	{
		.ctl_name	= FS_DQ_LOOKUPS,
		.procname	= "lookups",
		.data		= &dqstats.lookups,
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= FS_DQ_DROPS,
		.procname	= "drops",
		.data		= &dqstats.drops,
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= FS_DQ_READS,
		.procname	= "reads",
		.data		= &dqstats.reads,
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= FS_DQ_WRITES,
		.procname	= "writes",
		.data		= &dqstats.writes,
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= FS_DQ_CACHE_HITS,
		.procname	= "cache_hits",
		.data		= &dqstats.cache_hits,
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= FS_DQ_ALLOCATED,
		.procname	= "allocated_dquots",
		.data		= &dqstats.allocated_dquots,
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= FS_DQ_FREE,
		.procname	= "free_dquots",
		.data		= &dqstats.free_dquots,
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= FS_DQ_SYNCS,
		.procname	= "syncs",
		.data		= &dqstats.syncs,
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= FS_DQ_WARNINGS,
		.procname	= "warnings",
		.data		= &flag_print_warnings,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{ .ctl_name = 0 },
};

static ctl_table fs_table[] = {
	{
		.ctl_name	= FS_DQSTATS,
		.procname	= "quota",
		.mode		= 0555,
		.child		= fs_dqstats_table,
	},
	{ .ctl_name = 0 },
};

static ctl_table sys_table[] = {
	{
		.ctl_name	= CTL_FS,
		.procname	= "fs",
		.mode		= 0555,
		.child		= fs_table,
	},
	{ .ctl_name = 0 },
};
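
/*
 * The tables above surface the counters as /proc/sys/fs/quota/lookups,
 * drops, reads, writes, cache_hits, allocated_dquots, free_dquots and syncs
 * (all read-only), plus the writable "warnings" switch behind
 * flag_print_warnings; e.g. (illustrative shell usage, not part of this file)
 *
 *	echo 0 > /proc/sys/fs/quota/warnings
 *
 * turns quota warnings off.
 */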

static int __init dquot_init(void)
{
	int i;
	unsigned long nr_hash, order;

	printk(KERN_NOTICE "VFS: Disk quotas %s\n", __DQUOT_VERSION__);

	register_sysctl_table(sys_table, 0);

	dquot_cachep = kmem_cache_create("dquot",
			sizeof(struct dquot), sizeof(unsigned long) * 4,
			SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|SLAB_PANIC,
			NULL, NULL);

	order = 0;
	dquot_hash = (struct hlist_head *)__get_free_pages(GFP_ATOMIC, order);
	if (!dquot_hash)
		panic("Cannot create dquot hash table");

	/* Find power-of-two hlist_heads which can fit into allocation */
	nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head);
	dq_hash_bits = 0;
	do {
		dq_hash_bits++;
	} while (nr_hash >> dq_hash_bits);
	dq_hash_bits--;

	nr_hash = 1UL << dq_hash_bits;
	dq_hash_mask = nr_hash - 1;
	for (i = 0; i < nr_hash; i++)
		INIT_HLIST_HEAD(dquot_hash + i);

	printk("Dquot-cache hash table entries: %ld (order %ld, %ld bytes)\n",
			nr_hash, order, (PAGE_SIZE << order));

	set_shrinker(DEFAULT_SEEKS, shrink_dqcache_memory);

	return 0;
}
module_init(dquot_init);
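
/*
 * Worked example of the hash sizing above (assuming 4 KiB pages and an
 * 8-byte struct hlist_head, i.e. a 64-bit build): order 0 gives one
 * 4096-byte page, so nr_hash starts at 4096 / 8 = 512.  The do/while loop
 * finds the highest set bit, leaving dq_hash_bits = 9, so the table is
 * rounded down to 1 << 9 = 512 buckets and dq_hash_mask = 511.
 */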

EXPORT_SYMBOL(register_quota_format);
EXPORT_SYMBOL(unregister_quota_format);
EXPORT_SYMBOL(dqstats);
EXPORT_SYMBOL(dq_data_lock);
EXPORT_SYMBOL(vfs_quota_on);
EXPORT_SYMBOL(vfs_quota_on_mount);
EXPORT_SYMBOL(vfs_quota_off);
EXPORT_SYMBOL(vfs_quota_sync);
EXPORT_SYMBOL(vfs_get_dqinfo);
EXPORT_SYMBOL(vfs_set_dqinfo);
EXPORT_SYMBOL(vfs_get_dqblk);
EXPORT_SYMBOL(vfs_set_dqblk);
EXPORT_SYMBOL(dquot_commit);
EXPORT_SYMBOL(dquot_commit_info);
EXPORT_SYMBOL(dquot_acquire);
EXPORT_SYMBOL(dquot_release);
EXPORT_SYMBOL(dquot_mark_dquot_dirty);
EXPORT_SYMBOL(dquot_initialize);
EXPORT_SYMBOL(dquot_drop);
EXPORT_SYMBOL(dquot_alloc_space);
EXPORT_SYMBOL(dquot_alloc_inode);
EXPORT_SYMBOL(dquot_free_space);
EXPORT_SYMBOL(dquot_free_inode);
EXPORT_SYMBOL(dquot_transfer);