/*
 * Copyright (c) 2000-2003 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_alloc.h"
#include "xfs_quota.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_trans_space.h"
#include "xfs_trans_priv.h"
#include "xfs_qm.h"
#include "xfs_cksum.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_bmap_btree.h"

/*
 * Lock order:
 *
 * ip->i_lock
 *   qi->qi_tree_lock
 *     dquot->q_qlock (xfs_dqlock() and friends)
 *       dquot->q_flush (xfs_dqflock() and friends)
 *         qi->qi_lru_lock
 *
 * If two dquots need to be locked the order is user before group/project,
 * otherwise by the lowest id first, see xfs_dqlock2.
 */

#ifdef DEBUG
xfs_buftarg_t *xfs_dqerror_target;
int xfs_do_dqerror;
int xfs_dqreq_num;
int xfs_dqerror_mod = 33;
#endif

struct kmem_zone                *xfs_qm_dqtrxzone;
static struct kmem_zone         *xfs_qm_dqzone;

static struct lock_class_key xfs_dquot_group_class;
static struct lock_class_key xfs_dquot_project_class;

/*
 * This is called to free all the memory associated with a dquot
 */
void
xfs_qm_dqdestroy(
        xfs_dquot_t     *dqp)
{
        ASSERT(list_empty(&dqp->q_lru));

        mutex_destroy(&dqp->q_qlock);
        kmem_zone_free(xfs_qm_dqzone, dqp);

        XFS_STATS_DEC(xs_qm_dquot);
}

/*
 * If default limits are in force, push them into the dquot now.
 * We overwrite the dquot limits only if they are zero and this
 * is not the root dquot.
 */
void
xfs_qm_adjust_dqlimits(
        struct xfs_mount        *mp,
        struct xfs_dquot        *dq)
{
        struct xfs_quotainfo    *q = mp->m_quotainfo;
        struct xfs_disk_dquot   *d = &dq->q_core;
        int                     prealloc = 0;

        ASSERT(d->d_id);

        if (q->qi_bsoftlimit && !d->d_blk_softlimit) {
                d->d_blk_softlimit = cpu_to_be64(q->qi_bsoftlimit);
                prealloc = 1;
        }
        if (q->qi_bhardlimit && !d->d_blk_hardlimit) {
                d->d_blk_hardlimit = cpu_to_be64(q->qi_bhardlimit);
                prealloc = 1;
        }
        if (q->qi_isoftlimit && !d->d_ino_softlimit)
                d->d_ino_softlimit = cpu_to_be64(q->qi_isoftlimit);
        if (q->qi_ihardlimit && !d->d_ino_hardlimit)
                d->d_ino_hardlimit = cpu_to_be64(q->qi_ihardlimit);
        if (q->qi_rtbsoftlimit && !d->d_rtb_softlimit)
                d->d_rtb_softlimit = cpu_to_be64(q->qi_rtbsoftlimit);
        if (q->qi_rtbhardlimit && !d->d_rtb_hardlimit)
                d->d_rtb_hardlimit = cpu_to_be64(q->qi_rtbhardlimit);

        if (prealloc)
                xfs_dquot_set_prealloc_limits(dq);
}

/*
 * Check the limits and timers of a dquot and start or reset timers
 * if necessary.
 * This gets called even when quota enforcement is OFF, which makes our
 * life a little less complicated. (We just don't reject any quota
 * reservations in that case, when enforcement is off).
 * We also return 0 as the values of the timers in Q_GETQUOTA calls, when
 * enforcement's off.
 * In contrast, warnings are a little different in that they don't
 * 'automatically' get started when limits get exceeded.  They do
 * get reset to zero, however, when we find the count to be under
 * the soft limit (they are only ever set non-zero via userspace).
 */
void
xfs_qm_adjust_dqtimers(
        xfs_mount_t             *mp,
        xfs_disk_dquot_t        *d)
{
        ASSERT(d->d_id);

#ifdef DEBUG
        if (d->d_blk_hardlimit)
                ASSERT(be64_to_cpu(d->d_blk_softlimit) <=
                       be64_to_cpu(d->d_blk_hardlimit));
        if (d->d_ino_hardlimit)
                ASSERT(be64_to_cpu(d->d_ino_softlimit) <=
                       be64_to_cpu(d->d_ino_hardlimit));
        if (d->d_rtb_hardlimit)
                ASSERT(be64_to_cpu(d->d_rtb_softlimit) <=
                       be64_to_cpu(d->d_rtb_hardlimit));
#endif

        if (!d->d_btimer) {
                if ((d->d_blk_softlimit &&
                     (be64_to_cpu(d->d_bcount) >
                      be64_to_cpu(d->d_blk_softlimit))) ||
                    (d->d_blk_hardlimit &&
                     (be64_to_cpu(d->d_bcount) >
                      be64_to_cpu(d->d_blk_hardlimit)))) {
                        d->d_btimer = cpu_to_be32(get_seconds() +
                                        mp->m_quotainfo->qi_btimelimit);
                } else {
                        d->d_bwarns = 0;
                }
        } else {
                if ((!d->d_blk_softlimit ||
                     (be64_to_cpu(d->d_bcount) <=
                      be64_to_cpu(d->d_blk_softlimit))) &&
                    (!d->d_blk_hardlimit ||
                     (be64_to_cpu(d->d_bcount) <=
                      be64_to_cpu(d->d_blk_hardlimit)))) {
                        d->d_btimer = 0;
                }
        }

        if (!d->d_itimer) {
                if ((d->d_ino_softlimit &&
                     (be64_to_cpu(d->d_icount) >
                      be64_to_cpu(d->d_ino_softlimit))) ||
                    (d->d_ino_hardlimit &&
                     (be64_to_cpu(d->d_icount) >
                      be64_to_cpu(d->d_ino_hardlimit)))) {
                        d->d_itimer = cpu_to_be32(get_seconds() +
                                        mp->m_quotainfo->qi_itimelimit);
                } else {
                        d->d_iwarns = 0;
                }
        } else {
                if ((!d->d_ino_softlimit ||
                     (be64_to_cpu(d->d_icount) <=
                      be64_to_cpu(d->d_ino_softlimit))) &&
                    (!d->d_ino_hardlimit ||
                     (be64_to_cpu(d->d_icount) <=
                      be64_to_cpu(d->d_ino_hardlimit)))) {
                        d->d_itimer = 0;
                }
        }

        if (!d->d_rtbtimer) {
                if ((d->d_rtb_softlimit &&
                     (be64_to_cpu(d->d_rtbcount) >
                      be64_to_cpu(d->d_rtb_softlimit))) ||
                    (d->d_rtb_hardlimit &&
                     (be64_to_cpu(d->d_rtbcount) >
                      be64_to_cpu(d->d_rtb_hardlimit)))) {
                        d->d_rtbtimer = cpu_to_be32(get_seconds() +
                                        mp->m_quotainfo->qi_rtbtimelimit);
                } else {
                        d->d_rtbwarns = 0;
                }
        } else {
                if ((!d->d_rtb_softlimit ||
                     (be64_to_cpu(d->d_rtbcount) <=
                      be64_to_cpu(d->d_rtb_softlimit))) &&
                    (!d->d_rtb_hardlimit ||
                     (be64_to_cpu(d->d_rtbcount) <=
                      be64_to_cpu(d->d_rtb_hardlimit)))) {
                        d->d_rtbtimer = 0;
                }
        }
}
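
/*
 * Worked example of the timer logic above (descriptive only, the numbers are
 * hypothetical): the first time d_bcount exceeds d_blk_softlimit (or the hard
 * limit) with no timer running, d_btimer is set to "now" plus the grace
 * period qi_btimelimit.  While usage stays at or below the soft limit and no
 * timer is running, d_bwarns is cleared.  Once a timer is running, it is only
 * cleared again when usage is back within both the soft and hard limits.  The
 * inode and realtime-block timers follow the same pattern.
 */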

/*
 * initialize a buffer full of dquots and log the whole thing
 */
STATIC void
xfs_qm_init_dquot_blk(
        xfs_trans_t     *tp,
        xfs_mount_t     *mp,
        xfs_dqid_t      id,
        uint            type,
        xfs_buf_t       *bp)
{
        struct xfs_quotainfo    *q = mp->m_quotainfo;
        xfs_dqblk_t     *d;
        int             curid, i;

        ASSERT(tp);
        ASSERT(xfs_buf_islocked(bp));

        d = bp->b_addr;

        /*
         * ID of the first dquot in the block - id's are zero based.
         */
        curid = id - (id % q->qi_dqperchunk);
        ASSERT(curid >= 0);
        memset(d, 0, BBTOB(q->qi_dqchunklen));
        for (i = 0; i < q->qi_dqperchunk; i++, d++, curid++) {
                d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
                d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
                d->dd_diskdq.d_id = cpu_to_be32(curid);
                d->dd_diskdq.d_flags = type;
                if (xfs_sb_version_hascrc(&mp->m_sb)) {
                        uuid_copy(&d->dd_uuid, &mp->m_sb.sb_uuid);
                        xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
                                         XFS_DQUOT_CRC_OFF);
                }
        }

        xfs_trans_dquot_buf(tp, bp,
                            (type & XFS_DQ_USER ? XFS_BLF_UDQUOT_BUF :
                            ((type & XFS_DQ_PROJ) ? XFS_BLF_PDQUOT_BUF :
                             XFS_BLF_GDQUOT_BUF)));
        xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1);
}
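
/*
 * Example of the id rounding above, with a hypothetical qi_dqperchunk of 30
 * dquots per chunk (the real value depends on the filesystem block size):
 * for id 65, curid = 65 - (65 % 30) = 60, so the freshly initialised chunk
 * holds ids 60..89 and the dquot with id 65 is the sixth entry in it.
 */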

/*
 * Initialize the dynamic speculative preallocation thresholds. The lo/hi
 * watermarks correspond to the soft and hard limits by default. If a soft
 * limit is not specified, we use 95% of the hard limit.
 */
void
xfs_dquot_set_prealloc_limits(struct xfs_dquot *dqp)
{
        __uint64_t space;

        dqp->q_prealloc_hi_wmark = be64_to_cpu(dqp->q_core.d_blk_hardlimit);
        dqp->q_prealloc_lo_wmark = be64_to_cpu(dqp->q_core.d_blk_softlimit);
        if (!dqp->q_prealloc_lo_wmark) {
                dqp->q_prealloc_lo_wmark = dqp->q_prealloc_hi_wmark;
                do_div(dqp->q_prealloc_lo_wmark, 100);
                dqp->q_prealloc_lo_wmark *= 95;
        }

        space = dqp->q_prealloc_hi_wmark;

        do_div(space, 100);
        dqp->q_low_space[XFS_QLOWSP_1_PCNT] = space;
        dqp->q_low_space[XFS_QLOWSP_3_PCNT] = space * 3;
        dqp->q_low_space[XFS_QLOWSP_5_PCNT] = space * 5;
}
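
/*
 * Worked example of the watermark maths (hypothetical limits, not taken from
 * this file): with a block hard limit of 1000 and no soft limit,
 * q_prealloc_hi_wmark = 1000 and q_prealloc_lo_wmark = 1000 / 100 * 95 = 950.
 * The q_low_space table then holds 10, 30 and 50 blocks for the 1%, 3% and 5%
 * thresholds respectively.
 */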

/*
 * Allocate a block and fill it with dquots.
 * This is called when the bmapi finds a hole.
 */
STATIC int
xfs_qm_dqalloc(
        xfs_trans_t     **tpp,
        xfs_mount_t     *mp,
        xfs_dquot_t     *dqp,
        xfs_inode_t     *quotip,
        xfs_fileoff_t   offset_fsb,
        xfs_buf_t       **O_bpp)
{
        xfs_fsblock_t   firstblock;
        xfs_bmap_free_t flist;
        xfs_bmbt_irec_t map;
        int             nmaps, error, committed;
        xfs_buf_t       *bp;
        xfs_trans_t     *tp = *tpp;

        ASSERT(tp != NULL);

        trace_xfs_dqalloc(dqp);

        /*
         * Initialize the bmap freelist prior to calling bmapi code.
         */
        xfs_bmap_init(&flist, &firstblock);
        xfs_ilock(quotip, XFS_ILOCK_EXCL);
        /*
         * Return if this type of quotas is turned off while we didn't
         * have an inode lock
         */
        if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
                xfs_iunlock(quotip, XFS_ILOCK_EXCL);
                return -ESRCH;
        }

        xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL);
        nmaps = 1;
        error = xfs_bmapi_write(tp, quotip, offset_fsb,
                                XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA,
                                &firstblock, XFS_QM_DQALLOC_SPACE_RES(mp),
                                &map, &nmaps, &flist);
        if (error)
                goto error0;
        ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
        ASSERT(nmaps == 1);
        ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
               (map.br_startblock != HOLESTARTBLOCK));

        /*
         * Keep track of the blkno to save a lookup later
         */
        dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);

        /* now we can just get the buffer (there's nothing to read yet) */
        bp = xfs_trans_get_buf(tp, mp->m_ddev_targp,
                               dqp->q_blkno,
                               mp->m_quotainfo->qi_dqchunklen,
                               0);
        if (!bp) {
                error = -ENOMEM;
                goto error1;
        }
        bp->b_ops = &xfs_dquot_buf_ops;

        /*
         * Make a chunk of dquots out of this buffer and log
         * the entire thing.
         */
        xfs_qm_init_dquot_blk(tp, mp, be32_to_cpu(dqp->q_core.d_id),
                              dqp->dq_flags & XFS_DQ_ALLTYPES, bp);

        /*
         * xfs_bmap_finish() may commit the current transaction and
         * start a second transaction if the freelist is not empty.
         *
         * Since we still want to modify this buffer, we need to
         * ensure that the buffer is not released on commit of
         * the first transaction and ensure the buffer is added to the
         * second transaction.
         *
         * If there is only one transaction then don't stop the buffer
         * from being released when it commits later on.
         */

        xfs_trans_bhold(tp, bp);

        if ((error = xfs_bmap_finish(tpp, &flist, &committed))) {
                goto error1;
        }

        if (committed) {
                tp = *tpp;
                xfs_trans_bjoin(tp, bp);
        } else {
                xfs_trans_bhold_release(tp, bp);
        }

        *O_bpp = bp;
        return 0;

error1:
        xfs_bmap_cancel(&flist);
error0:
        xfs_iunlock(quotip, XFS_ILOCK_EXCL);

        return error;
}

STATIC int
xfs_qm_dqrepair(
        struct xfs_mount        *mp,
        struct xfs_trans        *tp,
        struct xfs_dquot        *dqp,
        xfs_dqid_t              firstid,
        struct xfs_buf          **bpp)
{
        int                     error;
        struct xfs_disk_dquot   *ddq;
        struct xfs_dqblk        *d;
        int                     i;

        /*
         * Read the buffer without verification so we get the corrupted
         * buffer returned to us. make sure we verify it on write, though.
         */
        error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, dqp->q_blkno,
                                   mp->m_quotainfo->qi_dqchunklen,
                                   0, bpp, NULL);

        if (error) {
                ASSERT(*bpp == NULL);
                return error;
        }
        (*bpp)->b_ops = &xfs_dquot_buf_ops;

        ASSERT(xfs_buf_islocked(*bpp));
        d = (struct xfs_dqblk *)(*bpp)->b_addr;

        /* Do the actual repair of dquots in this buffer */
        for (i = 0; i < mp->m_quotainfo->qi_dqperchunk; i++) {
                ddq = &d[i].dd_diskdq;
                error = xfs_dqcheck(mp, ddq, firstid + i,
                                    dqp->dq_flags & XFS_DQ_ALLTYPES,
                                    XFS_QMOPT_DQREPAIR, "xfs_qm_dqrepair");
                if (error) {
                        /* repair failed, we're screwed */
                        xfs_trans_brelse(tp, *bpp);
                        return -EIO;
                }
        }

        return 0;
}

/*
 * Maps a dquot to the buffer containing its on-disk version.
 * This returns a ptr to the buffer containing the on-disk dquot
 * in the bpp param, and a ptr to the on-disk dquot within that buffer
 */
STATIC int
xfs_qm_dqtobp(
        xfs_trans_t             **tpp,
        xfs_dquot_t             *dqp,
        xfs_disk_dquot_t        **O_ddpp,
        xfs_buf_t               **O_bpp,
        uint                    flags)
{
        struct xfs_bmbt_irec    map;
        int                     nmaps = 1, error;
        struct xfs_buf          *bp;
        struct xfs_inode        *quotip = xfs_dq_to_quota_inode(dqp);
        struct xfs_mount        *mp = dqp->q_mount;
        xfs_dqid_t              id = be32_to_cpu(dqp->q_core.d_id);
        struct xfs_trans        *tp = (tpp ? *tpp : NULL);
        uint                    lock_mode;

        dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk;

        lock_mode = xfs_ilock_data_map_shared(quotip);
        if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
                /*
                 * Return if this type of quotas is turned off while we
                 * didn't have the quota inode lock.
                 */
                xfs_iunlock(quotip, lock_mode);
                return -ESRCH;
        }

        /*
         * Find the block map; no allocations yet
         */
        error = xfs_bmapi_read(quotip, dqp->q_fileoffset,
                               XFS_DQUOT_CLUSTER_SIZE_FSB, &map, &nmaps, 0);

        xfs_iunlock(quotip, lock_mode);
        if (error)
                return error;

        ASSERT(nmaps == 1);
        ASSERT(map.br_blockcount == 1);

        /*
         * Offset of dquot in the (fixed sized) dquot chunk.
         */
        dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) *
                sizeof(xfs_dqblk_t);

        ASSERT(map.br_startblock != DELAYSTARTBLOCK);
        if (map.br_startblock == HOLESTARTBLOCK) {
                /*
                 * We don't allocate unless we're asked to
                 */
                if (!(flags & XFS_QMOPT_DQALLOC))
                        return -ENOENT;

                ASSERT(tp);
                error = xfs_qm_dqalloc(tpp, mp, dqp, quotip,
                                       dqp->q_fileoffset, &bp);
                if (error)
                        return error;
                tp = *tpp;
        } else {
                trace_xfs_dqtobp_read(dqp);

                /*
                 * store the blkno etc so that we don't have to do the
                 * mapping all the time
                 */
                dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);

                error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
                                           dqp->q_blkno,
                                           mp->m_quotainfo->qi_dqchunklen,
                                           0, &bp, &xfs_dquot_buf_ops);

                if (error == -EFSCORRUPTED && (flags & XFS_QMOPT_DQREPAIR)) {
                        xfs_dqid_t firstid = (xfs_dqid_t)map.br_startoff *
                                                mp->m_quotainfo->qi_dqperchunk;
                        ASSERT(bp == NULL);
                        error = xfs_qm_dqrepair(mp, tp, dqp, firstid, &bp);
                }

                if (error) {
                        ASSERT(bp == NULL);
                        return error;
                }
        }

        ASSERT(xfs_buf_islocked(bp));
        *O_bpp = bp;
        *O_ddpp = bp->b_addr + dqp->q_bufoffset;

        return 0;
}
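
/*
 * Example of the offset arithmetic used above, with a hypothetical
 * qi_dqperchunk of 30: a dquot with id 65 lives in quota-file block
 * q_fileoffset = 65 / 30 = 2, at byte offset
 * q_bufoffset = (65 % 30) * sizeof(xfs_dqblk_t) within that chunk.
 */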

/*
 * Read in the ondisk dquot using dqtobp() then copy it to an incore version,
 * and release the buffer immediately.
 *
 * If XFS_QMOPT_DQALLOC is set, allocate a dquot on disk if needed.
 */
int
xfs_qm_dqread(
        struct xfs_mount        *mp,
        xfs_dqid_t              id,
        uint                    type,
        uint                    flags,
        struct xfs_dquot        **O_dqpp)
{
        struct xfs_dquot        *dqp;
        struct xfs_disk_dquot   *ddqp;
        struct xfs_buf          *bp;
        struct xfs_trans        *tp = NULL;
        int                     error;

        dqp = kmem_zone_zalloc(xfs_qm_dqzone, KM_SLEEP);

        dqp->dq_flags = type;
        dqp->q_core.d_id = cpu_to_be32(id);
        dqp->q_mount = mp;
        INIT_LIST_HEAD(&dqp->q_lru);
        mutex_init(&dqp->q_qlock);
        init_waitqueue_head(&dqp->q_pinwait);

        /*
         * Because we want to use a counting completion, complete
         * the flush completion once to allow a single access to
         * the flush completion without blocking.
         */
        init_completion(&dqp->q_flush);
        complete(&dqp->q_flush);

        /*
         * Make sure group quotas have a different lock class than user
         * quotas.
         */
        switch (type) {
        case XFS_DQ_USER:
                /* uses the default lock class */
                break;
        case XFS_DQ_GROUP:
                lockdep_set_class(&dqp->q_qlock, &xfs_dquot_group_class);
                break;
        case XFS_DQ_PROJ:
                lockdep_set_class(&dqp->q_qlock, &xfs_dquot_project_class);
                break;
        default:
                ASSERT(0);
                break;
        }

        XFS_STATS_INC(xs_qm_dquot);

        trace_xfs_dqread(dqp);

        if (flags & XFS_QMOPT_DQALLOC) {
                tp = xfs_trans_alloc(mp, XFS_TRANS_QM_DQALLOC);
                error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_dqalloc,
                                          XFS_QM_DQALLOC_SPACE_RES(mp), 0);
                if (error)
                        goto error1;
        }

        /*
         * get a pointer to the on-disk dquot and the buffer containing it
         * dqp already knows its own type (GROUP/USER).
         */
        error = xfs_qm_dqtobp(&tp, dqp, &ddqp, &bp, flags);
        if (error) {
                /*
                 * This can happen if quotas got turned off (ESRCH),
                 * or if the dquot didn't exist on disk and we ask to
                 * allocate (ENOENT).
                 */
                trace_xfs_dqread_fail(dqp);
                goto error1;
        }

        /* copy everything from disk dquot to the incore dquot */
        memcpy(&dqp->q_core, ddqp, sizeof(xfs_disk_dquot_t));
        xfs_qm_dquot_logitem_init(dqp);

        /*
         * Reservation counters are defined as reservation plus current usage
         * to avoid having to add every time.
         */
        dqp->q_res_bcount = be64_to_cpu(ddqp->d_bcount);
        dqp->q_res_icount = be64_to_cpu(ddqp->d_icount);
        dqp->q_res_rtbcount = be64_to_cpu(ddqp->d_rtbcount);

        /* initialize the dquot speculative prealloc thresholds */
        xfs_dquot_set_prealloc_limits(dqp);

        /* Mark the buf so that this will stay incore a little longer */
        xfs_buf_set_ref(bp, XFS_DQUOT_REF);

        /*
         * We got the buffer with a xfs_trans_read_buf() (in dqtobp())
         * So we need to release with xfs_trans_brelse().
         * The strategy here is identical to that of inodes; we lock
         * the dquot in xfs_qm_dqget() before making it accessible to
         * others. This is because dquots, like inodes, need a good level of
         * concurrency, and we don't want to take locks on the entire buffers
         * for dquot accesses.
         * Note also that the dquot buffer may even be dirty at this point, if
         * this particular dquot was repaired. We still aren't afraid to
         * brelse it because we have the changes incore.
         */
        ASSERT(xfs_buf_islocked(bp));
        xfs_trans_brelse(tp, bp);

        if (tp) {
                error = xfs_trans_commit(tp);
                if (error)
                        goto error0;
        }

        *O_dqpp = dqp;
        return error;

error1:
        if (tp)
                xfs_trans_cancel(tp);
error0:
        xfs_qm_dqdestroy(dqp);
        *O_dqpp = NULL;
        return error;
}

/*
 * Given the file system, inode OR id, and type (UDQUOT/GDQUOT), return
 * a locked dquot, doing an allocation (if requested) as needed.
 * When both an inode and an id are given, the inode's id takes precedence.
 * That is, if the id changes while we don't hold the ilock inside this
 * function, the new dquot is returned, not necessarily the one requested
 * in the id argument.
 */
int
xfs_qm_dqget(
        xfs_mount_t     *mp,
        xfs_inode_t     *ip,      /* locked inode (optional) */
        xfs_dqid_t      id,       /* uid/projid/gid depending on type */
        uint            type,     /* XFS_DQ_USER/XFS_DQ_PROJ/XFS_DQ_GROUP */
        uint            flags,    /* DQALLOC, DQSUSER, DQREPAIR, DOWARN */
        xfs_dquot_t     **O_dqpp) /* OUT : locked incore dquot */
{
        struct xfs_quotainfo    *qi = mp->m_quotainfo;
        struct radix_tree_root  *tree = xfs_dquot_tree(qi, type);
        struct xfs_dquot        *dqp;
        int                     error;

        ASSERT(XFS_IS_QUOTA_RUNNING(mp));
        if ((! XFS_IS_UQUOTA_ON(mp) && type == XFS_DQ_USER) ||
            (! XFS_IS_PQUOTA_ON(mp) && type == XFS_DQ_PROJ) ||
            (! XFS_IS_GQUOTA_ON(mp) && type == XFS_DQ_GROUP)) {
                return -ESRCH;
        }

#ifdef DEBUG
        if (xfs_do_dqerror) {
                if ((xfs_dqerror_target == mp->m_ddev_targp) &&
                    (xfs_dqreq_num++ % xfs_dqerror_mod) == 0) {
                        xfs_debug(mp, "Returning error in dqget");
                        return -EIO;
                }
        }

        ASSERT(type == XFS_DQ_USER ||
               type == XFS_DQ_PROJ ||
               type == XFS_DQ_GROUP);
        if (ip) {
                ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
                ASSERT(xfs_inode_dquot(ip, type) == NULL);
        }
#endif

restart:
        mutex_lock(&qi->qi_tree_lock);
        dqp = radix_tree_lookup(tree, id);
        if (dqp) {
                xfs_dqlock(dqp);
                if (dqp->dq_flags & XFS_DQ_FREEING) {
                        xfs_dqunlock(dqp);
                        mutex_unlock(&qi->qi_tree_lock);
                        trace_xfs_dqget_freeing(dqp);
                        delay(1);
                        goto restart;
                }

                dqp->q_nrefs++;
                mutex_unlock(&qi->qi_tree_lock);

                trace_xfs_dqget_hit(dqp);
                XFS_STATS_INC(xs_qm_dqcachehits);
                *O_dqpp = dqp;
                return 0;
        }
        mutex_unlock(&qi->qi_tree_lock);
        XFS_STATS_INC(xs_qm_dqcachemisses);

        /*
         * Dquot cache miss. We don't want to keep the inode lock across
         * a (potential) disk read. Also we don't want to deal with the lock
         * ordering between quotainode and this inode. OTOH, dropping the inode
         * lock here means dealing with a chown that can happen before
         * we re-acquire the lock.
         */
        if (ip)
                xfs_iunlock(ip, XFS_ILOCK_EXCL);

        error = xfs_qm_dqread(mp, id, type, flags, &dqp);

        if (ip)
                xfs_ilock(ip, XFS_ILOCK_EXCL);

        if (error)
                return error;

        if (ip) {
                /*
                 * A dquot could be attached to this inode by now, since
                 * we had dropped the ilock.
                 */
                if (xfs_this_quota_on(mp, type)) {
                        struct xfs_dquot        *dqp1;

                        dqp1 = xfs_inode_dquot(ip, type);
                        if (dqp1) {
                                xfs_qm_dqdestroy(dqp);
                                dqp = dqp1;
                                xfs_dqlock(dqp);
                                goto dqret;
                        }
                } else {
                        /* inode stays locked on return */
                        xfs_qm_dqdestroy(dqp);
                        return -ESRCH;
                }
        }

        mutex_lock(&qi->qi_tree_lock);
        error = radix_tree_insert(tree, id, dqp);
        if (unlikely(error)) {
                WARN_ON(error != -EEXIST);

                /*
                 * Duplicate found. Just throw away the new dquot and start
                 * over.
                 */
                mutex_unlock(&qi->qi_tree_lock);
                trace_xfs_dqget_dup(dqp);
                xfs_qm_dqdestroy(dqp);
                XFS_STATS_INC(xs_qm_dquot_dups);
                goto restart;
        }

        /*
         * We return a locked dquot to the caller, with a reference taken
         */
        xfs_dqlock(dqp);
        dqp->q_nrefs = 1;

        qi->qi_dquots++;
        mutex_unlock(&qi->qi_tree_lock);

dqret:
        ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL));
        trace_xfs_dqget_miss(dqp);
        *O_dqpp = dqp;
        return 0;
}
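
/*
 * Illustrative caller pattern for the lookup interface above (a sketch, not
 * code from this file): fetch the user dquot for an id, allocating it on
 * disk if necessary, then drop the reference when done.
 *
 *      struct xfs_dquot        *dqp;
 *      int                     error;
 *
 *      error = xfs_qm_dqget(mp, NULL, uid, XFS_DQ_USER,
 *                           XFS_QMOPT_DQALLOC, &dqp);
 *      if (!error) {
 *              ... dqp is returned locked with a reference held ...
 *              xfs_qm_dqput(dqp);      - unlocks and drops the reference
 *      }
 */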

/*
 * Release a reference to the dquot (decrement ref-count) and unlock it.
 *
 * If there is a group quota attached to this dquot, carefully release that
 * too without tripping over deadlocks'n'stuff.
 */
void
xfs_qm_dqput(
        struct xfs_dquot        *dqp)
{
        ASSERT(dqp->q_nrefs > 0);
        ASSERT(XFS_DQ_IS_LOCKED(dqp));

        trace_xfs_dqput(dqp);

        if (--dqp->q_nrefs == 0) {
                struct xfs_quotainfo    *qi = dqp->q_mount->m_quotainfo;
                trace_xfs_dqput_free(dqp);

                if (list_lru_add(&qi->qi_lru, &dqp->q_lru))
                        XFS_STATS_INC(xs_qm_dquot_unused);
        }
        xfs_dqunlock(dqp);
}

/*
 * Release a dquot. Flush it if dirty, then dqput() it.
 * dquot must not be locked.
 */
void
xfs_qm_dqrele(
        xfs_dquot_t     *dqp)
{
        if (!dqp)
                return;

        trace_xfs_dqrele(dqp);

        xfs_dqlock(dqp);
        /*
         * We don't care to flush it if the dquot is dirty here.
         * That will create stutters that we want to avoid.
         * Instead we do a delayed write when we try to reclaim
         * a dirty dquot. Also xfs_sync will take part of the burden...
         */
        xfs_qm_dqput(dqp);
}

/*
 * This is the dquot flushing I/O completion routine.  It is called
 * from interrupt level when the buffer containing the dquot is
 * flushed to disk.  It is responsible for removing the dquot logitem
 * from the AIL if it has not been re-logged, and unlocking the dquot's
 * flush lock. This behavior is very similar to that of inodes..
 */
STATIC void
xfs_qm_dqflush_done(
        struct xfs_buf          *bp,
        struct xfs_log_item     *lip)
{
        xfs_dq_logitem_t        *qip = (struct xfs_dq_logitem *)lip;
        xfs_dquot_t             *dqp = qip->qli_dquot;
        struct xfs_ail          *ailp = lip->li_ailp;

        /*
         * We only want to pull the item from the AIL if its
         * location in the log has not changed since we started the flush.
         * Thus, we only bother if the dquot's lsn has
         * not changed. First we check the lsn outside the lock
         * since it's cheaper, and then we recheck while
         * holding the lock before removing the dquot from the AIL.
         */
        if ((lip->li_flags & XFS_LI_IN_AIL) &&
            lip->li_lsn == qip->qli_flush_lsn) {

                /* xfs_trans_ail_delete() drops the AIL lock. */
                spin_lock(&ailp->xa_lock);
                if (lip->li_lsn == qip->qli_flush_lsn)
                        xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
                else
                        spin_unlock(&ailp->xa_lock);
        }

        /*
         * Release the dq's flush lock since we're done with it.
         */
        xfs_dqfunlock(dqp);
}

/*
 * Write a modified dquot to disk.
 * The dquot must be locked and the flush lock too taken by caller.
 * The flush lock will not be unlocked until the dquot reaches the disk,
 * but the dquot is free to be unlocked and modified by the caller
 * in the interim. Dquot is still locked on return. This behavior is
 * identical to that of inodes.
 */
int
xfs_qm_dqflush(
        struct xfs_dquot        *dqp,
        struct xfs_buf          **bpp)
{
        struct xfs_mount        *mp = dqp->q_mount;
        struct xfs_buf          *bp;
        struct xfs_disk_dquot   *ddqp;
        int                     error;

        ASSERT(XFS_DQ_IS_LOCKED(dqp));
        ASSERT(!completion_done(&dqp->q_flush));

        trace_xfs_dqflush(dqp);

        *bpp = NULL;

        xfs_qm_dqunpin_wait(dqp);

        /*
         * This may have been unpinned because the filesystem is shutting
         * down forcibly. If that's the case we must not write this dquot
         * to disk, because the log record didn't make it to disk.
         *
         * We also have to remove the log item from the AIL in this case,
         * as we wait for an empty AIL as part of the unmount process.
         */
        if (XFS_FORCED_SHUTDOWN(mp)) {
                struct xfs_log_item     *lip = &dqp->q_logitem.qli_item;
                dqp->dq_flags &= ~XFS_DQ_DIRTY;

                spin_lock(&mp->m_ail->xa_lock);
                if (lip->li_flags & XFS_LI_IN_AIL)
                        xfs_trans_ail_delete(mp->m_ail, lip,
                                             SHUTDOWN_CORRUPT_INCORE);
                else
                        spin_unlock(&mp->m_ail->xa_lock);
                error = -EIO;
                goto out_unlock;
        }

        /*
         * Get the buffer containing the on-disk dquot
         */
        error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
                                   mp->m_quotainfo->qi_dqchunklen, 0, &bp,
                                   &xfs_dquot_buf_ops);
        if (error)
                goto out_unlock;

        /*
         * Calculate the location of the dquot inside the buffer.
         */
        ddqp = bp->b_addr + dqp->q_bufoffset;

        /*
         * A simple sanity check in case we got a corrupted dquot..
         */
        error = xfs_dqcheck(mp, &dqp->q_core, be32_to_cpu(ddqp->d_id), 0,
                            XFS_QMOPT_DOWARN, "dqflush (incore copy)");
        if (error) {
                xfs_buf_relse(bp);
                xfs_dqfunlock(dqp);
                xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
                return -EIO;
        }

        /* This is the only portion of data that needs to persist */
        memcpy(ddqp, &dqp->q_core, sizeof(xfs_disk_dquot_t));

        /*
         * Clear the dirty field and remember the flush lsn for later use.
         */
        dqp->dq_flags &= ~XFS_DQ_DIRTY;

        xfs_trans_ail_copy_lsn(mp->m_ail, &dqp->q_logitem.qli_flush_lsn,
                               &dqp->q_logitem.qli_item.li_lsn);

        /*
         * copy the lsn into the on-disk dquot now while we have the in memory
         * dquot here. This can't be done later in the write verifier as we
         * can't get access to the log item at that point in time.
         *
         * We also calculate the CRC here so that the on-disk dquot in the
         * buffer always has a valid CRC. This ensures there is no possibility
         * of a dquot without an up-to-date CRC getting to disk.
         */
        if (xfs_sb_version_hascrc(&mp->m_sb)) {
                struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddqp;

                dqb->dd_lsn = cpu_to_be64(dqp->q_logitem.qli_item.li_lsn);
                xfs_update_cksum((char *)dqb, sizeof(struct xfs_dqblk),
                                 XFS_DQUOT_CRC_OFF);
        }

        /*
         * Attach an iodone routine so that we can remove this dquot from the
         * AIL and release the flush lock once the dquot is synced to disk.
         */
        xfs_buf_attach_iodone(bp, xfs_qm_dqflush_done,
                              &dqp->q_logitem.qli_item);

        /*
         * If the buffer is pinned then push on the log so we won't
         * get stuck waiting in the write for too long.
         */
        if (xfs_buf_ispinned(bp)) {
                trace_xfs_dqflush_force(dqp);
                xfs_log_force(mp, 0);
        }

        trace_xfs_dqflush_done(dqp);
        *bpp = bp;
        return 0;

out_unlock:
        xfs_dqfunlock(dqp);
        return -EIO;
}
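
/*
 * Note on the flush protocol (descriptive, based on how this function is
 * used by the quota code rather than on anything in this file): the routine
 * above only copies the dquot into the buffer and attaches
 * xfs_qm_dqflush_done() as the iodone handler.  The caller is expected to
 * write the returned buffer (typically via a delayed-write buffer list);
 * the AIL removal and flush-lock release then happen from I/O completion.
 */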

/*
 * Lock two xfs_dquot structures.
 *
 * To avoid deadlocks we always lock the quota structure with
 * the lower id first.
 */
void
xfs_dqlock2(
        xfs_dquot_t     *d1,
        xfs_dquot_t     *d2)
{
        if (d1 && d2) {
                ASSERT(d1 != d2);
                if (be32_to_cpu(d1->q_core.d_id) >
                    be32_to_cpu(d2->q_core.d_id)) {
                        mutex_lock(&d2->q_qlock);
                        mutex_lock_nested(&d1->q_qlock, XFS_QLOCK_NESTED);
                } else {
                        mutex_lock(&d1->q_qlock);
                        mutex_lock_nested(&d2->q_qlock, XFS_QLOCK_NESTED);
                }
        } else if (d1) {
                mutex_lock(&d1->q_qlock);
        } else if (d2) {
                mutex_lock(&d2->q_qlock);
        }
}
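
/*
 * Example of the ordering above: for dquots with ids 3 and 17, in either
 * argument order, id 3's q_qlock is taken first and id 17's is taken with
 * the XFS_QLOCK_NESTED subclass, which tells lockdep that taking two locks
 * of the same class in this order is intentional.
 */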

int __init
xfs_qm_init(void)
{
        xfs_qm_dqzone =
                kmem_zone_init(sizeof(struct xfs_dquot), "xfs_dquot");
        if (!xfs_qm_dqzone)
                goto out;

        xfs_qm_dqtrxzone =
                kmem_zone_init(sizeof(struct xfs_dquot_acct), "xfs_dqtrx");
        if (!xfs_qm_dqtrxzone)
                goto out_free_dqzone;

        return 0;

out_free_dqzone:
        kmem_zone_destroy(xfs_qm_dqzone);
out:
        return -ENOMEM;
}

void
xfs_qm_exit(void)
{
        kmem_zone_destroy(xfs_qm_dqtrxzone);
        kmem_zone_destroy(xfs_qm_dqzone);
}