// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 STRATO. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/btrfs.h>
#include <linux/sched/mm.h>

#include "ctree.h"
#include "transaction.h"
#include "disk-io.h"
#include "locking.h"
#include "ulist.h"
#include "backref.h"
#include "extent_io.h"
#include "qgroup.h"
#include "block-group.h"
#include "sysfs.h"
#include "tree-mod-log.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"
#include "root-tree.h"
#include "tree-checker.h"

enum btrfs_qgroup_mode btrfs_qgroup_mode(struct btrfs_fs_info *fs_info)
{
	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return BTRFS_QGROUP_MODE_DISABLED;
	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE)
		return BTRFS_QGROUP_MODE_SIMPLE;
	return BTRFS_QGROUP_MODE_FULL;
}

bool btrfs_qgroup_enabled(struct btrfs_fs_info *fs_info)
{
	return btrfs_qgroup_mode(fs_info) != BTRFS_QGROUP_MODE_DISABLED;
}

bool btrfs_qgroup_full_accounting(struct btrfs_fs_info *fs_info)
{
	return btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL;
}

/*
 * Helpers to access qgroup reservation
 *
 * Callers should ensure the lock context and type are valid
 */

static u64 qgroup_rsv_total(const struct btrfs_qgroup *qgroup)
{
	u64 ret = 0;
	int i;

	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
		ret += qgroup->rsv.values[i];

	return ret;
}

#ifdef CONFIG_BTRFS_DEBUG
static const char *qgroup_rsv_type_str(enum btrfs_qgroup_rsv_type type)
{
	if (type == BTRFS_QGROUP_RSV_DATA)
		return "data";
	if (type == BTRFS_QGROUP_RSV_META_PERTRANS)
		return "meta_pertrans";
	if (type == BTRFS_QGROUP_RSV_META_PREALLOC)
		return "meta_prealloc";
	return NULL;
}
#endif

static void qgroup_rsv_add(struct btrfs_fs_info *fs_info,
			   struct btrfs_qgroup *qgroup, u64 num_bytes,
			   enum btrfs_qgroup_rsv_type type)
{
	trace_qgroup_update_reserve(fs_info, qgroup, num_bytes, type);
	qgroup->rsv.values[type] += num_bytes;
}

static void qgroup_rsv_release(struct btrfs_fs_info *fs_info,
			       struct btrfs_qgroup *qgroup, u64 num_bytes,
			       enum btrfs_qgroup_rsv_type type)
{
	trace_qgroup_update_reserve(fs_info, qgroup, -(s64)num_bytes, type);
	if (qgroup->rsv.values[type] >= num_bytes) {
		qgroup->rsv.values[type] -= num_bytes;
		return;
	}
#ifdef CONFIG_BTRFS_DEBUG
	WARN_RATELIMIT(1,
		"qgroup %llu %s reserved space underflow, have %llu to free %llu",
		qgroup->qgroupid, qgroup_rsv_type_str(type),
		qgroup->rsv.values[type], num_bytes);
#endif
	qgroup->rsv.values[type] = 0;
}

static void qgroup_rsv_add_by_qgroup(struct btrfs_fs_info *fs_info,
				     struct btrfs_qgroup *dest,
				     struct btrfs_qgroup *src)
{
	int i;

	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
		qgroup_rsv_add(fs_info, dest, src->rsv.values[i], i);
}

static void qgroup_rsv_release_by_qgroup(struct btrfs_fs_info *fs_info,
					 struct btrfs_qgroup *dest,
					 struct btrfs_qgroup *src)
{
	int i;

	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
		qgroup_rsv_release(fs_info, dest, src->rsv.values[i], i);
}
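
/*
 * Note: every qgroup keeps its reservations in a small per-type array
 * (BTRFS_QGROUP_RSV_DATA, BTRFS_QGROUP_RSV_META_PERTRANS,
 * BTRFS_QGROUP_RSV_META_PREALLOC), so propagating reservations between a
 * child and its parent, as in the two *_by_qgroup() helpers above, is
 * simply a per-type add/release loop.
 */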

static void btrfs_qgroup_update_old_refcnt(struct btrfs_qgroup *qg, u64 seq,
					   int mod)
{
	if (qg->old_refcnt < seq)
		qg->old_refcnt = seq;
	qg->old_refcnt += mod;
}

static void btrfs_qgroup_update_new_refcnt(struct btrfs_qgroup *qg, u64 seq,
					   int mod)
{
	if (qg->new_refcnt < seq)
		qg->new_refcnt = seq;
	qg->new_refcnt += mod;
}

static inline u64 btrfs_qgroup_get_old_refcnt(struct btrfs_qgroup *qg, u64 seq)
{
	if (qg->old_refcnt < seq)
		return 0;
	return qg->old_refcnt - seq;
}

static inline u64 btrfs_qgroup_get_new_refcnt(struct btrfs_qgroup *qg, u64 seq)
{
	if (qg->new_refcnt < seq)
		return 0;
	return qg->new_refcnt - seq;
}
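
/*
 * Note on the refcnt helpers above: old_refcnt/new_refcnt are encoded
 * relative to a per-accounting-pass sequence number.  A stored value below
 * @seq means "not touched in this pass" and reads back as 0, otherwise the
 * effective count is the stored value minus @seq.  This avoids having to
 * reset the counters in every qgroup between accounting passes.
 */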

/*
 * glue structure to represent the relations between qgroups.
 */
struct btrfs_qgroup_list {
	struct list_head next_group;
	struct list_head next_member;
	struct btrfs_qgroup *group;
	struct btrfs_qgroup *member;
};
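
/*
 * Each btrfs_qgroup_list instance represents one member -> group edge and is
 * linked into two lists at once: into the member's ->groups list via
 * @next_group and into the parent's ->members list via @next_member (see
 * __add_relation_rb() below).
 */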

static int
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
		   int init_flags);
static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info);

/* must be called with qgroup_ioctl_lock held */
static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info,
					   u64 qgroupid)
{
	struct rb_node *n = fs_info->qgroup_tree.rb_node;
	struct btrfs_qgroup *qgroup;

	while (n) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		if (qgroup->qgroupid < qgroupid)
			n = n->rb_left;
		else if (qgroup->qgroupid > qgroupid)
			n = n->rb_right;
		else
			return qgroup;
	}
	return NULL;
}

/*
 * Add qgroup to the filesystem's qgroup tree.
 *
 * Must be called with qgroup_lock held and @prealloc preallocated.
 *
 * Ownership of @prealloc is transferred to this function, thus the caller
 * should no longer touch @prealloc.
 */
static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
					  struct btrfs_qgroup *prealloc,
					  u64 qgroupid)
{
	struct rb_node **p = &fs_info->qgroup_tree.rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_qgroup *qgroup;

	/* Caller must have pre-allocated @prealloc. */
	ASSERT(prealloc);

	while (*p) {
		parent = *p;
		qgroup = rb_entry(parent, struct btrfs_qgroup, node);

		if (qgroup->qgroupid < qgroupid) {
			p = &(*p)->rb_left;
		} else if (qgroup->qgroupid > qgroupid) {
			p = &(*p)->rb_right;
		} else {
			kfree(prealloc);
			return qgroup;
		}
	}

	qgroup = prealloc;
	qgroup->qgroupid = qgroupid;
	INIT_LIST_HEAD(&qgroup->groups);
	INIT_LIST_HEAD(&qgroup->members);
	INIT_LIST_HEAD(&qgroup->dirty);
	INIT_LIST_HEAD(&qgroup->iterator);
	INIT_LIST_HEAD(&qgroup->nested_iterator);

	rb_link_node(&qgroup->node, parent, p);
	rb_insert_color(&qgroup->node, &fs_info->qgroup_tree);

	return qgroup;
}

static void __del_qgroup_rb(struct btrfs_fs_info *fs_info,
			    struct btrfs_qgroup *qgroup)
{
	struct btrfs_qgroup_list *list;

	list_del(&qgroup->dirty);
	while (!list_empty(&qgroup->groups)) {
		list = list_first_entry(&qgroup->groups,
					struct btrfs_qgroup_list, next_group);
		list_del(&list->next_group);
		list_del(&list->next_member);
		kfree(list);
	}

	while (!list_empty(&qgroup->members)) {
		list = list_first_entry(&qgroup->members,
					struct btrfs_qgroup_list, next_member);
		list_del(&list->next_group);
		list_del(&list->next_member);
		kfree(list);
	}
}

/* must be called with qgroup_lock held */
static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
{
	struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid);

	if (!qgroup)
		return -ENOENT;

	rb_erase(&qgroup->node, &fs_info->qgroup_tree);
	__del_qgroup_rb(fs_info, qgroup);
	return 0;
}

/*
 * Add relation specified by two qgroups.
 *
 * Must be called with qgroup_lock held, the ownership of @prealloc is
 * transferred to this function and caller should not touch it anymore.
 *
 * Return: 0 on success
 *         -ENOENT if one of the qgroups is NULL
 *         <0 other errors
 */
static int __add_relation_rb(struct btrfs_qgroup_list *prealloc,
			     struct btrfs_qgroup *member,
			     struct btrfs_qgroup *parent)
{
	if (!member || !parent) {
		kfree(prealloc);
		return -ENOENT;
	}

	prealloc->group = parent;
	prealloc->member = member;
	list_add_tail(&prealloc->next_group, &member->groups);
	list_add_tail(&prealloc->next_member, &parent->members);

	return 0;
}

/*
 * Add relation specified by two qgroup ids.
 *
 * Must be called with qgroup_lock held.
 *
 * Return: 0 on success
 *         -ENOENT if one of the ids does not exist
 *         <0 other errors
 */
static int add_relation_rb(struct btrfs_fs_info *fs_info,
			   struct btrfs_qgroup_list *prealloc,
			   u64 memberid, u64 parentid)
{
	struct btrfs_qgroup *member;
	struct btrfs_qgroup *parent;

	member = find_qgroup_rb(fs_info, memberid);
	parent = find_qgroup_rb(fs_info, parentid);

	return __add_relation_rb(prealloc, member, parent);
}

/* Must be called with qgroup_lock held */
static int del_relation_rb(struct btrfs_fs_info *fs_info,
			   u64 memberid, u64 parentid)
{
	struct btrfs_qgroup *member;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup_list *list;

	member = find_qgroup_rb(fs_info, memberid);
	parent = find_qgroup_rb(fs_info, parentid);
	if (!member || !parent)
		return -ENOENT;

	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			list_del(&list->next_group);
			list_del(&list->next_member);
			kfree(list);
			return 0;
		}
	}
	return -ENOENT;
}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
			       u64 rfer, u64 excl)
{
	struct btrfs_qgroup *qgroup;

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup)
		return -EINVAL;
	if (qgroup->rfer != rfer || qgroup->excl != excl)
		return -EINVAL;
	return 0;
}
#endif

static void qgroup_mark_inconsistent(struct btrfs_fs_info *fs_info)
{
	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
		return;
	fs_info->qgroup_flags |= (BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT |
				  BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN |
				  BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING);
}

static void qgroup_read_enable_gen(struct btrfs_fs_info *fs_info,
				   struct extent_buffer *leaf, int slot,
				   struct btrfs_qgroup_status_item *ptr)
{
	ASSERT(btrfs_fs_incompat(fs_info, SIMPLE_QUOTA));
	ASSERT(btrfs_item_size(leaf, slot) >= sizeof(*ptr));
	fs_info->qgroup_enable_gen = btrfs_qgroup_status_enable_gen(leaf, ptr);
}

/*
 * The full config is read in one go, only called from open_ctree()
 * It doesn't use any locking, as at this point we're still single-threaded
 */
int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
{
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_path *path = NULL;
	struct extent_buffer *l;
	int slot;
	int ret = 0;
	u64 flags = 0;
	u64 rescan_progress = 0;

	if (!fs_info->quota_root)
		return 0;

	fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL);
	if (!fs_info->qgroup_ulist) {
		ret = -ENOMEM;
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	ret = btrfs_sysfs_add_qgroups(fs_info);
	if (ret < 0)
		goto out;
	/* default this to quota off, in case no status key is found */
	fs_info->qgroup_flags = 0;

	/*
	 * pass 1: read status, all qgroup infos and limits
	 */
	key.objectid = 0;
	key.type = 0;
	key.offset = 0;
	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1);
	if (ret)
		goto out;

	while (1) {
		struct btrfs_qgroup *qgroup;

		slot = path->slots[0];
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.type == BTRFS_QGROUP_STATUS_KEY) {
			struct btrfs_qgroup_status_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_status_item);

			if (btrfs_qgroup_status_version(l, ptr) !=
			    BTRFS_QGROUP_STATUS_VERSION) {
				btrfs_err(fs_info,
					  "old qgroup version, quota disabled");
				goto out;
			}
			fs_info->qgroup_flags = btrfs_qgroup_status_flags(l, ptr);
			if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE) {
				qgroup_read_enable_gen(fs_info, l, slot, ptr);
			} else if (btrfs_qgroup_status_generation(l, ptr) != fs_info->generation) {
				qgroup_mark_inconsistent(fs_info);
				btrfs_err(fs_info,
					  "qgroup generation mismatch, marked as inconsistent");
			}
			rescan_progress = btrfs_qgroup_status_rescan(l, ptr);
			goto next1;
		}

		if (found_key.type != BTRFS_QGROUP_INFO_KEY &&
		    found_key.type != BTRFS_QGROUP_LIMIT_KEY)
			goto next1;

		qgroup = find_qgroup_rb(fs_info, found_key.offset);
		if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
		    (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) {
			btrfs_err(fs_info, "inconsistent qgroup config");
			qgroup_mark_inconsistent(fs_info);
		}
		if (!qgroup) {
			struct btrfs_qgroup *prealloc;

			prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
			if (!prealloc) {
				ret = -ENOMEM;
				goto out;
			}
			qgroup = add_qgroup_rb(fs_info, prealloc, found_key.offset);
		}
		ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
		if (ret < 0)
			goto out;

		switch (found_key.type) {
		case BTRFS_QGROUP_INFO_KEY: {
			struct btrfs_qgroup_info_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_info_item);
			qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr);
			qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr);
			qgroup->excl = btrfs_qgroup_info_excl(l, ptr);
			qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr);
			/* generation currently unused */
			break;
		}
		case BTRFS_QGROUP_LIMIT_KEY: {
			struct btrfs_qgroup_limit_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_limit_item);
			qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr);
			qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr);
			qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr);
			qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr);
			qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr);
			break;
		}
		}
next1:
		ret = btrfs_next_item(quota_root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;
	}
	btrfs_release_path(path);

	/*
	 * pass 2: read all qgroup relations
	 */
	key.objectid = 0;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = 0;
	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0);
	if (ret)
		goto out;
	while (1) {
		struct btrfs_qgroup_list *list = NULL;

		slot = path->slots[0];
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.type != BTRFS_QGROUP_RELATION_KEY)
			goto next2;

		if (found_key.objectid > found_key.offset) {
			/* parent <- member, not needed to build config */
			/* FIXME should we omit the key completely? */
			goto next2;
		}

		list = kzalloc(sizeof(*list), GFP_KERNEL);
		if (!list) {
			ret = -ENOMEM;
			goto out;
		}
		ret = add_relation_rb(fs_info, list, found_key.objectid,
				      found_key.offset);
		list = NULL;
		if (ret == -ENOENT) {
			btrfs_warn(fs_info,
				   "orphan qgroup relation 0x%llx->0x%llx",
				   found_key.objectid, found_key.offset);
			ret = 0;	/* ignore the error */
		}
		if (ret)
			goto out;
next2:
		ret = btrfs_next_item(quota_root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;
	}
out:
	btrfs_free_path(path);
	fs_info->qgroup_flags |= flags;
	if (ret >= 0) {
		if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON)
			set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
		if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
			ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
	} else {
		ulist_free(fs_info->qgroup_ulist);
		fs_info->qgroup_ulist = NULL;
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
		btrfs_sysfs_del_qgroups(fs_info);
	}

	return ret < 0 ? ret : 0;
}

/*
 * Called in close_ctree() when quota is still enabled. This verifies we don't
 * leak some reserved space.
 *
 * Return false if no reserved space is left.
 * Return true if some reserved space is leaked.
 */
bool btrfs_check_quota_leak(struct btrfs_fs_info *fs_info)
{
	struct rb_node *node;
	bool ret = false;

	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED)
		return ret;
	/*
	 * Since we're unmounting, there is no race and no need to grab qgroup
	 * lock. And here we don't go post-order to provide a more user
	 * friendly sorted result.
	 */
	for (node = rb_first(&fs_info->qgroup_tree); node; node = rb_next(node)) {
		struct btrfs_qgroup *qgroup;
		int i;

		qgroup = rb_entry(node, struct btrfs_qgroup, node);
		for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++) {
			if (qgroup->rsv.values[i]) {
				ret = true;
				btrfs_warn(fs_info,
		"qgroup %hu/%llu has unreleased space, type %d rsv %llu",
					   btrfs_qgroup_level(qgroup->qgroupid),
					   btrfs_qgroup_subvolid(qgroup->qgroupid),
					   i, qgroup->rsv.values[i]);
			}
		}
	}
	return ret;
}

/*
 * This is called from close_ctree(), open_ctree() or btrfs_quota_disable().
 * The first two are single-threaded paths, and for the third one we have
 * already set quota_root to NULL with qgroup_lock held, so it is safe to
 * clean up the in-memory structures without holding qgroup_lock.
 */
void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
{
	struct rb_node *n;
	struct btrfs_qgroup *qgroup;

	while ((n = rb_first(&fs_info->qgroup_tree))) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		rb_erase(n, &fs_info->qgroup_tree);
		__del_qgroup_rb(fs_info, qgroup);
		btrfs_sysfs_del_one_qgroup(fs_info, qgroup);
		kfree(qgroup);
	}
	/*
	 * We call btrfs_free_qgroup_config() when unmounting
	 * filesystem and disabling quota, so we set qgroup_ulist
	 * to be null here to avoid double free.
	 */
	ulist_free(fs_info->qgroup_ulist);
	fs_info->qgroup_ulist = NULL;
	btrfs_sysfs_del_qgroups(fs_info);
}

static int add_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
				    u64 dst)
{
	int ret;
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = src;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = dst;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);

	btrfs_mark_buffer_dirty(trans, path->nodes[0]);

	btrfs_free_path(path);
	return ret;
}

static int del_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
				    u64 dst)
{
	int ret;
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = src;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = dst;

	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);
out:
	btrfs_free_path(path);
	return ret;
}

static int add_qgroup_item(struct btrfs_trans_handle *trans,
			   struct btrfs_root *quota_root, u64 qgroupid)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_qgroup_info_item *qgroup_info;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	if (btrfs_is_testing(quota_root->fs_info))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;

	/*
	 * Avoid a transaction abort by catching -EEXIST here. In that
	 * case, we proceed by re-initializing the existing structure
	 * on disk.
	 */

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_info));
	if (ret && ret != -EEXIST)
		goto out;

	leaf = path->nodes[0];
	qgroup_info = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);

	btrfs_mark_buffer_dirty(trans, leaf);

	btrfs_release_path(path);

	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_limit));
	if (ret && ret != -EEXIST)
		goto out;

	leaf = path->nodes[0];
	qgroup_limit = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);

	btrfs_mark_buffer_dirty(trans, leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

static int del_qgroup_item(struct btrfs_trans_handle *trans, u64 qgroupid)
{
	int ret;
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);
	if (ret)
		goto out;

	btrfs_release_path(path);

	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);

out:
	btrfs_free_path(path);
	return ret;
}

static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
				    struct btrfs_qgroup *qgroup)
{
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	int ret;
	int slot;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_LIMIT_KEY;
	key.offset = qgroup->qgroupid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	qgroup_limit = btrfs_item_ptr(l, slot, struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(l, qgroup_limit, qgroup->lim_flags);
	btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, qgroup->max_rfer);
	btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, qgroup->max_excl);
	btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer);
	btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl);

	btrfs_mark_buffer_dirty(trans, l);

out:
	btrfs_free_path(path);
	return ret;
}

static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
				   struct btrfs_qgroup *qgroup)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_info_item *qgroup_info;
	int ret;
	int slot;

	if (btrfs_is_testing(fs_info))
		return 0;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroup->qgroupid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	qgroup_info = btrfs_item_ptr(l, slot, struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer);
	btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr);
	btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
	btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);

	btrfs_mark_buffer_dirty(trans, l);

out:
	btrfs_free_path(path);
	return ret;
}

static int update_qgroup_status_item(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_status_item *ptr;
	int ret;
	int slot;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_STATUS_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags &
				      BTRFS_QGROUP_STATUS_FLAGS_MASK);
	btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
	btrfs_set_qgroup_status_rescan(l, ptr,
				       fs_info->qgroup_rescan_progress.objectid);

	btrfs_mark_buffer_dirty(trans, l);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * called with qgroup_lock held
 */
static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *leaf = NULL;
	int ret;
	int nr = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.offset = 0;
	key.type = 0;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0)
			goto out;
		leaf = path->nodes[0];
		nr = btrfs_header_nritems(leaf);
		if (!nr)
			break;
		/*
		 * Delete the leaves one by one, since the whole tree is
		 * going to be deleted.
		 */
		path->slots[0] = 0;
		ret = btrfs_del_items(trans, root, path, 0, nr);
		if (ret)
			goto out;

		btrfs_release_path(path);
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

int btrfs_quota_enable(struct btrfs_fs_info *fs_info,
		       struct btrfs_ioctl_quota_ctl_args *quota_ctl_args)
{
	struct btrfs_root *quota_root;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_path *path = NULL;
	struct btrfs_qgroup_status_item *ptr;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_qgroup *qgroup = NULL;
	struct btrfs_qgroup *prealloc = NULL;
	struct btrfs_trans_handle *trans = NULL;
	struct ulist *ulist = NULL;
	const bool simple = (quota_ctl_args->cmd == BTRFS_QUOTA_CTL_ENABLE_SIMPLE_QUOTA);
	int ret = 0;
	int slot;

	/*
	 * We need to have subvol_sem write locked, to prevent races between
	 * concurrent tasks trying to enable quotas, because we will unlock
	 * and relock qgroup_ioctl_lock before setting fs_info->quota_root
	 * and before setting BTRFS_FS_QUOTA_ENABLED.
	 */
	lockdep_assert_held_write(&fs_info->subvol_sem);

	if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) {
		btrfs_err(fs_info,
			  "qgroups are currently unsupported in extent tree v2");
		return -EINVAL;
	}

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (fs_info->quota_root)
		goto out;

	ulist = ulist_alloc(GFP_KERNEL);
	if (!ulist) {
		ret = -ENOMEM;
		goto out;
	}

	ret = btrfs_sysfs_add_qgroups(fs_info);
	if (ret < 0)
		goto out;

	/*
	 * Unlock qgroup_ioctl_lock before starting the transaction. This is to
	 * avoid lock acquisition inversion problems (reported by lockdep) between
	 * qgroup_ioctl_lock and the vfs freeze semaphores, acquired when we
	 * start a transaction.
	 * After we started the transaction lock qgroup_ioctl_lock again and
	 * check if someone else created the quota root in the meanwhile. If so,
	 * just return success and release the transaction handle.
	 *
	 * Also we don't need to worry about someone else calling
	 * btrfs_sysfs_add_qgroups() after we unlock and getting an error because
	 * that function returns 0 (success) when the sysfs entries already exist.
	 */
	mutex_unlock(&fs_info->qgroup_ioctl_lock);

	/*
	 * 1 for quota root item
	 * 1 for BTRFS_QGROUP_STATUS item
	 *
	 * Yet we also need 2*n items for the QGROUP_INFO/QGROUP_LIMIT items
	 * per subvolume. However those are not currently reserved since it
	 * would be a lot of overkill.
	 */
	trans = btrfs_start_transaction(tree_root, 2);

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out;
	}

	if (fs_info->quota_root)
		goto out;

	fs_info->qgroup_ulist = ulist;
	ulist = NULL;

	/*
	 * initially create the quota tree
	 */
	quota_root = btrfs_create_tree(trans, BTRFS_QUOTA_TREE_OBJECTID);
	if (IS_ERR(quota_root)) {
		ret = PTR_ERR(quota_root);
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		btrfs_abort_transaction(trans, ret);
		goto out_free_root;
	}

	key.objectid = 0;
	key.type = BTRFS_QGROUP_STATUS_KEY;
	key.offset = 0;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*ptr));
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	leaf = path->nodes[0];
	ptr = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid);
	btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION);
	fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON;
	if (simple) {
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE;
		btrfs_set_qgroup_status_enable_gen(leaf, ptr, trans->transid);
	} else {
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	}
	btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags &
				      BTRFS_QGROUP_STATUS_FLAGS_MASK);
	btrfs_set_qgroup_status_rescan(leaf, ptr, 0);

	btrfs_mark_buffer_dirty(trans, leaf);

	key.objectid = 0;
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = 0;

	btrfs_release_path(path);
	ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0);
	if (ret > 0)
		goto out_add_root;
	if (ret < 0) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	while (1) {
		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.type == BTRFS_ROOT_REF_KEY) {

			/* Release locks on tree_root before we access quota_root */
			btrfs_release_path(path);

			/* We should not have a stray @prealloc pointer. */
			ASSERT(prealloc == NULL);
			prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
			if (!prealloc) {
				ret = -ENOMEM;
				btrfs_abort_transaction(trans, ret);
				goto out_free_path;
			}

			ret = add_qgroup_item(trans, quota_root,
					      found_key.offset);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				goto out_free_path;
			}

			qgroup = add_qgroup_rb(fs_info, prealloc, found_key.offset);
			prealloc = NULL;
			if (IS_ERR(qgroup)) {
				ret = PTR_ERR(qgroup);
				btrfs_abort_transaction(trans, ret);
				goto out_free_path;
			}
			ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
			if (ret < 0) {
				btrfs_abort_transaction(trans, ret);
				goto out_free_path;
			}
			ret = btrfs_search_slot_for_read(tree_root, &found_key,
							 path, 1, 0);
			if (ret < 0) {
				btrfs_abort_transaction(trans, ret);
				goto out_free_path;
			}
			if (ret > 0) {
				/*
				 * Shouldn't happen, but in case it does we
				 * don't need to do the btrfs_next_item, just
				 * continue.
				 */
				continue;
			}
		}
		ret = btrfs_next_item(tree_root, path);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out_free_path;
		}
		if (ret)
			break;
	}

out_add_root:
	btrfs_release_path(path);
	ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	ASSERT(prealloc == NULL);
	prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
	if (!prealloc) {
		ret = -ENOMEM;
		goto out_free_path;
	}
	qgroup = add_qgroup_rb(fs_info, prealloc, BTRFS_FS_TREE_OBJECTID);
	prealloc = NULL;
	ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
	if (ret < 0) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	fs_info->qgroup_enable_gen = trans->transid;

	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	/*
	 * Commit the transaction while not holding qgroup_ioctl_lock, to avoid
	 * a deadlock with tasks concurrently doing other qgroup operations, such
	 * as adding/removing qgroups or adding/deleting qgroup relations for
	 * example, because all qgroup operations first start or join a transaction
	 * and then lock the qgroup_ioctl_lock mutex.
	 * We are safe from a concurrent task trying to enable quotas, by calling
	 * this function, since we are serialized by fs_info->subvol_sem.
	 */
	ret = btrfs_commit_transaction(trans);
	trans = NULL;
	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (ret)
		goto out_free_path;

	/*
	 * Set quota enabled flag after committing the transaction, to avoid
	 * deadlocks on fs_info->qgroup_ioctl_lock with concurrent snapshot
	 * creation.
	 */
	spin_lock(&fs_info->qgroup_lock);
	fs_info->quota_root = quota_root;
	set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
	if (simple)
		btrfs_set_fs_incompat(fs_info, SIMPLE_QUOTA);
	spin_unlock(&fs_info->qgroup_lock);

	/* Skip rescan for simple qgroups. */
	if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
		goto out_free_path;

	ret = qgroup_rescan_init(fs_info, 0, 1);
	if (!ret) {
		qgroup_rescan_zero_tracking(fs_info);
		fs_info->qgroup_rescan_running = true;
		btrfs_queue_work(fs_info->qgroup_rescan_workers,
				 &fs_info->qgroup_rescan_work);
	} else {
		/*
		 * We have set both BTRFS_FS_QUOTA_ENABLED and
		 * BTRFS_QGROUP_STATUS_FLAG_ON, so we can only fail with
		 * -EINPROGRESS. That can happen because someone started the
		 * rescan worker by calling quota rescan ioctl before we
		 * attempted to initialize the rescan worker. Failure due to
		 * quotas disabled in the meanwhile is not possible, because
		 * we are holding a write lock on fs_info->subvol_sem, which
		 * is also acquired when disabling quotas.
		 * Ignore such error, and any other error would need to undo
		 * everything we did in the transaction we just committed.
		 */
		ASSERT(ret == -EINPROGRESS);
		ret = 0;
	}

out_free_path:
	btrfs_free_path(path);
out_free_root:
	if (ret)
		btrfs_put_root(quota_root);
out:
	if (ret) {
		ulist_free(fs_info->qgroup_ulist);
		fs_info->qgroup_ulist = NULL;
		btrfs_sysfs_del_qgroups(fs_info);
	}
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	if (ret && trans)
		btrfs_end_transaction(trans);
	else if (trans)
		ret = btrfs_end_transaction(trans);
	ulist_free(ulist);
	kfree(prealloc);
	return ret;
}

/*
 * It is possible to have outstanding ordered extents which reserved bytes
 * before we disabled. We need to fully flush delalloc, ordered extents, and a
 * commit to ensure that we don't leak such reservations, only to have them
 * come back if we re-enable.
 *
 * - enable simple quotas
 * - reserve space
 * - release it, store rsv_bytes in OE
 * - disable quotas
 * - enable simple quotas (qgroup rsv are all 0)
 * - OE finishes
 * - run delayed refs
 * - free rsv_bytes, resulting in miscounting or even underflow
 */
static int flush_reservations(struct btrfs_fs_info *fs_info)
{
	struct btrfs_trans_handle *trans;
	int ret;

	ret = btrfs_start_delalloc_roots(fs_info, LONG_MAX, false);
	if (ret)
		return ret;
	btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
	trans = btrfs_join_transaction(fs_info->tree_root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);
	btrfs_commit_transaction(trans);

	return ret;
}

int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *quota_root;
	struct btrfs_trans_handle *trans = NULL;
	int ret = 0;

	/*
	 * We need to have subvol_sem write locked to prevent races with
	 * snapshot creation.
	 */
	lockdep_assert_held_write(&fs_info->subvol_sem);

	/*
	 * Lock the cleaner mutex to prevent races with concurrent relocation,
	 * because relocation may be building backrefs for blocks of the quota
	 * root while we are deleting the root. This is like dropping fs roots
	 * of deleted snapshots/subvolumes, we need the same protection.
	 *
	 * This also prevents races between concurrent tasks trying to disable
	 * quotas, because we will unlock and relock qgroup_ioctl_lock across
	 * BTRFS_FS_QUOTA_ENABLED changes.
	 */
	mutex_lock(&fs_info->cleaner_mutex);

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root)
		goto out;

	/*
	 * Unlock the qgroup_ioctl_lock mutex before waiting for the rescan worker to
	 * complete. Otherwise we can deadlock because btrfs_remove_qgroup() needs
	 * to lock that mutex while holding a transaction handle and the rescan
	 * worker needs to commit a transaction.
	 */
	mutex_unlock(&fs_info->qgroup_ioctl_lock);

	/*
	 * Request qgroup rescan worker to complete and wait for it. This wait
	 * must be done before transaction start for quota disable since it may
	 * deadlock with transaction by the qgroup rescan worker.
	 */
	clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
	btrfs_qgroup_wait_for_completion(fs_info, false);

	ret = flush_reservations(fs_info);
	if (ret)
		goto out_unlock_cleaner;

	/*
	 * 1 For the root item
	 *
	 * We should also reserve enough items for the quota tree deletion in
	 * btrfs_clean_quota_tree but this is not done.
	 *
	 * Also, we must always start a transaction without holding the mutex
	 * qgroup_ioctl_lock, see btrfs_quota_enable().
	 */
	trans = btrfs_start_transaction(fs_info->tree_root, 1);

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
		goto out;
	}

	if (!fs_info->quota_root)
		goto out;

	spin_lock(&fs_info->qgroup_lock);
	quota_root = fs_info->quota_root;
	fs_info->quota_root = NULL;
	fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
	fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE;
	fs_info->qgroup_drop_subtree_thres = BTRFS_MAX_LEVEL;
	spin_unlock(&fs_info->qgroup_lock);

	btrfs_free_qgroup_config(fs_info);

	ret = btrfs_clean_quota_tree(trans, quota_root);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	ret = btrfs_del_root(trans, &quota_root->root_key);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	spin_lock(&fs_info->trans_lock);
	list_del(&quota_root->dirty_list);
	spin_unlock(&fs_info->trans_lock);

	btrfs_tree_lock(quota_root->node);
	btrfs_clear_buffer_dirty(trans, quota_root->node);
	btrfs_tree_unlock(quota_root->node);
	btrfs_free_tree_block(trans, btrfs_root_id(quota_root),
			      quota_root->node, 0, 1);

	btrfs_put_root(quota_root);

out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	if (ret && trans)
		btrfs_end_transaction(trans);
	else if (trans)
		ret = btrfs_commit_transaction(trans);
out_unlock_cleaner:
	mutex_unlock(&fs_info->cleaner_mutex);

	return ret;
}

static void qgroup_dirty(struct btrfs_fs_info *fs_info,
			 struct btrfs_qgroup *qgroup)
{
	if (list_empty(&qgroup->dirty))
		list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
}

static void qgroup_iterator_add(struct list_head *head, struct btrfs_qgroup *qgroup)
{
	if (!list_empty(&qgroup->iterator))
		return;

	list_add_tail(&qgroup->iterator, head);
}

static void qgroup_iterator_clean(struct list_head *head)
{
	while (!list_empty(head)) {
		struct btrfs_qgroup *qgroup;

		qgroup = list_first_entry(head, struct btrfs_qgroup, iterator);
		list_del_init(&qgroup->iterator);
	}
}
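
/*
 * The iterator helpers above allow walking a dynamically discovered set of
 * qgroups without any memory allocation: each qgroup embeds an ->iterator
 * list_head, qgroup_iterator_add() links it into a caller-owned list at most
 * once (the list_empty() check skips duplicates), and qgroup_iterator_clean()
 * unlinks everything once the walk is done.
 */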
1472 | ||
9c8b35b1 | 1473 | /* |
429d6275 QW |
1474 | * The easy accounting, we're updating qgroup relationship whose child qgroup |
1475 | * only has exclusive extents. | |
1476 | * | |
52042d8e | 1477 | * In this case, all exclusive extents will also be exclusive for parent, so |
429d6275 QW |
1478 | * excl/rfer just get added/removed. |
1479 | * | |
1480 | * So is qgroup reservation space, which should also be added/removed to | |
1481 | * parent. | |
1482 | * Or when child tries to release reservation space, parent will underflow its | |
1483 | * reservation (for relationship adding case). | |
9c8b35b1 QW |
1484 | * |
1485 | * Caller should hold fs_info->qgroup_lock. | |
1486 | */ | |
a0bdc04b | 1487 | static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info, u64 ref_root, |
429d6275 | 1488 | struct btrfs_qgroup *src, int sign) |
9c8b35b1 QW |
1489 | { |
1490 | struct btrfs_qgroup *qgroup; | |
a0bdc04b QW |
1491 | struct btrfs_qgroup *cur; |
1492 | LIST_HEAD(qgroup_list); | |
429d6275 | 1493 | u64 num_bytes = src->excl; |
9c8b35b1 QW |
1494 | int ret = 0; |
1495 | ||
1496 | qgroup = find_qgroup_rb(fs_info, ref_root); | |
1497 | if (!qgroup) | |
1498 | goto out; | |
1499 | ||
a0bdc04b QW |
1500 | qgroup_iterator_add(&qgroup_list, qgroup); |
1501 | list_for_each_entry(cur, &qgroup_list, iterator) { | |
1502 | struct btrfs_qgroup_list *glist; | |
9c8b35b1 | 1503 | |
9c8b35b1 QW |
1504 | qgroup->rfer += sign * num_bytes; |
1505 | qgroup->rfer_cmpr += sign * num_bytes; | |
a0bdc04b | 1506 | |
9c8b35b1 QW |
1507 | WARN_ON(sign < 0 && qgroup->excl < num_bytes); |
1508 | qgroup->excl += sign * num_bytes; | |
a0bdc04b QW |
1509 | qgroup->excl_cmpr += sign * num_bytes; |
1510 | ||
429d6275 | 1511 | if (sign > 0) |
64ee4e75 | 1512 | qgroup_rsv_add_by_qgroup(fs_info, qgroup, src); |
429d6275 | 1513 | else |
64ee4e75 | 1514 | qgroup_rsv_release_by_qgroup(fs_info, qgroup, src); |
9c8b35b1 QW |
1515 | qgroup_dirty(fs_info, qgroup); |
1516 | ||
a0bdc04b QW |
1517 | /* Append parent qgroups to @qgroup_list. */ |
1518 | list_for_each_entry(glist, &qgroup->groups, next_group) | |
1519 | qgroup_iterator_add(&qgroup_list, glist->group); | |
9c8b35b1 QW |
1520 | } |
1521 | ret = 0; | |
1522 | out: | |
a0bdc04b | 1523 | qgroup_iterator_clean(&qgroup_list); |
9c8b35b1 QW |
1524 | return ret; |
1525 | } | |
1526 | ||
1527 | ||
1528 | /* | |
1529 | * Quick path for updating qgroup with only excl refs. | |
1530 | * | |
1531 | * In that case, just updating all parents will be enough. |
1532 | * Otherwise we need to do a full rescan. |
1533 | * Caller should also hold fs_info->qgroup_lock. | |
1534 | * | |
1535 | * Return 0 for a quick update, return >0 if a full rescan is needed |
1536 | * and the INCONSISTENT flag is set. |
1537 | * Return <0 for other errors. |
1538 | */ | |
1539 | static int quick_update_accounting(struct btrfs_fs_info *fs_info, | |
a0bdc04b | 1540 | u64 src, u64 dst, int sign) |
9c8b35b1 QW |
1541 | { |
1542 | struct btrfs_qgroup *qgroup; | |
1543 | int ret = 1; | |
1544 | int err = 0; | |
1545 | ||
1546 | qgroup = find_qgroup_rb(fs_info, src); | |
1547 | if (!qgroup) | |
1548 | goto out; | |
1549 | if (qgroup->excl == qgroup->rfer) { | |
1550 | ret = 0; | |
a0bdc04b | 1551 | err = __qgroup_excl_accounting(fs_info, dst, qgroup, sign); |
9c8b35b1 QW |
1552 | if (err < 0) { |
1553 | ret = err; | |
1554 | goto out; | |
1555 | } | |
1556 | } | |
1557 | out: | |
1558 | if (ret) | |
1559 | fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; | |
1560 | return ret; | |
1561 | } | |
1562 | ||
5343cd93 | 1563 | int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src, u64 dst) |
bed92eae | 1564 | { |
9f8a6ce6 | 1565 | struct btrfs_fs_info *fs_info = trans->fs_info; |
b7fef4f5 WS |
1566 | struct btrfs_qgroup *parent; |
1567 | struct btrfs_qgroup *member; | |
534e6623 | 1568 | struct btrfs_qgroup_list *list; |
79ace7b8 | 1569 | struct btrfs_qgroup_list *prealloc = NULL; |
bed92eae AJ |
1570 | int ret = 0; |
1571 | ||
8465ecec QW |
1572 | /* Check the level of src and dst first */ |
1573 | if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst)) | |
1574 | return -EINVAL; | |
1575 | ||
f2f6ed3d | 1576 | mutex_lock(&fs_info->qgroup_ioctl_lock); |
e3b0edd2 | 1577 | if (!fs_info->quota_root) { |
8a36e408 | 1578 | ret = -ENOTCONN; |
f2f6ed3d WS |
1579 | goto out; |
1580 | } | |
b7fef4f5 WS |
1581 | member = find_qgroup_rb(fs_info, src); |
1582 | parent = find_qgroup_rb(fs_info, dst); | |
1583 | if (!member || !parent) { | |
1584 | ret = -EINVAL; | |
1585 | goto out; | |
1586 | } | |
bed92eae | 1587 | |
534e6623 WS |
1588 | /* Check if such a qgroup relation already exists */ |
1589 | list_for_each_entry(list, &member->groups, next_group) { | |
1590 | if (list->group == parent) { | |
1591 | ret = -EEXIST; | |
1592 | goto out; | |
1593 | } | |
1594 | } | |
1595 | ||
79ace7b8 QW |
1596 | prealloc = kzalloc(sizeof(*list), GFP_NOFS); |
1597 | if (!prealloc) { | |
1598 | ret = -ENOMEM; | |
1599 | goto out; | |
1600 | } | |
711169c4 | 1601 | ret = add_qgroup_relation_item(trans, src, dst); |
bed92eae | 1602 | if (ret) |
f2f6ed3d | 1603 | goto out; |
bed92eae | 1604 | |
711169c4 | 1605 | ret = add_qgroup_relation_item(trans, dst, src); |
bed92eae | 1606 | if (ret) { |
99d7f09a | 1607 | del_qgroup_relation_item(trans, src, dst); |
f2f6ed3d | 1608 | goto out; |
bed92eae AJ |
1609 | } |
1610 | ||
1611 | spin_lock(&fs_info->qgroup_lock); | |
79ace7b8 QW |
1612 | ret = __add_relation_rb(prealloc, member, parent); |
1613 | prealloc = NULL; | |
9c8b35b1 QW |
1614 | if (ret < 0) { |
1615 | spin_unlock(&fs_info->qgroup_lock); | |
1616 | goto out; | |
1617 | } | |
a0bdc04b | 1618 | ret = quick_update_accounting(fs_info, src, dst, 1); |
bed92eae | 1619 | spin_unlock(&fs_info->qgroup_lock); |
f2f6ed3d | 1620 | out: |
79ace7b8 | 1621 | kfree(prealloc); |
f2f6ed3d | 1622 | mutex_unlock(&fs_info->qgroup_ioctl_lock); |
bed92eae AJ |
1623 | return ret; |
1624 | } | |
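btrfs_add_qgroup_relation() is ultimately driven by the qgroup assign ioctl, and the level check at its top only allows a lower-level child to be attached to a higher-level parent (the level lives in the top 16 bits of the qgroup id, so "1/100" is (1ULL << 48) | 100). The sketch below is a hedged userspace illustration of that rule, not part of this file; it assumes BTRFS_IOC_QGROUP_CREATE / BTRFS_IOC_QGROUP_ASSIGN and their argument structs from <linux/btrfs.h>, needs CAP_SYS_ADMIN and quotas enabled, and trims error handling:

/*
 * Hedged userspace sketch of the level rule checked above: the child (src)
 * must be at a strictly lower level than the parent (dst).  Assumes the
 * qgroup ioctls from <linux/btrfs.h>; adjust if your headers differ.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/btrfs.h>

int main(int argc, char **argv)
{
	struct btrfs_ioctl_qgroup_create_args create = { 0 };
	struct btrfs_ioctl_qgroup_assign_args assign = { 0 };
	__u64 parent = (1ULL << 48) | 100;	/* qgroup "1/100" */
	__u64 child = 256;			/* level 0: a subvolume's qgroup */
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <btrfs mount point>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	create.create = 1;			/* 1 = create, 0 = remove */
	create.qgroupid = parent;
	if (ioctl(fd, BTRFS_IOC_QGROUP_CREATE, &create) < 0)
		perror("BTRFS_IOC_QGROUP_CREATE");

	assign.assign = 1;			/* 1 = add relation, 0 = remove */
	assign.src = child;			/* must be a lower level than dst */
	assign.dst = parent;
	if (ioctl(fd, BTRFS_IOC_QGROUP_ASSIGN, &assign) < 0)
		perror("BTRFS_IOC_QGROUP_ASSIGN");

	close(fd);
	return 0;
}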
1625 | ||
6b36f1aa LF |
1626 | static int __del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src, |
1627 | u64 dst) | |
bed92eae | 1628 | { |
6b36f1aa | 1629 | struct btrfs_fs_info *fs_info = trans->fs_info; |
534e6623 WS |
1630 | struct btrfs_qgroup *parent; |
1631 | struct btrfs_qgroup *member; | |
1632 | struct btrfs_qgroup_list *list; | |
73798c46 | 1633 | bool found = false; |
bed92eae | 1634 | int ret = 0; |
73798c46 | 1635 | int ret2; |
bed92eae | 1636 | |
e3b0edd2 | 1637 | if (!fs_info->quota_root) { |
8a36e408 | 1638 | ret = -ENOTCONN; |
f2f6ed3d WS |
1639 | goto out; |
1640 | } | |
bed92eae | 1641 | |
534e6623 WS |
1642 | member = find_qgroup_rb(fs_info, src); |
1643 | parent = find_qgroup_rb(fs_info, dst); | |
73798c46 QW |
1644 | /* |
1645 | * If the parent/member pair doesn't exist, then only try to delete the |
1646 | * dead relation items. |
1647 | */ | |
1648 | if (!member || !parent) | |
1649 | goto delete_item; | |
534e6623 WS |
1650 | |
1651 | /* Check if such a qgroup relation already exists */ |
1652 | list_for_each_entry(list, &member->groups, next_group) { | |
73798c46 QW |
1653 | if (list->group == parent) { |
1654 | found = true; | |
1655 | break; | |
1656 | } | |
534e6623 | 1657 | } |
73798c46 QW |
1658 | |
1659 | delete_item: | |
99d7f09a | 1660 | ret = del_qgroup_relation_item(trans, src, dst); |
73798c46 QW |
1661 | if (ret < 0 && ret != -ENOENT) |
1662 | goto out; | |
1663 | ret2 = del_qgroup_relation_item(trans, dst, src); | |
1664 | if (ret2 < 0 && ret2 != -ENOENT) | |
1665 | goto out; | |
bed92eae | 1666 | |
73798c46 QW |
1667 | /* At least one deletion succeeded, return 0 */ |
1668 | if (!ret || !ret2) | |
1669 | ret = 0; | |
1670 | ||
1671 | if (found) { | |
1672 | spin_lock(&fs_info->qgroup_lock); | |
1673 | del_relation_rb(fs_info, src, dst); | |
a0bdc04b | 1674 | ret = quick_update_accounting(fs_info, src, dst, -1); |
73798c46 QW |
1675 | spin_unlock(&fs_info->qgroup_lock); |
1676 | } | |
f2f6ed3d | 1677 | out: |
f5a6b1c5 DY |
1678 | return ret; |
1679 | } | |
1680 | ||
39616c27 LF |
1681 | int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src, |
1682 | u64 dst) | |
f5a6b1c5 | 1683 | { |
39616c27 | 1684 | struct btrfs_fs_info *fs_info = trans->fs_info; |
f5a6b1c5 DY |
1685 | int ret = 0; |
1686 | ||
1687 | mutex_lock(&fs_info->qgroup_ioctl_lock); | |
6b36f1aa | 1688 | ret = __del_qgroup_relation(trans, src, dst); |
f2f6ed3d | 1689 | mutex_unlock(&fs_info->qgroup_ioctl_lock); |
f5a6b1c5 | 1690 | |
bed92eae AJ |
1691 | return ret; |
1692 | } | |
1693 | ||
49a05ecd | 1694 | int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid) |
bed92eae | 1695 | { |
49a05ecd | 1696 | struct btrfs_fs_info *fs_info = trans->fs_info; |
bed92eae AJ |
1697 | struct btrfs_root *quota_root; |
1698 | struct btrfs_qgroup *qgroup; | |
8d54518b | 1699 | struct btrfs_qgroup *prealloc = NULL; |
bed92eae AJ |
1700 | int ret = 0; |
1701 | ||
6ed05643 BB |
1702 | if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED) |
1703 | return 0; | |
1704 | ||
f2f6ed3d | 1705 | mutex_lock(&fs_info->qgroup_ioctl_lock); |
e3b0edd2 | 1706 | if (!fs_info->quota_root) { |
8a36e408 | 1707 | ret = -ENOTCONN; |
f2f6ed3d WS |
1708 | goto out; |
1709 | } | |
e3b0edd2 | 1710 | quota_root = fs_info->quota_root; |
534e6623 WS |
1711 | qgroup = find_qgroup_rb(fs_info, qgroupid); |
1712 | if (qgroup) { | |
1713 | ret = -EEXIST; | |
1714 | goto out; | |
1715 | } | |
bed92eae | 1716 | |
8d54518b QW |
1717 | prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS); |
1718 | if (!prealloc) { | |
1719 | ret = -ENOMEM; | |
1720 | goto out; | |
1721 | } | |
1722 | ||
bed92eae | 1723 | ret = add_qgroup_item(trans, quota_root, qgroupid); |
534e6623 WS |
1724 | if (ret) |
1725 | goto out; | |
bed92eae AJ |
1726 | |
1727 | spin_lock(&fs_info->qgroup_lock); | |
8d54518b | 1728 | qgroup = add_qgroup_rb(fs_info, prealloc, qgroupid); |
bed92eae | 1729 | spin_unlock(&fs_info->qgroup_lock); |
8d54518b | 1730 | prealloc = NULL; |
bed92eae | 1731 | |
49e5fb46 | 1732 | ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup); |
f2f6ed3d WS |
1733 | out: |
1734 | mutex_unlock(&fs_info->qgroup_ioctl_lock); | |
8d54518b | 1735 | kfree(prealloc); |
bed92eae AJ |
1736 | return ret; |
1737 | } | |
1738 | ||
3efbee1d | 1739 | int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid) |
bed92eae | 1740 | { |
3efbee1d | 1741 | struct btrfs_fs_info *fs_info = trans->fs_info; |
2cf68703 | 1742 | struct btrfs_qgroup *qgroup; |
f5a6b1c5 | 1743 | struct btrfs_qgroup_list *list; |
bed92eae AJ |
1744 | int ret = 0; |
1745 | ||
f2f6ed3d | 1746 | mutex_lock(&fs_info->qgroup_ioctl_lock); |
e3b0edd2 | 1747 | if (!fs_info->quota_root) { |
8a36e408 | 1748 | ret = -ENOTCONN; |
f2f6ed3d WS |
1749 | goto out; |
1750 | } | |
bed92eae | 1751 | |
2cf68703 | 1752 | qgroup = find_qgroup_rb(fs_info, qgroupid); |
534e6623 WS |
1753 | if (!qgroup) { |
1754 | ret = -ENOENT; | |
1755 | goto out; | |
2cf68703 | 1756 | } |
b90e22ba LF |
1757 | |
1758 | /* Check if there are no children of this qgroup */ | |
1759 | if (!list_empty(&qgroup->members)) { | |
1760 | ret = -EBUSY; | |
1761 | goto out; | |
1762 | } | |
1763 | ||
69104618 | 1764 | ret = del_qgroup_item(trans, qgroupid); |
36b96fdc SD |
1765 | if (ret && ret != -ENOENT) |
1766 | goto out; | |
bed92eae | 1767 | |
f5a6b1c5 DY |
1768 | while (!list_empty(&qgroup->groups)) { |
1769 | list = list_first_entry(&qgroup->groups, | |
1770 | struct btrfs_qgroup_list, next_group); | |
6b36f1aa LF |
1771 | ret = __del_qgroup_relation(trans, qgroupid, |
1772 | list->group->qgroupid); | |
f5a6b1c5 DY |
1773 | if (ret) |
1774 | goto out; | |
1775 | } | |
1776 | ||
bed92eae | 1777 | spin_lock(&fs_info->qgroup_lock); |
0b246afa | 1778 | del_qgroup_rb(fs_info, qgroupid); |
bed92eae | 1779 | spin_unlock(&fs_info->qgroup_lock); |
0bb78830 FM |
1780 | |
1781 | /* | |
1782 | * Remove the qgroup from sysfs now without holding the qgroup_lock | |
1783 | * spinlock, since the sysfs_remove_group() function needs to take | |
1784 | * the mutex kernfs_mutex through kernfs_remove_by_name_ns(). | |
1785 | */ | |
1786 | btrfs_sysfs_del_one_qgroup(fs_info, qgroup); | |
1787 | kfree(qgroup); | |
f2f6ed3d WS |
1788 | out: |
1789 | mutex_unlock(&fs_info->qgroup_ioctl_lock); | |
bed92eae AJ |
1790 | return ret; |
1791 | } | |
1792 | ||
f0042d5e | 1793 | int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid, |
bed92eae AJ |
1794 | struct btrfs_qgroup_limit *limit) |
1795 | { | |
f0042d5e | 1796 | struct btrfs_fs_info *fs_info = trans->fs_info; |
bed92eae AJ |
1797 | struct btrfs_qgroup *qgroup; |
1798 | int ret = 0; | |
fe759907 YD |
1799 | /* Sometimes we would want to clear the limit on this qgroup. |
1800 | * To meet this requirement, we treat the -1 as a special value | |
1801 | * which tells the kernel to clear the limit on this qgroup. |
1802 | */ | |
1803 | const u64 CLEAR_VALUE = -1; | |
bed92eae | 1804 | |
f2f6ed3d | 1805 | mutex_lock(&fs_info->qgroup_ioctl_lock); |
e3b0edd2 | 1806 | if (!fs_info->quota_root) { |
8a36e408 | 1807 | ret = -ENOTCONN; |
f2f6ed3d WS |
1808 | goto out; |
1809 | } | |
bed92eae | 1810 | |
ddb47afa WS |
1811 | qgroup = find_qgroup_rb(fs_info, qgroupid); |
1812 | if (!qgroup) { | |
1813 | ret = -ENOENT; | |
1814 | goto out; | |
1815 | } | |
bed92eae | 1816 | |
58400fce | 1817 | spin_lock(&fs_info->qgroup_lock); |
fe759907 YD |
1818 | if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER) { |
1819 | if (limit->max_rfer == CLEAR_VALUE) { | |
1820 | qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER; | |
1821 | limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER; | |
1822 | qgroup->max_rfer = 0; | |
1823 | } else { | |
1824 | qgroup->max_rfer = limit->max_rfer; | |
1825 | } | |
1826 | } | |
1827 | if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) { | |
1828 | if (limit->max_excl == CLEAR_VALUE) { | |
1829 | qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL; | |
1830 | limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL; | |
1831 | qgroup->max_excl = 0; | |
1832 | } else { | |
1833 | qgroup->max_excl = limit->max_excl; | |
1834 | } | |
1835 | } | |
1836 | if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER) { | |
1837 | if (limit->rsv_rfer == CLEAR_VALUE) { | |
1838 | qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER; | |
1839 | limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER; | |
1840 | qgroup->rsv_rfer = 0; | |
1841 | } else { | |
1842 | qgroup->rsv_rfer = limit->rsv_rfer; | |
1843 | } | |
1844 | } | |
1845 | if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL) { | |
1846 | if (limit->rsv_excl == CLEAR_VALUE) { | |
1847 | qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL; | |
1848 | limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL; | |
1849 | qgroup->rsv_excl = 0; | |
1850 | } else { | |
1851 | qgroup->rsv_excl = limit->rsv_excl; | |
1852 | } | |
1853 | } | |
03477d94 DY |
1854 | qgroup->lim_flags |= limit->flags; |
1855 | ||
bed92eae | 1856 | spin_unlock(&fs_info->qgroup_lock); |
1510e71c | 1857 | |
ac8a866a | 1858 | ret = update_qgroup_limit_item(trans, qgroup); |
1510e71c | 1859 | if (ret) { |
e562a8bd | 1860 | qgroup_mark_inconsistent(fs_info); |
1510e71c DY |
1861 | btrfs_info(fs_info, "unable to update quota limit for %llu", |
1862 | qgroupid); | |
1863 | } | |
1864 | ||
f2f6ed3d WS |
1865 | out: |
1866 | mutex_unlock(&fs_info->qgroup_ioctl_lock); | |
bed92eae AJ |
1867 | return ret; |
1868 | } | |
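The CLEAR_VALUE convention above means userspace clears a limit by passing -1 for that field together with its flag. The following is a hedged userspace sketch, not part of this file; it assumes BTRFS_IOC_QGROUP_LIMIT and struct btrfs_ioctl_qgroup_limit_args from <linux/btrfs.h>, and needs CAP_SYS_ADMIN plus quotas enabled on the mounted filesystem:

/*
 * Hedged userspace sketch of the CLEAR_VALUE convention above: passing
 * (u64)-1 for a field together with its flag asks the kernel to drop that
 * limit.  Adjust if your <linux/btrfs.h> differs.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/btrfs.h>

int main(int argc, char **argv)
{
	struct btrfs_ioctl_qgroup_limit_args args = { 0 };
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <btrfs mount point>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	args.qgroupid = 0;			/* 0 selects the qgroup of this subvolume */
	args.lim.flags = BTRFS_QGROUP_LIMIT_MAX_RFER;
	args.lim.max_rfer = (__u64)-1;		/* CLEAR_VALUE: remove the referenced limit */

	if (ioctl(fd, BTRFS_IOC_QGROUP_LIMIT, &args) < 0)
		perror("BTRFS_IOC_QGROUP_LIMIT");
	else
		printf("max_rfer limit cleared\n");

	close(fd);
	return 0;
}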
1152651a | 1869 | |
33b6b251 DS |
1870 | /* |
1871 | * Inform qgroup to trace one dirty extent; its info is recorded in @record |
1872 | * so that qgroup can account it at transaction commit time. |
1873 | * | |
1874 | * No-lock version: the caller must hold the delayed ref lock and have the |
1875 | * memory allocated, then call btrfs_qgroup_trace_extent_post() after exiting the lock context. |
1876 | * | |
1877 | * Return 0 for a successful insert. |
1878 | * Return >0 for an existing record; the caller can free @record safely. |
1879 | * An error is not possible. |
1880 | */ | |
50b3e040 | 1881 | int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info, |
cb93b52c QW |
1882 | struct btrfs_delayed_ref_root *delayed_refs, |
1883 | struct btrfs_qgroup_extent_record *record) | |
3368d001 QW |
1884 | { |
1885 | struct rb_node **p = &delayed_refs->dirty_extent_root.rb_node; | |
1886 | struct rb_node *parent_node = NULL; | |
1887 | struct btrfs_qgroup_extent_record *entry; | |
1888 | u64 bytenr = record->bytenr; | |
1889 | ||
182940f4 | 1890 | if (!btrfs_qgroup_full_accounting(fs_info)) |
609d9937 | 1891 | return 1; |
182940f4 | 1892 | |
a4666e68 | 1893 | lockdep_assert_held(&delayed_refs->lock); |
50b3e040 | 1894 | trace_btrfs_qgroup_trace_extent(fs_info, record); |
82bd101b | 1895 | |
3368d001 QW |
1896 | while (*p) { |
1897 | parent_node = *p; | |
1898 | entry = rb_entry(parent_node, struct btrfs_qgroup_extent_record, | |
1899 | node); | |
1418bae1 | 1900 | if (bytenr < entry->bytenr) { |
3368d001 | 1901 | p = &(*p)->rb_left; |
1418bae1 | 1902 | } else if (bytenr > entry->bytenr) { |
3368d001 | 1903 | p = &(*p)->rb_right; |
1418bae1 QW |
1904 | } else { |
1905 | if (record->data_rsv && !entry->data_rsv) { | |
1906 | entry->data_rsv = record->data_rsv; | |
1907 | entry->data_rsv_refroot = | |
1908 | record->data_rsv_refroot; | |
1909 | } | |
cb93b52c | 1910 | return 1; |
1418bae1 | 1911 | } |
3368d001 QW |
1912 | } |
1913 | ||
1914 | rb_link_node(&record->node, parent_node, p); | |
1915 | rb_insert_color(&record->node, &delayed_refs->dirty_extent_root); | |
cb93b52c QW |
1916 | return 0; |
1917 | } | |
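The function above is essentially an "insert or merge" on a tree keyed by bytenr: a second record for the same extent is not linked, but any data reservation it carries is folded into the existing record so the caller can free it. Below is a standalone sketch of that behaviour, with a plain unbalanced binary search tree standing in for the kernel rbtree and hypothetical names:

/*
 * Standalone sketch (illustration only, not kernel code) of the dedup
 * insert done above.  A second record for the same bytenr is not linked;
 * instead its data reservation is merged into the existing record and the
 * caller is told (return 1) that it may free the new one.
 */
#include <stdio.h>
#include <stdlib.h>

struct qrecord {
	unsigned long long bytenr;
	unsigned long long data_rsv;
	unsigned long long data_rsv_refroot;
	struct qrecord *left, *right;
};

/* Return 0 if @rec was linked into the tree, 1 if an existing record won. */
static int trace_extent_insert(struct qrecord **root, struct qrecord *rec)
{
	struct qrecord **p = root;

	while (*p) {
		struct qrecord *entry = *p;

		if (rec->bytenr < entry->bytenr) {
			p = &entry->left;
		} else if (rec->bytenr > entry->bytenr) {
			p = &entry->right;
		} else {
			/* Same extent already traced: only merge the reservation. */
			if (rec->data_rsv && !entry->data_rsv) {
				entry->data_rsv = rec->data_rsv;
				entry->data_rsv_refroot = rec->data_rsv_refroot;
			}
			return 1;
		}
	}
	*p = rec;
	return 0;
}

int main(void)
{
	struct qrecord *root = NULL;
	struct qrecord *a = calloc(1, sizeof(*a));
	struct qrecord *b = calloc(1, sizeof(*b));

	if (!a || !b)
		return 1;

	a->bytenr = 1048576;
	b->bytenr = 1048576;
	b->data_rsv = 4096;
	b->data_rsv_refroot = 256;

	printf("first insert  -> %d\n", trace_extent_insert(&root, a));
	if (trace_extent_insert(&root, b)) {	/* duplicate: merged, caller frees */
		printf("duplicate, merged data_rsv = %llu\n", root->data_rsv);
		free(b);
	}
	free(a);
	return 0;
}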
1918 | ||
33b6b251 DS |
1919 | /* |
1920 | * Post handler after qgroup_trace_extent_nolock(). | |
1921 | * | |
1922 | * NOTE: Currently qgroup does the expensive backref walk at transaction |
1923 | * commit time with TRANS_STATE_COMMIT_DOING, which blocks incoming |
1924 | * new transactions. |
1925 | * This is designed to allow btrfs_find_all_roots() to get a correct |
1926 | * new_roots result. |
1927 | * | |
1928 | * However, for old_roots there is no need to do the backref walk at that |
1929 | * time, since we search commit roots for the backref walk and the result |
1930 | * will always be correct. |
1931 | * | |
1932 | * Due to the nature of the no-lock version, we can't do the backref walk there. |
1933 | * So we must call btrfs_qgroup_trace_extent_post() after exiting | |
1934 | * spinlock context. | |
1935 | * | |
1936 | * TODO: If we can fix and prove btrfs_find_all_roots() can get correct result | |
1937 | * using current root, then we can move all expensive backref walk out of | |
1938 | * transaction committing, but not now as qgroup accounting will be wrong again. | |
1939 | */ | |
8949b9a1 | 1940 | int btrfs_qgroup_trace_extent_post(struct btrfs_trans_handle *trans, |
fb235dc0 QW |
1941 | struct btrfs_qgroup_extent_record *qrecord) |
1942 | { | |
a2c8d27e | 1943 | struct btrfs_backref_walk_ctx ctx = { 0 }; |
fb235dc0 QW |
1944 | int ret; |
1945 | ||
182940f4 BB |
1946 | if (!btrfs_qgroup_full_accounting(trans->fs_info)) |
1947 | return 0; | |
8949b9a1 FM |
1948 | /* |
1949 | * We are always called in a context where we are already holding a | |
1950 | * transaction handle. Often we are called when adding a data delayed | |
1951 | * reference from btrfs_truncate_inode_items() (truncating or unlinking), | |
1952 | * in which case we will be holding a write lock on extent buffer from a | |
1953 | * subvolume tree. In this case we can't allow btrfs_find_all_roots() to | |
1954 | * acquire fs_info->commit_root_sem, because that is a higher level lock | |
1955 | * that must be acquired before locking any extent buffers. | |
1956 | * | |
1957 | * So we want btrfs_find_all_roots() to not acquire the commit_root_sem | |
1958 | * but we can't pass it a non-NULL transaction handle, because otherwise | |
1959 | * it would not use commit roots and would lock extent buffers, causing | |
1960 | * a deadlock if it ends up trying to read lock the same extent buffer | |
1961 | * that was previously write locked at btrfs_truncate_inode_items(). | |
1962 | * | |
1963 | * So pass a NULL transaction handle to btrfs_find_all_roots() and | |
1964 | * explicitly tell it to not acquire the commit_root_sem - if we are | |
1965 | * holding a transaction handle we don't need its protection. | |
1966 | */ | |
1967 | ASSERT(trans != NULL); | |
1968 | ||
e15e9f43 QW |
1969 | if (trans->fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING) |
1970 | return 0; | |
1971 | ||
a2c8d27e FM |
1972 | ctx.bytenr = qrecord->bytenr; |
1973 | ctx.fs_info = trans->fs_info; | |
1974 | ||
1975 | ret = btrfs_find_all_roots(&ctx, true); | |
952bd3db | 1976 | if (ret < 0) { |
e562a8bd | 1977 | qgroup_mark_inconsistent(trans->fs_info); |
8949b9a1 | 1978 | btrfs_warn(trans->fs_info, |
952bd3db NB |
1979 | "error accounting new delayed refs extent (err code: %d), quota inconsistent", |
1980 | ret); | |
1981 | return 0; | |
1982 | } | |
fb235dc0 QW |
1983 | |
1984 | /* | |
1985 | * Here we don't need to get the lock of | |
1986 | * trans->transaction->delayed_refs, since the inserted qrecord won't |
1987 | * be deleted; only qrecord->node may be modified (by a new qrecord insert). |
1988 | * | |
1989 | * So modifying qrecord->old_roots is safe here | |
1990 | */ | |
a2c8d27e | 1991 | qrecord->old_roots = ctx.roots; |
fb235dc0 QW |
1992 | return 0; |
1993 | } | |
1994 | ||
33b6b251 DS |
1995 | /* |
1996 | * Inform qgroup to trace one dirty extent, specified by @bytenr and | |
1997 | * @num_bytes. | |
1998 | * So qgroup can account it at transaction commit time. |
1999 | * | |
2000 | * Better encapsulated version, with memory allocation and backref walk for | |
2001 | * commit roots. | |
2002 | * So this can sleep. | |
2003 | * | |
2004 | * Return 0 if the operation is done. | |
2005 | * Return <0 for error, like memory allocation failure or invalid parameter | |
2006 | * (NULL trans) | |
2007 | */ | |
a95f3aaf | 2008 | int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr, |
e2896e79 | 2009 | u64 num_bytes) |
cb93b52c | 2010 | { |
a95f3aaf | 2011 | struct btrfs_fs_info *fs_info = trans->fs_info; |
cb93b52c QW |
2012 | struct btrfs_qgroup_extent_record *record; |
2013 | struct btrfs_delayed_ref_root *delayed_refs; | |
2014 | int ret; | |
2015 | ||
182940f4 | 2016 | if (!btrfs_qgroup_full_accounting(fs_info) || bytenr == 0 || num_bytes == 0) |
cb93b52c | 2017 | return 0; |
e2896e79 | 2018 | record = kzalloc(sizeof(*record), GFP_NOFS); |
cb93b52c QW |
2019 | if (!record) |
2020 | return -ENOMEM; | |
2021 | ||
2022 | delayed_refs = &trans->transaction->delayed_refs; | |
2023 | record->bytenr = bytenr; | |
2024 | record->num_bytes = num_bytes; | |
2025 | record->old_roots = NULL; | |
2026 | ||
2027 | spin_lock(&delayed_refs->lock); | |
2ff7e61e | 2028 | ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record); |
cb93b52c | 2029 | spin_unlock(&delayed_refs->lock); |
fb235dc0 | 2030 | if (ret > 0) { |
cb93b52c | 2031 | kfree(record); |
fb235dc0 QW |
2032 | return 0; |
2033 | } | |
8949b9a1 | 2034 | return btrfs_qgroup_trace_extent_post(trans, record); |
3368d001 QW |
2035 | } |
2036 | ||
33b6b251 DS |
2037 | /* |
2038 | * Inform qgroup to trace all leaf items of data | |
2039 | * | |
2040 | * Return 0 for success | |
2041 | * Return <0 for error (ENOMEM) |
2042 | */ | |
33d1f05c | 2043 | int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans, |
33d1f05c QW |
2044 | struct extent_buffer *eb) |
2045 | { | |
8d38d7eb | 2046 | struct btrfs_fs_info *fs_info = trans->fs_info; |
33d1f05c QW |
2047 | int nr = btrfs_header_nritems(eb); |
2048 | int i, extent_type, ret; | |
2049 | struct btrfs_key key; | |
2050 | struct btrfs_file_extent_item *fi; | |
2051 | u64 bytenr, num_bytes; | |
2052 | ||
2053 | /* We can be called directly from walk_up_proc() */ | |
182940f4 | 2054 | if (!btrfs_qgroup_full_accounting(fs_info)) |
33d1f05c QW |
2055 | return 0; |
2056 | ||
2057 | for (i = 0; i < nr; i++) { | |
2058 | btrfs_item_key_to_cpu(eb, &key, i); | |
2059 | ||
2060 | if (key.type != BTRFS_EXTENT_DATA_KEY) | |
2061 | continue; | |
2062 | ||
2063 | fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item); | |
2064 | /* filter out non qgroup-accountable extents */ | |
2065 | extent_type = btrfs_file_extent_type(eb, fi); | |
2066 | ||
2067 | if (extent_type == BTRFS_FILE_EXTENT_INLINE) | |
2068 | continue; | |
2069 | ||
2070 | bytenr = btrfs_file_extent_disk_bytenr(eb, fi); | |
2071 | if (!bytenr) | |
2072 | continue; | |
2073 | ||
2074 | num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi); | |
2075 | ||
e2896e79 | 2076 | ret = btrfs_qgroup_trace_extent(trans, bytenr, num_bytes); |
33d1f05c QW |
2077 | if (ret) |
2078 | return ret; | |
2079 | } | |
cddf3b2c | 2080 | cond_resched(); |
33d1f05c QW |
2081 | return 0; |
2082 | } | |
2083 | ||
2084 | /* | |
2085 | * Walk up the tree from the bottom, freeing leaves and any interior | |
2086 | * nodes which have had all slots visited. If a node (leaf or | |
2087 | * interior) is freed, the node above it will have its slot |
2088 | * incremented. The root node will never be freed. | |
2089 | * | |
2090 | * At the end of this function, we should have a path which has all | |
2091 | * slots incremented to the next position for a search. If we need to | |
2092 | * read a new node it will be NULL and the node above it will have the | |
2093 | * correct slot selected for a later read. | |
2094 | * | |
2095 | * If we increment the root nodes slot counter past the number of | |
2096 | * elements, 1 is returned to signal completion of the search. | |
2097 | */ | |
15b34517 | 2098 | static int adjust_slots_upwards(struct btrfs_path *path, int root_level) |
33d1f05c QW |
2099 | { |
2100 | int level = 0; | |
2101 | int nr, slot; | |
2102 | struct extent_buffer *eb; | |
2103 | ||
2104 | if (root_level == 0) | |
2105 | return 1; | |
2106 | ||
2107 | while (level <= root_level) { | |
2108 | eb = path->nodes[level]; | |
2109 | nr = btrfs_header_nritems(eb); | |
2110 | path->slots[level]++; | |
2111 | slot = path->slots[level]; | |
2112 | if (slot >= nr || level == 0) { | |
2113 | /* | |
2114 | * Don't free the root - we will detect this | |
2115 | * condition after our loop and return a | |
2116 | * positive value for caller to stop walking the tree. | |
2117 | */ | |
2118 | if (level != root_level) { | |
2119 | btrfs_tree_unlock_rw(eb, path->locks[level]); | |
2120 | path->locks[level] = 0; | |
2121 | ||
2122 | free_extent_buffer(eb); | |
2123 | path->nodes[level] = NULL; | |
2124 | path->slots[level] = 0; | |
2125 | } | |
2126 | } else { | |
2127 | /* | |
2128 | * We have a valid slot to walk back down | |
2129 | * from. Stop here so caller can process these | |
2130 | * new nodes. | |
2131 | */ | |
2132 | break; | |
2133 | } | |
2134 | ||
2135 | level++; | |
2136 | } | |
2137 | ||
2138 | eb = path->nodes[root_level]; | |
2139 | if (path->slots[root_level] >= btrfs_header_nritems(eb)) | |
2140 | return 1; | |
2141 | ||
2142 | return 0; | |
2143 | } | |
2144 | ||
25982561 QW |
2145 | /* |
2146 | * Helper function to trace a subtree tree block swap. | |
2147 | * | |
2148 | * The swap will happen in the highest tree block, but there may be a lot of |
2149 | * tree blocks involved. | |
2150 | * | |
2151 | * For example: | |
2152 | * OO = Old tree blocks | |
2153 | * NN = New tree blocks allocated during balance | |
2154 | * | |
2155 | * File tree (257) Reloc tree for 257 | |
2156 | * L2 OO NN | |
2157 | * / \ / \ | |
2158 | * L1 OO OO (a) OO NN (a) | |
2159 | * / \ / \ / \ / \ | |
2160 | * L0 OO OO OO OO OO OO NN NN | |
2161 | * (b) (c) (b) (c) | |
2162 | * | |
2163 | * When calling qgroup_trace_extent_swap(), we will pass: | |
2164 | * @src_eb = OO(a) | |
2165 | * @dst_path = [ nodes[1] = NN(a), nodes[0] = NN(c) ] | |
2166 | * @dst_level = 0 | |
2167 | * @root_level = 1 | |
2168 | * | |
2169 | * In that case, qgroup_trace_extent_swap() will search from OO(a) to | |
2170 | * reach OO(c), then mark both OO(c) and NN(c) as qgroup dirty. | |
2171 | * | |
2172 | * The main work of qgroup_trace_extent_swap() can be split into 3 parts: | |
2173 | * | |
2174 | * 1) Tree search from @src_eb | |
2175 | * It should act as a simplified btrfs_search_slot(). |
2176 | * The key for search can be extracted from @dst_path->nodes[dst_level] | |
2177 | * (first key). | |
2178 | * | |
2179 | * 2) Mark the final tree blocks in @src_path and @dst_path qgroup dirty | |
2180 | * NOTE: In the above case, OO(a) and NN(a) won't be marked qgroup dirty. |
52042d8e | 2181 | * They should be marked during the previous (@dst_level = 1) iteration. |
25982561 QW |
2182 | * |
2183 | * 3) Mark file extents in leaves dirty | |
2184 | * We don't have a good way to pick out only the new file extents. |
2185 | * So we still follow the old method of scanning all file extents in |
2186 | * the leaf. |
2187 | * | |
52042d8e | 2188 | * This function can free us from keeping two paths, thus later we only need |
25982561 QW |
2189 | * to care about how to iterate all new tree blocks in the reloc tree. |
2190 | */ | |
2191 | static int qgroup_trace_extent_swap(struct btrfs_trans_handle* trans, | |
2192 | struct extent_buffer *src_eb, | |
2193 | struct btrfs_path *dst_path, | |
3d0174f7 QW |
2194 | int dst_level, int root_level, |
2195 | bool trace_leaf) | |
25982561 QW |
2196 | { |
2197 | struct btrfs_key key; | |
2198 | struct btrfs_path *src_path; | |
2199 | struct btrfs_fs_info *fs_info = trans->fs_info; | |
2200 | u32 nodesize = fs_info->nodesize; | |
2201 | int cur_level = root_level; | |
2202 | int ret; | |
2203 | ||
2204 | BUG_ON(dst_level > root_level); | |
2205 | /* Level mismatch */ | |
2206 | if (btrfs_header_level(src_eb) != root_level) | |
2207 | return -EINVAL; | |
2208 | ||
2209 | src_path = btrfs_alloc_path(); | |
2210 | if (!src_path) { | |
2211 | ret = -ENOMEM; | |
2212 | goto out; | |
2213 | } | |
2214 | ||
2215 | if (dst_level) | |
2216 | btrfs_node_key_to_cpu(dst_path->nodes[dst_level], &key, 0); | |
2217 | else | |
2218 | btrfs_item_key_to_cpu(dst_path->nodes[dst_level], &key, 0); | |
2219 | ||
2220 | /* For src_path */ | |
67439dad | 2221 | atomic_inc(&src_eb->refs); |
25982561 QW |
2222 | src_path->nodes[root_level] = src_eb; |
2223 | src_path->slots[root_level] = dst_path->slots[root_level]; | |
2224 | src_path->locks[root_level] = 0; | |
2225 | ||
2226 | /* A simplified version of btrfs_search_slot() */ | |
2227 | while (cur_level >= dst_level) { | |
2228 | struct btrfs_key src_key; | |
2229 | struct btrfs_key dst_key; | |
2230 | ||
2231 | if (src_path->nodes[cur_level] == NULL) { | |
25982561 QW |
2232 | struct extent_buffer *eb; |
2233 | int parent_slot; | |
25982561 QW |
2234 | |
2235 | eb = src_path->nodes[cur_level + 1]; | |
2236 | parent_slot = src_path->slots[cur_level + 1]; | |
25982561 | 2237 | |
6b2cb7cb | 2238 | eb = btrfs_read_node_slot(eb, parent_slot); |
25982561 QW |
2239 | if (IS_ERR(eb)) { |
2240 | ret = PTR_ERR(eb); | |
2241 | goto out; | |
25982561 QW |
2242 | } |
2243 | ||
2244 | src_path->nodes[cur_level] = eb; | |
2245 | ||
2246 | btrfs_tree_read_lock(eb); | |
ac5887c8 | 2247 | src_path->locks[cur_level] = BTRFS_READ_LOCK; |
25982561 QW |
2248 | } |
2249 | ||
2250 | src_path->slots[cur_level] = dst_path->slots[cur_level]; | |
2251 | if (cur_level) { | |
2252 | btrfs_node_key_to_cpu(dst_path->nodes[cur_level], | |
2253 | &dst_key, dst_path->slots[cur_level]); | |
2254 | btrfs_node_key_to_cpu(src_path->nodes[cur_level], | |
2255 | &src_key, src_path->slots[cur_level]); | |
2256 | } else { | |
2257 | btrfs_item_key_to_cpu(dst_path->nodes[cur_level], | |
2258 | &dst_key, dst_path->slots[cur_level]); | |
2259 | btrfs_item_key_to_cpu(src_path->nodes[cur_level], | |
2260 | &src_key, src_path->slots[cur_level]); | |
2261 | } | |
2262 | /* Content mismatch, something went wrong */ | |
2263 | if (btrfs_comp_cpu_keys(&dst_key, &src_key)) { | |
2264 | ret = -ENOENT; | |
2265 | goto out; | |
2266 | } | |
2267 | cur_level--; | |
2268 | } | |
2269 | ||
2270 | /* | |
2271 | * Now both @dst_path and @src_path have been populated, record the tree | |
2272 | * blocks for qgroup accounting. | |
2273 | */ | |
2274 | ret = btrfs_qgroup_trace_extent(trans, src_path->nodes[dst_level]->start, | |
e2896e79 | 2275 | nodesize); |
25982561 QW |
2276 | if (ret < 0) |
2277 | goto out; | |
e2896e79 DS |
2278 | ret = btrfs_qgroup_trace_extent(trans, dst_path->nodes[dst_level]->start, |
2279 | nodesize); | |
25982561 QW |
2280 | if (ret < 0) |
2281 | goto out; | |
2282 | ||
2283 | /* Record leaf file extents */ | |
3d0174f7 | 2284 | if (dst_level == 0 && trace_leaf) { |
25982561 QW |
2285 | ret = btrfs_qgroup_trace_leaf_items(trans, src_path->nodes[0]); |
2286 | if (ret < 0) | |
2287 | goto out; | |
2288 | ret = btrfs_qgroup_trace_leaf_items(trans, dst_path->nodes[0]); | |
2289 | } | |
2290 | out: | |
2291 | btrfs_free_path(src_path); | |
2292 | return ret; | |
2293 | } | |
2294 | ||
ea49f3e7 QW |
2295 | /* |
2296 | * Helper function to do recursive generation-aware depth-first search, to | |
2297 | * locate all new tree blocks in a subtree of reloc tree. | |
2298 | * | |
2299 | * E.g. (OO = Old tree blocks, NN = New tree blocks, whose gen == last_snapshot) | |
2300 | * reloc tree | |
2301 | * L2 NN (a) | |
2302 | * / \ | |
2303 | * L1 OO NN (b) | |
2304 | * / \ / \ | |
2305 | * L0 OO OO OO NN | |
2306 | * (c) (d) | |
2307 | * If we pass: | |
2308 | * @dst_path = [ nodes[1] = NN(b), nodes[0] = NULL ], | |
2309 | * @cur_level = 1 | |
2310 | * @root_level = 1 | |
2311 | * | |
2312 | * We will iterate through tree blocks NN(b), NN(d) and inform qgroup to trace |
2313 | * the above tree blocks along with their counterparts in the file tree. |
52042d8e | 2314 | * During the search, old tree block OO(c) will be skipped, as the tree block swap |
ea49f3e7 QW |
2315 | * won't affect OO(c). |
2316 | */ | |
2317 | static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle* trans, | |
2318 | struct extent_buffer *src_eb, | |
2319 | struct btrfs_path *dst_path, | |
2320 | int cur_level, int root_level, | |
3d0174f7 | 2321 | u64 last_snapshot, bool trace_leaf) |
ea49f3e7 QW |
2322 | { |
2323 | struct btrfs_fs_info *fs_info = trans->fs_info; | |
2324 | struct extent_buffer *eb; | |
2325 | bool need_cleanup = false; | |
2326 | int ret = 0; | |
2327 | int i; | |
2328 | ||
2329 | /* Level sanity check */ | |
7ff2c2a1 NB |
2330 | if (cur_level < 0 || cur_level >= BTRFS_MAX_LEVEL - 1 || |
2331 | root_level < 0 || root_level >= BTRFS_MAX_LEVEL - 1 || | |
ea49f3e7 QW |
2332 | root_level < cur_level) { |
2333 | btrfs_err_rl(fs_info, | |
2334 | "%s: bad levels, cur_level=%d root_level=%d", | |
2335 | __func__, cur_level, root_level); | |
2336 | return -EUCLEAN; | |
2337 | } | |
2338 | ||
2339 | /* Read the tree block if needed */ | |
2340 | if (dst_path->nodes[cur_level] == NULL) { | |
ea49f3e7 QW |
2341 | int parent_slot; |
2342 | u64 child_gen; | |
ea49f3e7 QW |
2343 | |
2344 | /* | |
2345 | * dst_path->nodes[root_level] must be initialized before | |
2346 | * calling this function. | |
2347 | */ | |
2348 | if (cur_level == root_level) { | |
2349 | btrfs_err_rl(fs_info, | |
2350 | "%s: dst_path->nodes[%d] not initialized, root_level=%d cur_level=%d", | |
2351 | __func__, root_level, root_level, cur_level); | |
2352 | return -EUCLEAN; | |
2353 | } | |
2354 | ||
2355 | /* | |
2356 | * We need to get child blockptr/gen from parent before we can | |
2357 | * read it. | |
2358 | */ | |
2359 | eb = dst_path->nodes[cur_level + 1]; | |
2360 | parent_slot = dst_path->slots[cur_level + 1]; | |
ea49f3e7 | 2361 | child_gen = btrfs_node_ptr_generation(eb, parent_slot); |
ea49f3e7 QW |
2362 | |
2363 | /* This node is old, no need to trace */ | |
2364 | if (child_gen < last_snapshot) | |
2365 | goto out; | |
2366 | ||
3acfbd6a | 2367 | eb = btrfs_read_node_slot(eb, parent_slot); |
ea49f3e7 QW |
2368 | if (IS_ERR(eb)) { |
2369 | ret = PTR_ERR(eb); | |
2370 | goto out; | |
ea49f3e7 QW |
2371 | } |
2372 | ||
2373 | dst_path->nodes[cur_level] = eb; | |
2374 | dst_path->slots[cur_level] = 0; | |
2375 | ||
2376 | btrfs_tree_read_lock(eb); | |
ac5887c8 | 2377 | dst_path->locks[cur_level] = BTRFS_READ_LOCK; |
ea49f3e7 QW |
2378 | need_cleanup = true; |
2379 | } | |
2380 | ||
2381 | /* Now record this tree block and its counter part for qgroups */ | |
2382 | ret = qgroup_trace_extent_swap(trans, src_eb, dst_path, cur_level, | |
3d0174f7 | 2383 | root_level, trace_leaf); |
ea49f3e7 QW |
2384 | if (ret < 0) |
2385 | goto cleanup; | |
2386 | ||
2387 | eb = dst_path->nodes[cur_level]; | |
2388 | ||
2389 | if (cur_level > 0) { | |
2390 | /* Iterate all child tree blocks */ | |
2391 | for (i = 0; i < btrfs_header_nritems(eb); i++) { | |
2392 | /* Skip old tree blocks as they won't be swapped */ | |
2393 | if (btrfs_node_ptr_generation(eb, i) < last_snapshot) | |
2394 | continue; | |
2395 | dst_path->slots[cur_level] = i; | |
2396 | ||
2397 | /* Recursive call (at most 7 times) */ | |
2398 | ret = qgroup_trace_new_subtree_blocks(trans, src_eb, | |
2399 | dst_path, cur_level - 1, root_level, | |
3d0174f7 | 2400 | last_snapshot, trace_leaf); |
ea49f3e7 QW |
2401 | if (ret < 0) |
2402 | goto cleanup; | |
2403 | } | |
2404 | } | |
2405 | ||
2406 | cleanup: | |
2407 | if (need_cleanup) { | |
2408 | /* Clean up */ | |
2409 | btrfs_tree_unlock_rw(dst_path->nodes[cur_level], | |
2410 | dst_path->locks[cur_level]); | |
2411 | free_extent_buffer(dst_path->nodes[cur_level]); | |
2412 | dst_path->nodes[cur_level] = NULL; | |
2413 | dst_path->slots[cur_level] = 0; | |
2414 | dst_path->locks[cur_level] = 0; | |
2415 | } | |
2416 | out: | |
2417 | return ret; | |
2418 | } | |
2419 | ||
5aea1a4f QW |
2420 | static int qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans, |
2421 | struct extent_buffer *src_eb, | |
2422 | struct extent_buffer *dst_eb, | |
2423 | u64 last_snapshot, bool trace_leaf) | |
2424 | { | |
2425 | struct btrfs_fs_info *fs_info = trans->fs_info; | |
2426 | struct btrfs_path *dst_path = NULL; | |
2427 | int level; | |
2428 | int ret; | |
2429 | ||
182940f4 | 2430 | if (!btrfs_qgroup_full_accounting(fs_info)) |
5aea1a4f QW |
2431 | return 0; |
2432 | ||
2433 | /* Wrong parameter order */ | |
2434 | if (btrfs_header_generation(src_eb) > btrfs_header_generation(dst_eb)) { | |
2435 | btrfs_err_rl(fs_info, | |
2436 | "%s: bad parameter order, src_gen=%llu dst_gen=%llu", __func__, | |
2437 | btrfs_header_generation(src_eb), | |
2438 | btrfs_header_generation(dst_eb)); | |
2439 | return -EUCLEAN; | |
2440 | } | |
2441 | ||
2442 | if (!extent_buffer_uptodate(src_eb) || !extent_buffer_uptodate(dst_eb)) { | |
2443 | ret = -EIO; | |
2444 | goto out; | |
2445 | } | |
2446 | ||
2447 | level = btrfs_header_level(dst_eb); | |
2448 | dst_path = btrfs_alloc_path(); | |
2449 | if (!dst_path) { | |
2450 | ret = -ENOMEM; | |
2451 | goto out; | |
2452 | } | |
2453 | /* For dst_path */ | |
67439dad | 2454 | atomic_inc(&dst_eb->refs); |
5aea1a4f QW |
2455 | dst_path->nodes[level] = dst_eb; |
2456 | dst_path->slots[level] = 0; | |
2457 | dst_path->locks[level] = 0; | |
2458 | ||
2459 | /* Do the generation-aware depth-first search */ |
2460 | ret = qgroup_trace_new_subtree_blocks(trans, src_eb, dst_path, level, | |
2461 | level, last_snapshot, trace_leaf); | |
2462 | if (ret < 0) | |
2463 | goto out; | |
2464 | ret = 0; | |
2465 | ||
2466 | out: | |
2467 | btrfs_free_path(dst_path); | |
2468 | if (ret < 0) | |
e562a8bd | 2469 | qgroup_mark_inconsistent(fs_info); |
5aea1a4f QW |
2470 | return ret; |
2471 | } | |
2472 | ||
33b6b251 DS |
2473 | /* |
2474 | * Inform qgroup to trace a whole subtree, including all its child tree | |
2475 | * blocks and data. | |
2476 | * The root tree block is specified by @root_eb. | |
2477 | * | |
2478 | * Normally used by relocation (tree block swap) and subvolume deletion. |
2479 | * | |
2480 | * Return 0 for success | |
2481 | * Return <0 for error(ENOMEM or tree search error) | |
2482 | */ | |
33d1f05c | 2483 | int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans, |
33d1f05c QW |
2484 | struct extent_buffer *root_eb, |
2485 | u64 root_gen, int root_level) | |
2486 | { | |
deb40627 | 2487 | struct btrfs_fs_info *fs_info = trans->fs_info; |
33d1f05c QW |
2488 | int ret = 0; |
2489 | int level; | |
011b46c3 | 2490 | u8 drop_subptree_thres; |
33d1f05c QW |
2491 | struct extent_buffer *eb = root_eb; |
2492 | struct btrfs_path *path = NULL; | |
2493 | ||
b6e6bca5 | 2494 | BUG_ON(root_level < 0 || root_level >= BTRFS_MAX_LEVEL); |
33d1f05c QW |
2495 | BUG_ON(root_eb == NULL); |
2496 | ||
182940f4 | 2497 | if (!btrfs_qgroup_full_accounting(fs_info)) |
33d1f05c QW |
2498 | return 0; |
2499 | ||
011b46c3 QW |
2500 | spin_lock(&fs_info->qgroup_lock); |
2501 | drop_subptree_thres = fs_info->qgroup_drop_subtree_thres; | |
2502 | spin_unlock(&fs_info->qgroup_lock); | |
2503 | ||
2504 | /* | |
2505 | * This function only gets called for snapshot drop; if we hit a high |
2506 | * node here, it means we are going to change ownership for quite a lot | |
2507 | * of extents, which will greatly slow down btrfs_commit_transaction(). | |
2508 | * | |
2509 | * So if we find a high tree here, we just skip the accounting and |
2510 | * mark the qgroup inconsistent. |
2511 | */ | |
2512 | if (root_level >= drop_subptree_thres) { | |
2513 | qgroup_mark_inconsistent(fs_info); | |
2514 | return 0; | |
2515 | } | |
2516 | ||
33d1f05c | 2517 | if (!extent_buffer_uptodate(root_eb)) { |
789d6a3a QW |
2518 | struct btrfs_tree_parent_check check = { |
2519 | .has_first_key = false, | |
2520 | .transid = root_gen, | |
2521 | .level = root_level | |
2522 | }; | |
2523 | ||
2524 | ret = btrfs_read_extent_buffer(root_eb, &check); | |
33d1f05c QW |
2525 | if (ret) |
2526 | goto out; | |
2527 | } | |
2528 | ||
2529 | if (root_level == 0) { | |
8d38d7eb | 2530 | ret = btrfs_qgroup_trace_leaf_items(trans, root_eb); |
33d1f05c QW |
2531 | goto out; |
2532 | } | |
2533 | ||
2534 | path = btrfs_alloc_path(); | |
2535 | if (!path) | |
2536 | return -ENOMEM; | |
2537 | ||
2538 | /* | |
2539 | * Walk down the tree. Missing extent blocks are filled in as | |
2540 | * we go. Metadata is accounted every time we read a new | |
2541 | * extent block. | |
2542 | * | |
2543 | * When we reach a leaf, we account for file extent items in it, | |
2544 | * walk back up the tree (adjusting slot pointers as we go) | |
2545 | * and restart the search process. | |
2546 | */ | |
67439dad | 2547 | atomic_inc(&root_eb->refs); /* For path */ |
33d1f05c QW |
2548 | path->nodes[root_level] = root_eb; |
2549 | path->slots[root_level] = 0; | |
2550 | path->locks[root_level] = 0; /* so release_path doesn't try to unlock */ | |
2551 | walk_down: | |
2552 | level = root_level; | |
2553 | while (level >= 0) { | |
2554 | if (path->nodes[level] == NULL) { | |
2555 | int parent_slot; | |
33d1f05c QW |
2556 | u64 child_bytenr; |
2557 | ||
2558 | /* | |
182c79fc JB |
2559 | * We need to get child blockptr from parent before we |
2560 | * can read it. | |
33d1f05c QW |
2561 | */ |
2562 | eb = path->nodes[level + 1]; | |
2563 | parent_slot = path->slots[level + 1]; | |
2564 | child_bytenr = btrfs_node_blockptr(eb, parent_slot); | |
33d1f05c | 2565 | |
182c79fc | 2566 | eb = btrfs_read_node_slot(eb, parent_slot); |
33d1f05c QW |
2567 | if (IS_ERR(eb)) { |
2568 | ret = PTR_ERR(eb); | |
2569 | goto out; | |
33d1f05c QW |
2570 | } |
2571 | ||
2572 | path->nodes[level] = eb; | |
2573 | path->slots[level] = 0; | |
2574 | ||
2575 | btrfs_tree_read_lock(eb); | |
ac5887c8 | 2576 | path->locks[level] = BTRFS_READ_LOCK; |
33d1f05c | 2577 | |
a95f3aaf | 2578 | ret = btrfs_qgroup_trace_extent(trans, child_bytenr, |
e2896e79 | 2579 | fs_info->nodesize); |
33d1f05c QW |
2580 | if (ret) |
2581 | goto out; | |
2582 | } | |
2583 | ||
2584 | if (level == 0) { | |
8d38d7eb LF |
2585 | ret = btrfs_qgroup_trace_leaf_items(trans, |
2586 | path->nodes[level]); | |
33d1f05c QW |
2587 | if (ret) |
2588 | goto out; | |
2589 | ||
2590 | /* Nonzero return here means we completed our search */ | |
15b34517 | 2591 | ret = adjust_slots_upwards(path, root_level); |
33d1f05c QW |
2592 | if (ret) |
2593 | break; | |
2594 | ||
2595 | /* Restart search with new slots */ | |
2596 | goto walk_down; | |
2597 | } | |
2598 | ||
2599 | level--; | |
2600 | } | |
2601 | ||
2602 | ret = 0; | |
2603 | out: | |
2604 | btrfs_free_path(path); | |
2605 | ||
2606 | return ret; | |
2607 | } | |
2608 | ||
dce28769 QW |
2609 | static void qgroup_iterator_nested_add(struct list_head *head, struct btrfs_qgroup *qgroup) |
2610 | { | |
2611 | if (!list_empty(&qgroup->nested_iterator)) | |
2612 | return; | |
2613 | ||
2614 | list_add_tail(&qgroup->nested_iterator, head); | |
2615 | } | |
2616 | ||
2617 | static void qgroup_iterator_nested_clean(struct list_head *head) | |
2618 | { | |
2619 | while (!list_empty(head)) { | |
2620 | struct btrfs_qgroup *qgroup; | |
2621 | ||
2622 | qgroup = list_first_entry(head, struct btrfs_qgroup, nested_iterator); | |
2623 | list_del_init(&qgroup->nested_iterator); | |
2624 | } | |
2625 | } | |
2626 | ||
d810ef2b QW |
2627 | #define UPDATE_NEW 0 |
2628 | #define UPDATE_OLD 1 | |
2629 | /* | |
2630 | * Walk all of the roots that point to the bytenr and adjust their refcnts. |
2631 | */ | |
dce28769 QW |
2632 | static void qgroup_update_refcnt(struct btrfs_fs_info *fs_info, |
2633 | struct ulist *roots, struct list_head *qgroups, | |
2634 | u64 seq, int update_old) | |
d810ef2b QW |
2635 | { |
2636 | struct ulist_node *unode; | |
2637 | struct ulist_iterator uiter; | |
d810ef2b | 2638 | struct btrfs_qgroup *qg; |
d810ef2b QW |
2639 | |
2640 | if (!roots) | |
dce28769 | 2641 | return; |
d810ef2b QW |
2642 | ULIST_ITER_INIT(&uiter); |
2643 | while ((unode = ulist_next(roots, &uiter))) { | |
a4a81383 QW |
2644 | LIST_HEAD(tmp); |
2645 | ||
d810ef2b QW |
2646 | qg = find_qgroup_rb(fs_info, unode->val); |
2647 | if (!qg) | |
2648 | continue; | |
2649 | ||
dce28769 | 2650 | qgroup_iterator_nested_add(qgroups, qg); |
a4a81383 QW |
2651 | qgroup_iterator_add(&tmp, qg); |
2652 | list_for_each_entry(qg, &tmp, iterator) { | |
d810ef2b QW |
2653 | struct btrfs_qgroup_list *glist; |
2654 | ||
d810ef2b QW |
2655 | if (update_old) |
2656 | btrfs_qgroup_update_old_refcnt(qg, seq, 1); | |
2657 | else | |
2658 | btrfs_qgroup_update_new_refcnt(qg, seq, 1); | |
a4a81383 | 2659 | |
d810ef2b | 2660 | list_for_each_entry(glist, &qg->groups, next_group) { |
dce28769 | 2661 | qgroup_iterator_nested_add(qgroups, glist->group); |
a4a81383 | 2662 | qgroup_iterator_add(&tmp, glist->group); |
d810ef2b QW |
2663 | } |
2664 | } | |
a4a81383 | 2665 | qgroup_iterator_clean(&tmp); |
d810ef2b | 2666 | } |
d810ef2b QW |
2667 | } |
2668 | ||
823ae5b8 QW |
2669 | /* |
2670 | * Update qgroup rfer/excl counters. | |
2671 | * Rfer update is easy, the code can explain itself. |
e69bcee3 | 2672 | * |
260db43c | 2673 | * Excl update is tricky; the update is split into 2 parts. |
823ae5b8 QW |
2674 | * Part 1: Possible exclusive <-> shared detection: |
2675 | * | A | !A | | |
2676 | * ------------------------------------- | |
2677 | * B | * | - | | |
2678 | * ------------------------------------- | |
2679 | * !B | + | ** | | |
2680 | * ------------------------------------- | |
2681 | * | |
2682 | * Conditions: | |
2683 | * A: cur_old_roots < nr_old_roots (not exclusive before) | |
2684 | * !A: cur_old_roots == nr_old_roots (possible exclusive before) | |
2685 | * B: cur_new_roots < nr_new_roots (not exclusive now) | |
01327610 | 2686 | * !B: cur_new_roots == nr_new_roots (possible exclusive now) |
823ae5b8 QW |
2687 | * |
2688 | * Results: | |
2689 | * +: Possible sharing -> exclusive -: Possible exclusive -> sharing | |
2690 | * *: Definitely not changed. **: Possible unchanged. | |
2691 | * | |
2693 | * For the !A and !B conditions, the exception is the cur_old/new_roots == 0 case. |
2693 | * | |
2695 | * To make the logic clear, we first use conditions A and B to split the |
2696 | * combinations into 4 results. |
2696 | * | |
2698 | * Then, for results "+" and "-", check the old/new_roots == 0 case, as in |
2699 | * them only one variant may be 0. |
2699 | * | |
2701 | * Lastly, check result **; since there are 2 variants that may be 0, split |
2702 | * them again (2x2). |
2703 | * But this time we don't need to consider other things; the code and logic |
2704 | * are easy to understand now. |
2704 | */ | |
dce28769 QW |
2705 | static void qgroup_update_counters(struct btrfs_fs_info *fs_info, |
2706 | struct list_head *qgroups, u64 nr_old_roots, | |
2707 | u64 nr_new_roots, u64 num_bytes, u64 seq) | |
823ae5b8 | 2708 | { |
823ae5b8 | 2709 | struct btrfs_qgroup *qg; |
823ae5b8 | 2710 | |
dce28769 QW |
2711 | list_for_each_entry(qg, qgroups, nested_iterator) { |
2712 | u64 cur_new_count, cur_old_count; | |
823ae5b8 QW |
2713 | bool dirty = false; |
2714 | ||
823ae5b8 QW |
2715 | cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq); |
2716 | cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq); | |
2717 | ||
8b317901 QW |
2718 | trace_qgroup_update_counters(fs_info, qg, cur_old_count, |
2719 | cur_new_count); | |
0f5dcf8d | 2720 | |
823ae5b8 QW |
2721 | /* Rfer update part */ |
2722 | if (cur_old_count == 0 && cur_new_count > 0) { | |
2723 | qg->rfer += num_bytes; | |
2724 | qg->rfer_cmpr += num_bytes; | |
2725 | dirty = true; | |
2726 | } | |
2727 | if (cur_old_count > 0 && cur_new_count == 0) { | |
2728 | qg->rfer -= num_bytes; | |
2729 | qg->rfer_cmpr -= num_bytes; | |
2730 | dirty = true; | |
2731 | } | |
2732 | ||
2733 | /* Excl update part */ | |
2734 | /* Exclusive/none -> shared case */ | |
2735 | if (cur_old_count == nr_old_roots && | |
2736 | cur_new_count < nr_new_roots) { | |
2737 | /* Exclusive -> shared */ | |
2738 | if (cur_old_count != 0) { | |
2739 | qg->excl -= num_bytes; | |
2740 | qg->excl_cmpr -= num_bytes; | |
2741 | dirty = true; | |
2742 | } | |
2743 | } | |
2744 | ||
2745 | /* Shared -> exclusive/none case */ | |
2746 | if (cur_old_count < nr_old_roots && | |
2747 | cur_new_count == nr_new_roots) { | |
2748 | /* Shared->exclusive */ | |
2749 | if (cur_new_count != 0) { | |
2750 | qg->excl += num_bytes; | |
2751 | qg->excl_cmpr += num_bytes; | |
2752 | dirty = true; | |
2753 | } | |
2754 | } | |
2755 | ||
2756 | /* Exclusive/none -> exclusive/none case */ | |
2757 | if (cur_old_count == nr_old_roots && | |
2758 | cur_new_count == nr_new_roots) { | |
2759 | if (cur_old_count == 0) { | |
2760 | /* None -> exclusive/none */ | |
2761 | ||
2762 | if (cur_new_count != 0) { | |
2763 | /* None -> exclusive */ | |
2764 | qg->excl += num_bytes; | |
2765 | qg->excl_cmpr += num_bytes; | |
2766 | dirty = true; | |
2767 | } | |
2768 | /* None -> none, nothing changed */ | |
2769 | } else { | |
2770 | /* Exclusive -> exclusive/none */ | |
2771 | ||
2772 | if (cur_new_count == 0) { | |
2773 | /* Exclusive -> none */ | |
2774 | qg->excl -= num_bytes; | |
2775 | qg->excl_cmpr -= num_bytes; | |
2776 | dirty = true; | |
2777 | } | |
2778 | /* Exclusive -> exclusive, nothing changed */ | |
2779 | } | |
2780 | } | |
c05f9429 | 2781 | |
823ae5b8 QW |
2782 | if (dirty) |
2783 | qgroup_dirty(fs_info, qg); | |
2784 | } | |
823ae5b8 QW |
2785 | } |
2786 | ||
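To make the exclusive/shared table above concrete, here is a standalone sketch (illustration only, not kernel code) that applies the same decision rules to a single qgroup and prints the resulting rfer/excl deltas for two common transitions:

/*
 * Standalone sketch of the rfer/excl decision table documented above
 * qgroup_update_counters().  Given the per-qgroup refcounts and the total
 * number of old/new roots, it reports how rfer and excl would move for a
 * 4K extent.  Names here are hypothetical.
 */
#include <stdio.h>

struct delta { long long rfer, excl; };

static struct delta account(unsigned long long cur_old, unsigned long long cur_new,
			    unsigned long long nr_old, unsigned long long nr_new,
			    long long num_bytes)
{
	struct delta d = { 0, 0 };

	/* Rfer part: the qgroup gains or loses any reference at all. */
	if (cur_old == 0 && cur_new > 0)
		d.rfer += num_bytes;
	if (cur_old > 0 && cur_new == 0)
		d.rfer -= num_bytes;

	/* Excl part, following the A/B table in the comment above. */
	if (cur_old == nr_old && cur_new < nr_new && cur_old != 0)
		d.excl -= num_bytes;			/* exclusive -> shared */
	if (cur_old < nr_old && cur_new == nr_new && cur_new != 0)
		d.excl += num_bytes;			/* shared -> exclusive */
	if (cur_old == nr_old && cur_new == nr_new) {
		if (cur_old == 0 && cur_new != 0)
			d.excl += num_bytes;		/* none -> exclusive */
		else if (cur_old != 0 && cur_new == 0)
			d.excl -= num_bytes;		/* exclusive -> none */
	}
	return d;
}

int main(void)
{
	/* One root owned the extent; now two roots share it. */
	struct delta d = account(1, 1, 1, 2, 4096);
	printf("rfer %+lld excl %+lld\n", d.rfer, d.excl);

	/* The extent becomes exclusive to this qgroup again. */
	d = account(1, 1, 2, 1, 4096);
	printf("rfer %+lld excl %+lld\n", d.rfer, d.excl);
	return 0;
}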
5edfd9fd QW |
2787 | /* |
2788 | * Check if @roots is potentially a list of fs tree roots |
2789 | * | |
2790 | * Return 0 if this is definitely not a fs/subvol tree roots ulist |
2791 | * Return 1 if there may be fs/subvol tree roots in the list (an empty |
2792 | * list counts as well) |
2793 | */ | |
2794 | static int maybe_fs_roots(struct ulist *roots) | |
2795 | { | |
2796 | struct ulist_node *unode; | |
2797 | struct ulist_iterator uiter; | |
2798 | ||
2799 | /* Empty one, still possible for fs roots */ | |
2800 | if (!roots || roots->nnodes == 0) | |
2801 | return 1; | |
2802 | ||
2803 | ULIST_ITER_INIT(&uiter); | |
2804 | unode = ulist_next(roots, &uiter); | |
2805 | if (!unode) | |
2806 | return 1; | |
2807 | ||
2808 | /* | |
2809 | * If it contains fs tree roots, then it must belong to fs/subvol | |
2810 | * trees. | |
2811 | * If it contains a non-fs tree, it won't be shared with fs/subvol trees. | |
2812 | */ | |
2813 | return is_fstree(unode->val); | |
2814 | } | |
2815 | ||
8696d760 LF |
2816 | int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr, |
2817 | u64 num_bytes, struct ulist *old_roots, | |
2818 | struct ulist *new_roots) | |
550d7a2e | 2819 | { |
8696d760 | 2820 | struct btrfs_fs_info *fs_info = trans->fs_info; |
dce28769 | 2821 | LIST_HEAD(qgroups); |
550d7a2e QW |
2822 | u64 seq; |
2823 | u64 nr_new_roots = 0; | |
2824 | u64 nr_old_roots = 0; | |
2825 | int ret = 0; | |
2826 | ||
26ef8493 | 2827 | /* |
1a9fd417 | 2828 | * If quotas get disabled meanwhile, the resources need to be freed and |
26ef8493 JT |
2829 | * we can't just exit here. |
2830 | */ | |
182940f4 | 2831 | if (!btrfs_qgroup_full_accounting(fs_info) || |
e15e9f43 | 2832 | fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING) |
26ef8493 | 2833 | goto out_free; |
81353d50 | 2834 | |
5edfd9fd QW |
2835 | if (new_roots) { |
2836 | if (!maybe_fs_roots(new_roots)) | |
2837 | goto out_free; | |
550d7a2e | 2838 | nr_new_roots = new_roots->nnodes; |
5edfd9fd QW |
2839 | } |
2840 | if (old_roots) { | |
2841 | if (!maybe_fs_roots(old_roots)) | |
2842 | goto out_free; | |
550d7a2e | 2843 | nr_old_roots = old_roots->nnodes; |
5edfd9fd QW |
2844 | } |
2845 | ||
2846 | /* Quick exit, either not fs tree roots, or won't affect any qgroup */ | |
2847 | if (nr_old_roots == 0 && nr_new_roots == 0) | |
2848 | goto out_free; | |
550d7a2e | 2849 | |
550d7a2e QW |
2850 | BUG_ON(!fs_info->quota_root); |
2851 | ||
c9f6f3cd QW |
2852 | trace_btrfs_qgroup_account_extent(fs_info, trans->transid, bytenr, |
2853 | num_bytes, nr_old_roots, nr_new_roots); | |
0f5dcf8d | 2854 | |
550d7a2e QW |
2855 | mutex_lock(&fs_info->qgroup_rescan_lock); |
2856 | if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) { | |
2857 | if (fs_info->qgroup_rescan_progress.objectid <= bytenr) { | |
2858 | mutex_unlock(&fs_info->qgroup_rescan_lock); | |
2859 | ret = 0; | |
2860 | goto out_free; | |
2861 | } | |
2862 | } | |
2863 | mutex_unlock(&fs_info->qgroup_rescan_lock); | |
2864 | ||
2865 | spin_lock(&fs_info->qgroup_lock); | |
2866 | seq = fs_info->qgroup_seq; | |
2867 | ||
2868 | /* Update old refcnts using old_roots */ | |
dce28769 | 2869 | qgroup_update_refcnt(fs_info, old_roots, &qgroups, seq, UPDATE_OLD); |
550d7a2e QW |
2870 | |
2871 | /* Update new refcnts using new_roots */ | |
dce28769 | 2872 | qgroup_update_refcnt(fs_info, new_roots, &qgroups, seq, UPDATE_NEW); |
550d7a2e | 2873 | |
dce28769 | 2874 | qgroup_update_counters(fs_info, &qgroups, nr_old_roots, nr_new_roots, |
550d7a2e QW |
2875 | num_bytes, seq); |
2876 | ||
6c8e69e4 FM |
2877 | /* |
2878 | * We're done using the iterator, release all its qgroups while holding | |
2879 | * fs_info->qgroup_lock so that we don't race with btrfs_remove_qgroup() | |
2880 | * and trigger use-after-free accesses to qgroups. | |
2881 | */ | |
2882 | qgroup_iterator_nested_clean(&qgroups); | |
2883 | ||
550d7a2e QW |
2884 | /* |
2885 | * Bump qgroup_seq to avoid seq overlap | |
2886 | */ | |
2887 | fs_info->qgroup_seq += max(nr_old_roots, nr_new_roots) + 1; | |
550d7a2e QW |
2888 | spin_unlock(&fs_info->qgroup_lock); |
2889 | out_free: | |
550d7a2e QW |
2890 | ulist_free(old_roots); |
2891 | ulist_free(new_roots); | |
2892 | return ret; | |
2893 | } | |
2894 | ||
460fb20a | 2895 | int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans) |
550d7a2e | 2896 | { |
460fb20a | 2897 | struct btrfs_fs_info *fs_info = trans->fs_info; |
550d7a2e QW |
2898 | struct btrfs_qgroup_extent_record *record; |
2899 | struct btrfs_delayed_ref_root *delayed_refs; | |
2900 | struct ulist *new_roots = NULL; | |
2901 | struct rb_node *node; | |
c337e7b0 | 2902 | u64 num_dirty_extents = 0; |
9086db86 | 2903 | u64 qgroup_to_skip; |
550d7a2e QW |
2904 | int ret = 0; |
2905 | ||
182940f4 BB |
2906 | if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE) |
2907 | return 0; | |
2908 | ||
550d7a2e | 2909 | delayed_refs = &trans->transaction->delayed_refs; |
9086db86 | 2910 | qgroup_to_skip = delayed_refs->qgroup_to_skip; |
550d7a2e QW |
2911 | while ((node = rb_first(&delayed_refs->dirty_extent_root))) { |
2912 | record = rb_entry(node, struct btrfs_qgroup_extent_record, | |
2913 | node); | |
2914 | ||
c337e7b0 | 2915 | num_dirty_extents++; |
bc074524 | 2916 | trace_btrfs_qgroup_account_extents(fs_info, record); |
0f5dcf8d | 2917 | |
e15e9f43 QW |
2918 | if (!ret && !(fs_info->qgroup_flags & |
2919 | BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING)) { | |
a2c8d27e FM |
2920 | struct btrfs_backref_walk_ctx ctx = { 0 }; |
2921 | ||
2922 | ctx.bytenr = record->bytenr; | |
2923 | ctx.fs_info = fs_info; | |
2924 | ||
d1b8b94a QW |
2925 | /* |
2926 | * Old roots should be searched when inserting qgroup | |
75181406 QW |
2927 | * extent record. |
2928 | * | |
2929 | * But for the INCONSISTENT (NO_ACCOUNTING) -> rescan case, |
2930 | * we may have some records inserted during |
2931 | * NO_ACCOUNTING (thus no old_roots populated), but |
2932 | * later we start a rescan, which clears NO_ACCOUNTING, |
2933 | * leaving some inserted records without old_roots | |
2934 | * populated. | |
2935 | * | |
2936 | * Those cases are rare and should not cause too much | |
2937 | * time spent during commit_transaction(). | |
d1b8b94a | 2938 | */ |
75181406 | 2939 | if (!record->old_roots) { |
d1b8b94a | 2940 | /* Search commit root to find old_roots */ |
a2c8d27e | 2941 | ret = btrfs_find_all_roots(&ctx, false); |
d1b8b94a QW |
2942 | if (ret < 0) |
2943 | goto cleanup; | |
a2c8d27e FM |
2944 | record->old_roots = ctx.roots; |
2945 | ctx.roots = NULL; | |
d1b8b94a QW |
2946 | } |
2947 | ||
1418bae1 QW |
2948 | /* Free the reserved data space */ |
2949 | btrfs_qgroup_free_refroot(fs_info, | |
2950 | record->data_rsv_refroot, | |
2951 | record->data_rsv, | |
2952 | BTRFS_QGROUP_RSV_DATA); | |
550d7a2e | 2953 | /* |
f3a84ccd FM |
2954 | * Use BTRFS_SEQ_LAST as time_seq to do a special search, |
2955 | * which doesn't lock the tree or delayed_refs and searches |
2956 | * the current root. It's safe inside commit_transaction(). |
550d7a2e | 2957 | */ |
a2c8d27e | 2958 | ctx.trans = trans; |
f1f0460c | 2959 | ctx.time_seq = BTRFS_SEQ_LAST; |
a2c8d27e | 2960 | ret = btrfs_find_all_roots(&ctx, false); |
550d7a2e QW |
2961 | if (ret < 0) |
2962 | goto cleanup; | |
a2c8d27e | 2963 | new_roots = ctx.roots; |
d1b8b94a | 2964 | if (qgroup_to_skip) { |
9086db86 | 2965 | ulist_del(new_roots, qgroup_to_skip, 0); |
d1b8b94a QW |
2966 | ulist_del(record->old_roots, qgroup_to_skip, |
2967 | 0); | |
2968 | } | |
8696d760 LF |
2969 | ret = btrfs_qgroup_account_extent(trans, record->bytenr, |
2970 | record->num_bytes, | |
2971 | record->old_roots, | |
2972 | new_roots); | |
550d7a2e QW |
2973 | record->old_roots = NULL; |
2974 | new_roots = NULL; | |
2975 | } | |
2976 | cleanup: | |
2977 | ulist_free(record->old_roots); | |
2978 | ulist_free(new_roots); | |
2979 | new_roots = NULL; | |
2980 | rb_erase(node, &delayed_refs->dirty_extent_root); | |
2981 | kfree(record); | |
2982 | ||
2983 | } | |
c337e7b0 QW |
2984 | trace_qgroup_num_dirty_extents(fs_info, trans->transid, |
2985 | num_dirty_extents); | |
550d7a2e QW |
2986 | return ret; |
2987 | } | |
2988 | ||
bed92eae | 2989 | /* |
2f1a6be1 FM |
2990 | * Writes all changed qgroups to disk. |
2991 | * Called by the transaction commit path and the qgroup assign ioctl. | |
bed92eae | 2992 | */ |
280f8bd2 | 2993 | int btrfs_run_qgroups(struct btrfs_trans_handle *trans) |
bed92eae | 2994 | { |
280f8bd2 | 2995 | struct btrfs_fs_info *fs_info = trans->fs_info; |
bed92eae AJ |
2996 | int ret = 0; |
2997 | ||
2f1a6be1 FM |
2998 | /* |
2999 | * In case we are called from the qgroup assign ioctl, assert that we | |
3000 | * are holding the qgroup_ioctl_lock, otherwise we can race with a quota | |
3001 | * disable operation (ioctl) and access a freed quota root. | |
3002 | */ | |
3003 | if (trans->transaction->state != TRANS_STATE_COMMIT_DOING) | |
3004 | lockdep_assert_held(&fs_info->qgroup_ioctl_lock); | |
3005 | ||
e3b0edd2 | 3006 | if (!fs_info->quota_root) |
5d23515b | 3007 | return ret; |
bed92eae AJ |
3008 | |
3009 | spin_lock(&fs_info->qgroup_lock); | |
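/*
 * Write out every dirtied qgroup. The spinlock only protects the dirty
 * list, so it is dropped around the item updates (which may block) and
 * re-taken before the list is examined again.
 */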
3010 | while (!list_empty(&fs_info->dirty_qgroups)) { | |
3011 | struct btrfs_qgroup *qgroup; | |
3012 | qgroup = list_first_entry(&fs_info->dirty_qgroups, | |
3013 | struct btrfs_qgroup, dirty); | |
3014 | list_del_init(&qgroup->dirty); | |
3015 | spin_unlock(&fs_info->qgroup_lock); | |
3e07e9a0 | 3016 | ret = update_qgroup_info_item(trans, qgroup); |
d3001ed3 | 3017 | if (ret) |
e562a8bd | 3018 | qgroup_mark_inconsistent(fs_info); |
ac8a866a | 3019 | ret = update_qgroup_limit_item(trans, qgroup); |
bed92eae | 3020 | if (ret) |
e562a8bd | 3021 | qgroup_mark_inconsistent(fs_info); |
bed92eae AJ |
3022 | spin_lock(&fs_info->qgroup_lock); |
3023 | } | |
182940f4 | 3024 | if (btrfs_qgroup_enabled(fs_info)) |
bed92eae AJ |
3025 | fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON; |
3026 | else | |
3027 | fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON; | |
3028 | spin_unlock(&fs_info->qgroup_lock); | |
3029 | ||
2e980acd | 3030 | ret = update_qgroup_status_item(trans); |
bed92eae | 3031 | if (ret) |
e562a8bd | 3032 | qgroup_mark_inconsistent(fs_info); |
bed92eae | 3033 | |
bed92eae AJ |
3034 | return ret; |
3035 | } | |
3036 | ||
5343cd93 BB |
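/*
 * For simple quotas, build an inherit structure listing every qgroup
 * that the qgroup of @inode_rootid is directly assigned to, so the new
 * subvolume automatically becomes a member of the same parent qgroups.
 */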
3037 | static int qgroup_auto_inherit(struct btrfs_fs_info *fs_info, |
3038 | u64 inode_rootid, | |
3039 | struct btrfs_qgroup_inherit **inherit) | |
3040 | { | |
3041 | int i = 0; | |
3042 | u64 num_qgroups = 0; | |
3043 | struct btrfs_qgroup *inode_qg; | |
3044 | struct btrfs_qgroup_list *qg_list; | |
3045 | struct btrfs_qgroup_inherit *res; | |
3046 | size_t struct_sz; | |
3047 | u64 *qgids; | |
3048 | ||
3049 | if (*inherit) | |
3050 | return -EEXIST; | |
3051 | ||
3052 | inode_qg = find_qgroup_rb(fs_info, inode_rootid); | |
3053 | if (!inode_qg) | |
3054 | return -ENOENT; | |
3055 | ||
3056 | num_qgroups = list_count_nodes(&inode_qg->groups); | |
3057 | ||
3058 | if (!num_qgroups) | |
3059 | return 0; | |
3060 | ||
3061 | struct_sz = struct_size(res, qgroups, num_qgroups); | |
3062 | if (struct_sz == SIZE_MAX) | |
3063 | return -ERANGE; | |
3064 | ||
3065 | res = kzalloc(struct_sz, GFP_NOFS); | |
3066 | if (!res) | |
3067 | return -ENOMEM; | |
3068 | res->num_qgroups = num_qgroups; | |
3069 | qgids = res->qgroups; | |
3070 | ||
3071 | list_for_each_entry(qg_list, &inode_qg->groups, next_group) | |
3072 | qgids[i] = qg_list->group->qgroupid; | |
3073 | ||
3074 | *inherit = res; | |
3075 | return 0; | |
3076 | } | |
3077 | ||
bed92eae | 3078 | /* |
01327610 | 3079 | * Copy the accounting information between qgroups. This is necessary |
918c2ee1 MF |
3080 | * when a snapshot or a subvolume is created. Throwing an error will |
3081 | * cause a transaction abort so we take extra care here to only error | |
3082 | * when a readonly fs is a reasonable outcome. | |
bed92eae | 3083 | */ |
a9377422 | 3084 | int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid, |
5343cd93 BB |
3085 | u64 objectid, u64 inode_rootid, |
3086 | struct btrfs_qgroup_inherit *inherit) | |
bed92eae AJ |
3087 | { |
3088 | int ret = 0; | |
3089 | int i; | |
3090 | u64 *i_qgroups; | |
e88439de | 3091 | bool committing = false; |
a9377422 | 3092 | struct btrfs_fs_info *fs_info = trans->fs_info; |
552f0329 | 3093 | struct btrfs_root *quota_root; |
bed92eae AJ |
3094 | struct btrfs_qgroup *srcgroup; |
3095 | struct btrfs_qgroup *dstgroup; | |
8d54518b | 3096 | struct btrfs_qgroup *prealloc; |
79ace7b8 | 3097 | struct btrfs_qgroup_list **qlist_prealloc = NULL; |
5343cd93 | 3098 | bool free_inherit = false; |
cbab8ade | 3099 | bool need_rescan = false; |
bed92eae | 3100 | u32 level_size = 0; |
3f5e2d3b | 3101 | u64 nums; |
bed92eae | 3102 | |
8d54518b QW |
3103 | prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS); |
3104 | if (!prealloc) | |
3105 | return -ENOMEM; | |
3106 | ||
e88439de QW |
3107 | /* |
3108 | * There are only two callers of this function. | |
3109 | * | |
3110 | * One in create_subvol() in the ioctl context, which needs to hold | |
3111 | * the qgroup_ioctl_lock. | |
3112 | * | |
3113 | * The other one is in create_pending_snapshot(), where no other qgroup |
3114 | * code can modify the fs as they all need to either start a new trans |
3115 | * or hold a trans handle, thus we don't need to hold |
3116 | * qgroup_ioctl_lock. |
3117 | * This avoids a long and complex lock chain and makes lockdep happy. |
3118 | */ | |
3119 | spin_lock(&fs_info->trans_lock); | |
3120 | if (trans->transaction->state == TRANS_STATE_COMMIT_DOING) | |
3121 | committing = true; | |
3122 | spin_unlock(&fs_info->trans_lock); | |
3123 | ||
3124 | if (!committing) | |
3125 | mutex_lock(&fs_info->qgroup_ioctl_lock); | |
182940f4 | 3126 | if (!btrfs_qgroup_enabled(fs_info)) |
f2f6ed3d | 3127 | goto out; |
bed92eae | 3128 | |
552f0329 | 3129 | quota_root = fs_info->quota_root; |
f2f6ed3d WS |
3130 | if (!quota_root) { |
3131 | ret = -EINVAL; | |
3132 | goto out; | |
3133 | } | |
bed92eae | 3134 | |
5343cd93 BB |
3135 | if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE && !inherit) { |
3136 | ret = qgroup_auto_inherit(fs_info, inode_rootid, &inherit); | |
3137 | if (ret) | |
3138 | goto out; | |
3139 | free_inherit = true; | |
3140 | } | |
3141 | ||
3f5e2d3b WS |
3142 | if (inherit) { |
3143 | i_qgroups = (u64 *)(inherit + 1); | |
3144 | nums = inherit->num_qgroups + 2 * inherit->num_ref_copies + | |
3145 | 2 * inherit->num_excl_copies; | |
3146 | for (i = 0; i < nums; ++i) { | |
3147 | srcgroup = find_qgroup_rb(fs_info, *i_qgroups); | |
09870d27 | 3148 | |
918c2ee1 MF |
3149 | /* |
3150 | * Zero out invalid groups so we can ignore | |
3151 | * them later. | |
3152 | */ | |
3153 | if (!srcgroup || | |
3154 | ((srcgroup->qgroupid >> 48) <= (objectid >> 48))) | |
3155 | *i_qgroups = 0ULL; | |
3156 | ||
3f5e2d3b WS |
3157 | ++i_qgroups; |
3158 | } | |
3159 | } | |
3160 | ||
bed92eae AJ |
3161 | /* |
3162 | * create a tracking group for the subvol itself | |
3163 | */ | |
3164 | ret = add_qgroup_item(trans, quota_root, objectid); | |
3165 | if (ret) | |
3166 | goto out; | |
3167 | ||
bed92eae AJ |
3168 | /* |
3169 | * add qgroup to all inherited groups | |
3170 | */ | |
3171 | if (inherit) { | |
3172 | i_qgroups = (u64 *)(inherit + 1); | |
918c2ee1 MF |
3173 | for (i = 0; i < inherit->num_qgroups; ++i, ++i_qgroups) { |
3174 | if (*i_qgroups == 0) | |
3175 | continue; | |
711169c4 LF |
3176 | ret = add_qgroup_relation_item(trans, objectid, |
3177 | *i_qgroups); | |
918c2ee1 | 3178 | if (ret && ret != -EEXIST) |
bed92eae | 3179 | goto out; |
711169c4 LF |
3180 | ret = add_qgroup_relation_item(trans, *i_qgroups, |
3181 | objectid); | |
918c2ee1 | 3182 | if (ret && ret != -EEXIST) |
bed92eae | 3183 | goto out; |
bed92eae | 3184 | } |
918c2ee1 | 3185 | ret = 0; |
bed92eae | 3186 | |
79ace7b8 QW |
3187 | qlist_prealloc = kcalloc(inherit->num_qgroups, |
3188 | sizeof(struct btrfs_qgroup_list *), | |
3189 | GFP_NOFS); | |
3190 | if (!qlist_prealloc) { | |
3191 | ret = -ENOMEM; | |
3192 | goto out; | |
3193 | } | |
3194 | for (int i = 0; i < inherit->num_qgroups; i++) { | |
3195 | qlist_prealloc[i] = kzalloc(sizeof(struct btrfs_qgroup_list), | |
3196 | GFP_NOFS); | |
3197 | if (!qlist_prealloc[i]) { | |
3198 | ret = -ENOMEM; | |
3199 | goto out; | |
3200 | } | |
3201 | } | |
3202 | } | |
bed92eae AJ |
3203 | |
3204 | spin_lock(&fs_info->qgroup_lock); | |
3205 | ||
8d54518b QW |
3206 | dstgroup = add_qgroup_rb(fs_info, prealloc, objectid); |
3207 | prealloc = NULL; | |
bed92eae | 3208 | |
e8c8541a | 3209 | if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) { |
e8c8541a DY |
3210 | dstgroup->lim_flags = inherit->lim.flags; |
3211 | dstgroup->max_rfer = inherit->lim.max_rfer; | |
3212 | dstgroup->max_excl = inherit->lim.max_excl; | |
3213 | dstgroup->rsv_rfer = inherit->lim.rsv_rfer; | |
3214 | dstgroup->rsv_excl = inherit->lim.rsv_excl; | |
1510e71c | 3215 | |
f7e942b5 | 3216 | qgroup_dirty(fs_info, dstgroup); |
e8c8541a DY |
3217 | } |
3218 | ||
182940f4 | 3219 | if (srcid && btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL) { |
bed92eae | 3220 | srcgroup = find_qgroup_rb(fs_info, srcid); |
f3a87f1b | 3221 | if (!srcgroup) |
bed92eae | 3222 | goto unlock; |
fcebe456 JB |
3223 | |
3224 | /* | |
3225 | * We call inherit after we clone the root in order to make sure | |
3226 | * our counts don't go crazy, so at this point the only | |
3227 | * difference between the two roots should be the root node. | |
3228 | */ | |
c8389d4c | 3229 | level_size = fs_info->nodesize; |
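/*
 * Right after the clone the snapshot shares everything with the source
 * except its own root node, so referenced stays identical while both
 * qgroups hold exactly one node worth of exclusive space.
 */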
fcebe456 JB |
3230 | dstgroup->rfer = srcgroup->rfer; |
3231 | dstgroup->rfer_cmpr = srcgroup->rfer_cmpr; | |
3232 | dstgroup->excl = level_size; | |
3233 | dstgroup->excl_cmpr = level_size; | |
bed92eae AJ |
3234 | srcgroup->excl = level_size; |
3235 | srcgroup->excl_cmpr = level_size; | |
3eeb4d59 DY |
3236 | |
3237 | /* inherit the limit info */ | |
3238 | dstgroup->lim_flags = srcgroup->lim_flags; | |
3239 | dstgroup->max_rfer = srcgroup->max_rfer; | |
3240 | dstgroup->max_excl = srcgroup->max_excl; | |
3241 | dstgroup->rsv_rfer = srcgroup->rsv_rfer; | |
3242 | dstgroup->rsv_excl = srcgroup->rsv_excl; | |
3243 | ||
bed92eae AJ |
3244 | qgroup_dirty(fs_info, dstgroup); |
3245 | qgroup_dirty(fs_info, srcgroup); | |
3246 | } | |
3247 | ||
f3a87f1b | 3248 | if (!inherit) |
bed92eae AJ |
3249 | goto unlock; |
3250 | ||
3251 | i_qgroups = (u64 *)(inherit + 1); | |
3252 | for (i = 0; i < inherit->num_qgroups; ++i) { | |
918c2ee1 | 3253 | if (*i_qgroups) { |
79ace7b8 QW |
3254 | ret = add_relation_rb(fs_info, qlist_prealloc[i], objectid, |
3255 | *i_qgroups); | |
3256 | qlist_prealloc[i] = NULL; | |
918c2ee1 MF |
3257 | if (ret) |
3258 | goto unlock; | |
3259 | } | |
bed92eae | 3260 | ++i_qgroups; |
cbab8ade QW |
3261 | |
3262 | /* | |
3263 | * If we're doing a snapshot, and adding the snapshot to a new | |
3264 | * qgroup, the numbers are guaranteed to be incorrect. | |
3265 | */ | |
3266 | if (srcid) | |
3267 | need_rescan = true; | |
bed92eae AJ |
3268 | } |
3269 | ||
918c2ee1 | 3270 | for (i = 0; i < inherit->num_ref_copies; ++i, i_qgroups += 2) { |
bed92eae AJ |
3271 | struct btrfs_qgroup *src; |
3272 | struct btrfs_qgroup *dst; | |
3273 | ||
918c2ee1 MF |
3274 | if (!i_qgroups[0] || !i_qgroups[1]) |
3275 | continue; | |
3276 | ||
bed92eae AJ |
3277 | src = find_qgroup_rb(fs_info, i_qgroups[0]); |
3278 | dst = find_qgroup_rb(fs_info, i_qgroups[1]); | |
3279 | ||
3280 | if (!src || !dst) { | |
3281 | ret = -EINVAL; | |
3282 | goto unlock; | |
3283 | } | |
3284 | ||
3285 | dst->rfer = src->rfer - level_size; | |
3286 | dst->rfer_cmpr = src->rfer_cmpr - level_size; | |
cbab8ade QW |
3287 | |
3288 | /* Manually tweaking numbers certainly needs a rescan */ | |
3289 | need_rescan = true; | |
bed92eae | 3290 | } |
918c2ee1 | 3291 | for (i = 0; i < inherit->num_excl_copies; ++i, i_qgroups += 2) { |
bed92eae AJ |
3292 | struct btrfs_qgroup *src; |
3293 | struct btrfs_qgroup *dst; | |
3294 | ||
918c2ee1 MF |
3295 | if (!i_qgroups[0] || !i_qgroups[1]) |
3296 | continue; | |
3297 | ||
bed92eae AJ |
3298 | src = find_qgroup_rb(fs_info, i_qgroups[0]); |
3299 | dst = find_qgroup_rb(fs_info, i_qgroups[1]); | |
3300 | ||
3301 | if (!src || !dst) { | |
3302 | ret = -EINVAL; | |
3303 | goto unlock; | |
3304 | } | |
3305 | ||
3306 | dst->excl = src->excl + level_size; | |
3307 | dst->excl_cmpr = src->excl_cmpr + level_size; | |
cbab8ade | 3308 | need_rescan = true; |
bed92eae AJ |
3309 | } |
3310 | ||
3311 | unlock: | |
3312 | spin_unlock(&fs_info->qgroup_lock); | |
49e5fb46 QW |
3313 | if (!ret) |
3314 | ret = btrfs_sysfs_add_one_qgroup(fs_info, dstgroup); | |
bed92eae | 3315 | out: |
e88439de QW |
3316 | if (!committing) |
3317 | mutex_unlock(&fs_info->qgroup_ioctl_lock); | |
cbab8ade | 3318 | if (need_rescan) |
e562a8bd | 3319 | qgroup_mark_inconsistent(fs_info); |
79ace7b8 QW |
3320 | if (qlist_prealloc) { |
3321 | for (int i = 0; i < inherit->num_qgroups; i++) | |
3322 | kfree(qlist_prealloc[i]); | |
3323 | kfree(qlist_prealloc); | |
3324 | } | |
5343cd93 BB |
3325 | if (free_inherit) |
3326 | kfree(inherit); | |
8d54518b | 3327 | kfree(prealloc); |
bed92eae AJ |
3328 | return ret; |
3329 | } | |
3330 | ||
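/*
 * Check whether adding @num_bytes on top of the committed usage plus all
 * outstanding reservations would exceed the qgroup's max referenced or
 * max exclusive limit.
 */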
adca4d94 | 3331 | static bool qgroup_check_limits(const struct btrfs_qgroup *qg, u64 num_bytes) |
003d7c59 JM |
3332 | { |
3333 | if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) && | |
dba21324 | 3334 | qgroup_rsv_total(qg) + (s64)qg->rfer + num_bytes > qg->max_rfer) |
003d7c59 JM |
3335 | return false; |
3336 | ||
3337 | if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) && | |
dba21324 | 3338 | qgroup_rsv_total(qg) + (s64)qg->excl + num_bytes > qg->max_excl) |
003d7c59 JM |
3339 | return false; |
3340 | ||
3341 | return true; | |
3342 | } | |
3343 | ||
dba21324 QW |
3344 | static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce, |
3345 | enum btrfs_qgroup_rsv_type type) | |
bed92eae | 3346 | { |
bed92eae AJ |
3347 | struct btrfs_qgroup *qgroup; |
3348 | struct btrfs_fs_info *fs_info = root->fs_info; | |
3349 | u64 ref_root = root->root_key.objectid; | |
3350 | int ret = 0; | |
686c4a5a | 3351 | LIST_HEAD(qgroup_list); |
bed92eae AJ |
3352 | |
3353 | if (!is_fstree(ref_root)) | |
3354 | return 0; | |
3355 | ||
3356 | if (num_bytes == 0) | |
3357 | return 0; | |
f29efe29 SD |
3358 | |
3359 | if (test_bit(BTRFS_FS_QUOTA_OVERRIDE, &fs_info->flags) && | |
3360 | capable(CAP_SYS_RESOURCE)) | |
3361 | enforce = false; | |
3362 | ||
bed92eae | 3363 | spin_lock(&fs_info->qgroup_lock); |
e3b0edd2 | 3364 | if (!fs_info->quota_root) |
bed92eae AJ |
3365 | goto out; |
3366 | ||
3367 | qgroup = find_qgroup_rb(fs_info, ref_root); | |
3368 | if (!qgroup) | |
3369 | goto out; | |
3370 | ||
686c4a5a QW |
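/*
 * Walk from the subvolume's own qgroup up through all of its ancestors;
 * the iterator list grows as parent groups are appended, so one pass
 * checks the limits of the whole hierarchy before anything is reserved.
 */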
3371 | qgroup_iterator_add(&qgroup_list, qgroup); |
3372 | list_for_each_entry(qgroup, &qgroup_list, iterator) { | |
bed92eae AJ |
3373 | struct btrfs_qgroup_list *glist; |
3374 | ||
686c4a5a | 3375 | if (enforce && !qgroup_check_limits(qgroup, num_bytes)) { |
bed92eae | 3376 | ret = -EDQUOT; |
720f1e20 WS |
3377 | goto out; |
3378 | } | |
bed92eae | 3379 | |
686c4a5a QW |
3380 | list_for_each_entry(glist, &qgroup->groups, next_group) |
3381 | qgroup_iterator_add(&qgroup_list, glist->group); | |
bed92eae | 3382 | } |
686c4a5a | 3383 | |
3c97185c | 3384 | ret = 0; |
bed92eae AJ |
3385 | /* |
3386 | * no limits exceeded, now record the reservation into all qgroups | |
3387 | */ | |
686c4a5a QW |
3388 | list_for_each_entry(qgroup, &qgroup_list, iterator) |
3389 | qgroup_rsv_add(fs_info, qgroup, num_bytes, type); | |
bed92eae AJ |
3390 | |
3391 | out: | |
686c4a5a | 3392 | qgroup_iterator_clean(&qgroup_list); |
bed92eae | 3393 | spin_unlock(&fs_info->qgroup_lock); |
bed92eae AJ |
3394 | return ret; |
3395 | } | |
3396 | ||
e1211d0e QW |
3397 | /* |
3398 | * Free @num_bytes of reserved space with @type for qgroup. (Normally level 0 | |
3399 | * qgroup). | |
3400 | * | |
3401 | * Will handle all higher level qgroups too. |
3402 | * | |
3403 | * NOTE: If @num_bytes is (u64)-1, this means to free all bytes of this qgroup. | |
3404 | * This special case is only used for META_PERTRANS type. | |
3405 | */ | |
297d750b | 3406 | void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info, |
d4e5c920 QW |
3407 | u64 ref_root, u64 num_bytes, |
3408 | enum btrfs_qgroup_rsv_type type) | |
bed92eae | 3409 | { |
bed92eae | 3410 | struct btrfs_qgroup *qgroup; |
25152cb7 | 3411 | LIST_HEAD(qgroup_list); |
bed92eae AJ |
3412 | |
3413 | if (!is_fstree(ref_root)) | |
3414 | return; | |
3415 | ||
3416 | if (num_bytes == 0) | |
3417 | return; | |
3418 | ||
e1211d0e QW |
3419 | if (num_bytes == (u64)-1 && type != BTRFS_QGROUP_RSV_META_PERTRANS) { |
3420 | WARN(1, "%s: Invalid type to free", __func__); | |
3421 | return; | |
3422 | } | |
bed92eae AJ |
3423 | spin_lock(&fs_info->qgroup_lock); |
3424 | ||
e3b0edd2 | 3425 | if (!fs_info->quota_root) |
bed92eae AJ |
3426 | goto out; |
3427 | ||
3428 | qgroup = find_qgroup_rb(fs_info, ref_root); | |
3429 | if (!qgroup) | |
3430 | goto out; | |
3431 | ||
e1211d0e | 3432 | if (num_bytes == (u64)-1) |
8287475a QW |
3433 | /* |
3434 | * We're freeing all pertrans rsv, get reserved value from | |
3435 | * level 0 qgroup as real num_bytes to free. | |
3436 | */ | |
e1211d0e QW |
3437 | num_bytes = qgroup->rsv.values[type]; |
3438 | ||
25152cb7 QW |
3439 | qgroup_iterator_add(&qgroup_list, qgroup); |
3440 | list_for_each_entry(qgroup, &qgroup_list, iterator) { | |
bed92eae AJ |
3441 | struct btrfs_qgroup_list *glist; |
3442 | ||
25152cb7 QW |
3443 | qgroup_rsv_release(fs_info, qgroup, num_bytes, type); |
3444 | list_for_each_entry(glist, &qgroup->groups, next_group) { | |
3445 | qgroup_iterator_add(&qgroup_list, glist->group); | |
bed92eae AJ |
3446 | } |
3447 | } | |
bed92eae | 3448 | out: |
25152cb7 | 3449 | qgroup_iterator_clean(&qgroup_list); |
bed92eae | 3450 | spin_unlock(&fs_info->qgroup_lock); |
bed92eae AJ |
3451 | } |
3452 | ||
ff3d27a0 QW |
3453 | /* |
3454 | * Check if the leaf is the last leaf, which means all node pointers |
3455 | * are at their last position. |
3456 | */ | |
3457 | static bool is_last_leaf(struct btrfs_path *path) | |
3458 | { | |
3459 | int i; | |
3460 | ||
3461 | for (i = 1; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) { | |
3462 | if (path->slots[i] != btrfs_header_nritems(path->nodes[i]) - 1) | |
3463 | return false; | |
3464 | } | |
3465 | return true; | |
3466 | } | |
3467 | ||
2f232036 JS |
3468 | /* |
3469 | * Returns < 0 on error, 0 when more leaves are to be scanned, |
3393168d | 3470 | * and 1 when done. |
2f232036 | 3471 | */ |
62088ca7 LF |
3472 | static int qgroup_rescan_leaf(struct btrfs_trans_handle *trans, |
3473 | struct btrfs_path *path) | |
2f232036 | 3474 | { |
62088ca7 | 3475 | struct btrfs_fs_info *fs_info = trans->fs_info; |
29cbcf40 | 3476 | struct btrfs_root *extent_root; |
2f232036 | 3477 | struct btrfs_key found; |
0a0e8b89 | 3478 | struct extent_buffer *scratch_leaf = NULL; |
fcebe456 | 3479 | u64 num_bytes; |
ff3d27a0 | 3480 | bool done; |
2f232036 JS |
3481 | int slot; |
3482 | int ret; | |
3483 | ||
182940f4 BB |
3484 | if (!btrfs_qgroup_full_accounting(fs_info)) |
3485 | return 1; | |
3486 | ||
2f232036 | 3487 | mutex_lock(&fs_info->qgroup_rescan_lock); |
29cbcf40 JB |
3488 | extent_root = btrfs_extent_root(fs_info, |
3489 | fs_info->qgroup_rescan_progress.objectid); | |
3490 | ret = btrfs_search_slot_for_read(extent_root, | |
2f232036 JS |
3491 | &fs_info->qgroup_rescan_progress, |
3492 | path, 1, 0); | |
3493 | ||
ab8d0fc4 JM |
3494 | btrfs_debug(fs_info, |
3495 | "current progress key (%llu %u %llu), search_slot ret %d", | |
3496 | fs_info->qgroup_rescan_progress.objectid, | |
3497 | fs_info->qgroup_rescan_progress.type, | |
3498 | fs_info->qgroup_rescan_progress.offset, ret); | |
2f232036 JS |
3499 | |
3500 | if (ret) { | |
3501 | /* | |
3502 | * The rescan is about to end, we will not be scanning any | |
3503 | * further blocks. We cannot unset the RESCAN flag here, because | |
3504 | * we want to commit the transaction if everything went well. | |
3505 | * To make the live accounting work in this phase, we set our | |
3506 | * scan progress pointer such that every real extent objectid | |
3507 | * will be smaller. | |
3508 | */ | |
3509 | fs_info->qgroup_rescan_progress.objectid = (u64)-1; | |
3510 | btrfs_release_path(path); | |
3511 | mutex_unlock(&fs_info->qgroup_rescan_lock); | |
3512 | return ret; | |
3513 | } | |
ff3d27a0 | 3514 | done = is_last_leaf(path); |
2f232036 JS |
3515 | |
3516 | btrfs_item_key_to_cpu(path->nodes[0], &found, | |
3517 | btrfs_header_nritems(path->nodes[0]) - 1); | |
3518 | fs_info->qgroup_rescan_progress.objectid = found.objectid + 1; | |
3519 | ||
0a0e8b89 QW |
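/*
 * Clone the leaf so the path and the rescan lock can be dropped before
 * walking its items; accounting each extent may need lengthy backref
 * walks and should not be done under the lock.
 */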
3520 | scratch_leaf = btrfs_clone_extent_buffer(path->nodes[0]); |
3521 | if (!scratch_leaf) { | |
3522 | ret = -ENOMEM; | |
3523 | mutex_unlock(&fs_info->qgroup_rescan_lock); | |
3524 | goto out; | |
3525 | } | |
2f232036 JS |
3526 | slot = path->slots[0]; |
3527 | btrfs_release_path(path); | |
3528 | mutex_unlock(&fs_info->qgroup_rescan_lock); | |
3529 | ||
3530 | for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) { | |
a2c8d27e FM |
3531 | struct btrfs_backref_walk_ctx ctx = { 0 }; |
3532 | ||
2f232036 | 3533 | btrfs_item_key_to_cpu(scratch_leaf, &found, slot); |
3a6d75e8 JB |
3534 | if (found.type != BTRFS_EXTENT_ITEM_KEY && |
3535 | found.type != BTRFS_METADATA_ITEM_KEY) | |
2f232036 | 3536 | continue; |
3a6d75e8 | 3537 | if (found.type == BTRFS_METADATA_ITEM_KEY) |
da17066c | 3538 | num_bytes = fs_info->nodesize; |
3a6d75e8 JB |
3539 | else |
3540 | num_bytes = found.offset; | |
3541 | ||
a2c8d27e FM |
3542 | ctx.bytenr = found.objectid; |
3543 | ctx.fs_info = fs_info; | |
3544 | ||
3545 | ret = btrfs_find_all_roots(&ctx, false); | |
2f232036 JS |
3546 | if (ret < 0) |
3547 | goto out; | |
9d220c95 | 3548 | /* For rescan, just pass old_roots as NULL */ |
8696d760 | 3549 | ret = btrfs_qgroup_account_extent(trans, found.objectid, |
a2c8d27e | 3550 | num_bytes, NULL, ctx.roots); |
9d220c95 | 3551 | if (ret < 0) |
fcebe456 | 3552 | goto out; |
2f232036 | 3553 | } |
2f232036 | 3554 | out: |
df449714 | 3555 | if (scratch_leaf) |
0a0e8b89 | 3556 | free_extent_buffer(scratch_leaf); |
2f232036 | 3557 | |
6f7de19e | 3558 | if (done && !ret) { |
ff3d27a0 | 3559 | ret = 1; |
6f7de19e QW |
3560 | fs_info->qgroup_rescan_progress.objectid = (u64)-1; |
3561 | } | |
2f232036 JS |
3562 | return ret; |
3563 | } | |
3564 | ||
cb13eea3 FM |
3565 | static bool rescan_should_stop(struct btrfs_fs_info *fs_info) |
3566 | { | |
182940f4 BB |
3567 | if (btrfs_fs_closing(fs_info)) |
3568 | return true; | |
3569 | if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state)) | |
3570 | return true; | |
3571 | if (!btrfs_qgroup_enabled(fs_info)) | |
3572 | return true; | |
3573 | if (fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN) | |
3574 | return true; | |
3575 | return false; | |
cb13eea3 FM |
3576 | } |
3577 | ||
d458b054 | 3578 | static void btrfs_qgroup_rescan_worker(struct btrfs_work *work) |
2f232036 | 3579 | { |
b382a324 JS |
3580 | struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info, |
3581 | qgroup_rescan_work); | |
2f232036 JS |
3582 | struct btrfs_path *path; |
3583 | struct btrfs_trans_handle *trans = NULL; | |
2f232036 | 3584 | int err = -ENOMEM; |
53b7cde9 | 3585 | int ret = 0; |
cb13eea3 | 3586 | bool stopped = false; |
b7adbf9a | 3587 | bool did_leaf_rescans = false; |
2f232036 | 3588 | |
182940f4 BB |
3589 | if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE) |
3590 | return; | |
3591 | ||
2f232036 JS |
3592 | path = btrfs_alloc_path(); |
3593 | if (!path) | |
3594 | goto out; | |
b6debf15 QW |
3595 | /* |
3596 | * Rescan should only search the commit root, and any later difference |
3597 | * should be recorded by qgroup accounting. |
3598 | */ | |
3599 | path->search_commit_root = 1; | |
3600 | path->skip_locking = 1; | |
2f232036 JS |
3601 | |
3602 | err = 0; | |
cb13eea3 | 3603 | while (!err && !(stopped = rescan_should_stop(fs_info))) { |
2f232036 JS |
3604 | trans = btrfs_start_transaction(fs_info->fs_root, 0); |
3605 | if (IS_ERR(trans)) { | |
3606 | err = PTR_ERR(trans); | |
3607 | break; | |
3608 | } | |
db5df254 NB |
3609 | |
3610 | err = qgroup_rescan_leaf(trans, path); | |
b7adbf9a | 3611 | did_leaf_rescans = true; |
db5df254 | 3612 | |
2f232036 | 3613 | if (err > 0) |
3a45bb20 | 3614 | btrfs_commit_transaction(trans); |
2f232036 | 3615 | else |
3a45bb20 | 3616 | btrfs_end_transaction(trans); |
2f232036 JS |
3617 | } |
3618 | ||
3619 | out: | |
2f232036 | 3620 | btrfs_free_path(path); |
2f232036 JS |
3621 | |
3622 | mutex_lock(&fs_info->qgroup_rescan_lock); | |
3393168d | 3623 | if (err > 0 && |
2f232036 JS |
3624 | fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) { |
3625 | fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; | |
db5df254 | 3626 | } else if (err < 0 || stopped) { |
2f232036 JS |
3627 | fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; |
3628 | } | |
3629 | mutex_unlock(&fs_info->qgroup_rescan_lock); | |
3630 | ||
53b7cde9 | 3631 | /* |
b7adbf9a FM |
3632 | * Only update the status item, and only if we did any actual work, since |
3633 | * the previous part has already updated the qgroup info. This also |
3634 | * prevents a race with a concurrent quota disable, which has already set |
3635 | * fs_info->quota_root to NULL and cleared BTRFS_FS_QUOTA_ENABLED at | |
3636 | * btrfs_quota_disable(). | |
53b7cde9 | 3637 | */ |
b7adbf9a FM |
3638 | if (did_leaf_rescans) { |
3639 | trans = btrfs_start_transaction(fs_info->quota_root, 1); | |
3640 | if (IS_ERR(trans)) { | |
3641 | err = PTR_ERR(trans); | |
3642 | trans = NULL; | |
3643 | btrfs_err(fs_info, | |
3644 | "fail to start transaction for status update: %d", | |
3645 | err); | |
3646 | } | |
3647 | } else { | |
13fc1d27 | 3648 | trans = NULL; |
53b7cde9 | 3649 | } |
13fc1d27 FM |
3650 | |
3651 | mutex_lock(&fs_info->qgroup_rescan_lock); | |
e562a8bd QW |
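/*
 * Keep the RESCAN status flag when the scan was merely paused (e.g. the
 * fs is closing or being remounted) so it can resume on the next mount;
 * clear it when the scan finished or was explicitly cancelled.
 */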
3652 | if (!stopped || |
3653 | fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN) | |
13fc1d27 FM |
3654 | fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN; |
3655 | if (trans) { | |
3656 | ret = update_qgroup_status_item(trans); | |
3657 | if (ret < 0) { | |
3658 | err = ret; | |
3659 | btrfs_err(fs_info, "fail to update qgroup status: %d", | |
3660 | err); | |
3661 | } | |
53b7cde9 | 3662 | } |
13fc1d27 | 3663 | fs_info->qgroup_rescan_running = false; |
e562a8bd | 3664 | fs_info->qgroup_flags &= ~BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN; |
13fc1d27 FM |
3665 | complete_all(&fs_info->qgroup_rescan_completion); |
3666 | mutex_unlock(&fs_info->qgroup_rescan_lock); | |
3667 | ||
3668 | if (!trans) | |
3669 | return; | |
3670 | ||
3a45bb20 | 3671 | btrfs_end_transaction(trans); |
53b7cde9 | 3672 | |
cb13eea3 | 3673 | if (stopped) { |
7343dd61 | 3674 | btrfs_info(fs_info, "qgroup scan paused"); |
e562a8bd QW |
3675 | } else if (fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN) { |
3676 | btrfs_info(fs_info, "qgroup scan cancelled"); | |
7343dd61 | 3677 | } else if (err >= 0) { |
efe120a0 | 3678 | btrfs_info(fs_info, "qgroup scan completed%s", |
3393168d | 3679 | err > 0 ? " (inconsistency flag cleared)" : ""); |
2f232036 | 3680 | } else { |
efe120a0 | 3681 | btrfs_err(fs_info, "qgroup scan failed with %d", err); |
2f232036 JS |
3682 | } |
3683 | } | |
3684 | ||
b382a324 JS |
3685 | /* |
3686 | * Checks that (a) no rescan is running and (b) quota is enabled. Allocates all | |
3687 | * memory required for the rescan context. | |
3688 | */ | |
3689 | static int | |
3690 | qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid, | |
3691 | int init_flags) | |
2f232036 JS |
3692 | { |
3693 | int ret = 0; | |
2f232036 | 3694 | |
182940f4 BB |
3695 | if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE) { |
3696 | btrfs_warn(fs_info, "qgroup rescan init failed, running in simple mode"); | |
3697 | return -EINVAL; | |
3698 | } | |
3699 | ||
9593bf49 QW |
3700 | if (!init_flags) { |
3701 | /* we're resuming qgroup rescan at mount time */ | |
e4e7ede7 FM |
3702 | if (!(fs_info->qgroup_flags & |
3703 | BTRFS_QGROUP_STATUS_FLAG_RESCAN)) { | |
9593bf49 | 3704 | btrfs_warn(fs_info, |
37d02592 | 3705 | "qgroup rescan init failed, qgroup rescan is not queued"); |
e4e7ede7 FM |
3706 | ret = -EINVAL; |
3707 | } else if (!(fs_info->qgroup_flags & | |
3708 | BTRFS_QGROUP_STATUS_FLAG_ON)) { | |
9593bf49 | 3709 | btrfs_warn(fs_info, |
37d02592 | 3710 | "qgroup rescan init failed, qgroup is not enabled"); |
e4e7ede7 FM |
3711 | ret = -EINVAL; |
3712 | } | |
3713 | ||
3714 | if (ret) | |
3715 | return ret; | |
b382a324 | 3716 | } |
2f232036 JS |
3717 | |
3718 | mutex_lock(&fs_info->qgroup_rescan_lock); | |
b382a324 JS |
3719 | |
3720 | if (init_flags) { | |
9593bf49 QW |
3721 | if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) { |
3722 | btrfs_warn(fs_info, | |
3723 | "qgroup rescan is already in progress"); | |
b382a324 | 3724 | ret = -EINPROGRESS; |
9593bf49 QW |
3725 | } else if (!(fs_info->qgroup_flags & |
3726 | BTRFS_QGROUP_STATUS_FLAG_ON)) { | |
3727 | btrfs_warn(fs_info, | |
3728 | "qgroup rescan init failed, qgroup is not enabled"); | |
b382a324 | 3729 | ret = -EINVAL; |
182940f4 | 3730 | } else if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED) { |
e804861b SK |
3731 | /* Quota disable is in progress */ |
3732 | ret = -EBUSY; | |
9593bf49 | 3733 | } |
b382a324 JS |
3734 | |
3735 | if (ret) { | |
b382a324 | 3736 | mutex_unlock(&fs_info->qgroup_rescan_lock); |
9593bf49 | 3737 | return ret; |
b382a324 | 3738 | } |
b382a324 | 3739 | fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN; |
2f232036 JS |
3740 | } |
3741 | ||
2f232036 JS |
3742 | memset(&fs_info->qgroup_rescan_progress, 0, |
3743 | sizeof(fs_info->qgroup_rescan_progress)); | |
e15e9f43 QW |
3744 | fs_info->qgroup_flags &= ~(BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN | |
3745 | BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING); | |
b382a324 | 3746 | fs_info->qgroup_rescan_progress.objectid = progress_objectid; |
190631f1 | 3747 | init_completion(&fs_info->qgroup_rescan_completion); |
b382a324 JS |
3748 | mutex_unlock(&fs_info->qgroup_rescan_lock); |
3749 | ||
fc97fab0 | 3750 | btrfs_init_work(&fs_info->qgroup_rescan_work, |
078b8b90 | 3751 | btrfs_qgroup_rescan_worker, NULL); |
b382a324 JS |
3752 | return 0; |
3753 | } | |
3754 | ||
3755 | static void | |
3756 | qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info) | |
3757 | { | |
3758 | struct rb_node *n; | |
3759 | struct btrfs_qgroup *qgroup; | |
3760 | ||
3761 | spin_lock(&fs_info->qgroup_lock); | |
2f232036 JS |
3762 | /* clear all current qgroup tracking information */ |
3763 | for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) { | |
3764 | qgroup = rb_entry(n, struct btrfs_qgroup, node); | |
3765 | qgroup->rfer = 0; | |
3766 | qgroup->rfer_cmpr = 0; | |
3767 | qgroup->excl = 0; | |
3768 | qgroup->excl_cmpr = 0; | |
9c7b0c2e | 3769 | qgroup_dirty(fs_info, qgroup); |
2f232036 JS |
3770 | } |
3771 | spin_unlock(&fs_info->qgroup_lock); | |
b382a324 | 3772 | } |
2f232036 | 3773 | |
b382a324 JS |
3774 | int |
3775 | btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info) | |
3776 | { | |
3777 | int ret = 0; | |
3778 | struct btrfs_trans_handle *trans; | |
3779 | ||
3780 | ret = qgroup_rescan_init(fs_info, 0, 1); | |
3781 | if (ret) | |
3782 | return ret; | |
3783 | ||
3784 | /* | |
3785 | * We have set the rescan_progress to 0, which means no more | |
3786 | * delayed refs will be accounted by btrfs_qgroup_account_ref. | |
3787 | * However, btrfs_qgroup_account_ref may be running right after its call |
3788 | * to btrfs_find_all_roots, in which case it would still do the |
3789 | * accounting. | |
3790 | * To solve this, we're committing the transaction, which will | |
3791 | * ensure we run all delayed refs and only after that, we are | |
3792 | * going to clear all tracking information for a clean start. | |
3793 | */ | |
3794 | ||
6705b48a FM |
3795 | trans = btrfs_attach_transaction_barrier(fs_info->fs_root); |
3796 | if (IS_ERR(trans) && trans != ERR_PTR(-ENOENT)) { | |
b382a324 JS |
3797 | fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN; |
3798 | return PTR_ERR(trans); | |
6705b48a FM |
3799 | } else if (trans != ERR_PTR(-ENOENT)) { |
3800 | ret = btrfs_commit_transaction(trans); | |
3801 | if (ret) { | |
3802 | fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN; | |
3803 | return ret; | |
3804 | } | |
b382a324 JS |
3805 | } |
3806 | ||
3807 | qgroup_rescan_zero_tracking(fs_info); | |
3808 | ||
d61acbbf QW |
3809 | mutex_lock(&fs_info->qgroup_rescan_lock); |
3810 | fs_info->qgroup_rescan_running = true; | |
fc97fab0 QW |
3811 | btrfs_queue_work(fs_info->qgroup_rescan_workers, |
3812 | &fs_info->qgroup_rescan_work); | |
d61acbbf | 3813 | mutex_unlock(&fs_info->qgroup_rescan_lock); |
2f232036 JS |
3814 | |
3815 | return 0; | |
3816 | } | |
57254b6e | 3817 | |
d06f23d6 JM |
3818 | int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info, |
3819 | bool interruptible) | |
57254b6e JS |
3820 | { |
3821 | int running; | |
3822 | int ret = 0; | |
3823 | ||
3824 | mutex_lock(&fs_info->qgroup_rescan_lock); | |
d2c609b8 | 3825 | running = fs_info->qgroup_rescan_running; |
57254b6e JS |
3826 | mutex_unlock(&fs_info->qgroup_rescan_lock); |
3827 | ||
d06f23d6 JM |
3828 | if (!running) |
3829 | return 0; | |
3830 | ||
3831 | if (interruptible) | |
57254b6e JS |
3832 | ret = wait_for_completion_interruptible( |
3833 | &fs_info->qgroup_rescan_completion); | |
d06f23d6 JM |
3834 | else |
3835 | wait_for_completion(&fs_info->qgroup_rescan_completion); | |
57254b6e JS |
3836 | |
3837 | return ret; | |
3838 | } | |
b382a324 JS |
3839 | |
3840 | /* | |
3841 | * This is only called from open_ctree() where we're still single threaded, |
3842 | * thus locking is omitted here. |
3843 | */ | |
3844 | void | |
3845 | btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info) | |
3846 | { | |
d61acbbf QW |
3847 | if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) { |
3848 | mutex_lock(&fs_info->qgroup_rescan_lock); | |
3849 | fs_info->qgroup_rescan_running = true; | |
fc97fab0 QW |
3850 | btrfs_queue_work(fs_info->qgroup_rescan_workers, |
3851 | &fs_info->qgroup_rescan_work); | |
d61acbbf QW |
3852 | mutex_unlock(&fs_info->qgroup_rescan_lock); |
3853 | } | |
b382a324 | 3854 | } |
52472553 | 3855 | |
263da812 QW |
3856 | #define rbtree_iterate_from_safe(node, next, start) \ |
3857 | for (node = start; node && ({ next = rb_next(node); 1;}); node = next) | |
3858 | ||
3859 | static int qgroup_unreserve_range(struct btrfs_inode *inode, | |
3860 | struct extent_changeset *reserved, u64 start, | |
3861 | u64 len) | |
3862 | { | |
3863 | struct rb_node *node; | |
3864 | struct rb_node *next; | |
f07728d5 | 3865 | struct ulist_node *entry; |
263da812 QW |
3866 | int ret = 0; |
3867 | ||
3868 | node = reserved->range_changed.root.rb_node; | |
f07728d5 DC |
3869 | if (!node) |
3870 | return 0; | |
263da812 QW |
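/*
 * Descend the rbtree of changed ranges to an entry near @start (stepping
 * back one node below if needed), then walk forward and clear
 * EXTENT_QGROUP_RESERVED on every range overlapping [start, start + len).
 */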
3871 | while (node) { |
3872 | entry = rb_entry(node, struct ulist_node, rb_node); | |
3873 | if (entry->val < start) | |
3874 | node = node->rb_right; | |
263da812 | 3875 | else |
f07728d5 | 3876 | node = node->rb_left; |
263da812 QW |
3877 | } |
3878 | ||
263da812 QW |
3879 | if (entry->val > start && rb_prev(&entry->rb_node)) |
3880 | entry = rb_entry(rb_prev(&entry->rb_node), struct ulist_node, | |
3881 | rb_node); | |
3882 | ||
3883 | rbtree_iterate_from_safe(node, next, &entry->rb_node) { | |
3884 | u64 entry_start; | |
3885 | u64 entry_end; | |
3886 | u64 entry_len; | |
3887 | int clear_ret; | |
3888 | ||
3889 | entry = rb_entry(node, struct ulist_node, rb_node); | |
3890 | entry_start = entry->val; | |
3891 | entry_end = entry->aux; | |
3892 | entry_len = entry_end - entry_start + 1; | |
3893 | ||
3894 | if (entry_start >= start + len) | |
3895 | break; | |
3896 | if (entry_start + entry_len <= start) | |
3897 | continue; | |
3898 | /* | |
3899 | * Now the entry overlaps [start, start + len), clear the |
3900 | * EXTENT_QGROUP_RESERVED bit. |
3901 | */ | |
3902 | clear_ret = clear_extent_bits(&inode->io_tree, entry_start, | |
3903 | entry_end, EXTENT_QGROUP_RESERVED); | |
3904 | if (!ret && clear_ret < 0) | |
3905 | ret = clear_ret; | |
3906 | ||
3907 | ulist_del(&reserved->range_changed, entry->val, entry->aux); | |
3908 | if (likely(reserved->bytes_changed >= entry_len)) { | |
3909 | reserved->bytes_changed -= entry_len; | |
3910 | } else { | |
3911 | WARN_ON(1); | |
3912 | reserved->bytes_changed = 0; | |
3913 | } | |
3914 | } | |
3915 | ||
3916 | return ret; | |
3917 | } | |
3918 | ||
52472553 | 3919 | /* |
c53e9653 | 3920 | * Try to free some space for qgroup. |
52472553 | 3921 | * |
c53e9653 QW |
3922 | * For qgroup, there are only 3 ways to free qgroup space: |
3923 | * - Flush nodatacow write | |
3924 | * Any nodatacow write will free its reserved data space at run_delalloc_range(). | |
3925 | * In theory, we should only flush nodatacow inodes, but it's not yet | |
3926 | * possible, so we need to flush the whole root. | |
52472553 | 3927 | * |
c53e9653 QW |
3928 | * - Wait for ordered extents |
3929 | * When ordered extents are finished, their reserved metadata is finally | |
3930 | * converted to per_trans status, which can be freed by later commit | |
3931 | * transaction. | |
52472553 | 3932 | * |
c53e9653 QW |
3933 | * - Commit transaction |
3934 | * This would free the meta_per_trans space. | |
3935 | * In theory this shouldn't provide much space, but any more qgroup space | |
3936 | * is needed. | |
52472553 | 3937 | */ |
c53e9653 QW |
3938 | static int try_flush_qgroup(struct btrfs_root *root) |
3939 | { | |
3940 | struct btrfs_trans_handle *trans; | |
3941 | int ret; | |
3942 | ||
35b22c19 | 3943 | /* Can't hold an open transaction or we run the risk of deadlocking. */ |
ffb7c2e9 FM |
3944 | ASSERT(current->journal_info == NULL); |
3945 | if (WARN_ON(current->journal_info)) | |
ae396a3b | 3946 | return 0; |
6f23277a | 3947 | |
ae5e070e QW |
3948 | /* |
3949 | * We don't want to run flush again and again, so if there is a running | |
3950 | * one, we won't try to start a new flush, but exit directly. | |
3951 | */ | |
3952 | if (test_and_set_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state)) { | |
ae5e070e QW |
3953 | wait_event(root->qgroup_flush_wait, |
3954 | !test_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state)); | |
3955 | return 0; | |
3956 | } | |
3957 | ||
f9baa501 | 3958 | ret = btrfs_start_delalloc_snapshot(root, true); |
c53e9653 QW |
3959 | if (ret < 0) |
3960 | goto out; | |
3961 | btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1); | |
3962 | ||
9c93c238 | 3963 | trans = btrfs_attach_transaction_barrier(root); |
c53e9653 QW |
3964 | if (IS_ERR(trans)) { |
3965 | ret = PTR_ERR(trans); | |
9c93c238 FM |
3966 | if (ret == -ENOENT) |
3967 | ret = 0; | |
c53e9653 QW |
3968 | goto out; |
3969 | } | |
3970 | ||
ae396a3b | 3971 | ret = btrfs_commit_transaction(trans); |
c53e9653 QW |
3972 | out: |
3973 | clear_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state); | |
3974 | wake_up(&root->qgroup_flush_wait); | |
3975 | return ret; | |
3976 | } | |
3977 | ||
3978 | static int qgroup_reserve_data(struct btrfs_inode *inode, | |
364ecf36 QW |
3979 | struct extent_changeset **reserved_ret, u64 start, |
3980 | u64 len) | |
52472553 | 3981 | { |
7661a3e0 | 3982 | struct btrfs_root *root = inode->root; |
364ecf36 | 3983 | struct extent_changeset *reserved; |
263da812 | 3984 | bool new_reserved = false; |
364ecf36 QW |
3985 | u64 orig_reserved; |
3986 | u64 to_reserve; | |
52472553 QW |
3987 | int ret; |
3988 | ||
182940f4 | 3989 | if (btrfs_qgroup_mode(root->fs_info) == BTRFS_QGROUP_MODE_DISABLED || |
4fd786e6 | 3990 | !is_fstree(root->root_key.objectid) || len == 0) |
52472553 QW |
3991 | return 0; |
3992 | ||
364ecf36 QW |
3993 | /* @reserved parameter is mandatory for qgroup */ |
3994 | if (WARN_ON(!reserved_ret)) | |
3995 | return -EINVAL; | |
3996 | if (!*reserved_ret) { | |
263da812 | 3997 | new_reserved = true; |
364ecf36 QW |
3998 | *reserved_ret = extent_changeset_alloc(); |
3999 | if (!*reserved_ret) | |
4000 | return -ENOMEM; | |
4001 | } | |
4002 | reserved = *reserved_ret; | |
4003 | /* Record already reserved space */ | |
4004 | orig_reserved = reserved->bytes_changed; | |
7661a3e0 | 4005 | ret = set_record_extent_bits(&inode->io_tree, start, |
364ecf36 QW |
4006 | start + len -1, EXTENT_QGROUP_RESERVED, reserved); |
4007 | ||
4008 | /* Newly reserved space */ | |
4009 | to_reserve = reserved->bytes_changed - orig_reserved; | |
7661a3e0 | 4010 | trace_btrfs_qgroup_reserve_data(&inode->vfs_inode, start, len, |
364ecf36 | 4011 | to_reserve, QGROUP_RESERVE); |
52472553 | 4012 | if (ret < 0) |
263da812 | 4013 | goto out; |
dba21324 | 4014 | ret = qgroup_reserve(root, to_reserve, true, BTRFS_QGROUP_RSV_DATA); |
52472553 QW |
4015 | if (ret < 0) |
4016 | goto cleanup; | |
4017 | ||
52472553 QW |
4018 | return ret; |
4019 | ||
4020 | cleanup: | |
263da812 QW |
4021 | qgroup_unreserve_range(inode, reserved, start, len); |
4022 | out: | |
4023 | if (new_reserved) { | |
d6ade689 | 4024 | extent_changeset_free(reserved); |
263da812 QW |
4025 | *reserved_ret = NULL; |
4026 | } | |
52472553 QW |
4027 | return ret; |
4028 | } | |
f695fdce | 4029 | |
c53e9653 QW |
4030 | /* |
4031 | * Reserve qgroup space for range [start, start + len). | |
4032 | * | |
4033 | * This function will either reserve space from related qgroups or do nothing | |
4034 | * if the range is already reserved. | |
4035 | * | |
4036 | * Return 0 for successful reservation | |
4037 | * Return <0 for error (including -EDQUOT) |
4038 | * | |
4039 | * NOTE: This function may sleep for memory allocation, dirty page flushing and | |
4040 | * commit transaction. So caller should not hold any dirty page locked. | |
4041 | */ | |
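/*
 * Typical caller pattern, shown here only as an illustrative sketch (it
 * is not lifted from any particular caller): reserve before dirtying the
 * range, then release it once the data is written out, or free it again
 * on error:
 *
 *	struct extent_changeset *reserved = NULL;
 *	u64 released = 0;
 *	int ret;
 *
 *	ret = btrfs_qgroup_reserve_data(inode, &reserved, start, len);
 *	if (ret < 0)
 *		return ret;
 *	... dirty the range and write it back ...
 *	ret = btrfs_qgroup_release_data(inode, start, len, &released);
 *	(or, on an error path, btrfs_qgroup_free_data(inode, reserved,
 *	 start, len, NULL))
 *	extent_changeset_free(reserved);
 */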
4042 | int btrfs_qgroup_reserve_data(struct btrfs_inode *inode, | |
4043 | struct extent_changeset **reserved_ret, u64 start, | |
4044 | u64 len) | |
4045 | { | |
4046 | int ret; | |
4047 | ||
4048 | ret = qgroup_reserve_data(inode, reserved_ret, start, len); | |
4049 | if (ret <= 0 && ret != -EDQUOT) | |
4050 | return ret; | |
4051 | ||
4052 | ret = try_flush_qgroup(inode->root); | |
4053 | if (ret < 0) | |
4054 | return ret; | |
4055 | return qgroup_reserve_data(inode, reserved_ret, start, len); | |
4056 | } | |
4057 | ||
bc42bda2 | 4058 | /* Free ranges specified by @reserved, normally in error path */ |
df2cfd13 | 4059 | static int qgroup_free_reserved_data(struct btrfs_inode *inode, |
9e65bfca BB |
4060 | struct extent_changeset *reserved, |
4061 | u64 start, u64 len, u64 *freed_ret) | |
bc42bda2 | 4062 | { |
df2cfd13 | 4063 | struct btrfs_root *root = inode->root; |
bc42bda2 QW |
4064 | struct ulist_node *unode; |
4065 | struct ulist_iterator uiter; | |
4066 | struct extent_changeset changeset; | |
9e65bfca | 4067 | u64 freed = 0; |
bc42bda2 QW |
4068 | int ret; |
4069 | ||
4070 | extent_changeset_init(&changeset); | |
4071 | len = round_up(start + len, root->fs_info->sectorsize); | |
4072 | start = round_down(start, root->fs_info->sectorsize); | |
4073 | ||
4074 | ULIST_ITER_INIT(&uiter); | |
4075 | while ((unode = ulist_next(&reserved->range_changed, &uiter))) { | |
4076 | u64 range_start = unode->val; | |
4077 | /* unode->aux is the inclusive end */ | |
4078 | u64 range_len = unode->aux - range_start + 1; | |
4079 | u64 free_start; | |
4080 | u64 free_len; | |
4081 | ||
4082 | extent_changeset_release(&changeset); | |
4083 | ||
4084 | /* Only free range in range [start, start + len) */ | |
4085 | if (range_start >= start + len || | |
4086 | range_start + range_len <= start) | |
4087 | continue; | |
4088 | free_start = max(range_start, start); | |
4089 | free_len = min(start + len, range_start + range_len) - | |
4090 | free_start; | |
4091 | /* | |
4092 | * TODO: Also modify reserved->ranges_reserved to reflect |
4093 | * the modification. |
4094 | * |
4095 | * However, as long as we free qgroup reserved space according to |
4096 | * EXTENT_QGROUP_RESERVED, we won't double free, |
4097 | * so there is no need to rush. |
4098 | */ | |
df2cfd13 NB |
4099 | ret = clear_record_extent_bits(&inode->io_tree, free_start, |
4100 | free_start + free_len - 1, | |
bc42bda2 QW |
4101 | EXTENT_QGROUP_RESERVED, &changeset); |
4102 | if (ret < 0) | |
4103 | goto out; | |
4104 | freed += changeset.bytes_changed; | |
4105 | } | |
4fd786e6 | 4106 | btrfs_qgroup_free_refroot(root->fs_info, root->root_key.objectid, freed, |
d4e5c920 | 4107 | BTRFS_QGROUP_RSV_DATA); |
9e65bfca BB |
4108 | if (freed_ret) |
4109 | *freed_ret = freed; | |
4110 | ret = 0; | |
bc42bda2 QW |
4111 | out: |
4112 | extent_changeset_release(&changeset); | |
4113 | return ret; | |
4114 | } | |
4115 | ||
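/*
 * Common helper for releasing or freeing reserved data space. With @free
 * set the bytes are returned to the qgroups as well; otherwise only the
 * EXTENT_QGROUP_RESERVED bits are cleared and the reservation is kept
 * until qgroup numbers are updated at commit time.
 */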
8769af96 | 4116 | static int __btrfs_qgroup_release_data(struct btrfs_inode *inode, |
bc42bda2 | 4117 | struct extent_changeset *reserved, u64 start, u64 len, |
9e65bfca | 4118 | u64 *released, int free) |
f695fdce QW |
4119 | { |
4120 | struct extent_changeset changeset; | |
81fb6f77 | 4121 | int trace_op = QGROUP_RELEASE; |
f695fdce QW |
4122 | int ret; |
4123 | ||
af0e2aab BB |
4124 | if (btrfs_qgroup_mode(inode->root->fs_info) == BTRFS_QGROUP_MODE_DISABLED) { |
4125 | extent_changeset_init(&changeset); | |
4126 | return clear_record_extent_bits(&inode->io_tree, start, | |
4127 | start + len - 1, | |
4128 | EXTENT_QGROUP_RESERVED, &changeset); | |
4129 | } | |
3628b4ca | 4130 | |
bc42bda2 QW |
4131 | /* In release case, we shouldn't have @reserved */ |
4132 | WARN_ON(!free && reserved); | |
4133 | if (free && reserved) | |
9e65bfca | 4134 | return qgroup_free_reserved_data(inode, reserved, start, len, released); |
364ecf36 | 4135 | extent_changeset_init(&changeset); |
8769af96 NB |
4136 | ret = clear_record_extent_bits(&inode->io_tree, start, start + len -1, |
4137 | EXTENT_QGROUP_RESERVED, &changeset); | |
f695fdce QW |
4138 | if (ret < 0) |
4139 | goto out; | |
4140 | ||
d51ea5dd | 4141 | if (free) |
81fb6f77 | 4142 | trace_op = QGROUP_FREE; |
8769af96 | 4143 | trace_btrfs_qgroup_release_data(&inode->vfs_inode, start, len, |
81fb6f77 | 4144 | changeset.bytes_changed, trace_op); |
d51ea5dd | 4145 | if (free) |
8769af96 NB |
4146 | btrfs_qgroup_free_refroot(inode->root->fs_info, |
4147 | inode->root->root_key.objectid, | |
d4e5c920 | 4148 | changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA); |
9e65bfca BB |
4149 | if (released) |
4150 | *released = changeset.bytes_changed; | |
f695fdce | 4151 | out: |
364ecf36 | 4152 | extent_changeset_release(&changeset); |
f695fdce QW |
4153 | return ret; |
4154 | } | |
4155 | ||
4156 | /* | |
4157 | * Free a reserved space range from io_tree and related qgroups | |
4158 | * | |
4159 | * Should be called when a range of pages gets invalidated before reaching |
4160 | * disk, or for the error cleanup case. |
bc42bda2 QW |
4161 | * If @reserved is given, only the reserved range in [@start, @start + @len) |
4162 | * will be freed. |
f695fdce QW |
4163 | * |
4164 | * For data written to disk, use btrfs_qgroup_release_data(). | |
4165 | * | |
4166 | * NOTE: This function may sleep for memory allocation. | |
4167 | */ | |
8b8a979f | 4168 | int btrfs_qgroup_free_data(struct btrfs_inode *inode, |
9e65bfca BB |
4169 | struct extent_changeset *reserved, |
4170 | u64 start, u64 len, u64 *freed) | |
f695fdce | 4171 | { |
9e65bfca | 4172 | return __btrfs_qgroup_release_data(inode, reserved, start, len, freed, 1); |
f695fdce QW |
4173 | } |
4174 | ||
4175 | /* | |
4176 | * Release a reserved space range from io_tree only. | |
4177 | * | |
4178 | * Should be called when a range of pages gets written to disk and the |
4179 | * corresponding FILE_EXTENT is inserted into the corresponding root. |
4180 | * |
4181 | * Since the new qgroup accounting framework only updates qgroup numbers at |
4182 | * commit_transaction() time, its reserved space shouldn't be freed from |
4183 | * related qgroups. |
4184 | * |
4185 | * But we should release the range from the io_tree, to allow further writes |
4186 | * to be COWed. |
4187 | * | |
4188 | * NOTE: This function may sleep for memory allocation. | |
4189 | */ | |
9e65bfca | 4190 | int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len, u64 *released) |
f695fdce | 4191 | { |
9e65bfca | 4192 | return __btrfs_qgroup_release_data(inode, NULL, start, len, released, 0); |
f695fdce | 4193 | } |
55eeaf05 | 4194 | |
8287475a QW |
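/*
 * Track per-root totals of META_PREALLOC and META_PERTRANS reservations.
 * The free and convert paths below clamp against these counters, so a
 * quota disable/enable cycle cannot make the qgroup reservations
 * underflow.
 */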
4195 | static void add_root_meta_rsv(struct btrfs_root *root, int num_bytes, |
4196 | enum btrfs_qgroup_rsv_type type) | |
4197 | { | |
4198 | if (type != BTRFS_QGROUP_RSV_META_PREALLOC && | |
4199 | type != BTRFS_QGROUP_RSV_META_PERTRANS) | |
4200 | return; | |
4201 | if (num_bytes == 0) | |
4202 | return; | |
4203 | ||
4204 | spin_lock(&root->qgroup_meta_rsv_lock); | |
4205 | if (type == BTRFS_QGROUP_RSV_META_PREALLOC) | |
4206 | root->qgroup_meta_rsv_prealloc += num_bytes; | |
4207 | else | |
4208 | root->qgroup_meta_rsv_pertrans += num_bytes; | |
4209 | spin_unlock(&root->qgroup_meta_rsv_lock); | |
4210 | } | |
4211 | ||
4212 | static int sub_root_meta_rsv(struct btrfs_root *root, int num_bytes, | |
4213 | enum btrfs_qgroup_rsv_type type) | |
4214 | { | |
4215 | if (type != BTRFS_QGROUP_RSV_META_PREALLOC && | |
4216 | type != BTRFS_QGROUP_RSV_META_PERTRANS) | |
4217 | return 0; | |
4218 | if (num_bytes == 0) | |
4219 | return 0; | |
4220 | ||
4221 | spin_lock(&root->qgroup_meta_rsv_lock); | |
4222 | if (type == BTRFS_QGROUP_RSV_META_PREALLOC) { | |
4223 | num_bytes = min_t(u64, root->qgroup_meta_rsv_prealloc, | |
4224 | num_bytes); | |
4225 | root->qgroup_meta_rsv_prealloc -= num_bytes; | |
4226 | } else { | |
4227 | num_bytes = min_t(u64, root->qgroup_meta_rsv_pertrans, | |
4228 | num_bytes); | |
4229 | root->qgroup_meta_rsv_pertrans -= num_bytes; | |
4230 | } | |
4231 | spin_unlock(&root->qgroup_meta_rsv_lock); | |
4232 | return num_bytes; | |
4233 | } | |
4234 | ||
80e9baed NB |
4235 | int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes, |
4236 | enum btrfs_qgroup_rsv_type type, bool enforce) | |
55eeaf05 | 4237 | { |
0b246afa | 4238 | struct btrfs_fs_info *fs_info = root->fs_info; |
55eeaf05 QW |
4239 | int ret; |
4240 | ||
182940f4 | 4241 | if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED || |
4fd786e6 | 4242 | !is_fstree(root->root_key.objectid) || num_bytes == 0) |
55eeaf05 QW |
4243 | return 0; |
4244 | ||
0b246afa | 4245 | BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize)); |
fd2b007e | 4246 | trace_qgroup_meta_reserve(root, (s64)num_bytes, type); |
733e03a0 | 4247 | ret = qgroup_reserve(root, num_bytes, enforce, type); |
55eeaf05 QW |
4248 | if (ret < 0) |
4249 | return ret; | |
8287475a QW |
4250 | /* |
4251 | * Record what we have reserved into root. | |
4252 | * | |
4253 | * To avoid quota disabled->enabled underflow. | |
4254 | * In that case, we may try to free space we haven't reserved | |
4255 | * (since quota was disabled), so record what we reserved into root. | |
4256 | * And ensure later release won't underflow this number. | |
4257 | */ | |
4258 | add_root_meta_rsv(root, num_bytes, type); | |
55eeaf05 QW |
4259 | return ret; |
4260 | } | |
4261 | ||
c53e9653 | 4262 | int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes, |
d4135134 FM |
4263 | enum btrfs_qgroup_rsv_type type, bool enforce, |
4264 | bool noflush) | |
c53e9653 QW |
4265 | { |
4266 | int ret; | |
4267 | ||
80e9baed | 4268 | ret = btrfs_qgroup_reserve_meta(root, num_bytes, type, enforce); |
d4135134 | 4269 | if ((ret <= 0 && ret != -EDQUOT) || noflush) |
c53e9653 QW |
4270 | return ret; |
4271 | ||
4272 | ret = try_flush_qgroup(root); | |
4273 | if (ret < 0) | |
4274 | return ret; | |
80e9baed | 4275 | return btrfs_qgroup_reserve_meta(root, num_bytes, type, enforce); |
c53e9653 QW |
4276 | } |
4277 | ||
33b6b251 DS |
4278 | /* |
4279 | * Per-transaction meta reservation should be all freed at transaction commit | |
4280 | * time | |
4281 | */ | |
733e03a0 | 4282 | void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root) |
55eeaf05 | 4283 | { |
0b246afa | 4284 | struct btrfs_fs_info *fs_info = root->fs_info; |
55eeaf05 | 4285 | |
182940f4 | 4286 | if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED || |
4fd786e6 | 4287 | !is_fstree(root->root_key.objectid)) |
55eeaf05 QW |
4288 | return; |
4289 | ||
e1211d0e | 4290 | /* TODO: Update trace point to handle such free */ |
4ee0d883 | 4291 | trace_qgroup_meta_free_all_pertrans(root); |
e1211d0e | 4292 | /* Special value -1 means to free all reserved space */ |
4fd786e6 | 4293 | btrfs_qgroup_free_refroot(fs_info, root->root_key.objectid, (u64)-1, |
733e03a0 | 4294 | BTRFS_QGROUP_RSV_META_PERTRANS); |
55eeaf05 QW |
4295 | } |
4296 | ||
733e03a0 QW |
4297 | void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes, |
4298 | enum btrfs_qgroup_rsv_type type) | |
55eeaf05 | 4299 | { |
0b246afa JM |
4300 | struct btrfs_fs_info *fs_info = root->fs_info; |
4301 | ||
182940f4 | 4302 | if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED || |
4fd786e6 | 4303 | !is_fstree(root->root_key.objectid)) |
55eeaf05 QW |
4304 | return; |
4305 | ||
8287475a QW |
4306 | /* |
4307 | * Reservations for META_PREALLOC can happen before quota is enabled, |
4308 | * which can lead to underflow. |
4309 | * Ensure here that we only free what we really have reserved. |
4310 | */ | |
4311 | num_bytes = sub_root_meta_rsv(root, num_bytes, type); | |
0b246afa | 4312 | BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize)); |
fd2b007e | 4313 | trace_qgroup_meta_reserve(root, -(s64)num_bytes, type); |
4fd786e6 MT |
4314 | btrfs_qgroup_free_refroot(fs_info, root->root_key.objectid, |
4315 | num_bytes, type); | |
55eeaf05 | 4316 | } |
56fa9d07 | 4317 | |
64cfaef6 QW |
4318 | static void qgroup_convert_meta(struct btrfs_fs_info *fs_info, u64 ref_root, |
4319 | int num_bytes) | |
4320 | { | |
64cfaef6 | 4321 | struct btrfs_qgroup *qgroup; |
09134450 | 4322 | LIST_HEAD(qgroup_list); |
64cfaef6 QW |
4323 | |
4324 | if (num_bytes == 0) | |
4325 | return; | |
e3b0edd2 | 4326 | if (!fs_info->quota_root) |
64cfaef6 QW |
4327 | return; |
4328 | ||
4329 | spin_lock(&fs_info->qgroup_lock); | |
4330 | qgroup = find_qgroup_rb(fs_info, ref_root); | |
4331 | if (!qgroup) | |
4332 | goto out; | |
64cfaef6 | 4333 | |
09134450 QW |
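/*
 * Move the reservation from the PREALLOC bucket to the PERTRANS bucket
 * for this qgroup and every ancestor. On a read-only fs the PERTRANS
 * side is skipped, so the bytes are simply released.
 */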
4334 | qgroup_iterator_add(&qgroup_list, qgroup); |
4335 | list_for_each_entry(qgroup, &qgroup_list, iterator) { | |
4336 | struct btrfs_qgroup_list *glist; | |
64cfaef6 | 4337 | |
09134450 | 4338 | qgroup_rsv_release(fs_info, qgroup, num_bytes, |
64cfaef6 | 4339 | BTRFS_QGROUP_RSV_META_PREALLOC); |
b321a52c BB |
4340 | if (!sb_rdonly(fs_info->sb)) |
4341 | qgroup_rsv_add(fs_info, qgroup, num_bytes, | |
4342 | BTRFS_QGROUP_RSV_META_PERTRANS); | |
09134450 QW |
4343 | |
4344 | list_for_each_entry(glist, &qgroup->groups, next_group) | |
4345 | qgroup_iterator_add(&qgroup_list, glist->group); | |
64cfaef6 QW |
4346 | } |
4347 | out: | |
09134450 | 4348 | qgroup_iterator_clean(&qgroup_list); |
64cfaef6 QW |
4349 | spin_unlock(&fs_info->qgroup_lock); |
4350 | } | |
4351 | ||
33b6b251 DS |
4352 | /* |
4353 | * Convert @num_bytes of META_PREALLOCATED reservation to META_PERTRANS. | |
4354 | * | |
4355 | * This is called when preallocated meta reservation needs to be used. | |
4356 | * Normally after btrfs_join_transaction() call. | |
4357 | */ | |
64cfaef6 QW |
4358 | void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes) |
4359 | { | |
4360 | struct btrfs_fs_info *fs_info = root->fs_info; | |
4361 | ||
182940f4 | 4362 | if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED || |
4fd786e6 | 4363 | !is_fstree(root->root_key.objectid)) |
64cfaef6 | 4364 | return; |
8287475a QW |
4365 | /* Same as btrfs_qgroup_free_meta_prealloc() */ |
4366 | num_bytes = sub_root_meta_rsv(root, num_bytes, | |
4367 | BTRFS_QGROUP_RSV_META_PREALLOC); | |
4ee0d883 | 4368 | trace_qgroup_meta_convert(root, num_bytes); |
4fd786e6 | 4369 | qgroup_convert_meta(fs_info, root->root_key.objectid, num_bytes); |
64cfaef6 QW |
4370 | } |
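The conversion above is just "release from PREALLOC, add the same amount to PERTRANS", propagated through the qgroup hierarchy. The following is a minimal, self-contained userspace model of that lifecycle, not kernel code; all names are illustrative only and the underflow guard mirrors qgroup_rsv_release() above.

#include <assert.h>

enum rsv_type { RSV_DATA, RSV_META_PERTRANS, RSV_META_PREALLOC, RSV_LAST };

struct toy_qgroup {
	unsigned long long rsv[RSV_LAST];
};

static void rsv_add(struct toy_qgroup *qg, enum rsv_type t, unsigned long long n)
{
	qg->rsv[t] += n;
}

static void rsv_release(struct toy_qgroup *qg, enum rsv_type t, unsigned long long n)
{
	/* Never release more than is actually held, as the kernel guard does. */
	if (qg->rsv[t] >= n)
		qg->rsv[t] -= n;
	else
		qg->rsv[t] = 0;
}

/* Convert = release PREALLOC and add the same amount as PERTRANS. */
static void rsv_convert(struct toy_qgroup *qg, unsigned long long n)
{
	rsv_release(qg, RSV_META_PREALLOC, n);
	rsv_add(qg, RSV_META_PERTRANS, n);
}

int main(void)
{
	struct toy_qgroup qg = { 0 };

	rsv_add(&qg, RSV_META_PREALLOC, 16384);	/* reserve before joining a transaction */
	rsv_convert(&qg, 16384);		/* bytes got used, keep them as pertrans */
	assert(qg.rsv[RSV_META_PREALLOC] == 0);
	assert(qg.rsv[RSV_META_PERTRANS] == 16384);
	return 0;
}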
4371 | ||
56fa9d07 | 4372 | /* |
01327610 | 4373 | * Check for leaked qgroup reserved space, normally at inode |
56fa9d07 QW |
4374 | * destroy time. |
4375 | */ | |
cfdd4592 | 4376 | void btrfs_qgroup_check_reserved_leak(struct btrfs_inode *inode) |
56fa9d07 QW |
4377 | { |
4378 | struct extent_changeset changeset; | |
4379 | struct ulist_node *unode; | |
4380 | struct ulist_iterator iter; | |
4381 | int ret; | |
4382 | ||
364ecf36 | 4383 | extent_changeset_init(&changeset); |
cfdd4592 | 4384 | ret = clear_record_extent_bits(&inode->io_tree, 0, (u64)-1, |
f734c44a | 4385 | EXTENT_QGROUP_RESERVED, &changeset); |
56fa9d07 QW |
4386 | |
4387 | WARN_ON(ret < 0); | |
4388 | if (WARN_ON(changeset.bytes_changed)) { | |
4389 | ULIST_ITER_INIT(&iter); | |
53d32359 | 4390 | while ((unode = ulist_next(&changeset.range_changed, &iter))) { |
cfdd4592 NB |
4391 | btrfs_warn(inode->root->fs_info, |
4392 | "leaking qgroup reserved space, ino: %llu, start: %llu, end: %llu", | |
4393 | btrfs_ino(inode), unode->val, unode->aux); | |
56fa9d07 | 4394 | } |
cfdd4592 NB |
4395 | btrfs_qgroup_free_refroot(inode->root->fs_info, |
4396 | inode->root->root_key.objectid, | |
d4e5c920 | 4397 | changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA); |
0b08e1f4 | 4398 | |
56fa9d07 | 4399 | } |
364ecf36 | 4400 | extent_changeset_release(&changeset); |
56fa9d07 | 4401 | } |
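The leak check above relies on the "clear a bit over the whole range and record what actually changed" pattern: whatever was still marked EXTENT_QGROUP_RESERVED at inode destruction is exactly the leaked amount. A toy userspace sketch of that idea follows; it is illustrative only and uses made-up names, with a flat block array standing in for the extent io tree.

#include <stdbool.h>
#include <stdio.h>

#define NR_BLOCKS  8
#define BLOCK_SIZE 4096ULL

static bool reserved[NR_BLOCKS];

/* Clear reservation marks in blocks [start, end) and return bytes changed. */
static unsigned long long clear_reserved(int start, int end)
{
	unsigned long long changed = 0;

	for (int i = start; i < end && i < NR_BLOCKS; i++) {
		if (reserved[i]) {
			reserved[i] = false;
			changed += BLOCK_SIZE;
		}
	}
	return changed;
}

int main(void)
{
	reserved[2] = reserved[3] = true;	/* pretend these were never freed */

	unsigned long long leaked = clear_reserved(0, NR_BLOCKS);
	if (leaked)
		printf("leaking reserved space: %llu bytes\n", leaked);
	return 0;
}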
370a11b8 QW |
4402 | |
4403 | void btrfs_qgroup_init_swapped_blocks( | |
4404 | struct btrfs_qgroup_swapped_blocks *swapped_blocks) | |
4405 | { | |
4406 | int i; | |
4407 | ||
4408 | spin_lock_init(&swapped_blocks->lock); | |
4409 | for (i = 0; i < BTRFS_MAX_LEVEL; i++) | |
4410 | swapped_blocks->blocks[i] = RB_ROOT; | |
4411 | swapped_blocks->swapped = false; | |
4412 | } | |
4413 | ||
4414 | /* | |
4415 | * Delete all swapped block records of @root. |
4416 | * Every record here means we skipped a full subtree scan for qgroup. | |
4417 | * | |
4418 | * Called when committing a transaction. |
4419 | */ | |
4420 | void btrfs_qgroup_clean_swapped_blocks(struct btrfs_root *root) | |
4421 | { | |
4422 | struct btrfs_qgroup_swapped_blocks *swapped_blocks; | |
4423 | int i; | |
4424 | ||
4425 | swapped_blocks = &root->swapped_blocks; | |
4426 | ||
4427 | spin_lock(&swapped_blocks->lock); | |
4428 | if (!swapped_blocks->swapped) | |
4429 | goto out; | |
4430 | for (i = 0; i < BTRFS_MAX_LEVEL; i++) { | |
4431 | struct rb_root *cur_root = &swapped_blocks->blocks[i]; | |
4432 | struct btrfs_qgroup_swapped_block *entry; | |
4433 | struct btrfs_qgroup_swapped_block *next; | |
4434 | ||
4435 | rbtree_postorder_for_each_entry_safe(entry, next, cur_root, | |
4436 | node) | |
4437 | kfree(entry); | |
4438 | swapped_blocks->blocks[i] = RB_ROOT; | |
4439 | } | |
4440 | swapped_blocks->swapped = false; | |
4441 | out: | |
4442 | spin_unlock(&swapped_blocks->lock); | |
4443 | } | |
4444 | ||
4445 | /* | |
4446 | * Add a subtree root record into @subvol_root. |
4447 | * | |
4448 | * @subvol_root: tree root of the subvolume tree getting swapped |
4449 | * @bg: block group under balance | |
4450 | * @subvol_parent/slot: pointer to the subtree root in subvolume tree | |
4451 | * @reloc_parent/slot: pointer to the subtree root in reloc tree | |
4452 | * BOTH POINTERS ARE BEFORE TREE SWAP | |
4453 | * @last_snapshot: last snapshot generation of the subvolume tree | |
4454 | */ | |
4455 | int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans, | |
4456 | struct btrfs_root *subvol_root, | |
32da5386 | 4457 | struct btrfs_block_group *bg, |
370a11b8 QW |
4458 | struct extent_buffer *subvol_parent, int subvol_slot, |
4459 | struct extent_buffer *reloc_parent, int reloc_slot, | |
4460 | u64 last_snapshot) | |
4461 | { | |
4462 | struct btrfs_fs_info *fs_info = subvol_root->fs_info; | |
4463 | struct btrfs_qgroup_swapped_blocks *blocks = &subvol_root->swapped_blocks; | |
4464 | struct btrfs_qgroup_swapped_block *block; | |
4465 | struct rb_node **cur; | |
4466 | struct rb_node *parent = NULL; | |
4467 | int level = btrfs_header_level(subvol_parent) - 1; | |
4468 | int ret = 0; | |
4469 | ||
182940f4 | 4470 | if (!btrfs_qgroup_full_accounting(fs_info)) |
370a11b8 QW |
4471 | return 0; |
4472 | ||
4473 | if (btrfs_node_ptr_generation(subvol_parent, subvol_slot) > | |
4474 | btrfs_node_ptr_generation(reloc_parent, reloc_slot)) { | |
4475 | btrfs_err_rl(fs_info, | |
4476 | "%s: bad parameter order, subvol_gen=%llu reloc_gen=%llu", | |
4477 | __func__, | |
4478 | btrfs_node_ptr_generation(subvol_parent, subvol_slot), | |
4479 | btrfs_node_ptr_generation(reloc_parent, reloc_slot)); | |
4480 | return -EUCLEAN; | |
4481 | } | |
4482 | ||
4483 | block = kmalloc(sizeof(*block), GFP_NOFS); | |
4484 | if (!block) { | |
4485 | ret = -ENOMEM; | |
4486 | goto out; | |
4487 | } | |
4488 | ||
4489 | /* | |
4490 | * @reloc_parent/slot is still before swap, while @block is going to | |
4491 | * record the bytenr after swap, so we do the swap here. | |
4492 | */ | |
4493 | block->subvol_bytenr = btrfs_node_blockptr(reloc_parent, reloc_slot); | |
4494 | block->subvol_generation = btrfs_node_ptr_generation(reloc_parent, | |
4495 | reloc_slot); | |
4496 | block->reloc_bytenr = btrfs_node_blockptr(subvol_parent, subvol_slot); | |
4497 | block->reloc_generation = btrfs_node_ptr_generation(subvol_parent, | |
4498 | subvol_slot); | |
4499 | block->last_snapshot = last_snapshot; | |
4500 | block->level = level; | |
57949d03 QW |
4501 | |
4502 | /* | |
4503 | * If we have bg == NULL, we're called from btrfs_recover_relocation(), | |
4504 | * no one else can modify tree blocks, thus the qgroup numbers will not |
4505 | * change regardless of the value of trace_leaf. |
4506 | */ | |
4507 | if (bg && bg->flags & BTRFS_BLOCK_GROUP_DATA) | |
370a11b8 QW |
4508 | block->trace_leaf = true; |
4509 | else | |
4510 | block->trace_leaf = false; | |
4511 | btrfs_node_key_to_cpu(reloc_parent, &block->first_key, reloc_slot); | |
4512 | ||
4513 | /* Insert @block into @blocks */ | |
4514 | spin_lock(&blocks->lock); | |
4515 | cur = &blocks->blocks[level].rb_node; | |
4516 | while (*cur) { | |
4517 | struct btrfs_qgroup_swapped_block *entry; | |
4518 | ||
4519 | parent = *cur; | |
4520 | entry = rb_entry(parent, struct btrfs_qgroup_swapped_block, | |
4521 | node); | |
4522 | ||
4523 | if (entry->subvol_bytenr < block->subvol_bytenr) { | |
4524 | cur = &(*cur)->rb_left; | |
4525 | } else if (entry->subvol_bytenr > block->subvol_bytenr) { | |
4526 | cur = &(*cur)->rb_right; | |
4527 | } else { | |
4528 | if (entry->subvol_generation != | |
4529 | block->subvol_generation || | |
4530 | entry->reloc_bytenr != block->reloc_bytenr || | |
4531 | entry->reloc_generation != | |
4532 | block->reloc_generation) { | |
4533 | /* | |
4534 | * Duplicate but mismatched entry found. |
4535 | * Shouldn't happen. | |
4536 | * | |
4537 | * Marking qgroup inconsistent should be enough | |
4538 | * for end users. | |
4539 | */ | |
4540 | WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG)); | |
4541 | ret = -EEXIST; | |
4542 | } | |
4543 | kfree(block); | |
4544 | goto out_unlock; | |
4545 | } | |
4546 | } | |
4547 | rb_link_node(&block->node, parent, cur); | |
4548 | rb_insert_color(&block->node, &blocks->blocks[level]); | |
4549 | blocks->swapped = true; | |
4550 | out_unlock: | |
4551 | spin_unlock(&blocks->lock); | |
4552 | out: | |
4553 | if (ret < 0) | |
e562a8bd | 4554 | qgroup_mark_inconsistent(fs_info); |
370a11b8 QW |
4555 | return ret; |
4556 | } | |
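The records added above live in a per-level tree keyed by subvol_bytenr, and inserting an existing key with different metadata is treated as corruption (-EEXIST). Below is a simplified, self-contained userspace sketch of that keyed insert with duplicate checking; it uses a plain unbalanced binary search tree and invented names rather than the kernel's rb_node machinery.

#include <assert.h>
#include <errno.h>
#include <stdlib.h>

struct toy_block {
	unsigned long long subvol_bytenr;
	unsigned long long subvol_gen;
	struct toy_block *left, *right;
};

static int toy_insert(struct toy_block **root, struct toy_block *blk)
{
	struct toy_block **cur = root;

	if (!blk)
		return -ENOMEM;

	while (*cur) {
		if (blk->subvol_bytenr < (*cur)->subvol_bytenr) {
			cur = &(*cur)->left;
		} else if (blk->subvol_bytenr > (*cur)->subvol_bytenr) {
			cur = &(*cur)->right;
		} else {
			/* Same key: only an identical record is tolerated. */
			int ret = ((*cur)->subvol_gen == blk->subvol_gen) ? 0 : -EEXIST;

			free(blk);
			return ret;
		}
	}
	*cur = blk;	/* new key, link it in */
	return 0;
}

static struct toy_block *toy_alloc(unsigned long long bytenr, unsigned long long gen)
{
	struct toy_block *blk = calloc(1, sizeof(*blk));

	if (blk) {
		blk->subvol_bytenr = bytenr;
		blk->subvol_gen = gen;
	}
	return blk;
}

int main(void)
{
	struct toy_block *root = NULL;

	assert(toy_insert(&root, toy_alloc(4096, 10)) == 0);
	assert(toy_insert(&root, toy_alloc(8192, 11)) == 0);
	/* Same bytenr, different generation: rejected like the kernel path. */
	assert(toy_insert(&root, toy_alloc(4096, 99)) == -EEXIST);
	return 0;
}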
f616f5cd QW |
4557 | |
4558 | /* | |
4559 | * Check if the tree block is a subtree root and, if so, do the needed |
4560 | * delayed subtree tracing for qgroup. |
4561 | * | |
4562 | * This is called during btrfs_cow_block(). | |
4563 | */ | |
4564 | int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans, | |
4565 | struct btrfs_root *root, | |
4566 | struct extent_buffer *subvol_eb) | |
4567 | { | |
4568 | struct btrfs_fs_info *fs_info = root->fs_info; | |
789d6a3a | 4569 | struct btrfs_tree_parent_check check = { 0 }; |
f616f5cd QW |
4570 | struct btrfs_qgroup_swapped_blocks *blocks = &root->swapped_blocks; |
4571 | struct btrfs_qgroup_swapped_block *block; | |
4572 | struct extent_buffer *reloc_eb = NULL; | |
4573 | struct rb_node *node; | |
4574 | bool found = false; | |
4575 | bool swapped = false; | |
4576 | int level = btrfs_header_level(subvol_eb); | |
4577 | int ret = 0; | |
4578 | int i; | |
4579 | ||
182940f4 | 4580 | if (!btrfs_qgroup_full_accounting(fs_info)) |
f616f5cd QW |
4581 | return 0; |
4582 | if (!is_fstree(root->root_key.objectid) || !root->reloc_root) | |
4583 | return 0; | |
4584 | ||
4585 | spin_lock(&blocks->lock); | |
4586 | if (!blocks->swapped) { | |
4587 | spin_unlock(&blocks->lock); | |
4588 | return 0; | |
4589 | } | |
4590 | node = blocks->blocks[level].rb_node; | |
4591 | ||
4592 | while (node) { | |
4593 | block = rb_entry(node, struct btrfs_qgroup_swapped_block, node); | |
4594 | if (block->subvol_bytenr < subvol_eb->start) { | |
4595 | node = node->rb_left; | |
4596 | } else if (block->subvol_bytenr > subvol_eb->start) { | |
4597 | node = node->rb_right; | |
4598 | } else { | |
4599 | found = true; | |
4600 | break; | |
4601 | } | |
4602 | } | |
4603 | if (!found) { | |
4604 | spin_unlock(&blocks->lock); | |
4605 | goto out; | |
4606 | } | |
4607 | /* Found one, remove it from @blocks first and update blocks->swapped */ | |
4608 | rb_erase(&block->node, &blocks->blocks[level]); | |
4609 | for (i = 0; i < BTRFS_MAX_LEVEL; i++) { | |
4610 | if (RB_EMPTY_ROOT(&blocks->blocks[i])) { | |
4611 | swapped = true; | |
4612 | break; | |
4613 | } | |
4614 | } | |
4615 | blocks->swapped = swapped; | |
4616 | spin_unlock(&blocks->lock); | |
4617 | ||
789d6a3a QW |
4618 | check.level = block->level; |
4619 | check.transid = block->reloc_generation; | |
4620 | check.has_first_key = true; | |
4621 | memcpy(&check.first_key, &block->first_key, sizeof(check.first_key)); | |
4622 | ||
f616f5cd | 4623 | /* Read out reloc subtree root */ |
789d6a3a | 4624 | reloc_eb = read_tree_block(fs_info, block->reloc_bytenr, &check); |
f616f5cd QW |
4625 | if (IS_ERR(reloc_eb)) { |
4626 | ret = PTR_ERR(reloc_eb); | |
4627 | reloc_eb = NULL; | |
4628 | goto free_out; | |
4629 | } | |
4630 | if (!extent_buffer_uptodate(reloc_eb)) { | |
4631 | ret = -EIO; | |
4632 | goto free_out; | |
4633 | } | |
4634 | ||
4635 | ret = qgroup_trace_subtree_swap(trans, reloc_eb, subvol_eb, | |
4636 | block->last_snapshot, block->trace_leaf); | |
4637 | free_out: | |
4638 | kfree(block); | |
4639 | free_extent_buffer(reloc_eb); | |
4640 | out: | |
4641 | if (ret < 0) { | |
4642 | btrfs_err_rl(fs_info, | |
4643 | "failed to account subtree at bytenr %llu: %d", | |
4644 | subvol_eb->start, ret); | |
e562a8bd | 4645 | qgroup_mark_inconsistent(fs_info); |
f616f5cd QW |
4646 | } |
4647 | return ret; | |
4648 | } | |
81f7eb00 JM |
4649 | |
4650 | void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans) | |
4651 | { | |
4652 | struct btrfs_qgroup_extent_record *entry; | |
4653 | struct btrfs_qgroup_extent_record *next; | |
4654 | struct rb_root *root; | |
4655 | ||
4656 | root = &trans->delayed_refs.dirty_extent_root; | |
4657 | rbtree_postorder_for_each_entry_safe(entry, next, root, node) { | |
4658 | ulist_free(entry->old_roots); | |
4659 | kfree(entry); | |
4660 | } | |
aa84ce8a | 4661 | *root = RB_ROOT; |
81f7eb00 | 4662 | } |
1e0e9d57 | 4663 | |
e85a0ada BB |
4664 | void btrfs_free_squota_rsv(struct btrfs_fs_info *fs_info, u64 root, u64 rsv_bytes) |
4665 | { | |
4666 | if (btrfs_qgroup_mode(fs_info) != BTRFS_QGROUP_MODE_SIMPLE) | |
4667 | return; | |
4668 | ||
4669 | if (!is_fstree(root)) | |
4670 | return; | |
4671 | ||
4672 | btrfs_qgroup_free_refroot(fs_info, root, rsv_bytes, BTRFS_QGROUP_RSV_DATA); | |
4673 | } | |
4674 | ||
1e0e9d57 BB |
4675 | int btrfs_record_squota_delta(struct btrfs_fs_info *fs_info, |
4676 | struct btrfs_squota_delta *delta) | |
4677 | { | |
4678 | int ret; | |
4679 | struct btrfs_qgroup *qgroup; | |
4680 | struct btrfs_qgroup *qg; | |
4681 | LIST_HEAD(qgroup_list); | |
4682 | u64 root = delta->root; | |
4683 | u64 num_bytes = delta->num_bytes; | |
4684 | const int sign = (delta->is_inc ? 1 : -1); | |
4685 | ||
4686 | if (btrfs_qgroup_mode(fs_info) != BTRFS_QGROUP_MODE_SIMPLE) | |
4687 | return 0; | |
4688 | ||
4689 | if (!is_fstree(root)) | |
4690 | return 0; | |
4691 | ||
bd7c1ea3 BB |
4692 | /* If the extent predates enabling quotas, don't count it. */ |
4693 | if (delta->generation < fs_info->qgroup_enable_gen) | |
4694 | return 0; | |
4695 | ||
1e0e9d57 BB |
4696 | spin_lock(&fs_info->qgroup_lock); |
4697 | qgroup = find_qgroup_rb(fs_info, root); | |
4698 | if (!qgroup) { | |
4699 | ret = -ENOENT; | |
4700 | goto out; | |
4701 | } | |
4702 | ||
4703 | ret = 0; | |
4704 | qgroup_iterator_add(&qgroup_list, qgroup); | |
4705 | list_for_each_entry(qg, &qgroup_list, iterator) { | |
4706 | struct btrfs_qgroup_list *glist; | |
4707 | ||
4708 | qg->excl += num_bytes * sign; | |
4709 | qg->rfer += num_bytes * sign; | |
4710 | qgroup_dirty(fs_info, qg); | |
4711 | ||
4712 | list_for_each_entry(glist, &qg->groups, next_group) | |
4713 | qgroup_iterator_add(&qgroup_list, glist->group); | |
4714 | } | |
4715 | qgroup_iterator_clean(&qgroup_list); | |
4716 | ||
4717 | out: | |
4718 | spin_unlock(&fs_info->qgroup_lock); | |
1e0e9d57 BB |
4719 | return ret; |
4720 | } |
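The simple-quota delta handled above boils down to a signed adjustment of excl and rfer that is applied to the owning qgroup and every parent group. A minimal userspace sketch of that arithmetic follows; all names are illustrative only and the parent propagation is flattened into two explicit calls.

#include <assert.h>
#include <stdbool.h>

struct toy_counts {
	long long rfer;
	long long excl;
};

/* Apply one extent delta: +num_bytes when adding, -num_bytes when dropping. */
static void apply_delta(struct toy_counts *qg, unsigned long long num_bytes,
			bool is_inc)
{
	const long long sign = is_inc ? 1 : -1;

	qg->rfer += (long long)num_bytes * sign;
	qg->excl += (long long)num_bytes * sign;
}

int main(void)
{
	struct toy_counts subvol = { 0 }, parent = { 0 };

	/* A new 1 MiB extent: charge the subvolume qgroup and its parent. */
	apply_delta(&subvol, 1 << 20, true);
	apply_delta(&parent, 1 << 20, true);

	/* The extent is later freed: both counters drop back to zero. */
	apply_delta(&subvol, 1 << 20, false);
	apply_delta(&parent, 1 << 20, false);
	assert(subvol.rfer == 0 && parent.excl == 0);
	return 0;
}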