/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2014 Facebook.  All rights reserved.
 */

#ifndef BTRFS_QGROUP_H
#define BTRFS_QGROUP_H

#include <linux/spinlock.h>
#include <linux/rbtree.h>
#include <linux/kobject.h>
#include "ulist.h"
#include "delayed-ref.h"
#include "misc.h"

/*
 * Btrfs qgroup overview
 *
 * Btrfs qgroup splits into 3 main parts:
 * 1) Reserve
 *    Reserve metadata/data space for incoming operations.
 *    Affects how qgroup limits work.
 *
 * 2) Trace
 *    Tell btrfs qgroup to trace dirty extents.
 *
 *    Dirty extents include:
 *    - Newly allocated extents
 *    - Extents going to be deleted (in this trans)
 *    - Extents whose owner is going to be modified
 *
 *    This is the main part that affects whether qgroup numbers will stay
 *    consistent.
 *    Btrfs qgroup can trace clean extents without causing any problem,
 *    but it will consume extra CPU time, so it should be avoided if possible.
 *
 * 3) Account
 *    Btrfs qgroup will update its numbers, based on dirty extents traced
 *    in the previous step.
 *
 *    This normally happens at qgroup rescan and transaction commit time.
 */

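/*
 * A rough sketch of how the three parts are typically exercised for a data
 * write (editor's illustration only, not a verbatim call chain; error
 * handling and locking are omitted):
 *
 *	// 1) Reserve: charge the qgroup before dirtying the range
 *	btrfs_qgroup_reserve_data(inode, &reserved, start, len);
 *
 *	// 2) Trace: record the dirty extent when its delayed ref is added
 *	btrfs_qgroup_trace_extent(trans, bytenr, num_bytes);
 *
 *	// 3) Account: update qgroup numbers at transaction commit
 *	btrfs_qgroup_account_extents(trans);
 */
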
/*
 * Special performance optimization for balance.
 *
 * For balance, we need to swap the subtrees of the subvolume and reloc trees.
 * In theory, we need to trace all subtree blocks of both the subvolume and
 * reloc trees, since their owner has changed during such a swap.
 *
 * However, since balance has ensured that both subtrees contain the same
 * contents and have the same tree structures, such a swap won't cause a
 * qgroup number change.
 *
 * But there is a race window between subtree swap and transaction commit;
 * during that window, if we increase/decrease the tree level or merge/split
 * tree blocks, we still need to trace the original subtrees.
 *
 * So for balance, we use delayed subtree tracing, whose workflow is:
 *
 * 1) Record the subtree root blocks that get swapped.
 *
 *    During subtree swap:
 *    O = Old tree blocks
 *    N = New tree blocks
 *          reloc tree                     subvolume tree X
 *             Root                               Root
 *            /    \                             /    \
 *          NA     OB                          OA      OB
 *        /  |     |  \                      /  |      |  \
 *      NC  ND     OE  OF                   OC  OD     OE  OF
 *
 *    In this case, NA and OA are going to be swapped, so record (NA, OA)
 *    into subvolume tree X.
 *
 * 2) After subtree swap.
 *          reloc tree                     subvolume tree X
 *             Root                               Root
 *            /    \                             /    \
 *          OA     OB                          NA      OB
 *        /  |     |  \                      /  |      |  \
 *      OC  OD     OE  OF                   NC  ND     OE  OF
 *
 * 3a) COW happens for OB
 *     If we are going to COW tree block OB, we check OB's bytenr against
 *     tree X's swapped_blocks structure.
 *     If it doesn't fit any record, nothing will happen.
 *
 * 3b) COW happens for NA
 *     Check NA's bytenr against tree X's swapped_blocks, and get a hit.
 *     Then we do a subtree scan on both subtrees OA and NA.
 *     This results in 6 tree blocks to be scanned (OA, OC, OD, NA, NC, ND).
 *
 *     Then no matter what we do to subvolume tree X, qgroup numbers will
 *     still be correct.
 *     Then NA's record gets removed from X's swapped_blocks.
 *
 * 4) Transaction commit
 *    Any record left in X's swapped_blocks gets removed, since there was no
 *    modification to the swapped subtrees and thus no need to trigger a
 *    heavy qgroup subtree rescan for them.
 */

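/*
 * A condensed sketch of the flow above in terms of the helpers declared
 * later in this header (editor's illustration only; arguments are
 * abbreviated and error handling is omitted):
 *
 *	// 1) During subtree swap, record the swapped root blocks:
 *	btrfs_qgroup_add_swapped_blocks(trans, subvol_root, bg,
 *					subvol_parent, subvol_slot,
 *					reloc_parent, reloc_slot,
 *					last_snapshot);
 *
 *	// 3) When a recorded block is later COWed, trace both subtrees:
 *	btrfs_qgroup_trace_subtree_after_cow(trans, root, eb);
 *
 *	// 4) At transaction commit, leftover records are simply dropped via
 *	//    btrfs_qgroup_clean_swapped_blocks().
 */
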
/*
 * These flags share the flags field of the btrfs_qgroup_status_item with the
 * persisted flags defined in btrfs_tree.h.
 *
 * To minimize the chance of collision with new persisted status flags, these
 * count backwards from the MSB.
 */
#define BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN		(1ULL << 63)
#define BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING		(1ULL << 62)

/*
 * Record a dirty extent, and inform qgroup to update quota on it.
 * TODO: Use kmem cache to alloc it.
 */
struct btrfs_qgroup_extent_record {
	struct rb_node node;
	u64 bytenr;
	u64 num_bytes;

	/*
	 * For qgroup reserved data space freeing.
	 *
	 * @data_rsv_refroot and @data_rsv will be recorded after
	 * BTRFS_ADD_DELAYED_EXTENT is called.
	 * They will be used to free reserved qgroup space at
	 * transaction commit time.
	 */
	u32 data_rsv;		/* reserved data space needs to be freed */
	u64 data_rsv_refroot;	/* which root the reserved data belongs to */
	struct ulist *old_roots;
};

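/*
 * A minimal sketch of how a dirty extent record is fed into tracing
 * (editor's illustration only; allocation failure handling is omitted, and
 * only the locking relevant to the nolock helper is shown):
 *
 *	struct btrfs_qgroup_extent_record *record;
 *
 *	record = kzalloc(sizeof(*record), GFP_NOFS);
 *	record->bytenr = bytenr;
 *	record->num_bytes = num_bytes;
 *	record->old_roots = NULL;
 *
 *	spin_lock(&delayed_refs->lock);
 *	btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record);
 *	spin_unlock(&delayed_refs->lock);
 */
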
struct btrfs_qgroup_swapped_block {
	struct rb_node node;

	int level;
	bool trace_leaf;

	/* bytenr/generation of the tree block in subvolume tree after swap */
	u64 subvol_bytenr;
	u64 subvol_generation;

	/* bytenr/generation of the tree block in reloc tree after swap */
	u64 reloc_bytenr;
	u64 reloc_generation;

	u64 last_snapshot;
	struct btrfs_key first_key;
};

/*
 * Qgroup reservation types:
 *
 * DATA:
 *	Space reserved for data.
 *
 * META_PERTRANS:
 *	Space reserved for metadata (per-transaction).
 *	Because qgroup data is only updated at transaction commit time,
 *	reserved space for metadata must be kept until the transaction
 *	commits.
 *	Any metadata reservation that is used in btrfs_start_transaction()
 *	should be of this type.
 *
 * META_PREALLOC:
 *	There are cases where metadata space is reserved before starting a
 *	transaction, and btrfs_join_transaction() is then used to get a trans
 *	handle.
 *	Any metadata reserved for such usage should be of this type.
 *	After join_transaction(), part (or all) of such a reservation should
 *	be converted into META_PERTRANS.
 */
enum btrfs_qgroup_rsv_type {
	BTRFS_QGROUP_RSV_DATA,
	BTRFS_QGROUP_RSV_META_PERTRANS,
	BTRFS_QGROUP_RSV_META_PREALLOC,
	BTRFS_QGROUP_RSV_LAST,
};

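/*
 * A minimal sketch of the META_PREALLOC -> META_PERTRANS flow described
 * above (editor's illustration only; byte counts and error handling are
 * placeholders):
 *
 *	// Reserve before we have a transaction handle
 *	ret = btrfs_qgroup_reserve_meta_prealloc(root, nr_bytes, true, false);
 *	if (ret)
 *		return ret;
 *
 *	trans = btrfs_join_transaction(root);
 *
 *	// Convert the part that now needs to live until commit
 *	btrfs_qgroup_convert_reserved_meta(root, nr_bytes);
 *
 *	// On error paths, unused prealloc reservation is freed instead:
 *	//	btrfs_qgroup_free_meta_prealloc(root, nr_bytes);
 */
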
/*
 * Represents how many bytes we have reserved for this qgroup.
 *
 * Each type should have different reservation behavior.
 * E.g., data follows its io_tree flag modification, while
 * *currently* meta is just reserve-and-clear during the transaction.
 *
 * TODO: Add a new type for reservations which can survive transaction
 * commit.  The current metadata reservation behavior is not suitable for
 * such a case.
 */
struct btrfs_qgroup_rsv {
	u64 values[BTRFS_QGROUP_RSV_LAST];
};

/*
 * One struct for each qgroup, organized in fs_info->qgroup_tree.
 */
struct btrfs_qgroup {
	u64 qgroupid;

	/*
	 * state
	 */
	u64 rfer;	/* referenced */
	u64 rfer_cmpr;	/* referenced compressed */
	u64 excl;	/* exclusive */
	u64 excl_cmpr;	/* exclusive compressed */

	/*
	 * limits
	 */
	u64 lim_flags;	/* which limits are set */
	u64 max_rfer;
	u64 max_excl;
	u64 rsv_rfer;
	u64 rsv_excl;

	/*
	 * reservation tracking
	 */
	struct btrfs_qgroup_rsv rsv;

	/*
	 * lists
	 */
	struct list_head groups;  /* groups this group is member of */
	struct list_head members; /* groups that are members of this group */
	struct list_head dirty;   /* dirty groups */

	/*
	 * For qgroup iteration usage.
	 *
	 * The iteration list should always be empty until qgroup_iterator_add()
	 * is called, and should be reset to empty after the iteration is
	 * finished.
	 */
	struct list_head iterator;

	/*
	 * For nested iterator usage.
	 *
	 * Here we support at most one level of nested iterator calls like:
	 *
	 *	LIST_HEAD(all_qgroups);
	 *	{
	 *		LIST_HEAD(local_qgroups);
	 *		qgroup_iterator_add(local_qgroups, qg);
	 *		qgroup_iterator_nested_add(all_qgroups, qg);
	 *		do_some_work(local_qgroups);
	 *		qgroup_iterator_clean(local_qgroups);
	 *	}
	 *	do_some_work(all_qgroups);
	 *	qgroup_iterator_nested_clean(all_qgroups);
	 */
	struct list_head nested_iterator;
	struct rb_node node;	  /* tree of qgroups */

	/*
	 * temp variables for accounting operations
	 * Refer to qgroup_shared_accounting() for details.
	 */
	u64 old_refcnt;
	u64 new_refcnt;

	/*
	 * Sysfs kobjectid
	 */
	struct kobject kobj;
};

struct btrfs_squota_delta {
	/* The fstree root this delta counts against. */
	u64 root;
	/* The number of bytes in the extent being counted. */
	u64 num_bytes;
	/* The generation the extent was created in. */
	u64 generation;
	/* Whether we are using or freeing the extent. */
	bool is_inc;
	/* Whether the extent is data or metadata. */
	bool is_data;
};

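/*
 * A minimal sketch of recording a simple-quota delta for a newly allocated
 * data extent (editor's illustration only; the field values are
 * placeholders):
 *
 *	struct btrfs_squota_delta delta = {
 *		.root		= root_id,
 *		.num_bytes	= num_bytes,
 *		.generation	= trans->transid,
 *		.is_inc		= true,
 *		.is_data	= true,
 *	};
 *
 *	ret = btrfs_record_squota_delta(fs_info, &delta);
 */
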
static inline u64 btrfs_qgroup_subvolid(u64 qgroupid)
{
	return (qgroupid & ((1ULL << BTRFS_QGROUP_LEVEL_SHIFT) - 1));
}

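/*
 * Example (editor's note): a qgroup ID encodes the level in the top bits and
 * the subvolume ID in the low BTRFS_QGROUP_LEVEL_SHIFT bits, so for the
 * level-1 qgroup "1/5":
 *
 *	u64 qgroupid = (1ULL << BTRFS_QGROUP_LEVEL_SHIFT) | 5;
 *
 *	btrfs_qgroup_subvolid(qgroupid) == 5;
 */
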
/*
 * For qgroup event trace points only
 */
enum {
	ENUM_BIT(QGROUP_RESERVE),
	ENUM_BIT(QGROUP_RELEASE),
	ENUM_BIT(QGROUP_FREE),
};

enum btrfs_qgroup_mode {
	BTRFS_QGROUP_MODE_DISABLED,
	BTRFS_QGROUP_MODE_FULL,
	BTRFS_QGROUP_MODE_SIMPLE
};

enum btrfs_qgroup_mode btrfs_qgroup_mode(struct btrfs_fs_info *fs_info);
bool btrfs_qgroup_enabled(struct btrfs_fs_info *fs_info);
bool btrfs_qgroup_full_accounting(struct btrfs_fs_info *fs_info);
int btrfs_quota_enable(struct btrfs_fs_info *fs_info,
		       struct btrfs_ioctl_quota_ctl_args *quota_ctl_args);
int btrfs_quota_disable(struct btrfs_fs_info *fs_info);
int btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info);
void btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info);
int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
				     bool interruptible);
int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src, u64 dst);
int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
			      u64 dst);
int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid);
int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid);
int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid,
		       struct btrfs_qgroup_limit *limit);
int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info);
void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info);
struct btrfs_delayed_extent_op;

int btrfs_qgroup_trace_extent_nolock(
		struct btrfs_fs_info *fs_info,
		struct btrfs_delayed_ref_root *delayed_refs,
		struct btrfs_qgroup_extent_record *record);
int btrfs_qgroup_trace_extent_post(struct btrfs_trans_handle *trans,
				   struct btrfs_qgroup_extent_record *qrecord);
int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
			      u64 num_bytes);
int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
				  struct extent_buffer *eb);
int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
			       struct extent_buffer *root_eb,
			       u64 root_gen, int root_level);
int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
				u64 num_bytes, struct ulist *old_roots,
				struct ulist *new_roots);
int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans);
int btrfs_run_qgroups(struct btrfs_trans_handle *trans);
int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
			 u64 objectid, u64 inode_rootid,
			 struct btrfs_qgroup_inherit *inherit);
void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
			       u64 ref_root, u64 num_bytes,
			       enum btrfs_qgroup_rsv_type type);

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
			       u64 rfer, u64 excl);
#endif

/* New io_tree based accurate qgroup reserve API */
int btrfs_qgroup_reserve_data(struct btrfs_inode *inode,
			      struct extent_changeset **reserved, u64 start, u64 len);
int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len, u64 *released);
int btrfs_qgroup_free_data(struct btrfs_inode *inode,
			   struct extent_changeset *reserved, u64 start,
			   u64 len, u64 *freed);
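
/*
 * A minimal sketch of the reserve/release/free data flow (editor's
 * illustration only; error handling is abbreviated):
 *
 *	struct extent_changeset *reserved = NULL;
 *	u64 bytes = 0;
 *
 *	// Before dirtying the range [start, start + len):
 *	ret = btrfs_qgroup_reserve_data(inode, &reserved, start, len);
 *
 *	// On an error path, return the reservation:
 *	//	btrfs_qgroup_free_data(inode, reserved, start, len, &bytes);
 *
 *	// Once the range is written and will be accounted at commit time,
 *	// release it instead:
 *	//	btrfs_qgroup_release_data(inode, start, len, &bytes);
 *
 *	extent_changeset_free(reserved);
 */
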
int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
			      enum btrfs_qgroup_rsv_type type, bool enforce);
int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
				enum btrfs_qgroup_rsv_type type, bool enforce,
				bool noflush);
/* Reserve metadata space for pertrans and prealloc type */
static inline int btrfs_qgroup_reserve_meta_pertrans(struct btrfs_root *root,
						     int num_bytes, bool enforce)
{
	return __btrfs_qgroup_reserve_meta(root, num_bytes,
					   BTRFS_QGROUP_RSV_META_PERTRANS,
					   enforce, false);
}
static inline int btrfs_qgroup_reserve_meta_prealloc(struct btrfs_root *root,
						      int num_bytes, bool enforce,
						      bool noflush)
{
	return __btrfs_qgroup_reserve_meta(root, num_bytes,
					   BTRFS_QGROUP_RSV_META_PREALLOC,
					   enforce, noflush);
}

void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes,
			      enum btrfs_qgroup_rsv_type type);

/* Free per-transaction meta reservation for error handling */
static inline void btrfs_qgroup_free_meta_pertrans(struct btrfs_root *root,
						   int num_bytes)
{
	__btrfs_qgroup_free_meta(root, num_bytes,
				 BTRFS_QGROUP_RSV_META_PERTRANS);
}

/* Pre-allocated meta reservation can be freed as needed */
static inline void btrfs_qgroup_free_meta_prealloc(struct btrfs_root *root,
						   int num_bytes)
{
	__btrfs_qgroup_free_meta(root, num_bytes,
				 BTRFS_QGROUP_RSV_META_PREALLOC);
}

void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root);
void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes);
void btrfs_qgroup_check_reserved_leak(struct btrfs_inode *inode);

/* btrfs_qgroup_swapped_blocks related functions */
void btrfs_qgroup_init_swapped_blocks(
	struct btrfs_qgroup_swapped_blocks *swapped_blocks);

void btrfs_qgroup_clean_swapped_blocks(struct btrfs_root *root);
int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans,
		struct btrfs_root *subvol_root,
		struct btrfs_block_group *bg,
		struct extent_buffer *subvol_parent, int subvol_slot,
		struct extent_buffer *reloc_parent, int reloc_slot,
		u64 last_snapshot);
int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
		struct btrfs_root *root, struct extent_buffer *eb);
void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans);
bool btrfs_check_quota_leak(struct btrfs_fs_info *fs_info);
void btrfs_free_squota_rsv(struct btrfs_fs_info *fs_info, u64 root, u64 rsv_bytes);
int btrfs_record_squota_delta(struct btrfs_fs_info *fs_info,
			      struct btrfs_squota_delta *delta);

#endif