/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * alloc.c
 *
 * Extent allocs and frees
 *
 * Copyright (C) 2002, 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/quotaops.h>

#define MLOG_MASK_PREFIX ML_DISK_ALLOC
#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "aops.h"
#include "blockcheck.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "inode.h"
#include "journal.h"
#include "localalloc.h"
#include "suballoc.h"
#include "sysfile.h"
#include "file.h"
#include "super.h"
#include "uptodate.h"
#include "xattr.h"

#include "buffer_head_io.h"


/*
 * Operations for a specific extent tree type.
 *
 * To implement an on-disk btree (extent tree) type in ocfs2, add
 * an ocfs2_extent_tree_operations structure and the matching
 * ocfs2_init_<thingy>_extent_tree() function.  That's pretty much it
 * for the allocation portion of the extent tree.
 */
struct ocfs2_extent_tree_operations {
	/*
	 * last_eb_blk is the block number of the right most leaf extent
	 * block.  Most on-disk structures containing an extent tree store
	 * this value for fast access.  The ->eo_set_last_eb_blk() and
	 * ->eo_get_last_eb_blk() operations access this value.  They are
	 * both required.
	 */
	void (*eo_set_last_eb_blk)(struct ocfs2_extent_tree *et,
				   u64 blkno);
	u64 (*eo_get_last_eb_blk)(struct ocfs2_extent_tree *et);

	/*
	 * The on-disk structure usually keeps track of how many total
	 * clusters are stored in this extent tree.  This function updates
	 * that value.  new_clusters is the delta, and must be
	 * added to the total.  Required.
	 */
	void (*eo_update_clusters)(struct inode *inode,
				   struct ocfs2_extent_tree *et,
				   u32 new_clusters);

	/*
	 * If ->eo_insert_check() exists, it is called before rec is
	 * inserted into the extent tree.  It is optional.
	 */
	int (*eo_insert_check)(struct inode *inode,
			       struct ocfs2_extent_tree *et,
			       struct ocfs2_extent_rec *rec);
	int (*eo_sanity_check)(struct inode *inode, struct ocfs2_extent_tree *et);

	/*
	 * --------------------------------------------------------------
	 * The remaining are internal to ocfs2_extent_tree and don't have
	 * accessor functions
	 */

	/*
	 * ->eo_fill_root_el() takes et->et_object and sets et->et_root_el.
	 * It is required.
	 */
	void (*eo_fill_root_el)(struct ocfs2_extent_tree *et);

	/*
	 * ->eo_fill_max_leaf_clusters sets et->et_max_leaf_clusters if
	 * it exists.  If it does not, et->et_max_leaf_clusters is set
	 * to 0 (unlimited).  Optional.
	 */
	void (*eo_fill_max_leaf_clusters)(struct inode *inode,
					  struct ocfs2_extent_tree *et);
};

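/*
 * Illustrative sketch (not part of the kernel sources): a minimal new
 * tree type only has to wire up the required callbacks, e.g.
 *
 *	static struct ocfs2_extent_tree_operations ocfs2_foo_et_ops = {
 *		.eo_set_last_eb_blk	= ocfs2_foo_set_last_eb_blk,
 *		.eo_get_last_eb_blk	= ocfs2_foo_get_last_eb_blk,
 *		.eo_update_clusters	= ocfs2_foo_update_clusters,
 *		.eo_fill_root_el	= ocfs2_foo_fill_root_el,
 *	};
 *
 * plus an ocfs2_init_foo_extent_tree() wrapper ("foo" here is a
 * hypothetical name).  The dinode, xattr and dx_root implementations
 * below follow exactly this pattern.
 */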

/*
 * Pre-declare ocfs2_dinode_et_ops so we can use it as a sanity check
 * in the methods.
 */
static u64 ocfs2_dinode_get_last_eb_blk(struct ocfs2_extent_tree *et);
static void ocfs2_dinode_set_last_eb_blk(struct ocfs2_extent_tree *et,
					 u64 blkno);
static void ocfs2_dinode_update_clusters(struct inode *inode,
					 struct ocfs2_extent_tree *et,
					 u32 clusters);
static int ocfs2_dinode_insert_check(struct inode *inode,
				     struct ocfs2_extent_tree *et,
				     struct ocfs2_extent_rec *rec);
static int ocfs2_dinode_sanity_check(struct inode *inode,
				     struct ocfs2_extent_tree *et);
static void ocfs2_dinode_fill_root_el(struct ocfs2_extent_tree *et);
static struct ocfs2_extent_tree_operations ocfs2_dinode_et_ops = {
	.eo_set_last_eb_blk	= ocfs2_dinode_set_last_eb_blk,
	.eo_get_last_eb_blk	= ocfs2_dinode_get_last_eb_blk,
	.eo_update_clusters	= ocfs2_dinode_update_clusters,
	.eo_insert_check	= ocfs2_dinode_insert_check,
	.eo_sanity_check	= ocfs2_dinode_sanity_check,
	.eo_fill_root_el	= ocfs2_dinode_fill_root_el,
};

static void ocfs2_dinode_set_last_eb_blk(struct ocfs2_extent_tree *et,
					 u64 blkno)
{
	struct ocfs2_dinode *di = et->et_object;

	BUG_ON(et->et_ops != &ocfs2_dinode_et_ops);
	di->i_last_eb_blk = cpu_to_le64(blkno);
}

static u64 ocfs2_dinode_get_last_eb_blk(struct ocfs2_extent_tree *et)
{
	struct ocfs2_dinode *di = et->et_object;

	BUG_ON(et->et_ops != &ocfs2_dinode_et_ops);
	return le64_to_cpu(di->i_last_eb_blk);
}

static void ocfs2_dinode_update_clusters(struct inode *inode,
					 struct ocfs2_extent_tree *et,
					 u32 clusters)
{
	struct ocfs2_dinode *di = et->et_object;

	le32_add_cpu(&di->i_clusters, clusters);
	spin_lock(&OCFS2_I(inode)->ip_lock);
	OCFS2_I(inode)->ip_clusters = le32_to_cpu(di->i_clusters);
	spin_unlock(&OCFS2_I(inode)->ip_lock);
}

static int ocfs2_dinode_insert_check(struct inode *inode,
				     struct ocfs2_extent_tree *et,
				     struct ocfs2_extent_rec *rec)
{
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	BUG_ON(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL);
	mlog_bug_on_msg(!ocfs2_sparse_alloc(osb) &&
			(OCFS2_I(inode)->ip_clusters !=
			 le32_to_cpu(rec->e_cpos)),
			"Device %s, asking for sparse allocation: inode %llu, "
			"cpos %u, clusters %u\n",
			osb->dev_str,
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			rec->e_cpos,
			OCFS2_I(inode)->ip_clusters);

	return 0;
}

static int ocfs2_dinode_sanity_check(struct inode *inode,
				     struct ocfs2_extent_tree *et)
{
	struct ocfs2_dinode *di = et->et_object;

	BUG_ON(et->et_ops != &ocfs2_dinode_et_ops);
	BUG_ON(!OCFS2_IS_VALID_DINODE(di));

	return 0;
}

static void ocfs2_dinode_fill_root_el(struct ocfs2_extent_tree *et)
{
	struct ocfs2_dinode *di = et->et_object;

	et->et_root_el = &di->id2.i_list;
}


static void ocfs2_xattr_value_fill_root_el(struct ocfs2_extent_tree *et)
{
	struct ocfs2_xattr_value_buf *vb = et->et_object;

	et->et_root_el = &vb->vb_xv->xr_list;
}

static void ocfs2_xattr_value_set_last_eb_blk(struct ocfs2_extent_tree *et,
					      u64 blkno)
{
	struct ocfs2_xattr_value_buf *vb = et->et_object;

	vb->vb_xv->xr_last_eb_blk = cpu_to_le64(blkno);
}

static u64 ocfs2_xattr_value_get_last_eb_blk(struct ocfs2_extent_tree *et)
{
	struct ocfs2_xattr_value_buf *vb = et->et_object;

	return le64_to_cpu(vb->vb_xv->xr_last_eb_blk);
}

static void ocfs2_xattr_value_update_clusters(struct inode *inode,
					      struct ocfs2_extent_tree *et,
					      u32 clusters)
{
	struct ocfs2_xattr_value_buf *vb = et->et_object;

	le32_add_cpu(&vb->vb_xv->xr_clusters, clusters);
}

static struct ocfs2_extent_tree_operations ocfs2_xattr_value_et_ops = {
	.eo_set_last_eb_blk	= ocfs2_xattr_value_set_last_eb_blk,
	.eo_get_last_eb_blk	= ocfs2_xattr_value_get_last_eb_blk,
	.eo_update_clusters	= ocfs2_xattr_value_update_clusters,
	.eo_fill_root_el	= ocfs2_xattr_value_fill_root_el,
};

static void ocfs2_xattr_tree_fill_root_el(struct ocfs2_extent_tree *et)
{
	struct ocfs2_xattr_block *xb = et->et_object;

	et->et_root_el = &xb->xb_attrs.xb_root.xt_list;
}

static void ocfs2_xattr_tree_fill_max_leaf_clusters(struct inode *inode,
						    struct ocfs2_extent_tree *et)
{
	et->et_max_leaf_clusters =
		ocfs2_clusters_for_bytes(inode->i_sb,
					 OCFS2_MAX_XATTR_TREE_LEAF_SIZE);
}

static void ocfs2_xattr_tree_set_last_eb_blk(struct ocfs2_extent_tree *et,
					     u64 blkno)
{
	struct ocfs2_xattr_block *xb = et->et_object;
	struct ocfs2_xattr_tree_root *xt = &xb->xb_attrs.xb_root;

	xt->xt_last_eb_blk = cpu_to_le64(blkno);
}

static u64 ocfs2_xattr_tree_get_last_eb_blk(struct ocfs2_extent_tree *et)
{
	struct ocfs2_xattr_block *xb = et->et_object;
	struct ocfs2_xattr_tree_root *xt = &xb->xb_attrs.xb_root;

	return le64_to_cpu(xt->xt_last_eb_blk);
}

static void ocfs2_xattr_tree_update_clusters(struct inode *inode,
					     struct ocfs2_extent_tree *et,
					     u32 clusters)
{
	struct ocfs2_xattr_block *xb = et->et_object;

	le32_add_cpu(&xb->xb_attrs.xb_root.xt_clusters, clusters);
}

static struct ocfs2_extent_tree_operations ocfs2_xattr_tree_et_ops = {
	.eo_set_last_eb_blk	= ocfs2_xattr_tree_set_last_eb_blk,
	.eo_get_last_eb_blk	= ocfs2_xattr_tree_get_last_eb_blk,
	.eo_update_clusters	= ocfs2_xattr_tree_update_clusters,
	.eo_fill_root_el	= ocfs2_xattr_tree_fill_root_el,
	.eo_fill_max_leaf_clusters = ocfs2_xattr_tree_fill_max_leaf_clusters,
};

static void ocfs2_dx_root_set_last_eb_blk(struct ocfs2_extent_tree *et,
					  u64 blkno)
{
	struct ocfs2_dx_root_block *dx_root = et->et_object;

	dx_root->dr_last_eb_blk = cpu_to_le64(blkno);
}

static u64 ocfs2_dx_root_get_last_eb_blk(struct ocfs2_extent_tree *et)
{
	struct ocfs2_dx_root_block *dx_root = et->et_object;

	return le64_to_cpu(dx_root->dr_last_eb_blk);
}

static void ocfs2_dx_root_update_clusters(struct inode *inode,
					  struct ocfs2_extent_tree *et,
					  u32 clusters)
{
	struct ocfs2_dx_root_block *dx_root = et->et_object;

	le32_add_cpu(&dx_root->dr_clusters, clusters);
}

static int ocfs2_dx_root_sanity_check(struct inode *inode,
				      struct ocfs2_extent_tree *et)
{
	struct ocfs2_dx_root_block *dx_root = et->et_object;

	BUG_ON(!OCFS2_IS_VALID_DX_ROOT(dx_root));

	return 0;
}

static void ocfs2_dx_root_fill_root_el(struct ocfs2_extent_tree *et)
{
	struct ocfs2_dx_root_block *dx_root = et->et_object;

	et->et_root_el = &dx_root->dr_list;
}

static struct ocfs2_extent_tree_operations ocfs2_dx_root_et_ops = {
	.eo_set_last_eb_blk	= ocfs2_dx_root_set_last_eb_blk,
	.eo_get_last_eb_blk	= ocfs2_dx_root_get_last_eb_blk,
	.eo_update_clusters	= ocfs2_dx_root_update_clusters,
	.eo_sanity_check	= ocfs2_dx_root_sanity_check,
	.eo_fill_root_el	= ocfs2_dx_root_fill_root_el,
};

static void __ocfs2_init_extent_tree(struct ocfs2_extent_tree *et,
				     struct inode *inode,
				     struct buffer_head *bh,
				     ocfs2_journal_access_func access,
				     void *obj,
				     struct ocfs2_extent_tree_operations *ops)
{
	et->et_ops = ops;
	et->et_root_bh = bh;
	et->et_root_journal_access = access;
	if (!obj)
		obj = (void *)bh->b_data;
	et->et_object = obj;

	et->et_ops->eo_fill_root_el(et);
	if (!et->et_ops->eo_fill_max_leaf_clusters)
		et->et_max_leaf_clusters = 0;
	else
		et->et_ops->eo_fill_max_leaf_clusters(inode, et);
}

void ocfs2_init_dinode_extent_tree(struct ocfs2_extent_tree *et,
				   struct inode *inode,
				   struct buffer_head *bh)
{
	__ocfs2_init_extent_tree(et, inode, bh, ocfs2_journal_access_di,
				 NULL, &ocfs2_dinode_et_ops);
}

void ocfs2_init_xattr_tree_extent_tree(struct ocfs2_extent_tree *et,
				       struct inode *inode,
				       struct buffer_head *bh)
{
	__ocfs2_init_extent_tree(et, inode, bh, ocfs2_journal_access_xb,
				 NULL, &ocfs2_xattr_tree_et_ops);
}

void ocfs2_init_xattr_value_extent_tree(struct ocfs2_extent_tree *et,
					struct inode *inode,
					struct ocfs2_xattr_value_buf *vb)
{
	__ocfs2_init_extent_tree(et, inode, vb->vb_bh, vb->vb_access, vb,
				 &ocfs2_xattr_value_et_ops);
}

void ocfs2_init_dx_root_extent_tree(struct ocfs2_extent_tree *et,
				    struct inode *inode,
				    struct buffer_head *bh)
{
	__ocfs2_init_extent_tree(et, inode, bh, ocfs2_journal_access_dr,
				 NULL, &ocfs2_dx_root_et_ops);
}

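/*
 * Typical usage sketch (illustrative only, not lifted from a real
 * caller): the generic tree code never cares which on-disk structure
 * the tree is rooted in.  A caller holding a dinode buffer_head
 * "di_bh" (hypothetical name) would do something like:
 *
 *	struct ocfs2_extent_tree et;
 *
 *	ocfs2_init_dinode_extent_tree(&et, inode, di_bh);
 *	free_recs = ocfs2_num_free_extents(osb, inode, &et);
 *
 * and every ocfs2_et_*() helper below then dispatches through
 * et.et_ops without any further knowledge of the root type.
 */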
static inline void ocfs2_et_set_last_eb_blk(struct ocfs2_extent_tree *et,
					    u64 new_last_eb_blk)
{
	et->et_ops->eo_set_last_eb_blk(et, new_last_eb_blk);
}

static inline u64 ocfs2_et_get_last_eb_blk(struct ocfs2_extent_tree *et)
{
	return et->et_ops->eo_get_last_eb_blk(et);
}

static inline void ocfs2_et_update_clusters(struct inode *inode,
					    struct ocfs2_extent_tree *et,
					    u32 clusters)
{
	et->et_ops->eo_update_clusters(inode, et, clusters);
}

static inline int ocfs2_et_root_journal_access(handle_t *handle,
					       struct inode *inode,
					       struct ocfs2_extent_tree *et,
					       int type)
{
	return et->et_root_journal_access(handle, inode, et->et_root_bh,
					  type);
}

static inline int ocfs2_et_insert_check(struct inode *inode,
					struct ocfs2_extent_tree *et,
					struct ocfs2_extent_rec *rec)
{
	int ret = 0;

	if (et->et_ops->eo_insert_check)
		ret = et->et_ops->eo_insert_check(inode, et, rec);
	return ret;
}

static inline int ocfs2_et_sanity_check(struct inode *inode,
					struct ocfs2_extent_tree *et)
{
	int ret = 0;

	if (et->et_ops->eo_sanity_check)
		ret = et->et_ops->eo_sanity_check(inode, et);
	return ret;
}

static void ocfs2_free_truncate_context(struct ocfs2_truncate_context *tc);
static int ocfs2_cache_extent_block_free(struct ocfs2_cached_dealloc_ctxt *ctxt,
					 struct ocfs2_extent_block *eb);

/*
 * Structures which describe a path through a btree, and functions to
 * manipulate them.
 *
 * The idea here is to be as generic as possible with the tree
 * manipulation code.
 */
struct ocfs2_path_item {
	struct buffer_head		*bh;
	struct ocfs2_extent_list	*el;
};

#define OCFS2_MAX_PATH_DEPTH 5

struct ocfs2_path {
	int				p_tree_depth;
	ocfs2_journal_access_func	p_root_access;
	struct ocfs2_path_item		p_node[OCFS2_MAX_PATH_DEPTH];
};

#define path_root_bh(_path) ((_path)->p_node[0].bh)
#define path_root_el(_path) ((_path)->p_node[0].el)
#define path_root_access(_path)((_path)->p_root_access)
#define path_leaf_bh(_path) ((_path)->p_node[(_path)->p_tree_depth].bh)
#define path_leaf_el(_path) ((_path)->p_node[(_path)->p_tree_depth].el)
#define path_num_items(_path) ((_path)->p_tree_depth + 1)

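/*
 * Layout reminder (illustrative): for a tree of depth 2,
 *
 *	p_node[0]  ->  root extent list (dinode/xattr/dx_root block)
 *	p_node[1]  ->  interior extent block
 *	p_node[2]  ->  leaf extent block  ==  path_leaf_el()
 *
 * so path_num_items() is depth + 1 and index 0 is always the root.
 */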
static int ocfs2_find_path(struct inode *inode, struct ocfs2_path *path,
			   u32 cpos);
static void ocfs2_adjust_rightmost_records(struct inode *inode,
					   handle_t *handle,
					   struct ocfs2_path *path,
					   struct ocfs2_extent_rec *insert_rec);
/*
 * Reset the actual path elements so that we can re-use the structure
 * to build another path. Generally, this involves freeing the buffer
 * heads.
 */
static void ocfs2_reinit_path(struct ocfs2_path *path, int keep_root)
{
	int i, start = 0, depth = 0;
	struct ocfs2_path_item *node;

	if (keep_root)
		start = 1;

	for(i = start; i < path_num_items(path); i++) {
		node = &path->p_node[i];

		brelse(node->bh);
		node->bh = NULL;
		node->el = NULL;
	}

	/*
	 * Tree depth may change during truncate, or insert. If we're
	 * keeping the root extent list, then make sure that our path
	 * structure reflects the proper depth.
	 */
	if (keep_root)
		depth = le16_to_cpu(path_root_el(path)->l_tree_depth);
	else
		path_root_access(path) = NULL;

	path->p_tree_depth = depth;
}

static void ocfs2_free_path(struct ocfs2_path *path)
{
	if (path) {
		ocfs2_reinit_path(path, 0);
		kfree(path);
	}
}

/*
 * Copy all the elements of src into dest. After this call, src could
 * be freed without affecting dest.
 *
 * Both paths should have the same root. Any non-root elements of dest
 * will be freed.
 */
static void ocfs2_cp_path(struct ocfs2_path *dest, struct ocfs2_path *src)
{
	int i;

	BUG_ON(path_root_bh(dest) != path_root_bh(src));
	BUG_ON(path_root_el(dest) != path_root_el(src));
	BUG_ON(path_root_access(dest) != path_root_access(src));

	ocfs2_reinit_path(dest, 1);

	for(i = 1; i < OCFS2_MAX_PATH_DEPTH; i++) {
		dest->p_node[i].bh = src->p_node[i].bh;
		dest->p_node[i].el = src->p_node[i].el;

		if (dest->p_node[i].bh)
			get_bh(dest->p_node[i].bh);
	}
}

/*
 * Make the *dest path the same as src and re-initialize src path to
 * have a root only.
 */
static void ocfs2_mv_path(struct ocfs2_path *dest, struct ocfs2_path *src)
{
	int i;

	BUG_ON(path_root_bh(dest) != path_root_bh(src));
	BUG_ON(path_root_access(dest) != path_root_access(src));

	for(i = 1; i < OCFS2_MAX_PATH_DEPTH; i++) {
		brelse(dest->p_node[i].bh);

		dest->p_node[i].bh = src->p_node[i].bh;
		dest->p_node[i].el = src->p_node[i].el;

		src->p_node[i].bh = NULL;
		src->p_node[i].el = NULL;
	}
}

/*
 * Insert an extent block at given index.
 *
 * This will not take an additional reference on eb_bh.
 */
static inline void ocfs2_path_insert_eb(struct ocfs2_path *path, int index,
					struct buffer_head *eb_bh)
{
	struct ocfs2_extent_block *eb = (struct ocfs2_extent_block *)eb_bh->b_data;

	/*
	 * Right now, no root bh is an extent block, so this helps
	 * catch code errors with dinode trees. The assertion can be
	 * safely removed if we ever need to insert extent block
	 * structures at the root.
	 */
	BUG_ON(index == 0);

	path->p_node[index].bh = eb_bh;
	path->p_node[index].el = &eb->h_list;
}

static struct ocfs2_path *ocfs2_new_path(struct buffer_head *root_bh,
					 struct ocfs2_extent_list *root_el,
					 ocfs2_journal_access_func access)
{
	struct ocfs2_path *path;

	BUG_ON(le16_to_cpu(root_el->l_tree_depth) >= OCFS2_MAX_PATH_DEPTH);

	path = kzalloc(sizeof(*path), GFP_NOFS);
	if (path) {
		path->p_tree_depth = le16_to_cpu(root_el->l_tree_depth);
		get_bh(root_bh);
		path_root_bh(path) = root_bh;
		path_root_el(path) = root_el;
		path_root_access(path) = access;
	}

	return path;
}

static struct ocfs2_path *ocfs2_new_path_from_path(struct ocfs2_path *path)
{
	return ocfs2_new_path(path_root_bh(path), path_root_el(path),
			      path_root_access(path));
}

static struct ocfs2_path *ocfs2_new_path_from_et(struct ocfs2_extent_tree *et)
{
	return ocfs2_new_path(et->et_root_bh, et->et_root_el,
			      et->et_root_journal_access);
}

/*
 * Journal the buffer at depth idx.  All idx>0 are extent_blocks,
 * otherwise it's the root_access function.
 *
 * I don't like the way this function's name looks next to
 * ocfs2_journal_access_path(), but I don't have a better one.
 */
static int ocfs2_path_bh_journal_access(handle_t *handle,
					struct inode *inode,
					struct ocfs2_path *path,
					int idx)
{
	ocfs2_journal_access_func access = path_root_access(path);

	if (!access)
		access = ocfs2_journal_access;

	if (idx)
		access = ocfs2_journal_access_eb;

	return access(handle, inode, path->p_node[idx].bh,
		      OCFS2_JOURNAL_ACCESS_WRITE);
}

/*
 * Convenience function to journal all components in a path.
 */
static int ocfs2_journal_access_path(struct inode *inode, handle_t *handle,
				     struct ocfs2_path *path)
{
	int i, ret = 0;

	if (!path)
		goto out;

	for(i = 0; i < path_num_items(path); i++) {
		ret = ocfs2_path_bh_journal_access(handle, inode, path, i);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}
	}

out:
	return ret;
}

/*
 * Return the index of the extent record which contains cluster #v_cluster.
 * -1 is returned if it was not found.
 *
 * Should work fine on interior and exterior nodes.
 */
int ocfs2_search_extent_list(struct ocfs2_extent_list *el, u32 v_cluster)
{
	int ret = -1;
	int i;
	struct ocfs2_extent_rec *rec;
	u32 rec_end, rec_start, clusters;

	for(i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) {
		rec = &el->l_recs[i];

		rec_start = le32_to_cpu(rec->e_cpos);
		clusters = ocfs2_rec_clusters(el, rec);

		rec_end = rec_start + clusters;

		if (v_cluster >= rec_start && v_cluster < rec_end) {
			ret = i;
			break;
		}
	}

	return ret;
}

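/*
 * Example (illustrative): with records covering cpos [0,10) and
 * [16,32), ocfs2_search_extent_list(el, 20) returns 1, while
 * ocfs2_search_extent_list(el, 12) returns -1 because cluster 12
 * falls in the hole between the two records.
 */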
enum ocfs2_contig_type {
	CONTIG_NONE = 0,
	CONTIG_LEFT,
	CONTIG_RIGHT,
	CONTIG_LEFTRIGHT,
};


/*
 * NOTE: ocfs2_block_extent_contig(), ocfs2_extents_adjacent() and
 * ocfs2_extent_contig only work properly against leaf nodes!
 */
static int ocfs2_block_extent_contig(struct super_block *sb,
				     struct ocfs2_extent_rec *ext,
				     u64 blkno)
{
	u64 blk_end = le64_to_cpu(ext->e_blkno);

	blk_end += ocfs2_clusters_to_blocks(sb,
					    le16_to_cpu(ext->e_leaf_clusters));

	return blkno == blk_end;
}

static int ocfs2_extents_adjacent(struct ocfs2_extent_rec *left,
				  struct ocfs2_extent_rec *right)
{
	u32 left_range;

	left_range = le32_to_cpu(left->e_cpos) +
		le16_to_cpu(left->e_leaf_clusters);

	return (left_range == le32_to_cpu(right->e_cpos));
}

static enum ocfs2_contig_type
	ocfs2_extent_contig(struct inode *inode,
			    struct ocfs2_extent_rec *ext,
			    struct ocfs2_extent_rec *insert_rec)
{
	u64 blkno = le64_to_cpu(insert_rec->e_blkno);

	/*
	 * Refuse to coalesce extent records with different flag
	 * fields - we don't want to mix unwritten extents with user
	 * data.
	 */
	if (ext->e_flags != insert_rec->e_flags)
		return CONTIG_NONE;

	if (ocfs2_extents_adjacent(ext, insert_rec) &&
	    ocfs2_block_extent_contig(inode->i_sb, ext, blkno))
		return CONTIG_RIGHT;

	blkno = le64_to_cpu(ext->e_blkno);
	if (ocfs2_extents_adjacent(insert_rec, ext) &&
	    ocfs2_block_extent_contig(inode->i_sb, insert_rec, blkno))
		return CONTIG_LEFT;

	return CONTIG_NONE;
}

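/*
 * Put differently (illustrative): if an existing leaf record maps
 * cpos 10-19 and the record being inserted starts at cpos 20 with its
 * first block immediately following the existing record's last block,
 * that is CONTIG_RIGHT; the mirror case, where the new record ends at
 * cpos 9 just before the existing one, is CONTIG_LEFT.  Both the
 * logical range and the physical blocks must line up.
 */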
/*
 * NOTE: We can have pretty much any combination of contiguousness and
 * appending.
 *
 * The usefulness of APPEND_TAIL is more in that it lets us know that
 * we'll have to update the path to that leaf.
 */
enum ocfs2_append_type {
	APPEND_NONE = 0,
	APPEND_TAIL,
};

enum ocfs2_split_type {
	SPLIT_NONE = 0,
	SPLIT_LEFT,
	SPLIT_RIGHT,
};

struct ocfs2_insert_type {
	enum ocfs2_split_type	ins_split;
	enum ocfs2_append_type	ins_appending;
	enum ocfs2_contig_type	ins_contig;
	int			ins_contig_index;
	int			ins_tree_depth;
};

struct ocfs2_merge_ctxt {
	enum ocfs2_contig_type	c_contig_type;
	int			c_has_empty_extent;
	int			c_split_covers_rec;
};

static int ocfs2_validate_extent_block(struct super_block *sb,
				       struct buffer_head *bh)
{
	int rc;
	struct ocfs2_extent_block *eb =
		(struct ocfs2_extent_block *)bh->b_data;

	mlog(0, "Validating extent block %llu\n",
	     (unsigned long long)bh->b_blocknr);

	BUG_ON(!buffer_uptodate(bh));

	/*
	 * If the ecc fails, we return the error but otherwise
	 * leave the filesystem running.  We know any error is
	 * local to this block.
	 */
	rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &eb->h_check);
	if (rc) {
		mlog(ML_ERROR, "Checksum failed for extent block %llu\n",
		     (unsigned long long)bh->b_blocknr);
		return rc;
	}

	/*
	 * Errors after here are fatal.
	 */

	if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) {
		ocfs2_error(sb,
			    "Extent block #%llu has bad signature %.*s",
			    (unsigned long long)bh->b_blocknr, 7,
			    eb->h_signature);
		return -EINVAL;
	}

	if (le64_to_cpu(eb->h_blkno) != bh->b_blocknr) {
		ocfs2_error(sb,
			    "Extent block #%llu has an invalid h_blkno "
			    "of %llu",
			    (unsigned long long)bh->b_blocknr,
			    (unsigned long long)le64_to_cpu(eb->h_blkno));
		return -EINVAL;
	}

	if (le32_to_cpu(eb->h_fs_generation) != OCFS2_SB(sb)->fs_generation) {
		ocfs2_error(sb,
			    "Extent block #%llu has an invalid "
			    "h_fs_generation of #%u",
			    (unsigned long long)bh->b_blocknr,
			    le32_to_cpu(eb->h_fs_generation));
		return -EINVAL;
	}

	return 0;
}

int ocfs2_read_extent_block(struct inode *inode, u64 eb_blkno,
			    struct buffer_head **bh)
{
	int rc;
	struct buffer_head *tmp = *bh;

	rc = ocfs2_read_block(inode, eb_blkno, &tmp,
			      ocfs2_validate_extent_block);

	/* If ocfs2_read_block() got us a new bh, pass it up. */
	if (!rc && !*bh)
		*bh = tmp;

	return rc;
}


/*
 * How many free extents have we got before we need more meta data?
 */
int ocfs2_num_free_extents(struct ocfs2_super *osb,
			   struct inode *inode,
			   struct ocfs2_extent_tree *et)
{
	int retval;
	struct ocfs2_extent_list *el = NULL;
	struct ocfs2_extent_block *eb;
	struct buffer_head *eb_bh = NULL;
	u64 last_eb_blk = 0;

	mlog_entry_void();

	el = et->et_root_el;
	last_eb_blk = ocfs2_et_get_last_eb_blk(et);

	if (last_eb_blk) {
		retval = ocfs2_read_extent_block(inode, last_eb_blk, &eb_bh);
		if (retval < 0) {
			mlog_errno(retval);
			goto bail;
		}
		eb = (struct ocfs2_extent_block *) eb_bh->b_data;
		el = &eb->h_list;
	}

	BUG_ON(el->l_tree_depth != 0);

	retval = le16_to_cpu(el->l_count) - le16_to_cpu(el->l_next_free_rec);
bail:
	brelse(eb_bh);

	mlog_exit(retval);
	return retval;
}

/* expects array to already be allocated
 *
 * sets h_signature, h_blkno, h_suballoc_bit, h_suballoc_slot, and
 * l_count for you
 */
static int ocfs2_create_new_meta_bhs(struct ocfs2_super *osb,
				     handle_t *handle,
				     struct inode *inode,
				     int wanted,
				     struct ocfs2_alloc_context *meta_ac,
				     struct buffer_head *bhs[])
{
	int count, status, i;
	u16 suballoc_bit_start;
	u32 num_got;
	u64 first_blkno;
	struct ocfs2_extent_block *eb;

	mlog_entry_void();

	count = 0;
	while (count < wanted) {
		status = ocfs2_claim_metadata(osb,
					      handle,
					      meta_ac,
					      wanted - count,
					      &suballoc_bit_start,
					      &num_got,
					      &first_blkno);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}

		for(i = count; i < (num_got + count); i++) {
			bhs[i] = sb_getblk(osb->sb, first_blkno);
			if (bhs[i] == NULL) {
				status = -EIO;
				mlog_errno(status);
				goto bail;
			}
			ocfs2_set_new_buffer_uptodate(inode, bhs[i]);

			status = ocfs2_journal_access_eb(handle, inode, bhs[i],
							 OCFS2_JOURNAL_ACCESS_CREATE);
			if (status < 0) {
				mlog_errno(status);
				goto bail;
			}

			memset(bhs[i]->b_data, 0, osb->sb->s_blocksize);
			eb = (struct ocfs2_extent_block *) bhs[i]->b_data;
			/* Ok, setup the minimal stuff here. */
			strcpy(eb->h_signature, OCFS2_EXTENT_BLOCK_SIGNATURE);
			eb->h_blkno = cpu_to_le64(first_blkno);
			eb->h_fs_generation = cpu_to_le32(osb->fs_generation);
			eb->h_suballoc_slot = cpu_to_le16(osb->slot_num);
			eb->h_suballoc_bit = cpu_to_le16(suballoc_bit_start);
			eb->h_list.l_count =
				cpu_to_le16(ocfs2_extent_recs_per_eb(osb->sb));

			suballoc_bit_start++;
			first_blkno++;

			/* We'll also be dirtied by the caller, so
			 * this isn't absolutely necessary. */
			status = ocfs2_journal_dirty(handle, bhs[i]);
			if (status < 0) {
				mlog_errno(status);
				goto bail;
			}
		}

		count += num_got;
	}

	status = 0;
bail:
	if (status < 0) {
		for(i = 0; i < wanted; i++) {
			brelse(bhs[i]);
			bhs[i] = NULL;
		}
	}
	mlog_exit(status);
	return status;
}

/*
 * Helper function for ocfs2_add_branch() and ocfs2_shift_tree_depth().
 *
 * Returns the sum of the rightmost extent rec logical offset and
 * cluster count.
 *
 * ocfs2_add_branch() uses this to determine what logical cluster
 * value should be populated into the leftmost new branch records.
 *
 * ocfs2_shift_tree_depth() uses this to determine the # clusters
 * value for the new topmost tree record.
 */
static inline u32 ocfs2_sum_rightmost_rec(struct ocfs2_extent_list *el)
{
	int i;

	i = le16_to_cpu(el->l_next_free_rec) - 1;

	return le32_to_cpu(el->l_recs[i].e_cpos) +
		ocfs2_rec_clusters(el, &el->l_recs[i]);
}

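/*
 * Worked example (illustrative): if the rightmost record starts at
 * cpos 200 and covers 8 clusters, this returns 208, i.e. the first
 * logical cluster just past everything currently mapped by 'el'.
 */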
/*
 * Change range of the branches in the right most path according to the leaf
 * extent block's rightmost record.
 */
static int ocfs2_adjust_rightmost_branch(handle_t *handle,
					 struct inode *inode,
					 struct ocfs2_extent_tree *et)
{
	int status;
	struct ocfs2_path *path = NULL;
	struct ocfs2_extent_list *el;
	struct ocfs2_extent_rec *rec;

	path = ocfs2_new_path_from_et(et);
	if (!path) {
		status = -ENOMEM;
		return status;
	}

	status = ocfs2_find_path(inode, path, UINT_MAX);
	if (status < 0) {
		mlog_errno(status);
		goto out;
	}

	status = ocfs2_extend_trans(handle, path_num_items(path) +
				    handle->h_buffer_credits);
	if (status < 0) {
		mlog_errno(status);
		goto out;
	}

	status = ocfs2_journal_access_path(inode, handle, path);
	if (status < 0) {
		mlog_errno(status);
		goto out;
	}

	el = path_leaf_el(path);
	rec = &el->l_recs[le32_to_cpu(el->l_next_free_rec) - 1];

	ocfs2_adjust_rightmost_records(inode, handle, path, rec);

out:
	ocfs2_free_path(path);
	return status;
}

/*
 * Add an entire tree branch to our inode. eb_bh is the extent block
 * to start at, if we don't want to start the branch at the dinode
 * structure.
 *
 * last_eb_bh is required as we have to update its next_leaf pointer
 * for the new last extent block.
 *
 * the new branch will be 'empty' in the sense that every block will
 * contain a single record with cluster count == 0.
 */
static int ocfs2_add_branch(struct ocfs2_super *osb,
			    handle_t *handle,
			    struct inode *inode,
			    struct ocfs2_extent_tree *et,
			    struct buffer_head *eb_bh,
			    struct buffer_head **last_eb_bh,
			    struct ocfs2_alloc_context *meta_ac)
{
	int status, new_blocks, i;
	u64 next_blkno, new_last_eb_blk;
	struct buffer_head *bh;
	struct buffer_head **new_eb_bhs = NULL;
	struct ocfs2_extent_block *eb;
	struct ocfs2_extent_list  *eb_el;
	struct ocfs2_extent_list  *el;
	u32 new_cpos, root_end;

	mlog_entry_void();

	BUG_ON(!last_eb_bh || !*last_eb_bh);

	if (eb_bh) {
		eb = (struct ocfs2_extent_block *) eb_bh->b_data;
		el = &eb->h_list;
	} else
		el = et->et_root_el;

	/* we never add a branch to a leaf. */
	BUG_ON(!el->l_tree_depth);

	new_blocks = le16_to_cpu(el->l_tree_depth);

	eb = (struct ocfs2_extent_block *)(*last_eb_bh)->b_data;
	new_cpos = ocfs2_sum_rightmost_rec(&eb->h_list);
	root_end = ocfs2_sum_rightmost_rec(et->et_root_el);

	/*
	 * If there is a gap before the root end and the real end
	 * of the rightmost leaf block, we need to remove the gap
	 * between new_cpos and root_end first so that the tree
	 * is consistent after we add a new branch(it will start
	 * from new_cpos).
	 */
	if (root_end > new_cpos) {
		mlog(0, "adjust the cluster end from %u to %u\n",
		     root_end, new_cpos);
		status = ocfs2_adjust_rightmost_branch(handle, inode, et);
		if (status) {
			mlog_errno(status);
			goto bail;
		}
	}

	/* allocate the number of new eb blocks we need */
	new_eb_bhs = kcalloc(new_blocks, sizeof(struct buffer_head *),
			     GFP_KERNEL);
	if (!new_eb_bhs) {
		status = -ENOMEM;
		mlog_errno(status);
		goto bail;
	}

	status = ocfs2_create_new_meta_bhs(osb, handle, inode, new_blocks,
					   meta_ac, new_eb_bhs);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	/* Note: new_eb_bhs[new_blocks - 1] is the guy which will be
	 * linked with the rest of the tree.
	 * conversely, new_eb_bhs[0] is the new bottommost leaf.
	 *
	 * when we leave the loop, new_last_eb_blk will point to the
	 * newest leaf, and next_blkno will point to the topmost extent
	 * block. */
	next_blkno = new_last_eb_blk = 0;
	for(i = 0; i < new_blocks; i++) {
		bh = new_eb_bhs[i];
		eb = (struct ocfs2_extent_block *) bh->b_data;
		/* ocfs2_create_new_meta_bhs() should create it right! */
		BUG_ON(!OCFS2_IS_VALID_EXTENT_BLOCK(eb));
		eb_el = &eb->h_list;

		status = ocfs2_journal_access_eb(handle, inode, bh,
						 OCFS2_JOURNAL_ACCESS_CREATE);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}

		eb->h_next_leaf_blk = 0;
		eb_el->l_tree_depth = cpu_to_le16(i);
		eb_el->l_next_free_rec = cpu_to_le16(1);
		/*
		 * This actually counts as an empty extent as
		 * c_clusters == 0
		 */
		eb_el->l_recs[0].e_cpos = cpu_to_le32(new_cpos);
		eb_el->l_recs[0].e_blkno = cpu_to_le64(next_blkno);
		/*
		 * eb_el isn't always an interior node, but even leaf
		 * nodes want a zero'd flags and reserved field so
		 * this gets the whole 32 bits regardless of use.
		 */
		eb_el->l_recs[0].e_int_clusters = cpu_to_le32(0);
		if (!eb_el->l_tree_depth)
			new_last_eb_blk = le64_to_cpu(eb->h_blkno);

		status = ocfs2_journal_dirty(handle, bh);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}

		next_blkno = le64_to_cpu(eb->h_blkno);
	}

	/* This is a bit hairy. We want to update up to three blocks
	 * here without leaving any of them in an inconsistent state
	 * in case of error. We don't have to worry about
	 * journal_dirty erroring as it won't unless we've aborted the
	 * handle (in which case we would never be here) so reserving
	 * the write with journal_access is all we need to do. */
	status = ocfs2_journal_access_eb(handle, inode, *last_eb_bh,
					 OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}
	status = ocfs2_et_root_journal_access(handle, inode, et,
					      OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}
	if (eb_bh) {
		status = ocfs2_journal_access_eb(handle, inode, eb_bh,
						 OCFS2_JOURNAL_ACCESS_WRITE);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}
	}

	/* Link the new branch into the rest of the tree (el will
	 * either be on the root_bh, or the extent block passed in. */
	i = le16_to_cpu(el->l_next_free_rec);
	el->l_recs[i].e_blkno = cpu_to_le64(next_blkno);
	el->l_recs[i].e_cpos = cpu_to_le32(new_cpos);
	el->l_recs[i].e_int_clusters = 0;
	le16_add_cpu(&el->l_next_free_rec, 1);

	/* fe needs a new last extent block pointer, as does the
	 * next_leaf on the previously last-extent-block. */
	ocfs2_et_set_last_eb_blk(et, new_last_eb_blk);

	eb = (struct ocfs2_extent_block *) (*last_eb_bh)->b_data;
	eb->h_next_leaf_blk = cpu_to_le64(new_last_eb_blk);

	status = ocfs2_journal_dirty(handle, *last_eb_bh);
	if (status < 0)
		mlog_errno(status);
	status = ocfs2_journal_dirty(handle, et->et_root_bh);
	if (status < 0)
		mlog_errno(status);
	if (eb_bh) {
		status = ocfs2_journal_dirty(handle, eb_bh);
		if (status < 0)
			mlog_errno(status);
	}

	/*
	 * Some callers want to track the rightmost leaf so pass it
	 * back here.
	 */
	brelse(*last_eb_bh);
	get_bh(new_eb_bhs[0]);
	*last_eb_bh = new_eb_bhs[0];

	status = 0;
bail:
	if (new_eb_bhs) {
		for (i = 0; i < new_blocks; i++)
			brelse(new_eb_bhs[i]);
		kfree(new_eb_bhs);
	}

	mlog_exit(status);
	return status;
}

/*
 * adds another level to the allocation tree.
 * returns back the new extent block so you can add a branch to it
 * after this call.
 */
static int ocfs2_shift_tree_depth(struct ocfs2_super *osb,
				  handle_t *handle,
				  struct inode *inode,
				  struct ocfs2_extent_tree *et,
				  struct ocfs2_alloc_context *meta_ac,
				  struct buffer_head **ret_new_eb_bh)
{
	int status, i;
	u32 new_clusters;
	struct buffer_head *new_eb_bh = NULL;
	struct ocfs2_extent_block *eb;
	struct ocfs2_extent_list  *root_el;
	struct ocfs2_extent_list  *eb_el;

	mlog_entry_void();

	status = ocfs2_create_new_meta_bhs(osb, handle, inode, 1, meta_ac,
					   &new_eb_bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	eb = (struct ocfs2_extent_block *) new_eb_bh->b_data;
	/* ocfs2_create_new_meta_bhs() should create it right! */
	BUG_ON(!OCFS2_IS_VALID_EXTENT_BLOCK(eb));

	eb_el = &eb->h_list;
	root_el = et->et_root_el;

	status = ocfs2_journal_access_eb(handle, inode, new_eb_bh,
					 OCFS2_JOURNAL_ACCESS_CREATE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	/* copy the root extent list data into the new extent block */
	eb_el->l_tree_depth = root_el->l_tree_depth;
	eb_el->l_next_free_rec = root_el->l_next_free_rec;
	for (i = 0; i < le16_to_cpu(root_el->l_next_free_rec); i++)
		eb_el->l_recs[i] = root_el->l_recs[i];

	status = ocfs2_journal_dirty(handle, new_eb_bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	status = ocfs2_et_root_journal_access(handle, inode, et,
					      OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	new_clusters = ocfs2_sum_rightmost_rec(eb_el);

	/* update root_bh now */
	le16_add_cpu(&root_el->l_tree_depth, 1);
	root_el->l_recs[0].e_cpos = 0;
	root_el->l_recs[0].e_blkno = eb->h_blkno;
	root_el->l_recs[0].e_int_clusters = cpu_to_le32(new_clusters);
	for (i = 1; i < le16_to_cpu(root_el->l_next_free_rec); i++)
		memset(&root_el->l_recs[i], 0, sizeof(struct ocfs2_extent_rec));
	root_el->l_next_free_rec = cpu_to_le16(1);

	/* If this is our 1st tree depth shift, then last_eb_blk
	 * becomes the allocated extent block */
	if (root_el->l_tree_depth == cpu_to_le16(1))
		ocfs2_et_set_last_eb_blk(et, le64_to_cpu(eb->h_blkno));

	status = ocfs2_journal_dirty(handle, et->et_root_bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	*ret_new_eb_bh = new_eb_bh;
	new_eb_bh = NULL;
	status = 0;
bail:
	brelse(new_eb_bh);

	mlog_exit(status);
	return status;
}

/*
 * Should only be called when there is no space left in any of the
 * leaf nodes. What we want to do is find the lowest tree depth
 * non-leaf extent block with room for new records. There are three
 * valid results of this search:
 *
 * 1) a lowest extent block is found, then we pass it back in
 *    *lowest_eb_bh and return '0'
 *
 * 2) the search fails to find anything, but the root_el has room. We
 *    pass NULL back in *lowest_eb_bh, but still return '0'
 *
 * 3) the search fails to find anything AND the root_el is full, in
 *    which case we return > 0
 *
 * return status < 0 indicates an error.
 */
static int ocfs2_find_branch_target(struct ocfs2_super *osb,
				    struct inode *inode,
				    struct ocfs2_extent_tree *et,
				    struct buffer_head **target_bh)
{
	int status = 0, i;
	u64 blkno;
	struct ocfs2_extent_block *eb;
	struct ocfs2_extent_list  *el;
	struct buffer_head *bh = NULL;
	struct buffer_head *lowest_bh = NULL;

	mlog_entry_void();

	*target_bh = NULL;

	el = et->et_root_el;

	while(le16_to_cpu(el->l_tree_depth) > 1) {
		if (le16_to_cpu(el->l_next_free_rec) == 0) {
			ocfs2_error(inode->i_sb, "Dinode %llu has empty "
				    "extent list (next_free_rec == 0)",
				    (unsigned long long)OCFS2_I(inode)->ip_blkno);
			status = -EIO;
			goto bail;
		}
		i = le16_to_cpu(el->l_next_free_rec) - 1;
		blkno = le64_to_cpu(el->l_recs[i].e_blkno);
		if (!blkno) {
			ocfs2_error(inode->i_sb, "Dinode %llu has extent "
				    "list where extent # %d has no physical "
				    "block start",
				    (unsigned long long)OCFS2_I(inode)->ip_blkno, i);
			status = -EIO;
			goto bail;
		}

		brelse(bh);
		bh = NULL;

		status = ocfs2_read_extent_block(inode, blkno, &bh);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}

		eb = (struct ocfs2_extent_block *) bh->b_data;
		el = &eb->h_list;

		if (le16_to_cpu(el->l_next_free_rec) <
		    le16_to_cpu(el->l_count)) {
			brelse(lowest_bh);
			lowest_bh = bh;
			get_bh(lowest_bh);
		}
	}

	/* If we didn't find one and the fe doesn't have any room,
	 * then return '1' */
	el = et->et_root_el;
	if (!lowest_bh && (el->l_next_free_rec == el->l_count))
		status = 1;

	*target_bh = lowest_bh;
bail:
	brelse(bh);

	mlog_exit(status);
	return status;
}

/*
 * Grow a b-tree so that it has more records.
 *
 * We might shift the tree depth in which case existing paths should
 * be considered invalid.
 *
 * Tree depth after the grow is returned via *final_depth.
 *
 * *last_eb_bh will be updated by ocfs2_add_branch().
 */
static int ocfs2_grow_tree(struct inode *inode, handle_t *handle,
			   struct ocfs2_extent_tree *et, int *final_depth,
			   struct buffer_head **last_eb_bh,
			   struct ocfs2_alloc_context *meta_ac)
{
	int ret, shift;
	struct ocfs2_extent_list *el = et->et_root_el;
	int depth = le16_to_cpu(el->l_tree_depth);
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct buffer_head *bh = NULL;

	BUG_ON(meta_ac == NULL);

	shift = ocfs2_find_branch_target(osb, inode, et, &bh);
	if (shift < 0) {
		ret = shift;
		mlog_errno(ret);
		goto out;
	}

	/* We traveled all the way to the bottom of the allocation tree
	 * and didn't find room for any more extents - we need to add
	 * another tree level */
	if (shift) {
		BUG_ON(bh);
		mlog(0, "need to shift tree depth (current = %d)\n", depth);

		/* ocfs2_shift_tree_depth will return us a buffer with
		 * the new extent block (so we can pass that to
		 * ocfs2_add_branch). */
		ret = ocfs2_shift_tree_depth(osb, handle, inode, et,
					     meta_ac, &bh);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}
		depth++;
		if (depth == 1) {
			/*
			 * Special case: we have room now if we shifted from
			 * tree_depth 0, so no more work needs to be done.
			 *
			 * We won't be calling add_branch, so pass
			 * back *last_eb_bh as the new leaf. At depth
			 * zero, it should always be null so there's
			 * no reason to brelse.
			 */
			BUG_ON(*last_eb_bh);
			get_bh(bh);
			*last_eb_bh = bh;
			goto out;
		}
	}

	/* call ocfs2_add_branch to add the final part of the tree with
	 * the new data. */
	mlog(0, "add branch. bh = %p\n", bh);
	ret = ocfs2_add_branch(osb, handle, inode, et, bh, last_eb_bh,
			       meta_ac);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

out:
	if (final_depth)
		*final_depth = depth;
	brelse(bh);
	return ret;
}

dcd0538f MF |
1534 | /* |
1535 | * Shift the records in 'el' one slot to the right; on a full list (with l_next_free_rec temporarily dropped by the caller) this discards the old rightmost extent record. | |
1536 | */ | |
1537 | static void ocfs2_shift_records_right(struct ocfs2_extent_list *el) | |
1538 | { | |
1539 | int next_free = le16_to_cpu(el->l_next_free_rec); | |
1540 | int count = le16_to_cpu(el->l_count); | |
1541 | unsigned int num_bytes; | |
1542 | ||
1543 | BUG_ON(!next_free); | |
1544 | /* This will cause us to go off the end of our extent list. */ | |
1545 | BUG_ON(next_free >= count); | |
1546 | ||
1547 | num_bytes = sizeof(struct ocfs2_extent_rec) * next_free; | |
1548 | ||
1549 | memmove(&el->l_recs[1], &el->l_recs[0], num_bytes); | |
1550 | } | |
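
/*
 * Aside, a minimal self-contained sketch (plain userspace C, not ocfs2
 * code) of the same idiom used above: shifting an array right by one slot
 * with memmove(), which is required because the source and destination
 * ranges overlap.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	int recs[4] = { 10, 20, 30, 40 };
	int next_free = 3;	/* analogous to l_next_free_rec */

	/* recs[next_free] must still be free, as the BUG_ONs above enforce. */
	memmove(&recs[1], &recs[0], next_free * sizeof(recs[0]));
	recs[0] = 0;		/* the slot opened up at index 0 */

	for (int i = 0; i <= next_free; i++)
		printf("%d\n", recs[i]);	/* 0, 10, 20, 30 */
	return 0;
}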
1551 | ||
1552 | static void ocfs2_rotate_leaf(struct ocfs2_extent_list *el, | |
1553 | struct ocfs2_extent_rec *insert_rec) | |
1554 | { | |
1555 | int i, insert_index, next_free, has_empty, num_bytes; | |
1556 | u32 insert_cpos = le32_to_cpu(insert_rec->e_cpos); | |
1557 | struct ocfs2_extent_rec *rec; | |
1558 | ||
1559 | next_free = le16_to_cpu(el->l_next_free_rec); | |
1560 | has_empty = ocfs2_is_empty_extent(&el->l_recs[0]); | |
1561 | ||
1562 | BUG_ON(!next_free); | |
1563 | ||
1564 | /* The tree code before us didn't allow enough room in the leaf. */ | |
b1f3550f | 1565 | BUG_ON(el->l_next_free_rec == el->l_count && !has_empty); |
dcd0538f MF |
1566 | |
1567 | /* | |
1568 | * The easiest way to approach this is to just remove the | |
1569 | * empty extent and temporarily decrement next_free. | |
1570 | */ | |
1571 | if (has_empty) { | |
1572 | /* | |
1573 | * If next_free was 1 (only an empty extent), this | |
1574 | * loop won't execute, which is fine. We still want | |
1575 | * the decrement below to happen. | |
1576 | */ | |
1577 | for(i = 0; i < (next_free - 1); i++) | |
1578 | el->l_recs[i] = el->l_recs[i+1]; | |
1579 | ||
1580 | next_free--; | |
1581 | } | |
1582 | ||
1583 | /* | |
1584 | * Figure out what the new record index should be. | |
1585 | */ | |
1586 | for(i = 0; i < next_free; i++) { | |
1587 | rec = &el->l_recs[i]; | |
1588 | ||
1589 | if (insert_cpos < le32_to_cpu(rec->e_cpos)) | |
1590 | break; | |
1591 | } | |
1592 | insert_index = i; | |
1593 | ||
1594 | mlog(0, "ins %u: index %d, has_empty %d, next_free %d, count %d\n", | |
1595 | insert_cpos, insert_index, has_empty, next_free, le16_to_cpu(el->l_count)); | |
1596 | ||
1597 | BUG_ON(insert_index < 0); | |
1598 | BUG_ON(insert_index >= le16_to_cpu(el->l_count)); | |
1599 | BUG_ON(insert_index > next_free); | |
1600 | ||
1601 | /* | |
1602 | * No need to memmove if we're just adding to the tail. | |
1603 | */ | |
1604 | if (insert_index != next_free) { | |
1605 | BUG_ON(next_free >= le16_to_cpu(el->l_count)); | |
1606 | ||
1607 | num_bytes = next_free - insert_index; | |
1608 | num_bytes *= sizeof(struct ocfs2_extent_rec); | |
1609 | memmove(&el->l_recs[insert_index + 1], | |
1610 | &el->l_recs[insert_index], | |
1611 | num_bytes); | |
1612 | } | |
1613 | ||
1614 | /* | |
1615 | * Either we had an empty extent and need to re-increment, or | |
1616 | * there was no empty extent on a non-full rightmost leaf node, | |
1617 | * in which case we still need to increment. | |
1618 | */ | |
1619 | next_free++; | |
1620 | el->l_next_free_rec = cpu_to_le16(next_free); | |
1621 | /* | |
1622 | * Make sure none of the math above just messed up our tree. | |
1623 | */ | |
1624 | BUG_ON(le16_to_cpu(el->l_next_free_rec) > le16_to_cpu(el->l_count)); | |
1625 | ||
1626 | el->l_recs[insert_index] = *insert_rec; | |
1627 | ||
1628 | } | |
1629 | ||
328d5752 MF |
1630 | static void ocfs2_remove_empty_extent(struct ocfs2_extent_list *el) |
1631 | { | |
1632 | int size, num_recs = le16_to_cpu(el->l_next_free_rec); | |
1633 | ||
1634 | BUG_ON(num_recs == 0); | |
1635 | ||
1636 | if (ocfs2_is_empty_extent(&el->l_recs[0])) { | |
1637 | num_recs--; | |
1638 | size = num_recs * sizeof(struct ocfs2_extent_rec); | |
1639 | memmove(&el->l_recs[0], &el->l_recs[1], size); | |
1640 | memset(&el->l_recs[num_recs], 0, | |
1641 | sizeof(struct ocfs2_extent_rec)); | |
1642 | el->l_next_free_rec = cpu_to_le16(num_recs); | |
1643 | } | |
1644 | } | |
1645 | ||
dcd0538f MF |
1646 | /* |
1647 | * Create an empty extent record. | |
1648 | * | |
1649 | * l_next_free_rec may be updated. | |
1650 | * | |
1651 | * If an empty extent already exists, do nothing. | |
1652 | */ | |
1653 | static void ocfs2_create_empty_extent(struct ocfs2_extent_list *el) | |
1654 | { | |
1655 | int next_free = le16_to_cpu(el->l_next_free_rec); | |
1656 | ||
e48edee2 MF |
1657 | BUG_ON(le16_to_cpu(el->l_tree_depth) != 0); |
1658 | ||
dcd0538f MF |
1659 | if (next_free == 0) |
1660 | goto set_and_inc; | |
1661 | ||
1662 | if (ocfs2_is_empty_extent(&el->l_recs[0])) | |
1663 | return; | |
1664 | ||
1665 | mlog_bug_on_msg(el->l_count == el->l_next_free_rec, | |
1666 | "Asked to create an empty extent in a full list:\n" | |
1667 | "count = %u, tree depth = %u", | |
1668 | le16_to_cpu(el->l_count), | |
1669 | le16_to_cpu(el->l_tree_depth)); | |
1670 | ||
1671 | ocfs2_shift_records_right(el); | |
1672 | ||
1673 | set_and_inc: | |
1674 | le16_add_cpu(&el->l_next_free_rec, 1); | |
1675 | memset(&el->l_recs[0], 0, sizeof(struct ocfs2_extent_rec)); | |
1676 | } | |
1677 | ||
1678 | /* | |
1679 | * For a rotation which involves two leaf nodes, the "root node" is | |
1680 | * the lowest-level tree node which contains a path to both leaves. The | |
1681 | * resulting set of information can be used to form a complete "subtree". | |
1682 | * | |
1683 | * This function is passed two full paths from the dinode down to a | |
1684 | * pair of adjacent leaves. Its task is to figure out which path | |
1685 | * index contains the subtree root - this can be the root index itself | |
1686 | * in a worst-case rotation. | |
1687 | * | |
1688 | * The array index of the subtree root is passed back. | |
1689 | */ | |
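/*
 * A quick sketch (hypothetical shape, not tied to any particular on-disk
 * layout): in a depth-2 tree where the two adjacent leaves hang off
 * different depth-1 extent blocks, the paths share only the root, so the
 * loop below stops at i == 1 and the subtree root index returned is 0.
 */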
1690 | static int ocfs2_find_subtree_root(struct inode *inode, | |
1691 | struct ocfs2_path *left, | |
1692 | struct ocfs2_path *right) | |
1693 | { | |
1694 | int i = 0; | |
1695 | ||
1696 | /* | |
1697 | * Check that the caller passed in two paths from the same tree. | |
1698 | */ | |
1699 | BUG_ON(path_root_bh(left) != path_root_bh(right)); | |
1700 | ||
1701 | do { | |
1702 | i++; | |
1703 | ||
1704 | /* | |
1705 | * The caller didn't pass two adjacent paths. | |
1706 | */ | |
1707 | mlog_bug_on_msg(i > left->p_tree_depth, | |
1708 | "Inode %lu, left depth %u, right depth %u\n" | |
1709 | "left leaf blk %llu, right leaf blk %llu\n", | |
1710 | inode->i_ino, left->p_tree_depth, | |
1711 | right->p_tree_depth, | |
1712 | (unsigned long long)path_leaf_bh(left)->b_blocknr, | |
1713 | (unsigned long long)path_leaf_bh(right)->b_blocknr); | |
1714 | } while (left->p_node[i].bh->b_blocknr == | |
1715 | right->p_node[i].bh->b_blocknr); | |
1716 | ||
1717 | return i - 1; | |
1718 | } | |
1719 | ||
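/*
 * Callback invoked by __ocfs2_find_path() for each extent block buffer it
 * reads on the way down the tree; 'data' is opaque caller context.  See
 * find_path_ins() and find_leaf_ins() below for the two users.
 */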
1720 | typedef void (path_insert_t)(void *, struct buffer_head *); | |
1721 | ||
1722 | /* | |
1723 | * Traverse a btree path in search of cpos, starting at root_el. | |
1724 | * | |
1725 | * This code can be called with a cpos larger than the tree, in which | |
1726 | * case it will return the rightmost path. | |
1727 | */ | |
1728 | static int __ocfs2_find_path(struct inode *inode, | |
1729 | struct ocfs2_extent_list *root_el, u32 cpos, | |
1730 | path_insert_t *func, void *data) | |
1731 | { | |
1732 | int i, ret = 0; | |
1733 | u32 range; | |
1734 | u64 blkno; | |
1735 | struct buffer_head *bh = NULL; | |
1736 | struct ocfs2_extent_block *eb; | |
1737 | struct ocfs2_extent_list *el; | |
1738 | struct ocfs2_extent_rec *rec; | |
1739 | struct ocfs2_inode_info *oi = OCFS2_I(inode); | |
1740 | ||
1741 | el = root_el; | |
1742 | while (el->l_tree_depth) { | |
1743 | if (le16_to_cpu(el->l_next_free_rec) == 0) { | |
1744 | ocfs2_error(inode->i_sb, | |
1745 | "Inode %llu has empty extent list at " | |
1746 | "depth %u\n", | |
1747 | (unsigned long long)oi->ip_blkno, | |
1748 | le16_to_cpu(el->l_tree_depth)); | |
1749 | ret = -EROFS; | |
1750 | goto out; | |
1751 | ||
1752 | } | |
1753 | ||
1754 | for(i = 0; i < le16_to_cpu(el->l_next_free_rec) - 1; i++) { | |
1755 | rec = &el->l_recs[i]; | |
1756 | ||
1757 | /* | |
1758 | * In the case that cpos is off the allocation | |
1759 | * tree, this should just wind up returning the | |
1760 | * rightmost record. | |
1761 | */ | |
1762 | range = le32_to_cpu(rec->e_cpos) + | |
e48edee2 | 1763 | ocfs2_rec_clusters(el, rec); |
dcd0538f MF |
1764 | if (cpos >= le32_to_cpu(rec->e_cpos) && cpos < range) |
1765 | break; | |
1766 | } | |
1767 | ||
1768 | blkno = le64_to_cpu(el->l_recs[i].e_blkno); | |
1769 | if (blkno == 0) { | |
1770 | ocfs2_error(inode->i_sb, | |
1771 | "Inode %llu has bad blkno in extent list " | |
1772 | "at depth %u (index %d)\n", | |
1773 | (unsigned long long)oi->ip_blkno, | |
1774 | le16_to_cpu(el->l_tree_depth), i); | |
1775 | ret = -EROFS; | |
1776 | goto out; | |
1777 | } | |
1778 | ||
1779 | brelse(bh); | |
1780 | bh = NULL; | |
5e96581a | 1781 | ret = ocfs2_read_extent_block(inode, blkno, &bh); |
dcd0538f MF |
1782 | if (ret) { |
1783 | mlog_errno(ret); | |
1784 | goto out; | |
1785 | } | |
1786 | ||
1787 | eb = (struct ocfs2_extent_block *) bh->b_data; | |
1788 | el = &eb->h_list; | |
dcd0538f MF |
1789 | |
1790 | if (le16_to_cpu(el->l_next_free_rec) > | |
1791 | le16_to_cpu(el->l_count)) { | |
1792 | ocfs2_error(inode->i_sb, | |
1793 | "Inode %llu has bad count in extent list " | |
1794 | "at block %llu (next free=%u, count=%u)\n", | |
1795 | (unsigned long long)oi->ip_blkno, | |
1796 | (unsigned long long)bh->b_blocknr, | |
1797 | le16_to_cpu(el->l_next_free_rec), | |
1798 | le16_to_cpu(el->l_count)); | |
1799 | ret = -EROFS; | |
1800 | goto out; | |
1801 | } | |
1802 | ||
1803 | if (func) | |
1804 | func(data, bh); | |
1805 | } | |
1806 | ||
1807 | out: | |
1808 | /* | |
1809 | * Catch any trailing bh that the loop didn't handle. | |
1810 | */ | |
1811 | brelse(bh); | |
1812 | ||
1813 | return ret; | |
1814 | } | |
1815 | ||
1816 | /* | |
1817 | * Given an initialized path (that is, it has a valid root extent | |
1818 | * list), this function will traverse the btree in search of the path | |
1819 | * which would contain cpos. | |
1820 | * | |
1821 | * The path traveled is recorded in the path structure. | |
1822 | * | |
1823 | * Note that this will not do any comparisons on leaf node extent | |
1824 | * records, so it will work fine in the case that we just added a tree | |
1825 | * branch. | |
1826 | */ | |
1827 | struct find_path_data { | |
1828 | int index; | |
1829 | struct ocfs2_path *path; | |
1830 | }; | |
1831 | static void find_path_ins(void *data, struct buffer_head *bh) | |
1832 | { | |
1833 | struct find_path_data *fp = data; | |
1834 | ||
1835 | get_bh(bh); | |
1836 | ocfs2_path_insert_eb(fp->path, fp->index, bh); | |
1837 | fp->index++; | |
1838 | } | |
1839 | static int ocfs2_find_path(struct inode *inode, struct ocfs2_path *path, | |
1840 | u32 cpos) | |
1841 | { | |
1842 | struct find_path_data data; | |
1843 | ||
1844 | data.index = 1; | |
1845 | data.path = path; | |
1846 | return __ocfs2_find_path(inode, path_root_el(path), cpos, | |
1847 | find_path_ins, &data); | |
1848 | } | |
1849 | ||
1850 | static void find_leaf_ins(void *data, struct buffer_head *bh) | |
1851 | { | |
1852 | struct ocfs2_extent_block *eb =(struct ocfs2_extent_block *)bh->b_data; | |
1853 | struct ocfs2_extent_list *el = &eb->h_list; | |
1854 | struct buffer_head **ret = data; | |
1855 | ||
1856 | /* We want to retain only the leaf block. */ | |
1857 | if (le16_to_cpu(el->l_tree_depth) == 0) { | |
1858 | get_bh(bh); | |
1859 | *ret = bh; | |
1860 | } | |
1861 | } | |
1862 | /* | |
1863 | * Find the leaf block in the tree which would contain cpos. No | |
1864 | * checking of the actual leaf is done. | |
1865 | * | |
1866 | * Some paths want to call this instead of allocating a path structure | |
1867 | * and calling ocfs2_find_path(). | |
1868 | * | |
1869 | * This function doesn't handle non btree extent lists. | |
1870 | */ | |
363041a5 MF |
1871 | int ocfs2_find_leaf(struct inode *inode, struct ocfs2_extent_list *root_el, |
1872 | u32 cpos, struct buffer_head **leaf_bh) | |
dcd0538f MF |
1873 | { |
1874 | int ret; | |
1875 | struct buffer_head *bh = NULL; | |
1876 | ||
1877 | ret = __ocfs2_find_path(inode, root_el, cpos, find_leaf_ins, &bh); | |
1878 | if (ret) { | |
1879 | mlog_errno(ret); | |
1880 | goto out; | |
1881 | } | |
1882 | ||
1883 | *leaf_bh = bh; | |
1884 | out: | |
1885 | return ret; | |
1886 | } | |
1887 | ||
1888 | /* | |
1889 | * Adjust the adjacent records (left_rec, right_rec) involved in a rotation. | |
1890 | * | |
1891 | * Basically, we've moved stuff around at the bottom of the tree and | |
1892 | * we need to fix up the extent records above the changes so they | |
1893 | * reflect the new layout. | |
1894 | * | |
1895 | * left_rec: the record on the left. | |
1896 | * left_child_el: the child list pointed to by left_rec | |
1897 | * right_rec: the record to the right of left_rec | |
1898 | * right_child_el: the child list pointed to by right_rec | |
1899 | * | |
1900 | * By definition, this only works on interior nodes. | |
1901 | */ | |
1902 | static void ocfs2_adjust_adjacent_records(struct ocfs2_extent_rec *left_rec, | |
1903 | struct ocfs2_extent_list *left_child_el, | |
1904 | struct ocfs2_extent_rec *right_rec, | |
1905 | struct ocfs2_extent_list *right_child_el) | |
1906 | { | |
1907 | u32 left_clusters, right_end; | |
1908 | ||
1909 | /* | |
1910 | * Interior nodes never have holes. Their cpos is the cpos of | |
1911 | * the leftmost record in their child list. Their cluster | |
1912 | * count covers the full theoretical range of their child list | |
1913 | * - the range between their cpos and the cpos of the record | |
1914 | * immediately to their right. | |
1915 | */ | |
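	/*
	 * Worked example with hypothetical numbers: if left_rec->e_cpos is
	 * 100 and the right child's first live (non-empty) record starts at
	 * cpos 200, left_rec now covers 100 clusters; right_rec->e_cpos is
	 * then moved up to 200 and its e_int_clusters shrunk by the same
	 * amount, so the cpos at which right_rec ends is unchanged.
	 */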
1916 | left_clusters = le32_to_cpu(right_child_el->l_recs[0].e_cpos); | |
82e12644 TM |
1917 | if (!ocfs2_rec_clusters(right_child_el, &right_child_el->l_recs[0])) { |
1918 | BUG_ON(right_child_el->l_tree_depth); | |
328d5752 MF |
1919 | BUG_ON(le16_to_cpu(right_child_el->l_next_free_rec) <= 1); |
1920 | left_clusters = le32_to_cpu(right_child_el->l_recs[1].e_cpos); | |
1921 | } | |
dcd0538f | 1922 | left_clusters -= le32_to_cpu(left_rec->e_cpos); |
e48edee2 | 1923 | left_rec->e_int_clusters = cpu_to_le32(left_clusters); |
dcd0538f MF |
1924 | |
1925 | /* | |
1926 | * Calculate the rightmost cluster count boundary before | |
e48edee2 | 1927 | * moving cpos - we will need to adjust clusters after |
dcd0538f MF |
1928 | * updating e_cpos to keep the same highest cluster count. |
1929 | */ | |
1930 | right_end = le32_to_cpu(right_rec->e_cpos); | |
e48edee2 | 1931 | right_end += le32_to_cpu(right_rec->e_int_clusters); |
dcd0538f MF |
1932 | |
1933 | right_rec->e_cpos = left_rec->e_cpos; | |
1934 | le32_add_cpu(&right_rec->e_cpos, left_clusters); | |
1935 | ||
1936 | right_end -= le32_to_cpu(right_rec->e_cpos); | |
e48edee2 | 1937 | right_rec->e_int_clusters = cpu_to_le32(right_end); |
dcd0538f MF |
1938 | } |
1939 | ||
1940 | /* | |
1941 | * Adjust the adjacent root node records involved in a | |
1942 | * rotation. left_el_blkno is passed in as a key so that we can easily | |
1943 | * find its index in the root list. | |
1944 | */ | |
1945 | static void ocfs2_adjust_root_records(struct ocfs2_extent_list *root_el, | |
1946 | struct ocfs2_extent_list *left_el, | |
1947 | struct ocfs2_extent_list *right_el, | |
1948 | u64 left_el_blkno) | |
1949 | { | |
1950 | int i; | |
1951 | ||
1952 | BUG_ON(le16_to_cpu(root_el->l_tree_depth) <= | |
1953 | le16_to_cpu(left_el->l_tree_depth)); | |
1954 | ||
1955 | for(i = 0; i < le16_to_cpu(root_el->l_next_free_rec) - 1; i++) { | |
1956 | if (le64_to_cpu(root_el->l_recs[i].e_blkno) == left_el_blkno) | |
1957 | break; | |
1958 | } | |
1959 | ||
1960 | /* | |
1961 | * The path walking code should have never returned a root and | |
1962 | * two paths which are not adjacent. | |
1963 | */ | |
1964 | BUG_ON(i >= (le16_to_cpu(root_el->l_next_free_rec) - 1)); | |
1965 | ||
1966 | ocfs2_adjust_adjacent_records(&root_el->l_recs[i], left_el, | |
1967 | &root_el->l_recs[i + 1], right_el); | |
1968 | } | |
1969 | ||
1970 | /* | |
1971 | * We've changed a leaf block (in right_path) and need to reflect that | |
1972 | * change back up the subtree. | |
1973 | * | |
1974 | * This happens in multiple places: | |
1975 | * - When we've moved an extent record from the left path leaf to the right | |
1976 | * path leaf to make room for an empty extent in the left path leaf. | |
1977 | * - When our insert into the right path leaf is at the leftmost edge | |
1978 | * and requires an update of the path immediately to its left. This | |
1979 | * can occur at the end of some types of rotation and appending inserts. | |
677b9752 TM |
1980 | * - When we've adjusted the last extent record in the left path leaf and the |
1981 | * 1st extent record in the right path leaf during cross extent block merge. | |
dcd0538f MF |
1982 | */ |
1983 | static void ocfs2_complete_edge_insert(struct inode *inode, handle_t *handle, | |
1984 | struct ocfs2_path *left_path, | |
1985 | struct ocfs2_path *right_path, | |
1986 | int subtree_index) | |
1987 | { | |
1988 | int ret, i, idx; | |
1989 | struct ocfs2_extent_list *el, *left_el, *right_el; | |
1990 | struct ocfs2_extent_rec *left_rec, *right_rec; | |
1991 | struct buffer_head *root_bh = left_path->p_node[subtree_index].bh; | |
1992 | ||
1993 | /* | |
1994 | * Update the counts and position values within all the | |
1995 | * interior nodes to reflect the leaf rotation we just did. | |
1996 | * | |
1997 | * The root node is handled below the loop. | |
1998 | * | |
1999 | * We begin the loop with right_el and left_el pointing to the | |
2000 | * leaf lists and work our way up. | |
2001 | * | |
2002 | * NOTE: within this loop, left_el and right_el always refer | |
2003 | * to the *child* lists. | |
2004 | */ | |
2005 | left_el = path_leaf_el(left_path); | |
2006 | right_el = path_leaf_el(right_path); | |
2007 | for(i = left_path->p_tree_depth - 1; i > subtree_index; i--) { | |
2008 | mlog(0, "Adjust records at index %u\n", i); | |
2009 | ||
2010 | /* | |
2011 | * One nice property of knowing that all of these | |
2012 | * nodes are below the root is that we only deal with | |
2013 | * the leftmost right node record and the rightmost | |
2014 | * left node record. | |
2015 | */ | |
2016 | el = left_path->p_node[i].el; | |
2017 | idx = le16_to_cpu(left_el->l_next_free_rec) - 1; | |
2018 | left_rec = &el->l_recs[idx]; | |
2019 | ||
2020 | el = right_path->p_node[i].el; | |
2021 | right_rec = &el->l_recs[0]; | |
2022 | ||
2023 | ocfs2_adjust_adjacent_records(left_rec, left_el, right_rec, | |
2024 | right_el); | |
2025 | ||
2026 | ret = ocfs2_journal_dirty(handle, left_path->p_node[i].bh); | |
2027 | if (ret) | |
2028 | mlog_errno(ret); | |
2029 | ||
2030 | ret = ocfs2_journal_dirty(handle, right_path->p_node[i].bh); | |
2031 | if (ret) | |
2032 | mlog_errno(ret); | |
2033 | ||
2034 | /* | |
2035 | * Setup our list pointers now so that the current | |
2036 | * parents become children in the next iteration. | |
2037 | */ | |
2038 | left_el = left_path->p_node[i].el; | |
2039 | right_el = right_path->p_node[i].el; | |
2040 | } | |
2041 | ||
2042 | /* | |
2043 | * At the root node, adjust the two adjacent records which | |
2044 | * begin our path to the leaves. | |
2045 | */ | |
2046 | ||
2047 | el = left_path->p_node[subtree_index].el; | |
2048 | left_el = left_path->p_node[subtree_index + 1].el; | |
2049 | right_el = right_path->p_node[subtree_index + 1].el; | |
2050 | ||
2051 | ocfs2_adjust_root_records(el, left_el, right_el, | |
2052 | left_path->p_node[subtree_index + 1].bh->b_blocknr); | |
2053 | ||
2054 | root_bh = left_path->p_node[subtree_index].bh; | |
2055 | ||
2056 | ret = ocfs2_journal_dirty(handle, root_bh); | |
2057 | if (ret) | |
2058 | mlog_errno(ret); | |
2059 | } | |
2060 | ||
2061 | static int ocfs2_rotate_subtree_right(struct inode *inode, | |
2062 | handle_t *handle, | |
2063 | struct ocfs2_path *left_path, | |
2064 | struct ocfs2_path *right_path, | |
2065 | int subtree_index) | |
2066 | { | |
2067 | int ret, i; | |
2068 | struct buffer_head *right_leaf_bh; | |
2069 | struct buffer_head *left_leaf_bh = NULL; | |
2070 | struct buffer_head *root_bh; | |
2071 | struct ocfs2_extent_list *right_el, *left_el; | |
2072 | struct ocfs2_extent_rec move_rec; | |
2073 | ||
2074 | left_leaf_bh = path_leaf_bh(left_path); | |
2075 | left_el = path_leaf_el(left_path); | |
2076 | ||
2077 | if (left_el->l_next_free_rec != left_el->l_count) { | |
2078 | ocfs2_error(inode->i_sb, | |
2079 | "Inode %llu has non-full interior leaf node %llu" | |
2080 | "(next free = %u)", | |
2081 | (unsigned long long)OCFS2_I(inode)->ip_blkno, | |
2082 | (unsigned long long)left_leaf_bh->b_blocknr, | |
2083 | le16_to_cpu(left_el->l_next_free_rec)); | |
2084 | return -EROFS; | |
2085 | } | |
2086 | ||
2087 | /* | |
2088 | * This extent block may already have an empty record, so we | |
2089 | * return early if so. | |
2090 | */ | |
2091 | if (ocfs2_is_empty_extent(&left_el->l_recs[0])) | |
2092 | return 0; | |
2093 | ||
2094 | root_bh = left_path->p_node[subtree_index].bh; | |
2095 | BUG_ON(root_bh != right_path->p_node[subtree_index].bh); | |
2096 | ||
13723d00 JB |
2097 | ret = ocfs2_path_bh_journal_access(handle, inode, right_path, |
2098 | subtree_index); | |
dcd0538f MF |
2099 | if (ret) { |
2100 | mlog_errno(ret); | |
2101 | goto out; | |
2102 | } | |
2103 | ||
2104 | for(i = subtree_index + 1; i < path_num_items(right_path); i++) { | |
13723d00 JB |
2105 | ret = ocfs2_path_bh_journal_access(handle, inode, |
2106 | right_path, i); | |
dcd0538f MF |
2107 | if (ret) { |
2108 | mlog_errno(ret); | |
2109 | goto out; | |
2110 | } | |
2111 | ||
13723d00 JB |
2112 | ret = ocfs2_path_bh_journal_access(handle, inode, |
2113 | left_path, i); | |
dcd0538f MF |
2114 | if (ret) { |
2115 | mlog_errno(ret); | |
2116 | goto out; | |
2117 | } | |
2118 | } | |
2119 | ||
2120 | right_leaf_bh = path_leaf_bh(right_path); | |
2121 | right_el = path_leaf_el(right_path); | |
2122 | ||
2123 | /* This is a code error, not a disk corruption. */ | |
2124 | mlog_bug_on_msg(!right_el->l_next_free_rec, "Inode %llu: Rotate fails " | |
2125 | "because rightmost leaf block %llu is empty\n", | |
2126 | (unsigned long long)OCFS2_I(inode)->ip_blkno, | |
2127 | (unsigned long long)right_leaf_bh->b_blocknr); | |
2128 | ||
2129 | ocfs2_create_empty_extent(right_el); | |
2130 | ||
2131 | ret = ocfs2_journal_dirty(handle, right_leaf_bh); | |
2132 | if (ret) { | |
2133 | mlog_errno(ret); | |
2134 | goto out; | |
2135 | } | |
2136 | ||
2137 | /* Do the copy now. */ | |
2138 | i = le16_to_cpu(left_el->l_next_free_rec) - 1; | |
2139 | move_rec = left_el->l_recs[i]; | |
2140 | right_el->l_recs[0] = move_rec; | |
2141 | ||
2142 | /* | |
2143 | * Clear out the record we just copied and shift everything | |
2144 | * over, leaving an empty extent in the left leaf. | |
2145 | * | |
2146 | * We temporarily subtract from next_free_rec so that the | |
2147 | * shift will lose the tail record (which is now defunct). | |
2148 | */ | |
2149 | le16_add_cpu(&left_el->l_next_free_rec, -1); | |
2150 | ocfs2_shift_records_right(left_el); | |
2151 | memset(&left_el->l_recs[0], 0, sizeof(struct ocfs2_extent_rec)); | |
2152 | le16_add_cpu(&left_el->l_next_free_rec, 1); | |
2153 | ||
2154 | ret = ocfs2_journal_dirty(handle, left_leaf_bh); | |
2155 | if (ret) { | |
2156 | mlog_errno(ret); | |
2157 | goto out; | |
2158 | } | |
2159 | ||
2160 | ocfs2_complete_edge_insert(inode, handle, left_path, right_path, | |
2161 | subtree_index); | |
2162 | ||
2163 | out: | |
2164 | return ret; | |
2165 | } | |
2166 | ||
2167 | /* | |
2168 | * Given a full path, determine what cpos value would return us a path | |
2169 | * containing the leaf immediately to the left of the current one. | |
2170 | * | |
2171 | * Will return zero if the path passed in is already the leftmost path. | |
2172 | */ | |
2173 | static int ocfs2_find_cpos_for_left_leaf(struct super_block *sb, | |
2174 | struct ocfs2_path *path, u32 *cpos) | |
2175 | { | |
2176 | int i, j, ret = 0; | |
2177 | u64 blkno; | |
2178 | struct ocfs2_extent_list *el; | |
2179 | ||
e48edee2 MF |
2180 | BUG_ON(path->p_tree_depth == 0); |
2181 | ||
dcd0538f MF |
2182 | *cpos = 0; |
2183 | ||
2184 | blkno = path_leaf_bh(path)->b_blocknr; | |
2185 | ||
2186 | /* Start at the tree node just above the leaf and work our way up. */ | |
2187 | i = path->p_tree_depth - 1; | |
2188 | while (i >= 0) { | |
2189 | el = path->p_node[i].el; | |
2190 | ||
2191 | /* | |
2192 | * Find the extent record just before the one in our | |
2193 | * path. | |
2194 | */ | |
2195 | for(j = 0; j < le16_to_cpu(el->l_next_free_rec); j++) { | |
2196 | if (le64_to_cpu(el->l_recs[j].e_blkno) == blkno) { | |
2197 | if (j == 0) { | |
2198 | if (i == 0) { | |
2199 | /* | |
2200 | * We've determined that the | |
2201 | * path specified is already | |
2202 | * the leftmost one - return a | |
2203 | * cpos of zero. | |
2204 | */ | |
2205 | goto out; | |
2206 | } | |
2207 | /* | |
2208 | * The leftmost record points to our | |
2209 | * leaf - we need to travel up the | |
2210 | * tree one level. | |
2211 | */ | |
2212 | goto next_node; | |
2213 | } | |
2214 | ||
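/*
 * The record to our left ends at cpos (e_cpos + clusters - 1); looking up
 * that cpos returns the leaf immediately to our left.
 */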
2215 | *cpos = le32_to_cpu(el->l_recs[j - 1].e_cpos); | |
e48edee2 MF |
2216 | *cpos = *cpos + ocfs2_rec_clusters(el, |
2217 | &el->l_recs[j - 1]); | |
2218 | *cpos = *cpos - 1; | |
dcd0538f MF |
2219 | goto out; |
2220 | } | |
2221 | } | |
2222 | ||
2223 | /* | |
2224 | * If we got here, we never found a valid node where | |
2225 | * the tree indicated one should be. | |
2226 | */ | |
2227 | ocfs2_error(sb, | |
2228 | "Invalid extent tree at extent block %llu\n", | |
2229 | (unsigned long long)blkno); | |
2230 | ret = -EROFS; | |
2231 | goto out; | |
2232 | ||
2233 | next_node: | |
2234 | blkno = path->p_node[i].bh->b_blocknr; | |
2235 | i--; | |
2236 | } | |
2237 | ||
2238 | out: | |
2239 | return ret; | |
2240 | } | |
2241 | ||
328d5752 MF |
2242 | /* |
2243 | * Extend the transaction by enough credits to complete the rotation, | |
2244 | * and still leave at least the original number of credits allocated | |
2245 | * to this transaction. | |
2246 | */ | |
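/*
 * The estimate below mirrors what a subtree rotation may dirty: the nodes
 * of both the left and right paths strictly below the subtree root (hence
 * the factor of two), plus the subtree root block itself, on top of
 * whatever the caller's own operation needs (op_credits).  For example, at
 * tree depth 3 with the subtree root at depth 1 this asks for
 * (3 - 1) * 2 + 1 = 5 credits plus op_credits.
 */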
dcd0538f | 2247 | static int ocfs2_extend_rotate_transaction(handle_t *handle, int subtree_depth, |
328d5752 | 2248 | int op_credits, |
dcd0538f MF |
2249 | struct ocfs2_path *path) |
2250 | { | |
328d5752 | 2251 | int credits = (path->p_tree_depth - subtree_depth) * 2 + 1 + op_credits; |
dcd0538f MF |
2252 | |
2253 | if (handle->h_buffer_credits < credits) | |
2254 | return ocfs2_extend_trans(handle, credits); | |
2255 | ||
2256 | return 0; | |
2257 | } | |
2258 | ||
2259 | /* | |
2260 | * Trap the case where we're inserting into the theoretical range past | |
2261 | * the _actual_ left leaf range. Otherwise, we'll rotate a record | |
2262 | * whose cpos is less than ours into the right leaf. | |
2263 | * | |
2264 | * It's only necessary to look at the rightmost record of the left | |
2265 | * leaf because the logic that calls us should ensure that the | |
2266 | * theoretical ranges in the path components above the leaves are | |
2267 | * correct. | |
2268 | */ | |
2269 | static int ocfs2_rotate_requires_path_adjustment(struct ocfs2_path *left_path, | |
2270 | u32 insert_cpos) | |
2271 | { | |
2272 | struct ocfs2_extent_list *left_el; | |
2273 | struct ocfs2_extent_rec *rec; | |
2274 | int next_free; | |
2275 | ||
2276 | left_el = path_leaf_el(left_path); | |
2277 | next_free = le16_to_cpu(left_el->l_next_free_rec); | |
2278 | rec = &left_el->l_recs[next_free - 1]; | |
2279 | ||
2280 | if (insert_cpos > le32_to_cpu(rec->e_cpos)) | |
2281 | return 1; | |
2282 | return 0; | |
2283 | } | |
2284 | ||
328d5752 MF |
2285 | static int ocfs2_leftmost_rec_contains(struct ocfs2_extent_list *el, u32 cpos) |
2286 | { | |
2287 | int next_free = le16_to_cpu(el->l_next_free_rec); | |
2288 | unsigned int range; | |
2289 | struct ocfs2_extent_rec *rec; | |
2290 | ||
2291 | if (next_free == 0) | |
2292 | return 0; | |
2293 | ||
2294 | rec = &el->l_recs[0]; | |
2295 | if (ocfs2_is_empty_extent(rec)) { | |
2296 | /* Empty list. */ | |
2297 | if (next_free == 1) | |
2298 | return 0; | |
2299 | rec = &el->l_recs[1]; | |
2300 | } | |
2301 | ||
2302 | range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec); | |
2303 | if (cpos >= le32_to_cpu(rec->e_cpos) && cpos < range) | |
2304 | return 1; | |
2305 | return 0; | |
2306 | } | |
2307 | ||
dcd0538f MF |
2308 | /* |
2309 | * Rotate all the records in a btree right one record, starting at insert_cpos. | |
2310 | * | |
2311 | * The path to the rightmost leaf should be passed in. | |
2312 | * | |
2313 | * The array is assumed to be large enough to hold an entire path (tree depth). | |
2314 | * | |
2315 | * Upon successful return from this function: | |
2316 | * | |
2317 | * - The 'right_path' array will contain a path to the leaf block | |
2318 | * whose range contains e_cpos. | |
2319 | * - That leaf block will have a single empty extent in list index 0. | |
2320 | * - In the case that the rotation requires a post-insert update, | |
2321 | * *ret_left_path will contain a valid path which can be passed to | |
2322 | * ocfs2_insert_path(). | |
2323 | */ | |
2324 | static int ocfs2_rotate_tree_right(struct inode *inode, | |
2325 | handle_t *handle, | |
328d5752 | 2326 | enum ocfs2_split_type split, |
dcd0538f MF |
2327 | u32 insert_cpos, |
2328 | struct ocfs2_path *right_path, | |
2329 | struct ocfs2_path **ret_left_path) | |
2330 | { | |
328d5752 | 2331 | int ret, start, orig_credits = handle->h_buffer_credits; |
dcd0538f MF |
2332 | u32 cpos; |
2333 | struct ocfs2_path *left_path = NULL; | |
2334 | ||
2335 | *ret_left_path = NULL; | |
2336 | ||
ffdd7a54 | 2337 | left_path = ocfs2_new_path_from_path(right_path); |
dcd0538f MF |
2338 | if (!left_path) { |
2339 | ret = -ENOMEM; | |
2340 | mlog_errno(ret); | |
2341 | goto out; | |
2342 | } | |
2343 | ||
2344 | ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb, right_path, &cpos); | |
2345 | if (ret) { | |
2346 | mlog_errno(ret); | |
2347 | goto out; | |
2348 | } | |
2349 | ||
2350 | mlog(0, "Insert: %u, first left path cpos: %u\n", insert_cpos, cpos); | |
2351 | ||
2352 | /* | |
2353 | * What we want to do here is: | |
2354 | * | |
2355 | * 1) Start with the rightmost path. | |
2356 | * | |
2357 | * 2) Determine a path to the leaf block directly to the left | |
2358 | * of that leaf. | |
2359 | * | |
2360 | * 3) Determine the 'subtree root' - the lowest level tree node | |
2361 | * which contains a path to both leaves. | |
2362 | * | |
2363 | * 4) Rotate the subtree. | |
2364 | * | |
2365 | * 5) Find the next subtree by considering the left path to be | |
2366 | * the new right path. | |
2367 | * | |
2368 | * The check at the top of this while loop also accepts | |
2369 | * insert_cpos == cpos because cpos is only a _theoretical_ | |
2370 | * value to get us the left path - insert_cpos might very well | |
2371 | * be filling that hole. | |
2372 | * | |
2373 | * Stop at a cpos of '0' because we either started at the | |
2374 | * leftmost branch (i.e., a tree with one branch and a | |
2375 | * rotation inside of it), or we've gone as far as we can in | |
2376 | * rotating subtrees. | |
2377 | */ | |
2378 | while (cpos && insert_cpos <= cpos) { | |
2379 | mlog(0, "Rotating a tree: ins. cpos: %u, left path cpos: %u\n", | |
2380 | insert_cpos, cpos); | |
2381 | ||
2382 | ret = ocfs2_find_path(inode, left_path, cpos); | |
2383 | if (ret) { | |
2384 | mlog_errno(ret); | |
2385 | goto out; | |
2386 | } | |
2387 | ||
2388 | mlog_bug_on_msg(path_leaf_bh(left_path) == | |
2389 | path_leaf_bh(right_path), | |
2390 | "Inode %lu: error during insert of %u " | |
2391 | "(left path cpos %u) results in two identical " | |
2392 | "paths ending at %llu\n", | |
2393 | inode->i_ino, insert_cpos, cpos, | |
2394 | (unsigned long long) | |
2395 | path_leaf_bh(left_path)->b_blocknr); | |
2396 | ||
328d5752 MF |
2397 | if (split == SPLIT_NONE && |
2398 | ocfs2_rotate_requires_path_adjustment(left_path, | |
dcd0538f | 2399 | insert_cpos)) { |
dcd0538f MF |
2400 | |
2401 | /* | |
2402 | * We've rotated the tree as much as we | |
2403 | * should. The rest is up to | |
2404 | * ocfs2_insert_path() to complete, after the | |
2405 | * record insertion. We indicate this | |
2406 | * situation by returning the left path. | |
2407 | * | |
2408 | * The reason we don't adjust the records here | |
2409 | * before the record insert is that an error | |
2410 | * later might break the rule where a parent | |
2411 | * record e_cpos will reflect the actual | |
2412 | * e_cpos of the 1st nonempty record of the | |
2413 | * child list. | |
2414 | */ | |
2415 | *ret_left_path = left_path; | |
2416 | goto out_ret_path; | |
2417 | } | |
2418 | ||
2419 | start = ocfs2_find_subtree_root(inode, left_path, right_path); | |
2420 | ||
2421 | mlog(0, "Subtree root at index %d (blk %llu, depth %d)\n", | |
2422 | start, | |
2423 | (unsigned long long) right_path->p_node[start].bh->b_blocknr, | |
2424 | right_path->p_tree_depth); | |
2425 | ||
2426 | ret = ocfs2_extend_rotate_transaction(handle, start, | |
328d5752 | 2427 | orig_credits, right_path); |
dcd0538f MF |
2428 | if (ret) { |
2429 | mlog_errno(ret); | |
2430 | goto out; | |
2431 | } | |
2432 | ||
2433 | ret = ocfs2_rotate_subtree_right(inode, handle, left_path, | |
2434 | right_path, start); | |
2435 | if (ret) { | |
2436 | mlog_errno(ret); | |
2437 | goto out; | |
2438 | } | |
2439 | ||
328d5752 MF |
2440 | if (split != SPLIT_NONE && |
2441 | ocfs2_leftmost_rec_contains(path_leaf_el(right_path), | |
2442 | insert_cpos)) { | |
2443 | /* | |
2444 | * A rotate moves the rightmost left leaf | |
2445 | * record over to the leftmost right leaf | |
2446 | * slot. If we're doing an extent split | |
2447 | * instead of a real insert, then we have to | |
2448 | * check that the extent to be split wasn't | |
2449 | * just moved over. If it was, then we can | |
2450 | * exit here, passing left_path back - | |
2451 | * ocfs2_split_extent() is smart enough to | |
2452 | * search both leaves. | |
2453 | */ | |
2454 | *ret_left_path = left_path; | |
2455 | goto out_ret_path; | |
2456 | } | |
2457 | ||
dcd0538f MF |
2458 | /* |
2459 | * There is no need to re-read the next right path | |
2460 | * as we know that it'll be our current left | |
2461 | * path. Optimize by copying values instead. | |
2462 | */ | |
2463 | ocfs2_mv_path(right_path, left_path); | |
2464 | ||
2465 | ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb, right_path, | |
2466 | &cpos); | |
2467 | if (ret) { | |
2468 | mlog_errno(ret); | |
2469 | goto out; | |
2470 | } | |
2471 | } | |
2472 | ||
2473 | out: | |
2474 | ocfs2_free_path(left_path); | |
2475 | ||
2476 | out_ret_path: | |
2477 | return ret; | |
2478 | } | |
2479 | ||
3c5e1068 TM |
2480 | static int ocfs2_update_edge_lengths(struct inode *inode, handle_t *handle, |
2481 | int subtree_index, struct ocfs2_path *path) | |
dcd0538f | 2482 | { |
3c5e1068 | 2483 | int i, idx, ret; |
dcd0538f | 2484 | struct ocfs2_extent_rec *rec; |
328d5752 MF |
2485 | struct ocfs2_extent_list *el; |
2486 | struct ocfs2_extent_block *eb; | |
2487 | u32 range; | |
dcd0538f | 2488 | |
3c5e1068 TM |
2489 | /* |
2490 | * In normal tree rotation process, we will never touch the | |
2491 | * tree branch above subtree_index and ocfs2_extend_rotate_transaction | |
2492 | * doesn't reserve the credits for them either. | |
2493 | * | |
2494 | * But we do have a special case here which will update the rightmost | |
2495 | * records for all the bh in the path. | |
2496 | * So we have to allocate extra credits and access them. | |
2497 | */ | |
2498 | ret = ocfs2_extend_trans(handle, | |
2499 | handle->h_buffer_credits + subtree_index); | |
2500 | if (ret) { | |
2501 | mlog_errno(ret); | |
2502 | goto out; | |
2503 | } | |
2504 | ||
2505 | ret = ocfs2_journal_access_path(inode, handle, path); | |
2506 | if (ret) { | |
2507 | mlog_errno(ret); | |
2508 | goto out; | |
2509 | } | |
2510 | ||
328d5752 MF |
2511 | /* Path should always be rightmost. */ |
2512 | eb = (struct ocfs2_extent_block *)path_leaf_bh(path)->b_data; | |
2513 | BUG_ON(eb->h_next_leaf_blk != 0ULL); | |
dcd0538f | 2514 | |
328d5752 MF |
2515 | el = &eb->h_list; |
2516 | BUG_ON(le16_to_cpu(el->l_next_free_rec) == 0); | |
2517 | idx = le16_to_cpu(el->l_next_free_rec) - 1; | |
2518 | rec = &el->l_recs[idx]; | |
2519 | range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec); | |
dcd0538f | 2520 | |
328d5752 MF |
2521 | for (i = 0; i < path->p_tree_depth; i++) { |
2522 | el = path->p_node[i].el; | |
2523 | idx = le16_to_cpu(el->l_next_free_rec) - 1; | |
2524 | rec = &el->l_recs[idx]; | |
dcd0538f | 2525 | |
328d5752 MF |
2526 | rec->e_int_clusters = cpu_to_le32(range); |
2527 | le32_add_cpu(&rec->e_int_clusters, -le32_to_cpu(rec->e_cpos)); | |
dcd0538f | 2528 | |
328d5752 | 2529 | ocfs2_journal_dirty(handle, path->p_node[i].bh); |
dcd0538f | 2530 | } |
3c5e1068 TM |
2531 | out: |
2532 | return ret; | |
dcd0538f MF |
2533 | } |
2534 | ||
328d5752 MF |
2535 | static void ocfs2_unlink_path(struct inode *inode, handle_t *handle, |
2536 | struct ocfs2_cached_dealloc_ctxt *dealloc, | |
2537 | struct ocfs2_path *path, int unlink_start) | |
dcd0538f | 2538 | { |
328d5752 MF |
2539 | int ret, i; |
2540 | struct ocfs2_extent_block *eb; | |
2541 | struct ocfs2_extent_list *el; | |
2542 | struct buffer_head *bh; | |
2543 | ||
2544 | for(i = unlink_start; i < path_num_items(path); i++) { | |
2545 | bh = path->p_node[i].bh; | |
2546 | ||
2547 | eb = (struct ocfs2_extent_block *)bh->b_data; | |
2548 | /* | |
2549 | * Not all nodes might have had their final count | |
2550 | * decremented by the caller - handle this here. | |
2551 | */ | |
2552 | el = &eb->h_list; | |
2553 | if (le16_to_cpu(el->l_next_free_rec) > 1) { | |
2554 | mlog(ML_ERROR, | |
2555 | "Inode %llu, attempted to remove extent block " | |
2556 | "%llu with %u records\n", | |
2557 | (unsigned long long)OCFS2_I(inode)->ip_blkno, | |
2558 | (unsigned long long)le64_to_cpu(eb->h_blkno), | |
2559 | le16_to_cpu(el->l_next_free_rec)); | |
2560 | ||
2561 | ocfs2_journal_dirty(handle, bh); | |
2562 | ocfs2_remove_from_cache(inode, bh); | |
2563 | continue; | |
2564 | } | |
2565 | ||
2566 | el->l_next_free_rec = 0; | |
2567 | memset(&el->l_recs[0], 0, sizeof(struct ocfs2_extent_rec)); | |
2568 | ||
2569 | ocfs2_journal_dirty(handle, bh); | |
2570 | ||
2571 | ret = ocfs2_cache_extent_block_free(dealloc, eb); | |
2572 | if (ret) | |
2573 | mlog_errno(ret); | |
2574 | ||
2575 | ocfs2_remove_from_cache(inode, bh); | |
2576 | } | |
dcd0538f MF |
2577 | } |
2578 | ||
328d5752 MF |
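/*
 * Detach the branch at subtree_index + 1 of right_path from the tree: its
 * record is cleared from the subtree root's list, the left leaf's
 * h_next_leaf_blk forward pointer is zeroed, and the now-orphaned extent
 * blocks are handed to ocfs2_unlink_path() to be queued for freeing.
 */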
2579 | static void ocfs2_unlink_subtree(struct inode *inode, handle_t *handle, |
2580 | struct ocfs2_path *left_path, | |
2581 | struct ocfs2_path *right_path, | |
2582 | int subtree_index, | |
2583 | struct ocfs2_cached_dealloc_ctxt *dealloc) | |
dcd0538f | 2584 | { |
328d5752 MF |
2585 | int i; |
2586 | struct buffer_head *root_bh = left_path->p_node[subtree_index].bh; | |
2587 | struct ocfs2_extent_list *root_el = left_path->p_node[subtree_index].el; | |
dcd0538f | 2588 | struct ocfs2_extent_list *el; |
328d5752 | 2589 | struct ocfs2_extent_block *eb; |
dcd0538f | 2590 | |
328d5752 | 2591 | el = path_leaf_el(left_path); |
dcd0538f | 2592 | |
328d5752 | 2593 | eb = (struct ocfs2_extent_block *)right_path->p_node[subtree_index + 1].bh->b_data; |
e48edee2 | 2594 | |
328d5752 MF |
2595 | for(i = 1; i < le16_to_cpu(root_el->l_next_free_rec); i++) |
2596 | if (root_el->l_recs[i].e_blkno == eb->h_blkno) | |
2597 | break; | |
dcd0538f | 2598 | |
328d5752 | 2599 | BUG_ON(i >= le16_to_cpu(root_el->l_next_free_rec)); |
dcd0538f | 2600 | |
328d5752 MF |
2601 | memset(&root_el->l_recs[i], 0, sizeof(struct ocfs2_extent_rec)); |
2602 | le16_add_cpu(&root_el->l_next_free_rec, -1); | |
2603 | ||
2604 | eb = (struct ocfs2_extent_block *)path_leaf_bh(left_path)->b_data; | |
2605 | eb->h_next_leaf_blk = 0; | |
2606 | ||
2607 | ocfs2_journal_dirty(handle, root_bh); | |
2608 | ocfs2_journal_dirty(handle, path_leaf_bh(left_path)); | |
2609 | ||
2610 | ocfs2_unlink_path(inode, handle, dealloc, right_path, | |
2611 | subtree_index + 1); | |
2612 | } | |
2613 | ||
2614 | static int ocfs2_rotate_subtree_left(struct inode *inode, handle_t *handle, | |
2615 | struct ocfs2_path *left_path, | |
2616 | struct ocfs2_path *right_path, | |
2617 | int subtree_index, | |
2618 | struct ocfs2_cached_dealloc_ctxt *dealloc, | |
e7d4cb6b TM |
2619 | int *deleted, |
2620 | struct ocfs2_extent_tree *et) | |
328d5752 MF |
2621 | { |
2622 | int ret, i, del_right_subtree = 0, right_has_empty = 0; | |
e7d4cb6b | 2623 | struct buffer_head *root_bh, *et_root_bh = path_root_bh(right_path); |
328d5752 MF |
2624 | struct ocfs2_extent_list *right_leaf_el, *left_leaf_el; |
2625 | struct ocfs2_extent_block *eb; | |
2626 | ||
2627 | *deleted = 0; | |
2628 | ||
2629 | right_leaf_el = path_leaf_el(right_path); | |
2630 | left_leaf_el = path_leaf_el(left_path); | |
2631 | root_bh = left_path->p_node[subtree_index].bh; | |
2632 | BUG_ON(root_bh != right_path->p_node[subtree_index].bh); | |
2633 | ||
2634 | if (!ocfs2_is_empty_extent(&left_leaf_el->l_recs[0])) | |
2635 | return 0; | |
dcd0538f | 2636 | |
328d5752 MF |
2637 | eb = (struct ocfs2_extent_block *)path_leaf_bh(right_path)->b_data; |
2638 | if (ocfs2_is_empty_extent(&right_leaf_el->l_recs[0])) { | |
dcd0538f | 2639 | /* |
328d5752 MF |
2640 | * It's legal for us to proceed if the right leaf is |
2641 | * the rightmost one and it has an empty extent. There | |
2642 | * are two cases to handle - whether the leaf will be | |
2643 | * empty after removal or not. If the leaf isn't empty | |
2644 | * then just remove the empty extent up front. The | |
2645 | * next block will handle empty leaves by flagging | |
2646 | * them for unlink. | |
2647 | * | |
2648 | * Non-rightmost leaves will throw -EAGAIN and the | |
2649 | * caller can manually move the subtree and retry. | |
dcd0538f | 2650 | */ |
dcd0538f | 2651 | |
328d5752 MF |
2652 | if (eb->h_next_leaf_blk != 0ULL) |
2653 | return -EAGAIN; | |
2654 | ||
2655 | if (le16_to_cpu(right_leaf_el->l_next_free_rec) > 1) { | |
13723d00 JB |
2656 | ret = ocfs2_journal_access_eb(handle, inode, |
2657 | path_leaf_bh(right_path), | |
2658 | OCFS2_JOURNAL_ACCESS_WRITE); | |
dcd0538f MF |
2659 | if (ret) { |
2660 | mlog_errno(ret); | |
2661 | goto out; | |
2662 | } | |
2663 | ||
328d5752 MF |
2664 | ocfs2_remove_empty_extent(right_leaf_el); |
2665 | } else | |
2666 | right_has_empty = 1; | |
dcd0538f MF |
2667 | } |
2668 | ||
328d5752 MF |
2669 | if (eb->h_next_leaf_blk == 0ULL && |
2670 | le16_to_cpu(right_leaf_el->l_next_free_rec) == 1) { | |
2671 | /* | |
2672 | * We have to update i_last_eb_blk during the meta | |
2673 | * data delete. | |
2674 | */ | |
13723d00 JB |
2675 | ret = ocfs2_et_root_journal_access(handle, inode, et, |
2676 | OCFS2_JOURNAL_ACCESS_WRITE); | |
328d5752 MF |
2677 | if (ret) { |
2678 | mlog_errno(ret); | |
2679 | goto out; | |
2680 | } | |
2681 | ||
2682 | del_right_subtree = 1; | |
2683 | } | |
2684 | ||
2685 | /* | |
2686 | * Getting here with an empty extent in the right path implies | |
2687 | * that it's the rightmost path and will be deleted. | |
2688 | */ | |
2689 | BUG_ON(right_has_empty && !del_right_subtree); | |
2690 | ||
13723d00 JB |
2691 | ret = ocfs2_path_bh_journal_access(handle, inode, right_path, |
2692 | subtree_index); | |
328d5752 MF |
2693 | if (ret) { |
2694 | mlog_errno(ret); | |
2695 | goto out; | |
2696 | } | |
2697 | ||
2698 | for(i = subtree_index + 1; i < path_num_items(right_path); i++) { | |
13723d00 JB |
2699 | ret = ocfs2_path_bh_journal_access(handle, inode, |
2700 | right_path, i); | |
328d5752 MF |
2701 | if (ret) { |
2702 | mlog_errno(ret); | |
2703 | goto out; | |
2704 | } | |
2705 | ||
13723d00 JB |
2706 | ret = ocfs2_path_bh_journal_access(handle, inode, |
2707 | left_path, i); | |
328d5752 MF |
2708 | if (ret) { |
2709 | mlog_errno(ret); | |
2710 | goto out; | |
2711 | } | |
2712 | } | |
2713 | ||
2714 | if (!right_has_empty) { | |
2715 | /* | |
2716 | * Only do this if we're moving a real | |
2717 | * record. Otherwise, the action is delayed until | |
2718 | * after removal of the right path in which case we | |
2719 | * can do a simple shift to remove the empty extent. | |
2720 | */ | |
2721 | ocfs2_rotate_leaf(left_leaf_el, &right_leaf_el->l_recs[0]); | |
2722 | memset(&right_leaf_el->l_recs[0], 0, | |
2723 | sizeof(struct ocfs2_extent_rec)); | |
2724 | } | |
2725 | if (eb->h_next_leaf_blk == 0ULL) { | |
2726 | /* | |
2727 | * Move recs over to get rid of empty extent, decrease | |
2728 | * next_free. This is allowed to remove the last | |
2729 | * extent in our leaf (setting l_next_free_rec to | |
2730 | * zero) - the delete code below won't care. | |
2731 | */ | |
2732 | ocfs2_remove_empty_extent(right_leaf_el); | |
2733 | } | |
2734 | ||
2735 | ret = ocfs2_journal_dirty(handle, path_leaf_bh(left_path)); | |
2736 | if (ret) | |
2737 | mlog_errno(ret); | |
2738 | ret = ocfs2_journal_dirty(handle, path_leaf_bh(right_path)); | |
2739 | if (ret) | |
2740 | mlog_errno(ret); | |
2741 | ||
2742 | if (del_right_subtree) { | |
2743 | ocfs2_unlink_subtree(inode, handle, left_path, right_path, | |
2744 | subtree_index, dealloc); | |
3c5e1068 TM |
2745 | ret = ocfs2_update_edge_lengths(inode, handle, subtree_index, |
2746 | left_path); | |
2747 | if (ret) { | |
2748 | mlog_errno(ret); | |
2749 | goto out; | |
2750 | } | |
328d5752 MF |
2751 | |
2752 | eb = (struct ocfs2_extent_block *)path_leaf_bh(left_path)->b_data; | |
35dc0aa3 | 2753 | ocfs2_et_set_last_eb_blk(et, le64_to_cpu(eb->h_blkno)); |
328d5752 MF |
2754 | |
2755 | /* | |
2756 | * Removal of the extent in the left leaf was skipped | |
2757 | * above so we could delete the right path | |
2758 | * 1st. | |
2759 | */ | |
2760 | if (right_has_empty) | |
2761 | ocfs2_remove_empty_extent(left_leaf_el); | |
2762 | ||
e7d4cb6b | 2763 | ret = ocfs2_journal_dirty(handle, et_root_bh); |
328d5752 MF |
2764 | if (ret) |
2765 | mlog_errno(ret); | |
2766 | ||
2767 | *deleted = 1; | |
2768 | } else | |
2769 | ocfs2_complete_edge_insert(inode, handle, left_path, right_path, | |
2770 | subtree_index); | |
2771 | ||
2772 | out: | |
2773 | return ret; | |
2774 | } | |
2775 | ||
2776 | /* | |
2777 | * Given a full path, determine what cpos value would return us a path | |
2778 | * containing the leaf immediately to the right of the current one. | |
2779 | * | |
2780 | * Will return zero if the path passed in is already the rightmost path. | |
2781 | * | |
2782 | * This looks similar, but is subtly different to | |
2783 | * ocfs2_find_cpos_for_left_leaf(). | |
2784 | */ | |
2785 | static int ocfs2_find_cpos_for_right_leaf(struct super_block *sb, | |
2786 | struct ocfs2_path *path, u32 *cpos) | |
2787 | { | |
2788 | int i, j, ret = 0; | |
2789 | u64 blkno; | |
2790 | struct ocfs2_extent_list *el; | |
2791 | ||
2792 | *cpos = 0; | |
2793 | ||
2794 | if (path->p_tree_depth == 0) | |
2795 | return 0; | |
2796 | ||
2797 | blkno = path_leaf_bh(path)->b_blocknr; | |
2798 | ||
2799 | /* Start at the tree node just above the leaf and work our way up. */ | |
2800 | i = path->p_tree_depth - 1; | |
2801 | while (i >= 0) { | |
2802 | int next_free; | |
2803 | ||
2804 | el = path->p_node[i].el; | |
2805 | ||
2806 | /* | |
2807 | * Find the extent record just after the one in our | |
2808 | * path. | |
2809 | */ | |
2810 | next_free = le16_to_cpu(el->l_next_free_rec); | |
2811 | for(j = 0; j < le16_to_cpu(el->l_next_free_rec); j++) { | |
2812 | if (le64_to_cpu(el->l_recs[j].e_blkno) == blkno) { | |
2813 | if (j == (next_free - 1)) { | |
2814 | if (i == 0) { | |
2815 | /* | |
2816 | * We've determined that the | |
2817 | * path specified is already | |
2818 | * the rightmost one - return a | |
2819 | * cpos of zero. | |
2820 | */ | |
2821 | goto out; | |
2822 | } | |
2823 | /* | |
2824 | * The rightmost record points to our | |
2825 | * leaf - we need to travel up the | |
2826 | * tree one level. | |
2827 | */ | |
2828 | goto next_node; | |
2829 | } | |
2830 | ||
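/*
 * The record just after ours starts the next leaf's range, so its e_cpos
 * is a cpos that lands squarely in the leaf to our right.
 */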
2831 | *cpos = le32_to_cpu(el->l_recs[j + 1].e_cpos); | |
2832 | goto out; | |
2833 | } | |
2834 | } | |
2835 | ||
2836 | /* | |
2837 | * If we got here, we never found a valid node where | |
2838 | * the tree indicated one should be. | |
2839 | */ | |
2840 | ocfs2_error(sb, | |
2841 | "Invalid extent tree at extent block %llu\n", | |
2842 | (unsigned long long)blkno); | |
2843 | ret = -EROFS; | |
2844 | goto out; | |
2845 | ||
2846 | next_node: | |
2847 | blkno = path->p_node[i].bh->b_blocknr; | |
2848 | i--; | |
2849 | } | |
2850 | ||
2851 | out: | |
2852 | return ret; | |
2853 | } | |
2854 | ||
2855 | static int ocfs2_rotate_rightmost_leaf_left(struct inode *inode, | |
2856 | handle_t *handle, | |
13723d00 | 2857 | struct ocfs2_path *path) |
328d5752 MF |
2858 | { |
2859 | int ret; | |
13723d00 JB |
2860 | struct buffer_head *bh = path_leaf_bh(path); |
2861 | struct ocfs2_extent_list *el = path_leaf_el(path); | |
328d5752 MF |
2862 | |
2863 | if (!ocfs2_is_empty_extent(&el->l_recs[0])) | |
2864 | return 0; | |
2865 | ||
13723d00 JB |
2866 | ret = ocfs2_path_bh_journal_access(handle, inode, path, |
2867 | path_num_items(path) - 1); | |
328d5752 MF |
2868 | if (ret) { |
2869 | mlog_errno(ret); | |
2870 | goto out; | |
2871 | } | |
2872 | ||
2873 | ocfs2_remove_empty_extent(el); | |
2874 | ||
2875 | ret = ocfs2_journal_dirty(handle, bh); | |
2876 | if (ret) | |
2877 | mlog_errno(ret); | |
2878 | ||
2879 | out: | |
2880 | return ret; | |
2881 | } | |
2882 | ||
2883 | static int __ocfs2_rotate_tree_left(struct inode *inode, | |
2884 | handle_t *handle, int orig_credits, | |
2885 | struct ocfs2_path *path, | |
2886 | struct ocfs2_cached_dealloc_ctxt *dealloc, | |
e7d4cb6b TM |
2887 | struct ocfs2_path **empty_extent_path, |
2888 | struct ocfs2_extent_tree *et) | |
328d5752 MF |
2889 | { |
2890 | int ret, subtree_root, deleted; | |
2891 | u32 right_cpos; | |
2892 | struct ocfs2_path *left_path = NULL; | |
2893 | struct ocfs2_path *right_path = NULL; | |
2894 | ||
2895 | BUG_ON(!ocfs2_is_empty_extent(&(path_leaf_el(path)->l_recs[0]))); | |
2896 | ||
2897 | *empty_extent_path = NULL; | |
2898 | ||
2899 | ret = ocfs2_find_cpos_for_right_leaf(inode->i_sb, path, | |
2900 | &right_cpos); | |
2901 | if (ret) { | |
2902 | mlog_errno(ret); | |
2903 | goto out; | |
2904 | } | |
2905 | ||
ffdd7a54 | 2906 | left_path = ocfs2_new_path_from_path(path); |
328d5752 MF |
2907 | if (!left_path) { |
2908 | ret = -ENOMEM; | |
2909 | mlog_errno(ret); | |
2910 | goto out; | |
2911 | } | |
2912 | ||
2913 | ocfs2_cp_path(left_path, path); | |
2914 | ||
ffdd7a54 | 2915 | right_path = ocfs2_new_path_from_path(path); |
328d5752 MF |
2916 | if (!right_path) { |
2917 | ret = -ENOMEM; | |
2918 | mlog_errno(ret); | |
2919 | goto out; | |
2920 | } | |
2921 | ||
2922 | while (right_cpos) { | |
2923 | ret = ocfs2_find_path(inode, right_path, right_cpos); | |
2924 | if (ret) { | |
2925 | mlog_errno(ret); | |
2926 | goto out; | |
2927 | } | |
2928 | ||
2929 | subtree_root = ocfs2_find_subtree_root(inode, left_path, | |
2930 | right_path); | |
2931 | ||
2932 | mlog(0, "Subtree root at index %d (blk %llu, depth %d)\n", | |
2933 | subtree_root, | |
2934 | (unsigned long long) | |
2935 | right_path->p_node[subtree_root].bh->b_blocknr, | |
2936 | right_path->p_tree_depth); | |
2937 | ||
2938 | ret = ocfs2_extend_rotate_transaction(handle, subtree_root, | |
2939 | orig_credits, left_path); | |
2940 | if (ret) { | |
2941 | mlog_errno(ret); | |
2942 | goto out; | |
2943 | } | |
2944 | ||
e8aed345 MF |
2945 | /* |
2946 | * Caller might still want to make changes to the | |
2947 | * tree root, so re-add it to the journal here. | |
2948 | */ | |
13723d00 JB |
2949 | ret = ocfs2_path_bh_journal_access(handle, inode, |
2950 | left_path, 0); | |
e8aed345 MF |
2951 | if (ret) { |
2952 | mlog_errno(ret); | |
2953 | goto out; | |
2954 | } | |
2955 | ||
328d5752 MF |
2956 | ret = ocfs2_rotate_subtree_left(inode, handle, left_path, |
2957 | right_path, subtree_root, | |
e7d4cb6b | 2958 | dealloc, &deleted, et); |
328d5752 MF |
2959 | if (ret == -EAGAIN) { |
2960 | /* | |
2961 | * The rotation has to temporarily stop due to | |
2962 | * the right subtree having an empty | |
2963 | * extent. Pass it back to the caller for a | |
2964 | * fixup. | |
2965 | */ | |
2966 | *empty_extent_path = right_path; | |
2967 | right_path = NULL; | |
2968 | goto out; | |
2969 | } | |
2970 | if (ret) { | |
2971 | mlog_errno(ret); | |
2972 | goto out; | |
2973 | } | |
2974 | ||
2975 | /* | |
2976 | * The subtree rotate might have removed records on | |
2977 | * the rightmost edge. If so, then rotation is | |
2978 | * complete. | |
2979 | */ | |
2980 | if (deleted) | |
2981 | break; | |
2982 | ||
2983 | ocfs2_mv_path(left_path, right_path); | |
2984 | ||
2985 | ret = ocfs2_find_cpos_for_right_leaf(inode->i_sb, left_path, | |
2986 | &right_cpos); | |
2987 | if (ret) { | |
2988 | mlog_errno(ret); | |
2989 | goto out; | |
2990 | } | |
2991 | } | |
2992 | ||
2993 | out: | |
2994 | ocfs2_free_path(right_path); | |
2995 | ocfs2_free_path(left_path); | |
2996 | ||
2997 | return ret; | |
2998 | } | |
2999 | ||
3000 | static int ocfs2_remove_rightmost_path(struct inode *inode, handle_t *handle, | |
e7d4cb6b TM |
3001 | struct ocfs2_path *path, |
3002 | struct ocfs2_cached_dealloc_ctxt *dealloc, | |
3003 | struct ocfs2_extent_tree *et) | |
328d5752 MF |
3004 | { |
3005 | int ret, subtree_index; | |
3006 | u32 cpos; | |
3007 | struct ocfs2_path *left_path = NULL; | |
328d5752 MF |
3008 | struct ocfs2_extent_block *eb; |
3009 | struct ocfs2_extent_list *el; | |
3010 | ||
328d5752 | 3011 | |
35dc0aa3 | 3012 | ret = ocfs2_et_sanity_check(inode, et); |
e7d4cb6b TM |
3013 | if (ret) |
3014 | goto out; | |
328d5752 MF |
3015 | /* |
3016 | * There are two ways we handle this, depending on | |
3017 | * whether path is the only existing one. | |
3018 | */ | |
3019 | ret = ocfs2_extend_rotate_transaction(handle, 0, | |
3020 | handle->h_buffer_credits, | |
3021 | path); | |
3022 | if (ret) { | |
3023 | mlog_errno(ret); | |
3024 | goto out; | |
3025 | } | |
3026 | ||
3027 | ret = ocfs2_journal_access_path(inode, handle, path); | |
3028 | if (ret) { | |
3029 | mlog_errno(ret); | |
3030 | goto out; | |
3031 | } | |
3032 | ||
3033 | ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb, path, &cpos); | |
3034 | if (ret) { | |
3035 | mlog_errno(ret); | |
3036 | goto out; | |
3037 | } | |
3038 | ||
3039 | if (cpos) { | |
3040 | /* | |
3041 | * We have a path to the left of this one - it needs | |
3042 | * an update too. | |
3043 | */ | |
ffdd7a54 | 3044 | left_path = ocfs2_new_path_from_path(path); |
328d5752 MF |
3045 | if (!left_path) { |
3046 | ret = -ENOMEM; | |
3047 | mlog_errno(ret); | |
3048 | goto out; | |
3049 | } | |
3050 | ||
3051 | ret = ocfs2_find_path(inode, left_path, cpos); | |
3052 | if (ret) { | |
3053 | mlog_errno(ret); | |
3054 | goto out; | |
3055 | } | |
3056 | ||
3057 | ret = ocfs2_journal_access_path(inode, handle, left_path); | |
3058 | if (ret) { | |
3059 | mlog_errno(ret); | |
3060 | goto out; | |
3061 | } | |
3062 | ||
3063 | subtree_index = ocfs2_find_subtree_root(inode, left_path, path); | |
3064 | ||
3065 | ocfs2_unlink_subtree(inode, handle, left_path, path, | |
3066 | subtree_index, dealloc); | |
3c5e1068 TM |
3067 | ret = ocfs2_update_edge_lengths(inode, handle, subtree_index, |
3068 | left_path); | |
3069 | if (ret) { | |
3070 | mlog_errno(ret); | |
3071 | goto out; | |
3072 | } | |
328d5752 MF |
3073 | |
3074 | eb = (struct ocfs2_extent_block *)path_leaf_bh(left_path)->b_data; | |
35dc0aa3 | 3075 | ocfs2_et_set_last_eb_blk(et, le64_to_cpu(eb->h_blkno)); |
328d5752 MF |
3076 | } else { |
3077 | /* | |
3078 | * 'path' is also the leftmost path which | |
3079 | * means it must be the only one. This gets | |
3080 | * handled differently because we want to | |
3081 | * revert the inode back to having extents | |
3082 | * in-line. | |
3083 | */ | |
3084 | ocfs2_unlink_path(inode, handle, dealloc, path, 1); | |
3085 | ||
ce1d9ea6 | 3086 | el = et->et_root_el; |
328d5752 MF |
3087 | el->l_tree_depth = 0; |
3088 | el->l_next_free_rec = 0; | |
3089 | memset(&el->l_recs[0], 0, sizeof(struct ocfs2_extent_rec)); | |
3090 | ||
35dc0aa3 | 3091 | ocfs2_et_set_last_eb_blk(et, 0); |
328d5752 MF |
3092 | } |
3093 | ||
3094 | ocfs2_journal_dirty(handle, path_root_bh(path)); | |
3095 | ||
3096 | out: | |
3097 | ocfs2_free_path(left_path); | |
3098 | return ret; | |
3099 | } | |
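/*
 * Illustrative sketch of the two cases handled above (example tree,
 * not taken from real data): for a depth-1 tree whose rightmost leaf
 * L2 has gone empty,
 *
 *         root                        root
 *        /    \         ---->        /
 *      L1      L2                  L1         (last_eb_blk = L1)
 *
 * L2 is unlinked and its left neighbour becomes the new rightmost
 * leaf. If L2 had been the only leaf, the root list is instead reset
 * to depth 0 and last_eb_blk is cleared to 0.
 */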
3100 | ||
3101 | /* | |
3102 | * Left rotation of btree records. | |
3103 | * | |
3104 | * In many ways, this is (unsurprisingly) the opposite of right | |
3105 | * rotation. We start at some non-rightmost path containing an empty | |
3106 | * extent in the leaf block. The code works its way to the rightmost | |
3107 | * path by rotating records to the left in every subtree. | |
3108 | * | |
3109 | * This is used by any code which reduces the number of extent records | |
3110 | * in a leaf. After removal, an empty record should be placed in the | |
3111 | * leftmost list position. | |
3112 | * | |
3113 | * This won't handle a length update of the rightmost path records if | |
3114 | * the rightmost tree leaf record is removed so the caller is | |
3115 | * responsible for detecting and correcting that. | |
3116 | */ | |
3117 | static int ocfs2_rotate_tree_left(struct inode *inode, handle_t *handle, | |
3118 | struct ocfs2_path *path, | |
e7d4cb6b TM |
3119 | struct ocfs2_cached_dealloc_ctxt *dealloc, |
3120 | struct ocfs2_extent_tree *et) | |
328d5752 MF |
3121 | { |
3122 | int ret, orig_credits = handle->h_buffer_credits; | |
3123 | struct ocfs2_path *tmp_path = NULL, *restart_path = NULL; | |
3124 | struct ocfs2_extent_block *eb; | |
3125 | struct ocfs2_extent_list *el; | |
3126 | ||
3127 | el = path_leaf_el(path); | |
3128 | if (!ocfs2_is_empty_extent(&el->l_recs[0])) | |
3129 | return 0; | |
3130 | ||
3131 | if (path->p_tree_depth == 0) { | |
3132 | rightmost_no_delete: | |
3133 | /* | |
e7d4cb6b | 3134 | * Inline extents. This is trivially handled, so do |
328d5752 MF |
3135 | * it up front. |
3136 | */ | |
3137 | ret = ocfs2_rotate_rightmost_leaf_left(inode, handle, | |
13723d00 | 3138 | path); |
328d5752 MF |
3139 | if (ret) |
3140 | mlog_errno(ret); | |
3141 | goto out; | |
3142 | } | |
3143 | ||
3144 | /* | |
3145 | * Handle rightmost branch now. There are several cases: | |
3146 | * 1) simple rotation leaving records in there. That's trivial. | |
3147 | * 2) rotation requiring a branch delete - there's no more | |
3148 | * records left. Two cases of this: | |
3149 | * a) There are branches to the left. | |
3150 | * b) This is also the leftmost (the only) branch. | |
3151 | * | |
3152 | * 1) is handled via ocfs2_rotate_rightmost_leaf_left() | |
3153 | * 2a) we need the left branch so that we can update it with the unlink | |
3154 | * 2b) we need to bring the inode back to inline extents. | |
3155 | */ | |
3156 | ||
3157 | eb = (struct ocfs2_extent_block *)path_leaf_bh(path)->b_data; | |
3158 | el = &eb->h_list; | |
3159 | if (eb->h_next_leaf_blk == 0) { | |
3160 | /* | |
3161 | * This gets a bit tricky if we're going to delete the | |
3162 | * rightmost path. Get the other cases out of the way | |
3163 | * 1st. | |
3164 | */ | |
3165 | if (le16_to_cpu(el->l_next_free_rec) > 1) | |
3166 | goto rightmost_no_delete; | |
3167 | ||
3168 | if (le16_to_cpu(el->l_next_free_rec) == 0) { | |
3169 | ret = -EIO; | |
3170 | ocfs2_error(inode->i_sb, | |
3171 | "Inode %llu has empty extent block at %llu", | |
3172 | (unsigned long long)OCFS2_I(inode)->ip_blkno, | |
3173 | (unsigned long long)le64_to_cpu(eb->h_blkno)); | |
3174 | goto out; | |
3175 | } | |
3176 | ||
3177 | /* | |
3178 | * XXX: The caller cannot trust "path" any more after | |
3179 | * this as it will have been deleted. What do we do? | |
3180 | * | |
3181 | * In theory the rotate-for-merge code will never get | |
3182 | * here because it'll always ask for a rotate in a | |
3183 | * nonempty list. | |
3184 | */ | |
3185 | ||
3186 | ret = ocfs2_remove_rightmost_path(inode, handle, path, | |
e7d4cb6b | 3187 | dealloc, et); |
328d5752 MF |
3188 | if (ret) |
3189 | mlog_errno(ret); | |
3190 | goto out; | |
3191 | } | |
3192 | ||
3193 | /* | |
3194 | * Now we can loop, remembering the path we get from -EAGAIN | |
3195 | * and restarting from there. | |
3196 | */ | |
3197 | try_rotate: | |
3198 | ret = __ocfs2_rotate_tree_left(inode, handle, orig_credits, path, | |
e7d4cb6b | 3199 | dealloc, &restart_path, et); |
328d5752 MF |
3200 | if (ret && ret != -EAGAIN) { |
3201 | mlog_errno(ret); | |
3202 | goto out; | |
3203 | } | |
3204 | ||
3205 | while (ret == -EAGAIN) { | |
3206 | tmp_path = restart_path; | |
3207 | restart_path = NULL; | |
3208 | ||
3209 | ret = __ocfs2_rotate_tree_left(inode, handle, orig_credits, | |
3210 | tmp_path, dealloc, | |
e7d4cb6b | 3211 | &restart_path, et); |
328d5752 MF |
3212 | if (ret && ret != -EAGAIN) { |
3213 | mlog_errno(ret); | |
3214 | goto out; | |
3215 | } | |
3216 | ||
3217 | ocfs2_free_path(tmp_path); | |
3218 | tmp_path = NULL; | |
3219 | ||
3220 | if (ret == 0) | |
3221 | goto try_rotate; | |
3222 | } | |
3223 | ||
3224 | out: | |
3225 | ocfs2_free_path(tmp_path); | |
3226 | ocfs2_free_path(restart_path); | |
3227 | return ret; | |
3228 | } | |
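/*
 * Control-flow summary of ocfs2_rotate_tree_left() (illustrative):
 *
 *   1) The leaf is the rightmost one (or tree depth is 0) and still
 *      holds live records: the empty slot is simply rotated out,
 *      e.g. [ (empty) | A | B ]  ->  [ A | B ].
 *
 *   2) The rightmost leaf holds nothing but the empty record: the
 *      whole rightmost branch is unlinked via
 *      ocfs2_remove_rightmost_path().
 *
 *   3) The leaf is not the rightmost one: __ocfs2_rotate_tree_left()
 *      walks toward the rightmost path, and any path handed back with
 *      -EAGAIN is used to restart the walk.
 */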
3229 | ||
3230 | static void ocfs2_cleanup_merge(struct ocfs2_extent_list *el, | |
3231 | int index) | |
3232 | { | |
3233 | struct ocfs2_extent_rec *rec = &el->l_recs[index]; | |
3234 | unsigned int size; | |
3235 | ||
3236 | if (rec->e_leaf_clusters == 0) { | |
3237 | /* | |
3238 | * We consumed all of the merged-from record. An empty | |
3239 | * extent cannot exist anywhere but the 1st array | |
3240 | * position, so move things over if the merged-from | |
3241 | * record doesn't occupy that position. | |
3242 | * | |
3243 | * This creates a new empty extent so the caller | |
3244 | * should be smart enough to have removed any existing | |
3245 | * ones. | |
3246 | */ | |
3247 | if (index > 0) { | |
3248 | BUG_ON(ocfs2_is_empty_extent(&el->l_recs[0])); | |
3249 | size = index * sizeof(struct ocfs2_extent_rec); | |
3250 | memmove(&el->l_recs[1], &el->l_recs[0], size); | |
3251 | } | |
3252 | ||
3253 | /* | |
3254 | * Always memset - the caller doesn't check whether it | |
3255 | * created an empty extent, so there could be junk in | |
3256 | * the other fields. | |
3257 | */ | |
3258 | memset(&el->l_recs[0], 0, sizeof(struct ocfs2_extent_rec)); | |
3259 | } | |
3260 | } | |
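/*
 * Worked example for ocfs2_cleanup_merge(), with index = 2 and
 * invented record names:
 *
 *   before the cleanup:   [ A | B | (0 clusters) | D ]
 *   after the memmove:    [ A | A | B            | D ]
 *   after the memset:     [ (empty) | A | B      | D ]
 *
 * The fully consumed record always ends up as the single empty extent
 * in slot 0, the only position where an empty extent may live.
 */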
3261 | ||
677b9752 TM |
3262 | static int ocfs2_get_right_path(struct inode *inode, |
3263 | struct ocfs2_path *left_path, | |
3264 | struct ocfs2_path **ret_right_path) | |
3265 | { | |
3266 | int ret; | |
3267 | u32 right_cpos; | |
3268 | struct ocfs2_path *right_path = NULL; | |
3269 | struct ocfs2_extent_list *left_el; | |
3270 | ||
3271 | *ret_right_path = NULL; | |
3272 | ||
3273 | /* This function shouldn't be called for non-trees. */ | |
3274 | BUG_ON(left_path->p_tree_depth == 0); | |
3275 | ||
3276 | left_el = path_leaf_el(left_path); | |
3277 | BUG_ON(left_el->l_next_free_rec != left_el->l_count); | |
3278 | ||
3279 | ret = ocfs2_find_cpos_for_right_leaf(inode->i_sb, left_path, | |
3280 | &right_cpos); | |
3281 | if (ret) { | |
3282 | mlog_errno(ret); | |
3283 | goto out; | |
3284 | } | |
3285 | ||
3286 | /* This function shouldn't be called for the rightmost leaf. */ | |
3287 | BUG_ON(right_cpos == 0); | |
3288 | ||
ffdd7a54 | 3289 | right_path = ocfs2_new_path_from_path(left_path); |
677b9752 TM |
3290 | if (!right_path) { |
3291 | ret = -ENOMEM; | |
3292 | mlog_errno(ret); | |
3293 | goto out; | |
3294 | } | |
3295 | ||
3296 | ret = ocfs2_find_path(inode, right_path, right_cpos); | |
3297 | if (ret) { | |
3298 | mlog_errno(ret); | |
3299 | goto out; | |
3300 | } | |
3301 | ||
3302 | *ret_right_path = right_path; | |
3303 | out: | |
3304 | if (ret) | |
3305 | ocfs2_free_path(right_path); | |
3306 | return ret; | |
3307 | } | |
3308 | ||
328d5752 MF |
3309 | /* |
3310 | * Remove split_rec clusters from the record at index and merge them | |
677b9752 TM |
3311 | * onto the beginning of the record "next" to it. |
3312 | * For index < l_count - 1, the "next" means the extent rec at index + 1. | |
3313 | * For index == l_count - 1, the "next" means the 1st extent rec of the | |
3314 | * next extent block. | |
328d5752 | 3315 | */ |
677b9752 TM |
3316 | static int ocfs2_merge_rec_right(struct inode *inode, |
3317 | struct ocfs2_path *left_path, | |
3318 | handle_t *handle, | |
3319 | struct ocfs2_extent_rec *split_rec, | |
3320 | int index) | |
328d5752 | 3321 | { |
677b9752 | 3322 | int ret, next_free, i; |
328d5752 MF |
3323 | unsigned int split_clusters = le16_to_cpu(split_rec->e_leaf_clusters); |
3324 | struct ocfs2_extent_rec *left_rec; | |
3325 | struct ocfs2_extent_rec *right_rec; | |
677b9752 TM |
3326 | struct ocfs2_extent_list *right_el; |
3327 | struct ocfs2_path *right_path = NULL; | |
3328 | int subtree_index = 0; | |
3329 | struct ocfs2_extent_list *el = path_leaf_el(left_path); | |
3330 | struct buffer_head *bh = path_leaf_bh(left_path); | |
3331 | struct buffer_head *root_bh = NULL; | |
328d5752 MF |
3332 | |
3333 | BUG_ON(index >= le16_to_cpu(el->l_next_free_rec)); | |
328d5752 | 3334 | left_rec = &el->l_recs[index]; |
677b9752 | 3335 | |
9d8df6aa | 3336 | if (index == le16_to_cpu(el->l_next_free_rec) - 1 && |
677b9752 TM |
3337 | le16_to_cpu(el->l_next_free_rec) == le16_to_cpu(el->l_count)) { |
3338 | /* We have hit a cross-extent-block merge. */ | |
3339 | ret = ocfs2_get_right_path(inode, left_path, &right_path); | |
3340 | if (ret) { | |
3341 | mlog_errno(ret); | |
3342 | goto out; | |
3343 | } | |
3344 | ||
3345 | right_el = path_leaf_el(right_path); | |
3346 | next_free = le16_to_cpu(right_el->l_next_free_rec); | |
3347 | BUG_ON(next_free <= 0); | |
3348 | right_rec = &right_el->l_recs[0]; | |
3349 | if (ocfs2_is_empty_extent(right_rec)) { | |
9d8df6aa | 3350 | BUG_ON(next_free <= 1); |
677b9752 TM |
3351 | right_rec = &right_el->l_recs[1]; |
3352 | } | |
3353 | ||
3354 | BUG_ON(le32_to_cpu(left_rec->e_cpos) + | |
3355 | le16_to_cpu(left_rec->e_leaf_clusters) != | |
3356 | le32_to_cpu(right_rec->e_cpos)); | |
3357 | ||
3358 | subtree_index = ocfs2_find_subtree_root(inode, | |
3359 | left_path, right_path); | |
3360 | ||
3361 | ret = ocfs2_extend_rotate_transaction(handle, subtree_index, | |
3362 | handle->h_buffer_credits, | |
3363 | right_path); | |
3364 | if (ret) { | |
3365 | mlog_errno(ret); | |
3366 | goto out; | |
3367 | } | |
3368 | ||
3369 | root_bh = left_path->p_node[subtree_index].bh; | |
3370 | BUG_ON(root_bh != right_path->p_node[subtree_index].bh); | |
3371 | ||
13723d00 JB |
3372 | ret = ocfs2_path_bh_journal_access(handle, inode, right_path, |
3373 | subtree_index); | |
677b9752 TM |
3374 | if (ret) { |
3375 | mlog_errno(ret); | |
3376 | goto out; | |
3377 | } | |
3378 | ||
3379 | for (i = subtree_index + 1; | |
3380 | i < path_num_items(right_path); i++) { | |
13723d00 JB |
3381 | ret = ocfs2_path_bh_journal_access(handle, inode, |
3382 | right_path, i); | |
677b9752 TM |
3383 | if (ret) { |
3384 | mlog_errno(ret); | |
3385 | goto out; | |
3386 | } | |
3387 | ||
13723d00 JB |
3388 | ret = ocfs2_path_bh_journal_access(handle, inode, |
3389 | left_path, i); | |
677b9752 TM |
3390 | if (ret) { |
3391 | mlog_errno(ret); | |
3392 | goto out; | |
3393 | } | |
3394 | } | |
3395 | ||
3396 | } else { | |
3397 | BUG_ON(index == le16_to_cpu(el->l_next_free_rec) - 1); | |
3398 | right_rec = &el->l_recs[index + 1]; | |
3399 | } | |
328d5752 | 3400 | |
13723d00 JB |
3401 | ret = ocfs2_path_bh_journal_access(handle, inode, left_path, |
3402 | path_num_items(left_path) - 1); | |
328d5752 MF |
3403 | if (ret) { |
3404 | mlog_errno(ret); | |
3405 | goto out; | |
3406 | } | |
3407 | ||
3408 | le16_add_cpu(&left_rec->e_leaf_clusters, -split_clusters); | |
3409 | ||
3410 | le32_add_cpu(&right_rec->e_cpos, -split_clusters); | |
3411 | le64_add_cpu(&right_rec->e_blkno, | |
3412 | -ocfs2_clusters_to_blocks(inode->i_sb, split_clusters)); | |
3413 | le16_add_cpu(&right_rec->e_leaf_clusters, split_clusters); | |
3414 | ||
3415 | ocfs2_cleanup_merge(el, index); | |
3416 | ||
3417 | ret = ocfs2_journal_dirty(handle, bh); | |
3418 | if (ret) | |
3419 | mlog_errno(ret); | |
3420 | ||
677b9752 TM |
3421 | if (right_path) { |
3422 | ret = ocfs2_journal_dirty(handle, path_leaf_bh(right_path)); | |
3423 | if (ret) | |
3424 | mlog_errno(ret); | |
3425 | ||
3426 | ocfs2_complete_edge_insert(inode, handle, left_path, | |
3427 | right_path, subtree_index); | |
3428 | } | |
3429 | out: | |
3430 | if (right_path) | |
3431 | ocfs2_free_path(right_path); | |
3432 | return ret; | |
3433 | } | |
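/*
 * Arithmetic example for ocfs2_merge_rec_right(), with invented
 * numbers:
 *
 *   left_rec:  cpos 100, 10 clusters    right_rec: cpos 110, 4 clusters
 *   split_rec: cpos 106,  4 clusters (the tail of left_rec)
 *
 *   after the merge:
 *   left_rec:  cpos 100,  6 clusters    right_rec: cpos 106, 8 clusters
 *
 * right_rec's e_blkno is pulled back by the block equivalent of those
 * 4 clusters so that its cpos and blkno stay in step.
 */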
3434 | ||
3435 | static int ocfs2_get_left_path(struct inode *inode, | |
3436 | struct ocfs2_path *right_path, | |
3437 | struct ocfs2_path **ret_left_path) | |
3438 | { | |
3439 | int ret; | |
3440 | u32 left_cpos; | |
3441 | struct ocfs2_path *left_path = NULL; | |
3442 | ||
3443 | *ret_left_path = NULL; | |
3444 | ||
3445 | /* This function shouldn't be called for non-trees. */ | |
3446 | BUG_ON(right_path->p_tree_depth == 0); | |
3447 | ||
3448 | ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb, | |
3449 | right_path, &left_cpos); | |
3450 | if (ret) { | |
3451 | mlog_errno(ret); | |
3452 | goto out; | |
3453 | } | |
3454 | ||
3455 | /* This function shouldn't be called for the leftmost leaf. */ | |
3456 | BUG_ON(left_cpos == 0); | |
3457 | ||
ffdd7a54 | 3458 | left_path = ocfs2_new_path_from_path(right_path); |
677b9752 TM |
3459 | if (!left_path) { |
3460 | ret = -ENOMEM; | |
3461 | mlog_errno(ret); | |
3462 | goto out; | |
3463 | } | |
3464 | ||
3465 | ret = ocfs2_find_path(inode, left_path, left_cpos); | |
3466 | if (ret) { | |
3467 | mlog_errno(ret); | |
3468 | goto out; | |
3469 | } | |
3470 | ||
3471 | *ret_left_path = left_path; | |
328d5752 | 3472 | out: |
677b9752 TM |
3473 | if (ret) |
3474 | ocfs2_free_path(left_path); | |
328d5752 MF |
3475 | return ret; |
3476 | } | |
3477 | ||
3478 | /* | |
3479 | * Remove split_rec clusters from the record at index and merge them | |
677b9752 TM |
3480 | * onto the tail of the record "before" it. |
3481 | * For index > 0, the "before" means the extent rec at index - 1. | |
3482 | * | |
3483 | * For index == 0, the "before" means the last record of the previous | |
3484 | * extent block. There is also a situation where we may need to | |
3485 | * remove the rightmost leaf extent block in the right_path and change | |
3486 | * the right path to indicate the new rightmost path. | |
328d5752 | 3487 | */ |
677b9752 TM |
3488 | static int ocfs2_merge_rec_left(struct inode *inode, |
3489 | struct ocfs2_path *right_path, | |
328d5752 MF |
3490 | handle_t *handle, |
3491 | struct ocfs2_extent_rec *split_rec, | |
677b9752 | 3492 | struct ocfs2_cached_dealloc_ctxt *dealloc, |
e7d4cb6b | 3493 | struct ocfs2_extent_tree *et, |
677b9752 | 3494 | int index) |
328d5752 | 3495 | { |
677b9752 | 3496 | int ret, i, subtree_index = 0, has_empty_extent = 0; |
328d5752 MF |
3497 | unsigned int split_clusters = le16_to_cpu(split_rec->e_leaf_clusters); |
3498 | struct ocfs2_extent_rec *left_rec; | |
3499 | struct ocfs2_extent_rec *right_rec; | |
677b9752 TM |
3500 | struct ocfs2_extent_list *el = path_leaf_el(right_path); |
3501 | struct buffer_head *bh = path_leaf_bh(right_path); | |
3502 | struct buffer_head *root_bh = NULL; | |
3503 | struct ocfs2_path *left_path = NULL; | |
3504 | struct ocfs2_extent_list *left_el; | |
328d5752 | 3505 | |
677b9752 | 3506 | BUG_ON(index < 0); |
328d5752 | 3507 | |
328d5752 | 3508 | right_rec = &el->l_recs[index]; |
677b9752 TM |
3509 | if (index == 0) { |
3510 | /* We have hit a cross-extent-block merge. */ | |
3511 | ret = ocfs2_get_left_path(inode, right_path, &left_path); | |
3512 | if (ret) { | |
3513 | mlog_errno(ret); | |
3514 | goto out; | |
3515 | } | |
3516 | ||
3517 | left_el = path_leaf_el(left_path); | |
3518 | BUG_ON(le16_to_cpu(left_el->l_next_free_rec) != | |
3519 | le16_to_cpu(left_el->l_count)); | |
3520 | ||
3521 | left_rec = &left_el->l_recs[ | |
3522 | le16_to_cpu(left_el->l_next_free_rec) - 1]; | |
3523 | BUG_ON(le32_to_cpu(left_rec->e_cpos) + | |
3524 | le16_to_cpu(left_rec->e_leaf_clusters) != | |
3525 | le32_to_cpu(split_rec->e_cpos)); | |
3526 | ||
3527 | subtree_index = ocfs2_find_subtree_root(inode, | |
3528 | left_path, right_path); | |
3529 | ||
3530 | ret = ocfs2_extend_rotate_transaction(handle, subtree_index, | |
3531 | handle->h_buffer_credits, | |
3532 | left_path); | |
3533 | if (ret) { | |
3534 | mlog_errno(ret); | |
3535 | goto out; | |
3536 | } | |
3537 | ||
3538 | root_bh = left_path->p_node[subtree_index].bh; | |
3539 | BUG_ON(root_bh != right_path->p_node[subtree_index].bh); | |
3540 | ||
13723d00 JB |
3541 | ret = ocfs2_path_bh_journal_access(handle, inode, right_path, |
3542 | subtree_index); | |
677b9752 TM |
3543 | if (ret) { |
3544 | mlog_errno(ret); | |
3545 | goto out; | |
3546 | } | |
3547 | ||
3548 | for (i = subtree_index + 1; | |
3549 | i < path_num_items(right_path); i++) { | |
13723d00 JB |
3550 | ret = ocfs2_path_bh_journal_access(handle, inode, |
3551 | right_path, i); | |
677b9752 TM |
3552 | if (ret) { |
3553 | mlog_errno(ret); | |
3554 | goto out; | |
3555 | } | |
3556 | ||
13723d00 JB |
3557 | ret = ocfs2_path_bh_journal_access(handle, inode, |
3558 | left_path, i); | |
677b9752 TM |
3559 | if (ret) { |
3560 | mlog_errno(ret); | |
3561 | goto out; | |
3562 | } | |
3563 | } | |
3564 | } else { | |
3565 | left_rec = &el->l_recs[index - 1]; | |
3566 | if (ocfs2_is_empty_extent(&el->l_recs[0])) | |
3567 | has_empty_extent = 1; | |
3568 | } | |
328d5752 | 3569 | |
9047beab TM |
3570 | ret = ocfs2_path_bh_journal_access(handle, inode, right_path, |
3571 | path_num_items(right_path) - 1); | |
328d5752 MF |
3572 | if (ret) { |
3573 | mlog_errno(ret); | |
3574 | goto out; | |
3575 | } | |
3576 | ||
3577 | if (has_empty_extent && index == 1) { | |
3578 | /* | |
3579 | * The easy case - we can just plop the record right in. | |
3580 | */ | |
3581 | *left_rec = *split_rec; | |
3582 | ||
3583 | has_empty_extent = 0; | |
677b9752 | 3584 | } else |
328d5752 | 3585 | le16_add_cpu(&left_rec->e_leaf_clusters, split_clusters); |
328d5752 MF |
3586 | |
3587 | le32_add_cpu(&right_rec->e_cpos, split_clusters); | |
3588 | le64_add_cpu(&right_rec->e_blkno, | |
3589 | ocfs2_clusters_to_blocks(inode->i_sb, split_clusters)); | |
3590 | le16_add_cpu(&right_rec->e_leaf_clusters, -split_clusters); | |
3591 | ||
3592 | ocfs2_cleanup_merge(el, index); | |
3593 | ||
3594 | ret = ocfs2_journal_dirty(handle, bh); | |
3595 | if (ret) | |
3596 | mlog_errno(ret); | |
3597 | ||
677b9752 TM |
3598 | if (left_path) { |
3599 | ret = ocfs2_journal_dirty(handle, path_leaf_bh(left_path)); | |
3600 | if (ret) | |
3601 | mlog_errno(ret); | |
3602 | ||
3603 | /* | |
3604 | * In the situation that the right_rec is empty and the extent | |
3605 | * block is also empty, ocfs2_complete_edge_insert can't handle | |
3606 | * it and we need to delete the right extent block. | |
3607 | */ | |
3608 | if (le16_to_cpu(right_rec->e_leaf_clusters) == 0 && | |
3609 | le16_to_cpu(el->l_next_free_rec) == 1) { | |
3610 | ||
3611 | ret = ocfs2_remove_rightmost_path(inode, handle, | |
e7d4cb6b TM |
3612 | right_path, |
3613 | dealloc, et); | |
677b9752 TM |
3614 | if (ret) { |
3615 | mlog_errno(ret); | |
3616 | goto out; | |
3617 | } | |
3618 | ||
3619 | /* Now the rightmost extent block has been deleted. | |
3620 | * So we use the new rightmost path. | |
3621 | */ | |
3622 | ocfs2_mv_path(right_path, left_path); | |
3623 | left_path = NULL; | |
3624 | } else | |
3625 | ocfs2_complete_edge_insert(inode, handle, left_path, | |
3626 | right_path, subtree_index); | |
3627 | } | |
328d5752 | 3628 | out: |
677b9752 TM |
3629 | if (left_path) |
3630 | ocfs2_free_path(left_path); | |
328d5752 MF |
3631 | return ret; |
3632 | } | |
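/*
 * Mirror example for ocfs2_merge_rec_left(), with the same invented
 * numbers:
 *
 *   left_rec:  cpos 100, 6 clusters     right_rec: cpos 106, 8 clusters
 *   split_rec: cpos 106, 4 clusters (the head of right_rec)
 *
 *   after the merge:
 *   left_rec:  cpos 100, 10 clusters    right_rec: cpos 110, 4 clusters
 *
 * In the index == 0 case the "left_rec" lives in the previous extent
 * block, and if the merge drains the rightmost leaf completely the
 * rightmost path is dropped via ocfs2_remove_rightmost_path().
 */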
3633 | ||
3634 | static int ocfs2_try_to_merge_extent(struct inode *inode, | |
3635 | handle_t *handle, | |
677b9752 | 3636 | struct ocfs2_path *path, |
328d5752 MF |
3637 | int split_index, |
3638 | struct ocfs2_extent_rec *split_rec, | |
3639 | struct ocfs2_cached_dealloc_ctxt *dealloc, | |
e7d4cb6b TM |
3640 | struct ocfs2_merge_ctxt *ctxt, |
3641 | struct ocfs2_extent_tree *et) | |
328d5752 MF |
3642 | |
3643 | { | |
518d7269 | 3644 | int ret = 0; |
677b9752 | 3645 | struct ocfs2_extent_list *el = path_leaf_el(path); |
328d5752 MF |
3646 | struct ocfs2_extent_rec *rec = &el->l_recs[split_index]; |
3647 | ||
3648 | BUG_ON(ctxt->c_contig_type == CONTIG_NONE); | |
3649 | ||
518d7269 TM |
3650 | if (ctxt->c_split_covers_rec && ctxt->c_has_empty_extent) { |
3651 | /* | |
3652 | * The merge code will need to create an empty | |
3653 | * extent to take the place of the newly | |
3654 | * emptied slot. Remove any pre-existing empty | |
3655 | * extents - having more than one in a leaf is | |
3656 | * illegal. | |
3657 | */ | |
677b9752 | 3658 | ret = ocfs2_rotate_tree_left(inode, handle, path, |
e7d4cb6b | 3659 | dealloc, et); |
518d7269 TM |
3660 | if (ret) { |
3661 | mlog_errno(ret); | |
3662 | goto out; | |
328d5752 | 3663 | } |
518d7269 TM |
3664 | split_index--; |
3665 | rec = &el->l_recs[split_index]; | |
328d5752 MF |
3666 | } |
3667 | ||
3668 | if (ctxt->c_contig_type == CONTIG_LEFTRIGHT) { | |
3669 | /* | |
3670 | * Left-right contig implies this. | |
3671 | */ | |
3672 | BUG_ON(!ctxt->c_split_covers_rec); | |
328d5752 MF |
3673 | |
3674 | /* | |
3675 | * Since the leftright insert always covers the entire | |
3676 | * extent, this call will delete the insert record | |
3677 | * entirely, resulting in an empty extent record added to | |
3678 | * the extent block. | |
3679 | * | |
3680 | * Since the adding of an empty extent shifts | |
3681 | * everything back to the right, there's no need to | |
3682 | * update split_index here. | |
677b9752 TM |
3683 | * |
3684 | * When the split_index is zero, we need to merge it to the | |
3685 | * previous extent block. It is more efficient and easier | |
3686 | * if we do merge_right first and merge_left later. | |
328d5752 | 3687 | */ |
677b9752 TM |
3688 | ret = ocfs2_merge_rec_right(inode, path, |
3689 | handle, split_rec, | |
3690 | split_index); | |
328d5752 MF |
3691 | if (ret) { |
3692 | mlog_errno(ret); | |
3693 | goto out; | |
3694 | } | |
3695 | ||
3696 | /* | |
3697 | * We can only get this from logic error above. | |
3698 | */ | |
3699 | BUG_ON(!ocfs2_is_empty_extent(&el->l_recs[0])); | |
3700 | ||
677b9752 | 3701 | /* The merge left us with an empty extent, remove it. */ |
e7d4cb6b TM |
3702 | ret = ocfs2_rotate_tree_left(inode, handle, path, |
3703 | dealloc, et); | |
328d5752 MF |
3704 | if (ret) { |
3705 | mlog_errno(ret); | |
3706 | goto out; | |
3707 | } | |
677b9752 | 3708 | |
328d5752 MF |
3709 | rec = &el->l_recs[split_index]; |
3710 | ||
3711 | /* | |
3712 | * Note that we don't pass split_rec here on purpose - | |
677b9752 | 3713 | * we've merged it into the rec already. |
328d5752 | 3714 | */ |
677b9752 TM |
3715 | ret = ocfs2_merge_rec_left(inode, path, |
3716 | handle, rec, | |
e7d4cb6b | 3717 | dealloc, et, |
677b9752 TM |
3718 | split_index); |
3719 | ||
328d5752 MF |
3720 | if (ret) { |
3721 | mlog_errno(ret); | |
3722 | goto out; | |
3723 | } | |
3724 | ||
677b9752 | 3725 | ret = ocfs2_rotate_tree_left(inode, handle, path, |
e7d4cb6b | 3726 | dealloc, et); |
328d5752 MF |
3727 | /* |
3728 | * Error from this last rotate is not critical, so | |
3729 | * print but don't bubble it up. | |
3730 | */ | |
3731 | if (ret) | |
3732 | mlog_errno(ret); | |
3733 | ret = 0; | |
3734 | } else { | |
3735 | /* | |
3736 | * Merge a record to the left or right. | |
3737 | * | |
3738 | * 'contig_type' is relative to the existing record, | |
3739 | * so for example, if we're "right contig", it's to | |
3740 | * the record on the left (hence the left merge). | |
3741 | */ | |
3742 | if (ctxt->c_contig_type == CONTIG_RIGHT) { | |
3743 | ret = ocfs2_merge_rec_left(inode, | |
677b9752 TM |
3744 | path, |
3745 | handle, split_rec, | |
e7d4cb6b | 3746 | dealloc, et, |
328d5752 MF |
3747 | split_index); |
3748 | if (ret) { | |
3749 | mlog_errno(ret); | |
3750 | goto out; | |
3751 | } | |
3752 | } else { | |
3753 | ret = ocfs2_merge_rec_right(inode, | |
677b9752 TM |
3754 | path, |
3755 | handle, split_rec, | |
328d5752 MF |
3756 | split_index); |
3757 | if (ret) { | |
3758 | mlog_errno(ret); | |
3759 | goto out; | |
3760 | } | |
3761 | } | |
3762 | ||
3763 | if (ctxt->c_split_covers_rec) { | |
3764 | /* | |
3765 | * The merge may have left an empty extent in | |
3766 | * our leaf. Try to rotate it away. | |
3767 | */ | |
677b9752 | 3768 | ret = ocfs2_rotate_tree_left(inode, handle, path, |
e7d4cb6b | 3769 | dealloc, et); |
328d5752 MF |
3770 | if (ret) |
3771 | mlog_errno(ret); | |
3772 | ret = 0; | |
3773 | } | |
3774 | } | |
3775 | ||
3776 | out: | |
3777 | return ret; | |
3778 | } | |
3779 | ||
3780 | static void ocfs2_subtract_from_rec(struct super_block *sb, | |
3781 | enum ocfs2_split_type split, | |
3782 | struct ocfs2_extent_rec *rec, | |
3783 | struct ocfs2_extent_rec *split_rec) | |
3784 | { | |
3785 | u64 len_blocks; | |
3786 | ||
3787 | len_blocks = ocfs2_clusters_to_blocks(sb, | |
3788 | le16_to_cpu(split_rec->e_leaf_clusters)); | |
3789 | ||
3790 | if (split == SPLIT_LEFT) { | |
3791 | /* | |
3792 | * Region is on the left edge of the existing | |
3793 | * record. | |
3794 | */ | |
3795 | le32_add_cpu(&rec->e_cpos, | |
3796 | le16_to_cpu(split_rec->e_leaf_clusters)); | |
3797 | le64_add_cpu(&rec->e_blkno, len_blocks); | |
3798 | le16_add_cpu(&rec->e_leaf_clusters, | |
3799 | -le16_to_cpu(split_rec->e_leaf_clusters)); | |
3800 | } else { | |
3801 | /* | |
3802 | * Region is on the right edge of the existing | |
3803 | * record. | |
3804 | */ | |
3805 | le16_add_cpu(&rec->e_leaf_clusters, | |
3806 | -le16_to_cpu(split_rec->e_leaf_clusters)); | |
3807 | } | |
3808 | } | |
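/*
 * Example for ocfs2_subtract_from_rec(), with invented numbers:
 *
 *   rec:       cpos 200, 16 clusters
 *   split_rec:  4 clusters
 *
 *   SPLIT_LEFT  (split_rec at cpos 200): rec becomes cpos 204, 12 clusters
 *   SPLIT_RIGHT (split_rec at cpos 212): rec stays at cpos 200, 12 clusters
 *
 * In the SPLIT_LEFT case e_blkno is advanced as well, so the record
 * still points at the first block it describes.
 */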
3809 | ||
3810 | /* | |
3811 | * Do the final bits of extent record insertion at the target leaf | |
3812 | * list. If this leaf is part of an allocation tree, it is assumed | |
3813 | * that the tree above has been prepared. | |
3814 | */ | |
3815 | static void ocfs2_insert_at_leaf(struct ocfs2_extent_rec *insert_rec, | |
3816 | struct ocfs2_extent_list *el, | |
3817 | struct ocfs2_insert_type *insert, | |
3818 | struct inode *inode) | |
3819 | { | |
3820 | int i = insert->ins_contig_index; | |
3821 | unsigned int range; | |
3822 | struct ocfs2_extent_rec *rec; | |
3823 | ||
3824 | BUG_ON(le16_to_cpu(el->l_tree_depth) != 0); | |
3825 | ||
3826 | if (insert->ins_split != SPLIT_NONE) { | |
3827 | i = ocfs2_search_extent_list(el, le32_to_cpu(insert_rec->e_cpos)); | |
3828 | BUG_ON(i == -1); | |
3829 | rec = &el->l_recs[i]; | |
3830 | ocfs2_subtract_from_rec(inode->i_sb, insert->ins_split, rec, | |
3831 | insert_rec); | |
3832 | goto rotate; | |
3833 | } | |
3834 | ||
3835 | /* | |
3836 | * Contiguous insert - either left or right. | |
3837 | */ | |
3838 | if (insert->ins_contig != CONTIG_NONE) { | |
3839 | rec = &el->l_recs[i]; | |
3840 | if (insert->ins_contig == CONTIG_LEFT) { | |
3841 | rec->e_blkno = insert_rec->e_blkno; | |
3842 | rec->e_cpos = insert_rec->e_cpos; | |
3843 | } | |
3844 | le16_add_cpu(&rec->e_leaf_clusters, | |
3845 | le16_to_cpu(insert_rec->e_leaf_clusters)); | |
3846 | return; | |
3847 | } | |
3848 | ||
3849 | /* | |
3850 | * Handle insert into an empty leaf. | |
3851 | */ | |
3852 | if (le16_to_cpu(el->l_next_free_rec) == 0 || | |
3853 | ((le16_to_cpu(el->l_next_free_rec) == 1) && | |
3854 | ocfs2_is_empty_extent(&el->l_recs[0]))) { | |
3855 | el->l_recs[0] = *insert_rec; | |
3856 | el->l_next_free_rec = cpu_to_le16(1); | |
3857 | return; | |
3858 | } | |
3859 | ||
3860 | /* | |
3861 | * Appending insert. | |
3862 | */ | |
3863 | if (insert->ins_appending == APPEND_TAIL) { | |
3864 | i = le16_to_cpu(el->l_next_free_rec) - 1; | |
3865 | rec = &el->l_recs[i]; | |
3866 | range = le32_to_cpu(rec->e_cpos) | |
3867 | + le16_to_cpu(rec->e_leaf_clusters); | |
3868 | BUG_ON(le32_to_cpu(insert_rec->e_cpos) < range); | |
3869 | ||
3870 | mlog_bug_on_msg(le16_to_cpu(el->l_next_free_rec) >= | |
3871 | le16_to_cpu(el->l_count), | |
3872 | "inode %lu, depth %u, count %u, next free %u, " | |
3873 | "rec.cpos %u, rec.clusters %u, " | |
3874 | "insert.cpos %u, insert.clusters %u\n", | |
3875 | inode->i_ino, | |
3876 | le16_to_cpu(el->l_tree_depth), | |
3877 | le16_to_cpu(el->l_count), | |
3878 | le16_to_cpu(el->l_next_free_rec), | |
3879 | le32_to_cpu(el->l_recs[i].e_cpos), | |
3880 | le16_to_cpu(el->l_recs[i].e_leaf_clusters), | |
3881 | le32_to_cpu(insert_rec->e_cpos), | |
3882 | le16_to_cpu(insert_rec->e_leaf_clusters)); | |
3883 | i++; | |
3884 | el->l_recs[i] = *insert_rec; | |
3885 | le16_add_cpu(&el->l_next_free_rec, 1); | |
3886 | return; | |
3887 | } | |
3888 | ||
3889 | rotate: | |
3890 | /* | |
3891 | * Ok, we have to rotate. | |
3892 | * | |
3893 | * At this point, it is safe to assume that inserting into an | |
3894 | * empty leaf and appending to a leaf have both been handled | |
3895 | * above. | |
3896 | * | |
3897 | * This leaf needs to have space, either by the empty 1st | |
3898 | * extent record, or by virtue of l_next_free_rec < l_count. | |
3899 | */ | |
3900 | ocfs2_rotate_leaf(el, insert_rec); | |
3901 | } | |
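/*
 * Contiguous-insert example for ocfs2_insert_at_leaf(), with invented
 * numbers:
 *
 *   existing rec: cpos 300, 8 clusters   insert_rec: 4 clusters
 *
 *   CONTIG_LEFT  (insert_rec at cpos 296): rec takes insert_rec's cpos
 *                and blkno and becomes cpos 296, 12 clusters.
 *   CONTIG_RIGHT (insert_rec at cpos 308): rec keeps cpos 300 and
 *                simply grows to 12 clusters.
 */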
3902 | ||
328d5752 MF |
3903 | static void ocfs2_adjust_rightmost_records(struct inode *inode, |
3904 | handle_t *handle, | |
3905 | struct ocfs2_path *path, | |
3906 | struct ocfs2_extent_rec *insert_rec) | |
3907 | { | |
3908 | int ret, i, next_free; | |
3909 | struct buffer_head *bh; | |
3910 | struct ocfs2_extent_list *el; | |
3911 | struct ocfs2_extent_rec *rec; | |
3912 | ||
3913 | /* | |
3914 | * Update everything except the leaf block. | |
3915 | */ | |
3916 | for (i = 0; i < path->p_tree_depth; i++) { | |
3917 | bh = path->p_node[i].bh; | |
3918 | el = path->p_node[i].el; | |
3919 | ||
dcd0538f MF |
3920 | next_free = le16_to_cpu(el->l_next_free_rec); |
3921 | if (next_free == 0) { | |
3922 | ocfs2_error(inode->i_sb, | |
3923 | "Dinode %llu has a bad extent list", | |
3924 | (unsigned long long)OCFS2_I(inode)->ip_blkno); | |
3925 | ret = -EIO; | |
328d5752 MF |
3926 | return; |
3927 | } | |
3928 | ||
3929 | rec = &el->l_recs[next_free - 1]; | |
3930 | ||
3931 | rec->e_int_clusters = insert_rec->e_cpos; | |
3932 | le32_add_cpu(&rec->e_int_clusters, | |
3933 | le16_to_cpu(insert_rec->e_leaf_clusters)); | |
3934 | le32_add_cpu(&rec->e_int_clusters, | |
3935 | -le32_to_cpu(rec->e_cpos)); | |
3936 | ||
3937 | ret = ocfs2_journal_dirty(handle, bh); | |
3938 | if (ret) | |
3939 | mlog_errno(ret); | |
3940 | ||
3941 | } | |
3942 | } | |
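/*
 * The loop above recomputes, for every interior record on the path,
 *
 *   e_int_clusters = (insert_rec->e_cpos + insert_rec->e_leaf_clusters)
 *                    - rec->e_cpos
 *
 * i.e. each interior record is stretched so that it ends exactly where
 * the newly appended leaf record ends; the three assignments above are
 * just that formula spelled out with the little-endian helpers.
 */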
3943 | ||
3944 | static int ocfs2_append_rec_to_path(struct inode *inode, handle_t *handle, | |
3945 | struct ocfs2_extent_rec *insert_rec, | |
3946 | struct ocfs2_path *right_path, | |
3947 | struct ocfs2_path **ret_left_path) | |
3948 | { | |
3949 | int ret, next_free; | |
3950 | struct ocfs2_extent_list *el; | |
3951 | struct ocfs2_path *left_path = NULL; | |
3952 | ||
3953 | *ret_left_path = NULL; | |
3954 | ||
3955 | /* | |
3956 | * This shouldn't happen for non-trees. The extent rec cluster | |
3957 | * count manipulation below only works for interior nodes. | |
3958 | */ | |
3959 | BUG_ON(right_path->p_tree_depth == 0); | |
3960 | ||
3961 | /* | |
3962 | * If our appending insert is at the leftmost edge of a leaf, | |
3963 | * then we might need to update the rightmost records of the | |
3964 | * neighboring path. | |
3965 | */ | |
3966 | el = path_leaf_el(right_path); | |
3967 | next_free = le16_to_cpu(el->l_next_free_rec); | |
3968 | if (next_free == 0 || | |
3969 | (next_free == 1 && ocfs2_is_empty_extent(&el->l_recs[0]))) { | |
3970 | u32 left_cpos; | |
3971 | ||
3972 | ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb, right_path, | |
3973 | &left_cpos); | |
3974 | if (ret) { | |
3975 | mlog_errno(ret); | |
dcd0538f MF |
3976 | goto out; |
3977 | } | |
3978 | ||
328d5752 MF |
3979 | mlog(0, "Append may need a left path update. cpos: %u, " |
3980 | "left_cpos: %u\n", le32_to_cpu(insert_rec->e_cpos), | |
3981 | left_cpos); | |
e48edee2 | 3982 | |
328d5752 MF |
3983 | /* |
3984 | * No need to worry if the append is already in the | |
3985 | * leftmost leaf. | |
3986 | */ | |
3987 | if (left_cpos) { | |
ffdd7a54 | 3988 | left_path = ocfs2_new_path_from_path(right_path); |
328d5752 MF |
3989 | if (!left_path) { |
3990 | ret = -ENOMEM; | |
3991 | mlog_errno(ret); | |
3992 | goto out; | |
3993 | } | |
dcd0538f | 3994 | |
328d5752 MF |
3995 | ret = ocfs2_find_path(inode, left_path, left_cpos); |
3996 | if (ret) { | |
3997 | mlog_errno(ret); | |
3998 | goto out; | |
3999 | } | |
dcd0538f | 4000 | |
328d5752 MF |
4001 | /* |
4002 | * ocfs2_insert_path() will pass the left_path to the | |
4003 | * journal for us. | |
4004 | */ | |
4005 | } | |
4006 | } | |
dcd0538f | 4007 | |
328d5752 MF |
4008 | ret = ocfs2_journal_access_path(inode, handle, right_path); |
4009 | if (ret) { | |
4010 | mlog_errno(ret); | |
4011 | goto out; | |
dcd0538f MF |
4012 | } |
4013 | ||
328d5752 MF |
4014 | ocfs2_adjust_rightmost_records(inode, handle, right_path, insert_rec); |
4015 | ||
dcd0538f MF |
4016 | *ret_left_path = left_path; |
4017 | ret = 0; | |
4018 | out: | |
4019 | if (ret != 0) | |
4020 | ocfs2_free_path(left_path); | |
4021 | ||
4022 | return ret; | |
4023 | } | |
4024 | ||
328d5752 MF |
4025 | static void ocfs2_split_record(struct inode *inode, |
4026 | struct ocfs2_path *left_path, | |
4027 | struct ocfs2_path *right_path, | |
4028 | struct ocfs2_extent_rec *split_rec, | |
4029 | enum ocfs2_split_type split) | |
4030 | { | |
4031 | int index; | |
4032 | u32 cpos = le32_to_cpu(split_rec->e_cpos); | |
4033 | struct ocfs2_extent_list *left_el = NULL, *right_el, *insert_el, *el; | |
4034 | struct ocfs2_extent_rec *rec, *tmprec; | |
4035 | ||
c19a28e1 | 4036 | right_el = path_leaf_el(right_path); |
328d5752 MF |
4037 | if (left_path) |
4038 | left_el = path_leaf_el(left_path); | |
4039 | ||
4040 | el = right_el; | |
4041 | insert_el = right_el; | |
4042 | index = ocfs2_search_extent_list(el, cpos); | |
4043 | if (index != -1) { | |
4044 | if (index == 0 && left_path) { | |
4045 | BUG_ON(ocfs2_is_empty_extent(&el->l_recs[0])); | |
4046 | ||
4047 | /* | |
4048 | * This typically means that the record | |
4049 | * started in the left path but moved to the | |
4050 | * right as a result of rotation. We either | |
4051 | * move the existing record to the left, or we | |
4052 | * do the later insert there. | |
4053 | * | |
4054 | * In this case, the left path should always | |
4055 | * exist as the rotate code will have passed | |
4056 | * it back for a post-insert update. | |
4057 | */ | |
4058 | ||
4059 | if (split == SPLIT_LEFT) { | |
4060 | /* | |
4061 | * It's a left split. Since we know | |
4062 | * that the rotate code gave us an | |
4063 | * empty extent in the left path, we | |
4064 | * can just do the insert there. | |
4065 | */ | |
4066 | insert_el = left_el; | |
4067 | } else { | |
4068 | /* | |
4069 | * Right split - we have to move the | |
4070 | * existing record over to the left | |
4071 | * leaf. The insert will be into the | |
4072 | * newly created empty extent in the | |
4073 | * right leaf. | |
4074 | */ | |
4075 | tmprec = &right_el->l_recs[index]; | |
4076 | ocfs2_rotate_leaf(left_el, tmprec); | |
4077 | el = left_el; | |
4078 | ||
4079 | memset(tmprec, 0, sizeof(*tmprec)); | |
4080 | index = ocfs2_search_extent_list(left_el, cpos); | |
4081 | BUG_ON(index == -1); | |
4082 | } | |
4083 | } | |
4084 | } else { | |
4085 | BUG_ON(!left_path); | |
4086 | BUG_ON(!ocfs2_is_empty_extent(&left_el->l_recs[0])); | |
4087 | /* | |
4088 | * Left path is easy - we can just allow the insert to | |
4089 | * happen. | |
4090 | */ | |
4091 | el = left_el; | |
4092 | insert_el = left_el; | |
4093 | index = ocfs2_search_extent_list(el, cpos); | |
4094 | BUG_ON(index == -1); | |
4095 | } | |
4096 | ||
4097 | rec = &el->l_recs[index]; | |
4098 | ocfs2_subtract_from_rec(inode->i_sb, split, rec, split_rec); | |
4099 | ocfs2_rotate_leaf(insert_el, split_rec); | |
4100 | } | |
4101 | ||
dcd0538f | 4102 | /* |
e7d4cb6b TM |
4103 | * This function only does inserts on an allocation b-tree. For tree |
4104 | * depth = 0, ocfs2_insert_at_leaf() is called directly. | |
dcd0538f MF |
4105 | * |
4106 | * right_path is the path we want to do the actual insert | |
4107 | * in. left_path should only be passed in if we need to update that | |
4108 | * portion of the tree after an edge insert. | |
4109 | */ | |
4110 | static int ocfs2_insert_path(struct inode *inode, | |
4111 | handle_t *handle, | |
4112 | struct ocfs2_path *left_path, | |
4113 | struct ocfs2_path *right_path, | |
4114 | struct ocfs2_extent_rec *insert_rec, | |
4115 | struct ocfs2_insert_type *insert) | |
4116 | { | |
4117 | int ret, subtree_index; | |
4118 | struct buffer_head *leaf_bh = path_leaf_bh(right_path); | |
dcd0538f | 4119 | |
dcd0538f MF |
4120 | if (left_path) { |
4121 | int credits = handle->h_buffer_credits; | |
4122 | ||
4123 | /* | |
4124 | * There's a chance that left_path got passed back to | |
4125 | * us without being accounted for in the | |
4126 | * journal. Extend our transaction here to be sure we | |
4127 | * can change those blocks. | |
4128 | */ | |
4129 | credits += left_path->p_tree_depth; | |
4130 | ||
4131 | ret = ocfs2_extend_trans(handle, credits); | |
4132 | if (ret < 0) { | |
4133 | mlog_errno(ret); | |
4134 | goto out; | |
4135 | } | |
4136 | ||
4137 | ret = ocfs2_journal_access_path(inode, handle, left_path); | |
4138 | if (ret < 0) { | |
4139 | mlog_errno(ret); | |
4140 | goto out; | |
4141 | } | |
4142 | } | |
4143 | ||
e8aed345 MF |
4144 | /* |
4145 | * Pass both paths to the journal. The majority of inserts | |
4146 | * will be touching all components anyway. | |
4147 | */ | |
4148 | ret = ocfs2_journal_access_path(inode, handle, right_path); | |
4149 | if (ret < 0) { | |
4150 | mlog_errno(ret); | |
4151 | goto out; | |
4152 | } | |
4153 | ||
328d5752 MF |
4154 | if (insert->ins_split != SPLIT_NONE) { |
4155 | /* | |
4156 | * We could call ocfs2_insert_at_leaf() for some types | |
c78bad11 | 4157 | * of splits, but it's easier to just let one separate |
328d5752 MF |
4158 | * function sort it all out. |
4159 | */ | |
4160 | ocfs2_split_record(inode, left_path, right_path, | |
4161 | insert_rec, insert->ins_split); | |
e8aed345 MF |
4162 | |
4163 | /* | |
4164 | * Split might have modified either leaf and we don't | |
4165 | * have a guarantee that the later edge insert will | |
4166 | * dirty this for us. | |
4167 | */ | |
4168 | if (left_path) | |
4169 | ret = ocfs2_journal_dirty(handle, | |
4170 | path_leaf_bh(left_path)); | |
4171 | if (ret) | |
4172 | mlog_errno(ret); | |
328d5752 MF |
4173 | } else |
4174 | ocfs2_insert_at_leaf(insert_rec, path_leaf_el(right_path), | |
4175 | insert, inode); | |
dcd0538f | 4176 | |
dcd0538f MF |
4177 | ret = ocfs2_journal_dirty(handle, leaf_bh); |
4178 | if (ret) | |
4179 | mlog_errno(ret); | |
4180 | ||
4181 | if (left_path) { | |
4182 | /* | |
4183 | * The rotate code has indicated that we need to fix | |
4184 | * up portions of the tree after the insert. | |
4185 | * | |
4186 | * XXX: Should we extend the transaction here? | |
4187 | */ | |
4188 | subtree_index = ocfs2_find_subtree_root(inode, left_path, | |
4189 | right_path); | |
4190 | ocfs2_complete_edge_insert(inode, handle, left_path, | |
4191 | right_path, subtree_index); | |
4192 | } | |
4193 | ||
4194 | ret = 0; | |
4195 | out: | |
4196 | return ret; | |
4197 | } | |
4198 | ||
4199 | static int ocfs2_do_insert_extent(struct inode *inode, | |
4200 | handle_t *handle, | |
e7d4cb6b | 4201 | struct ocfs2_extent_tree *et, |
dcd0538f MF |
4202 | struct ocfs2_extent_rec *insert_rec, |
4203 | struct ocfs2_insert_type *type) | |
4204 | { | |
4205 | int ret, rotate = 0; | |
4206 | u32 cpos; | |
4207 | struct ocfs2_path *right_path = NULL; | |
4208 | struct ocfs2_path *left_path = NULL; | |
dcd0538f MF |
4209 | struct ocfs2_extent_list *el; |
4210 | ||
ce1d9ea6 | 4211 | el = et->et_root_el; |
dcd0538f | 4212 | |
13723d00 JB |
4213 | ret = ocfs2_et_root_journal_access(handle, inode, et, |
4214 | OCFS2_JOURNAL_ACCESS_WRITE); | |
dcd0538f MF |
4215 | if (ret) { |
4216 | mlog_errno(ret); | |
4217 | goto out; | |
4218 | } | |
4219 | ||
4220 | if (le16_to_cpu(el->l_tree_depth) == 0) { | |
4221 | ocfs2_insert_at_leaf(insert_rec, el, type, inode); | |
4222 | goto out_update_clusters; | |
4223 | } | |
4224 | ||
ffdd7a54 | 4225 | right_path = ocfs2_new_path_from_et(et); |
dcd0538f MF |
4226 | if (!right_path) { |
4227 | ret = -ENOMEM; | |
4228 | mlog_errno(ret); | |
4229 | goto out; | |
4230 | } | |
4231 | ||
4232 | /* | |
4233 | * Determine the path to start with. Rotations need the | |
4234 | * rightmost path; everything else can go directly to the | |
4235 | * target leaf. | |
4236 | */ | |
4237 | cpos = le32_to_cpu(insert_rec->e_cpos); | |
4238 | if (type->ins_appending == APPEND_NONE && | |
4239 | type->ins_contig == CONTIG_NONE) { | |
4240 | rotate = 1; | |
4241 | cpos = UINT_MAX; | |
4242 | } | |
4243 | ||
4244 | ret = ocfs2_find_path(inode, right_path, cpos); | |
4245 | if (ret) { | |
4246 | mlog_errno(ret); | |
4247 | goto out; | |
4248 | } | |
4249 | ||
4250 | /* | |
4251 | * Rotations and appends need special treatment - they modify | |
4252 | * parts of the tree above them. | |
4253 | * | |
4254 | * Both might pass back a path immediately to the left of the | |
4255 | * one being inserted to. This will cause | |
4256 | * ocfs2_insert_path() to modify the rightmost records of | |
4257 | * left_path to account for an edge insert. | |
4258 | * | |
4259 | * XXX: When modifying this code, keep in mind that an insert | |
4260 | * can wind up skipping both of these two special cases... | |
4261 | */ | |
4262 | if (rotate) { | |
328d5752 | 4263 | ret = ocfs2_rotate_tree_right(inode, handle, type->ins_split, |
dcd0538f MF |
4264 | le32_to_cpu(insert_rec->e_cpos), |
4265 | right_path, &left_path); | |
4266 | if (ret) { | |
4267 | mlog_errno(ret); | |
4268 | goto out; | |
4269 | } | |
e8aed345 MF |
4270 | |
4271 | /* | |
4272 | * ocfs2_rotate_tree_right() might have extended the | |
4273 | * transaction without re-journaling our tree root. | |
4274 | */ | |
13723d00 JB |
4275 | ret = ocfs2_et_root_journal_access(handle, inode, et, |
4276 | OCFS2_JOURNAL_ACCESS_WRITE); | |
e8aed345 MF |
4277 | if (ret) { |
4278 | mlog_errno(ret); | |
4279 | goto out; | |
4280 | } | |
dcd0538f MF |
4281 | } else if (type->ins_appending == APPEND_TAIL |
4282 | && type->ins_contig != CONTIG_LEFT) { | |
4283 | ret = ocfs2_append_rec_to_path(inode, handle, insert_rec, | |
4284 | right_path, &left_path); | |
4285 | if (ret) { | |
4286 | mlog_errno(ret); | |
4287 | goto out; | |
4288 | } | |
4289 | } | |
4290 | ||
4291 | ret = ocfs2_insert_path(inode, handle, left_path, right_path, | |
4292 | insert_rec, type); | |
4293 | if (ret) { | |
4294 | mlog_errno(ret); | |
4295 | goto out; | |
4296 | } | |
4297 | ||
4298 | out_update_clusters: | |
328d5752 | 4299 | if (type->ins_split == SPLIT_NONE) |
35dc0aa3 JB |
4300 | ocfs2_et_update_clusters(inode, et, |
4301 | le16_to_cpu(insert_rec->e_leaf_clusters)); | |
dcd0538f | 4302 | |
ce1d9ea6 | 4303 | ret = ocfs2_journal_dirty(handle, et->et_root_bh); |
dcd0538f MF |
4304 | if (ret) |
4305 | mlog_errno(ret); | |
4306 | ||
4307 | out: | |
4308 | ocfs2_free_path(left_path); | |
4309 | ocfs2_free_path(right_path); | |
4310 | ||
4311 | return ret; | |
4312 | } | |
4313 | ||
328d5752 | 4314 | static enum ocfs2_contig_type |
ad5a4d70 | 4315 | ocfs2_figure_merge_contig_type(struct inode *inode, struct ocfs2_path *path, |
328d5752 MF |
4316 | struct ocfs2_extent_list *el, int index, |
4317 | struct ocfs2_extent_rec *split_rec) | |
4318 | { | |
ad5a4d70 | 4319 | int status; |
328d5752 | 4320 | enum ocfs2_contig_type ret = CONTIG_NONE; |
ad5a4d70 TM |
4321 | u32 left_cpos, right_cpos; |
4322 | struct ocfs2_extent_rec *rec = NULL; | |
4323 | struct ocfs2_extent_list *new_el; | |
4324 | struct ocfs2_path *left_path = NULL, *right_path = NULL; | |
4325 | struct buffer_head *bh; | |
4326 | struct ocfs2_extent_block *eb; | |
4327 | ||
4328 | if (index > 0) { | |
4329 | rec = &el->l_recs[index - 1]; | |
4330 | } else if (path->p_tree_depth > 0) { | |
4331 | status = ocfs2_find_cpos_for_left_leaf(inode->i_sb, | |
4332 | path, &left_cpos); | |
4333 | if (status) | |
4334 | goto out; | |
4335 | ||
4336 | if (left_cpos != 0) { | |
ffdd7a54 | 4337 | left_path = ocfs2_new_path_from_path(path); |
ad5a4d70 TM |
4338 | if (!left_path) |
4339 | goto out; | |
4340 | ||
4341 | status = ocfs2_find_path(inode, left_path, left_cpos); | |
4342 | if (status) | |
4343 | goto out; | |
4344 | ||
4345 | new_el = path_leaf_el(left_path); | |
4346 | ||
4347 | if (le16_to_cpu(new_el->l_next_free_rec) != | |
4348 | le16_to_cpu(new_el->l_count)) { | |
4349 | bh = path_leaf_bh(left_path); | |
4350 | eb = (struct ocfs2_extent_block *)bh->b_data; | |
5e96581a JB |
4351 | ocfs2_error(inode->i_sb, |
4352 | "Extent block #%llu has an " | |
4353 | "invalid l_next_free_rec of " | |
4354 | "%d. It should have " | |
4355 | "matched the l_count of %d", | |
4356 | (unsigned long long)le64_to_cpu(eb->h_blkno), | |
4357 | le16_to_cpu(new_el->l_next_free_rec), | |
4358 | le16_to_cpu(new_el->l_count)); | |
4359 | status = -EINVAL; | |
ad5a4d70 TM |
4360 | goto out; |
4361 | } | |
4362 | rec = &new_el->l_recs[ | |
4363 | le16_to_cpu(new_el->l_next_free_rec) - 1]; | |
4364 | } | |
4365 | } | |
328d5752 MF |
4366 | |
4367 | /* | |
4368 | * We're careful to check for an empty extent record here - | |
4369 | * the merge code will know what to do if it sees one. | |
4370 | */ | |
ad5a4d70 | 4371 | if (rec) { |
328d5752 MF |
4372 | if (index == 1 && ocfs2_is_empty_extent(rec)) { |
4373 | if (split_rec->e_cpos == el->l_recs[index].e_cpos) | |
4374 | ret = CONTIG_RIGHT; | |
4375 | } else { | |
4376 | ret = ocfs2_extent_contig(inode, rec, split_rec); | |
4377 | } | |
4378 | } | |
4379 | ||
ad5a4d70 TM |
4380 | rec = NULL; |
4381 | if (index < (le16_to_cpu(el->l_next_free_rec) - 1)) | |
4382 | rec = &el->l_recs[index + 1]; | |
4383 | else if (le16_to_cpu(el->l_next_free_rec) == le16_to_cpu(el->l_count) && | |
4384 | path->p_tree_depth > 0) { | |
4385 | status = ocfs2_find_cpos_for_right_leaf(inode->i_sb, | |
4386 | path, &right_cpos); | |
4387 | if (status) | |
4388 | goto out; | |
4389 | ||
4390 | if (right_cpos == 0) | |
4391 | goto out; | |
4392 | ||
ffdd7a54 | 4393 | right_path = ocfs2_new_path_from_path(path); |
ad5a4d70 TM |
4394 | if (!right_path) |
4395 | goto out; | |
4396 | ||
4397 | status = ocfs2_find_path(inode, right_path, right_cpos); | |
4398 | if (status) | |
4399 | goto out; | |
4400 | ||
4401 | new_el = path_leaf_el(right_path); | |
4402 | rec = &new_el->l_recs[0]; | |
4403 | if (ocfs2_is_empty_extent(rec)) { | |
4404 | if (le16_to_cpu(new_el->l_next_free_rec) <= 1) { | |
4405 | bh = path_leaf_bh(right_path); | |
4406 | eb = (struct ocfs2_extent_block *)bh->b_data; | |
5e96581a JB |
4407 | ocfs2_error(inode->i_sb, |
4408 | "Extent block #%llu has an " | |
4409 | "invalid l_next_free_rec of %d", | |
4410 | (unsigned long long)le64_to_cpu(eb->h_blkno), | |
4411 | le16_to_cpu(new_el->l_next_free_rec)); | |
4412 | status = -EINVAL; | |
ad5a4d70 TM |
4413 | goto out; |
4414 | } | |
4415 | rec = &new_el->l_recs[1]; | |
4416 | } | |
4417 | } | |
4418 | ||
4419 | if (rec) { | |
328d5752 MF |
4420 | enum ocfs2_contig_type contig_type; |
4421 | ||
328d5752 MF |
4422 | contig_type = ocfs2_extent_contig(inode, rec, split_rec); |
4423 | ||
4424 | if (contig_type == CONTIG_LEFT && ret == CONTIG_RIGHT) | |
4425 | ret = CONTIG_LEFTRIGHT; | |
4426 | else if (ret == CONTIG_NONE) | |
4427 | ret = contig_type; | |
4428 | } | |
4429 | ||
ad5a4d70 TM |
4430 | out: |
4431 | if (left_path) | |
4432 | ocfs2_free_path(left_path); | |
4433 | if (right_path) | |
4434 | ocfs2_free_path(right_path); | |
4435 | ||
328d5752 MF |
4436 | return ret; |
4437 | } | |
4438 | ||
dcd0538f MF |
4439 | static void ocfs2_figure_contig_type(struct inode *inode, |
4440 | struct ocfs2_insert_type *insert, | |
4441 | struct ocfs2_extent_list *el, | |
ca12b7c4 TM |
4442 | struct ocfs2_extent_rec *insert_rec, |
4443 | struct ocfs2_extent_tree *et) | |
dcd0538f MF |
4444 | { |
4445 | int i; | |
4446 | enum ocfs2_contig_type contig_type = CONTIG_NONE; | |
4447 | ||
e48edee2 MF |
4448 | BUG_ON(le16_to_cpu(el->l_tree_depth) != 0); |
4449 | ||
dcd0538f MF |
4450 | for(i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) { |
4451 | contig_type = ocfs2_extent_contig(inode, &el->l_recs[i], | |
4452 | insert_rec); | |
4453 | if (contig_type != CONTIG_NONE) { | |
4454 | insert->ins_contig_index = i; | |
4455 | break; | |
4456 | } | |
4457 | } | |
4458 | insert->ins_contig = contig_type; | |
ca12b7c4 TM |
4459 | |
4460 | if (insert->ins_contig != CONTIG_NONE) { | |
4461 | struct ocfs2_extent_rec *rec = | |
4462 | &el->l_recs[insert->ins_contig_index]; | |
4463 | unsigned int len = le16_to_cpu(rec->e_leaf_clusters) + | |
4464 | le16_to_cpu(insert_rec->e_leaf_clusters); | |
4465 | ||
4466 | /* | |
4467 | * Caller might want us to limit the size of extents; don't | |
4468 | * calculate contiguousness if we might exceed that limit. | |
4469 | */ | |
ce1d9ea6 JB |
4470 | if (et->et_max_leaf_clusters && |
4471 | (len > et->et_max_leaf_clusters)) | |
ca12b7c4 TM |
4472 | insert->ins_contig = CONTIG_NONE; |
4473 | } | |
dcd0538f MF |
4474 | } |
4475 | ||
4476 | /* | |
4477 | * This should only be called against the rightmost leaf extent list. | |
4478 | * | |
4479 | * ocfs2_figure_appending_type() will figure out whether we'll have to | |
4480 | * insert at the tail of the rightmost leaf. | |
4481 | * | |
e7d4cb6b TM |
4482 | * This should also work against the root extent list for trees with 0 | |
4483 | * depth. If we consider the root extent list to be the rightmost leaf node | |
dcd0538f MF |
4484 | * then the logic here makes sense. |
4485 | */ | |
4486 | static void ocfs2_figure_appending_type(struct ocfs2_insert_type *insert, | |
4487 | struct ocfs2_extent_list *el, | |
4488 | struct ocfs2_extent_rec *insert_rec) | |
4489 | { | |
4490 | int i; | |
4491 | u32 cpos = le32_to_cpu(insert_rec->e_cpos); | |
4492 | struct ocfs2_extent_rec *rec; | |
4493 | ||
4494 | insert->ins_appending = APPEND_NONE; | |
4495 | ||
e48edee2 | 4496 | BUG_ON(le16_to_cpu(el->l_tree_depth) != 0); |
dcd0538f MF |
4497 | |
4498 | if (!el->l_next_free_rec) | |
4499 | goto set_tail_append; | |
4500 | ||
4501 | if (ocfs2_is_empty_extent(&el->l_recs[0])) { | |
4502 | /* Were all records empty? */ | |
4503 | if (le16_to_cpu(el->l_next_free_rec) == 1) | |
4504 | goto set_tail_append; | |
4505 | } | |
4506 | ||
4507 | i = le16_to_cpu(el->l_next_free_rec) - 1; | |
4508 | rec = &el->l_recs[i]; | |
4509 | ||
e48edee2 MF |
4510 | if (cpos >= |
4511 | (le32_to_cpu(rec->e_cpos) + le16_to_cpu(rec->e_leaf_clusters))) | |
dcd0538f MF |
4512 | goto set_tail_append; |
4513 | ||
4514 | return; | |
4515 | ||
4516 | set_tail_append: | |
4517 | insert->ins_appending = APPEND_TAIL; | |
4518 | } | |
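/*
 * Example for ocfs2_figure_appending_type(), with invented numbers:
 * the last record of the rightmost leaf covers cpos 500 for 20
 * clusters, so it ends at cpos 520.
 *
 *   insert_rec->e_cpos >= 520  ->  APPEND_TAIL (a true append)
 *   insert_rec->e_cpos <  520  ->  APPEND_NONE (the record lands
 *                                  somewhere inside the leaf instead)
 */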
4519 | ||
4520 | /* | |
4521 | * Helper function called at the beginning of an insert. | |
4522 | * | |
4523 | * This computes a few things that are commonly used in the process of | |
4524 | * inserting into the btree: | |
4525 | * - Whether the new extent is contiguous with an existing one. | |
4526 | * - The current tree depth. | |
4527 | * - Whether the insert is an appending one. | |
4528 | * - The total # of free records in the tree. | |
4529 | * | |
4530 | * All of the information is stored in the ocfs2_insert_type | |
4531 | * structure. | |
4532 | */ | |
4533 | static int ocfs2_figure_insert_type(struct inode *inode, | |
e7d4cb6b | 4534 | struct ocfs2_extent_tree *et, |
dcd0538f MF |
4535 | struct buffer_head **last_eb_bh, |
4536 | struct ocfs2_extent_rec *insert_rec, | |
c77534f6 | 4537 | int *free_records, |
dcd0538f MF |
4538 | struct ocfs2_insert_type *insert) |
4539 | { | |
4540 | int ret; | |
dcd0538f MF |
4541 | struct ocfs2_extent_block *eb; |
4542 | struct ocfs2_extent_list *el; | |
4543 | struct ocfs2_path *path = NULL; | |
4544 | struct buffer_head *bh = NULL; | |
4545 | ||
328d5752 MF |
4546 | insert->ins_split = SPLIT_NONE; |
4547 | ||
ce1d9ea6 | 4548 | el = et->et_root_el; |
dcd0538f MF |
4549 | insert->ins_tree_depth = le16_to_cpu(el->l_tree_depth); |
4550 | ||
4551 | if (el->l_tree_depth) { | |
4552 | /* | |
4553 | * If we have tree depth, we read in the | |
4554 | * rightmost extent block ahead of time as | |
4555 | * ocfs2_figure_insert_type() and ocfs2_add_branch() | |
4556 | * may want it later. | |
4557 | */ | |
5e96581a JB |
4558 | ret = ocfs2_read_extent_block(inode, |
4559 | ocfs2_et_get_last_eb_blk(et), | |
4560 | &bh); | |
dcd0538f MF |
4561 | if (ret) { |
4562 | mlog_exit(ret); | |
4563 | goto out; | |
4564 | } | |
ccd979bd | 4565 | eb = (struct ocfs2_extent_block *) bh->b_data; |
ccd979bd | 4566 | el = &eb->h_list; |
dcd0538f | 4567 | } |
ccd979bd | 4568 | |
dcd0538f MF |
4569 | /* |
4570 | * Unless we have a contiguous insert, we'll need to know if | |
4571 | * there is room left in our allocation tree for another | |
4572 | * extent record. | |
4573 | * | |
4574 | * XXX: This test is simplistic, we can search for empty | |
4575 | * extent records too. | |
4576 | */ | |
c77534f6 | 4577 | *free_records = le16_to_cpu(el->l_count) - |
dcd0538f MF |
4578 | le16_to_cpu(el->l_next_free_rec); |
4579 | ||
4580 | if (!insert->ins_tree_depth) { | |
ca12b7c4 | 4581 | ocfs2_figure_contig_type(inode, insert, el, insert_rec, et); |
dcd0538f MF |
4582 | ocfs2_figure_appending_type(insert, el, insert_rec); |
4583 | return 0; | |
ccd979bd MF |
4584 | } |
4585 | ||
ffdd7a54 | 4586 | path = ocfs2_new_path_from_et(et); |
dcd0538f MF |
4587 | if (!path) { |
4588 | ret = -ENOMEM; | |
4589 | mlog_errno(ret); | |
4590 | goto out; | |
4591 | } | |
ccd979bd | 4592 | |
dcd0538f MF |
4593 | /* |
4594 | * In the case that we're inserting past what the tree | |
4595 | * currently accounts for, ocfs2_find_path() will return for | |
4596 | * us the rightmost tree path. This is accounted for below in | |
4597 | * the appending code. | |
4598 | */ | |
4599 | ret = ocfs2_find_path(inode, path, le32_to_cpu(insert_rec->e_cpos)); | |
4600 | if (ret) { | |
4601 | mlog_errno(ret); | |
4602 | goto out; | |
4603 | } | |
ccd979bd | 4604 | |
dcd0538f MF |
4605 | el = path_leaf_el(path); |
4606 | ||
4607 | /* | |
4608 | * Now that we have the path, there are two things we want to determine: | |
4609 | * 1) Contiguousness (also set contig_index if this is so) | |
4610 | * | |
4611 | * 2) Are we doing an append? We can trivially break this up | |
4612 | * into two types of appends: simple record append, or a | |
4613 | * rotate inside the tail leaf. | |
4614 | */ | |
ca12b7c4 | 4615 | ocfs2_figure_contig_type(inode, insert, el, insert_rec, et); |
dcd0538f MF |
4616 | |
4617 | /* | |
4618 | * The insert code isn't quite ready to deal with all cases of | |
4619 | * left contiguousness. Specifically, if it's an insert into | |
4620 | * the 1st record in a leaf, it will require the adjustment of | |
e48edee2 | 4621 | * cluster count on the last record of the path directly to it's |
dcd0538f MF |
4622 | * left. For now, just catch that case and fool the layers |
4623 | * above us. This works just fine for tree_depth == 0, which | |
4624 | * is why we allow that above. | |
4625 | */ | |
4626 | if (insert->ins_contig == CONTIG_LEFT && | |
4627 | insert->ins_contig_index == 0) | |
4628 | insert->ins_contig = CONTIG_NONE; | |
4629 | ||
4630 | /* | |
4631 | * Ok, so we can simply compare against last_eb to figure out | |
4632 | * whether the path doesn't exist. This will only happen in | |
4633 | * the case that we're doing a tail append, so maybe we can | |
4634 | * take advantage of that information somehow. | |
4635 | */ | |
35dc0aa3 | 4636 | if (ocfs2_et_get_last_eb_blk(et) == |
e7d4cb6b | 4637 | path_leaf_bh(path)->b_blocknr) { |
dcd0538f MF |
4638 | /* |
4639 | * Ok, ocfs2_find_path() returned us the rightmost | |
4640 | * tree path. This might be an appending insert. There are | |
4641 | * two cases: | |
4642 | * 1) We're doing a true append at the tail: | |
4643 | * -This might even be off the end of the leaf | |
4644 | * 2) We're "appending" by rotating in the tail | |
4645 | */ | |
4646 | ocfs2_figure_appending_type(insert, el, insert_rec); | |
4647 | } | |
4648 | ||
4649 | out: | |
4650 | ocfs2_free_path(path); | |
4651 | ||
4652 | if (ret == 0) | |
4653 | *last_eb_bh = bh; | |
4654 | else | |
4655 | brelse(bh); | |
4656 | return ret; | |
ccd979bd MF |
4657 | } |
4658 | ||
dcd0538f MF |
4659 | /* |
4660 | * Insert an extent into an inode btree. | |
4661 | * | |
4662 | * The caller needs to update fe->i_clusters | |
4663 | */ | |
f99b9b7c JB |
4664 | int ocfs2_insert_extent(struct ocfs2_super *osb, |
4665 | handle_t *handle, | |
4666 | struct inode *inode, | |
4667 | struct ocfs2_extent_tree *et, | |
4668 | u32 cpos, | |
4669 | u64 start_blk, | |
4670 | u32 new_clusters, | |
4671 | u8 flags, | |
4672 | struct ocfs2_alloc_context *meta_ac) | |
ccd979bd | 4673 | { |
c3afcbb3 | 4674 | int status; |
c77534f6 | 4675 | int uninitialized_var(free_records); |
ccd979bd | 4676 | struct buffer_head *last_eb_bh = NULL; |
dcd0538f MF |
4677 | struct ocfs2_insert_type insert = {0, }; |
4678 | struct ocfs2_extent_rec rec; | |
4679 | ||
4680 | mlog(0, "add %u clusters at position %u to inode %llu\n", | |
4681 | new_clusters, cpos, (unsigned long long)OCFS2_I(inode)->ip_blkno); | |
4682 | ||
e48edee2 | 4683 | memset(&rec, 0, sizeof(rec)); |
dcd0538f MF |
4684 | rec.e_cpos = cpu_to_le32(cpos); |
4685 | rec.e_blkno = cpu_to_le64(start_blk); | |
e48edee2 | 4686 | rec.e_leaf_clusters = cpu_to_le16(new_clusters); |
2ae99a60 | 4687 | rec.e_flags = flags; |
1e61ee79 JB |
4688 | status = ocfs2_et_insert_check(inode, et, &rec); |
4689 | if (status) { | |
4690 | mlog_errno(status); | |
4691 | goto bail; | |
4692 | } | |
dcd0538f | 4693 | |
e7d4cb6b | 4694 | status = ocfs2_figure_insert_type(inode, et, &last_eb_bh, &rec, |
c77534f6 | 4695 | &free_records, &insert); |
dcd0538f MF |
4696 | if (status < 0) { |
4697 | mlog_errno(status); | |
4698 | goto bail; | |
ccd979bd MF |
4699 | } |
4700 | ||
dcd0538f MF |
4701 | mlog(0, "Insert.appending: %u, Insert.Contig: %u, " |
4702 | "Insert.contig_index: %d, Insert.free_records: %d, " | |
4703 | "Insert.tree_depth: %d\n", | |
4704 | insert.ins_appending, insert.ins_contig, insert.ins_contig_index, | |
c77534f6 | 4705 | free_records, insert.ins_tree_depth); |
ccd979bd | 4706 | |
c77534f6 | 4707 | if (insert.ins_contig == CONTIG_NONE && free_records == 0) { |
e7d4cb6b | 4708 | status = ocfs2_grow_tree(inode, handle, et, |
328d5752 | 4709 | &insert.ins_tree_depth, &last_eb_bh, |
c3afcbb3 MF |
4710 | meta_ac); |
4711 | if (status) { | |
ccd979bd MF |
4712 | mlog_errno(status); |
4713 | goto bail; | |
4714 | } | |
ccd979bd MF |
4715 | } |
4716 | ||
dcd0538f | 4717 | /* Finally, we can add clusters. This might rotate the tree for us. */ |
e7d4cb6b | 4718 | status = ocfs2_do_insert_extent(inode, handle, et, &rec, &insert); |
ccd979bd MF |
4719 | if (status < 0) |
4720 | mlog_errno(status); | |
f99b9b7c | 4721 | else if (et->et_ops == &ocfs2_dinode_et_ops) |
83418978 | 4722 | ocfs2_extent_map_insert_rec(inode, &rec); |
ccd979bd MF |
4723 | |
4724 | bail: | |
a81cb88b | 4725 | brelse(last_eb_bh); |
ccd979bd | 4726 | |
f56654c4 TM |
4727 | mlog_exit(status); |
4728 | return status; | |
4729 | } | |
4730 | ||
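/*
 * Illustrative caller sketch (editorial addition, not part of alloc.c):
 * how a hypothetical helper might use ocfs2_insert_extent() as declared
 * above to record an already-allocated run of clusters.  The name
 * example_insert_allocated() and its arguments are assumptions made for
 * illustration; error handling follows the pattern used in this file.
 */
static int example_insert_allocated(struct ocfs2_super *osb,
				    handle_t *handle,
				    struct inode *inode,
				    struct ocfs2_extent_tree *et,
				    u32 cpos, u64 start_blk, u32 clusters,
				    struct ocfs2_alloc_context *meta_ac)
{
	int status;

	/* flags == 0: the extent is marked written from the start. */
	status = ocfs2_insert_extent(osb, handle, inode, et, cpos,
				     start_blk, clusters, 0, meta_ac);
	if (status)
		mlog_errno(status);

	return status;
}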
0eb8d47e TM |
4731 | /* |
4732 | * Allocate and add clusters into the extent b-tree. | |
4733 | * The new clusters (clusters_to_add) will be inserted at logical_offset. | |
f99b9b7c | 4734 | * The extent b-tree's root is specified by et, and |
0eb8d47e TM |
4735 | * it is not limited to the file storage. Any extent tree can use this |
4736 | * function if it implements the proper ocfs2_extent_tree. | |
4737 | */ | |
4738 | int ocfs2_add_clusters_in_btree(struct ocfs2_super *osb, | |
4739 | struct inode *inode, | |
4740 | u32 *logical_offset, | |
4741 | u32 clusters_to_add, | |
4742 | int mark_unwritten, | |
f99b9b7c | 4743 | struct ocfs2_extent_tree *et, |
0eb8d47e TM |
4744 | handle_t *handle, |
4745 | struct ocfs2_alloc_context *data_ac, | |
4746 | struct ocfs2_alloc_context *meta_ac, | |
f99b9b7c | 4747 | enum ocfs2_alloc_restarted *reason_ret) |
0eb8d47e TM |
4748 | { |
4749 | int status = 0; | |
4750 | int free_extents; | |
4751 | enum ocfs2_alloc_restarted reason = RESTART_NONE; | |
4752 | u32 bit_off, num_bits; | |
4753 | u64 block; | |
4754 | u8 flags = 0; | |
4755 | ||
4756 | BUG_ON(!clusters_to_add); | |
4757 | ||
4758 | if (mark_unwritten) | |
4759 | flags = OCFS2_EXT_UNWRITTEN; | |
4760 | ||
f99b9b7c | 4761 | free_extents = ocfs2_num_free_extents(osb, inode, et); |
0eb8d47e TM |
4762 | if (free_extents < 0) { |
4763 | status = free_extents; | |
4764 | mlog_errno(status); | |
4765 | goto leave; | |
4766 | } | |
4767 | ||
4768 | /* there are two cases which could cause us to EAGAIN in the | |
4769 | * we-need-more-metadata case: | |
4770 | * 1) we haven't reserved *any* | |
4771 | * 2) we are so fragmented, we've needed to add metadata too | |
4772 | * many times. */ | |
4773 | if (!free_extents && !meta_ac) { | |
4774 | mlog(0, "we haven't reserved any metadata!\n"); | |
4775 | status = -EAGAIN; | |
4776 | reason = RESTART_META; | |
4777 | goto leave; | |
4778 | } else if ((!free_extents) | |
4779 | && (ocfs2_alloc_context_bits_left(meta_ac) | |
f99b9b7c | 4780 | < ocfs2_extend_meta_needed(et->et_root_el))) { |
0eb8d47e TM |
4781 | mlog(0, "filesystem is really fragmented...\n"); |
4782 | status = -EAGAIN; | |
4783 | reason = RESTART_META; | |
4784 | goto leave; | |
4785 | } | |
4786 | ||
4787 | status = __ocfs2_claim_clusters(osb, handle, data_ac, 1, | |
4788 | clusters_to_add, &bit_off, &num_bits); | |
4789 | if (status < 0) { | |
4790 | if (status != -ENOSPC) | |
4791 | mlog_errno(status); | |
4792 | goto leave; | |
4793 | } | |
4794 | ||
4795 | BUG_ON(num_bits > clusters_to_add); | |
4796 | ||
13723d00 JB |
4797 | /* reserve our write early -- insert_extent may update the tree root */ |
4798 | status = ocfs2_et_root_journal_access(handle, inode, et, | |
4799 | OCFS2_JOURNAL_ACCESS_WRITE); | |
0eb8d47e TM |
4800 | if (status < 0) { |
4801 | mlog_errno(status); | |
4802 | goto leave; | |
4803 | } | |
4804 | ||
4805 | block = ocfs2_clusters_to_blocks(osb->sb, bit_off); | |
4806 | mlog(0, "Allocating %u clusters at block %u for inode %llu\n", | |
4807 | num_bits, bit_off, (unsigned long long)OCFS2_I(inode)->ip_blkno); | |
f99b9b7c JB |
4808 | status = ocfs2_insert_extent(osb, handle, inode, et, |
4809 | *logical_offset, block, | |
4810 | num_bits, flags, meta_ac); | |
0eb8d47e TM |
4811 | if (status < 0) { |
4812 | mlog_errno(status); | |
4813 | goto leave; | |
4814 | } | |
4815 | ||
f99b9b7c | 4816 | status = ocfs2_journal_dirty(handle, et->et_root_bh); |
0eb8d47e TM |
4817 | if (status < 0) { |
4818 | mlog_errno(status); | |
4819 | goto leave; | |
4820 | } | |
4821 | ||
4822 | clusters_to_add -= num_bits; | |
4823 | *logical_offset += num_bits; | |
4824 | ||
4825 | if (clusters_to_add) { | |
4826 | mlog(0, "need to alloc once more, wanted = %u\n", | |
4827 | clusters_to_add); | |
4828 | status = -EAGAIN; | |
4829 | reason = RESTART_TRANS; | |
4830 | } | |
4831 | ||
4832 | leave: | |
4833 | mlog_exit(status); | |
4834 | if (reason_ret) | |
4835 | *reason_ret = reason; | |
4836 | return status; | |
4837 | } | |
4838 | ||
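/*
 * Illustrative sketch (editorial addition, not part of alloc.c): the
 * restart contract of ocfs2_add_clusters_in_btree() above.  On -EAGAIN
 * the caller inspects *reason_ret -- RESTART_META means more metadata
 * must be reserved, RESTART_TRANS means the transaction should be
 * restarted and the call repeated for the remaining clusters.  The
 * helper name and the elided transaction management are assumptions.
 */
static int example_add_clusters(struct ocfs2_super *osb, struct inode *inode,
				u32 *logical_offset, u32 clusters_to_add,
				struct ocfs2_extent_tree *et, handle_t *handle,
				struct ocfs2_alloc_context *data_ac,
				struct ocfs2_alloc_context *meta_ac)
{
	enum ocfs2_alloc_restarted why = RESTART_NONE;
	int status;

	status = ocfs2_add_clusters_in_btree(osb, inode, logical_offset,
					     clusters_to_add, 0, et, handle,
					     data_ac, meta_ac, &why);
	if (status == -EAGAIN && why == RESTART_TRANS)
		mlog(0, "caller should restart the transaction and retry\n");

	return status;
}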
328d5752 MF |
4839 | static void ocfs2_make_right_split_rec(struct super_block *sb, |
4840 | struct ocfs2_extent_rec *split_rec, | |
4841 | u32 cpos, | |
4842 | struct ocfs2_extent_rec *rec) | |
4843 | { | |
4844 | u32 rec_cpos = le32_to_cpu(rec->e_cpos); | |
4845 | u32 rec_range = rec_cpos + le16_to_cpu(rec->e_leaf_clusters); | |
4846 | ||
4847 | memset(split_rec, 0, sizeof(struct ocfs2_extent_rec)); | |
4848 | ||
4849 | split_rec->e_cpos = cpu_to_le32(cpos); | |
4850 | split_rec->e_leaf_clusters = cpu_to_le16(rec_range - cpos); | |
4851 | ||
4852 | split_rec->e_blkno = rec->e_blkno; | |
4853 | le64_add_cpu(&split_rec->e_blkno, | |
4854 | ocfs2_clusters_to_blocks(sb, cpos - rec_cpos)); | |
4855 | ||
4856 | split_rec->e_flags = rec->e_flags; | |
4857 | } | |
4858 | ||
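/*
 * Worked example (editorial addition): with a record at e_cpos == 100
 * covering 50 clusters, ocfs2_make_right_split_rec(sb, &split, 120, rec)
 * computes rec_range == 150, so split.e_cpos == 120,
 * split.e_leaf_clusters == 30, and split.e_blkno is rec->e_blkno
 * advanced by ocfs2_clusters_to_blocks(sb, 20); e_flags is copied.
 */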
4859 | static int ocfs2_split_and_insert(struct inode *inode, | |
4860 | handle_t *handle, | |
4861 | struct ocfs2_path *path, | |
e7d4cb6b | 4862 | struct ocfs2_extent_tree *et, |
328d5752 MF |
4863 | struct buffer_head **last_eb_bh, |
4864 | int split_index, | |
4865 | struct ocfs2_extent_rec *orig_split_rec, | |
4866 | struct ocfs2_alloc_context *meta_ac) | |
4867 | { | |
4868 | int ret = 0, depth; | |
4869 | unsigned int insert_range, rec_range, do_leftright = 0; | |
4870 | struct ocfs2_extent_rec tmprec; | |
4871 | struct ocfs2_extent_list *rightmost_el; | |
4872 | struct ocfs2_extent_rec rec; | |
4873 | struct ocfs2_extent_rec split_rec = *orig_split_rec; | |
4874 | struct ocfs2_insert_type insert; | |
4875 | struct ocfs2_extent_block *eb; | |
328d5752 MF |
4876 | |
4877 | leftright: | |
4878 | /* | |
4879 | * Store a copy of the record on the stack - it might move | |
4880 | * around as the tree is manipulated below. | |
4881 | */ | |
4882 | rec = path_leaf_el(path)->l_recs[split_index]; | |
4883 | ||
ce1d9ea6 | 4884 | rightmost_el = et->et_root_el; |
328d5752 MF |
4885 | |
4886 | depth = le16_to_cpu(rightmost_el->l_tree_depth); | |
4887 | if (depth) { | |
4888 | BUG_ON(!(*last_eb_bh)); | |
4889 | eb = (struct ocfs2_extent_block *) (*last_eb_bh)->b_data; | |
4890 | rightmost_el = &eb->h_list; | |
4891 | } | |
4892 | ||
4893 | if (le16_to_cpu(rightmost_el->l_next_free_rec) == | |
4894 | le16_to_cpu(rightmost_el->l_count)) { | |
e7d4cb6b TM |
4895 | ret = ocfs2_grow_tree(inode, handle, et, |
4896 | &depth, last_eb_bh, meta_ac); | |
328d5752 MF |
4897 | if (ret) { |
4898 | mlog_errno(ret); | |
4899 | goto out; | |
4900 | } | |
328d5752 MF |
4901 | } |
4902 | ||
4903 | memset(&insert, 0, sizeof(struct ocfs2_insert_type)); | |
4904 | insert.ins_appending = APPEND_NONE; | |
4905 | insert.ins_contig = CONTIG_NONE; | |
328d5752 MF |
4906 | insert.ins_tree_depth = depth; |
4907 | ||
4908 | insert_range = le32_to_cpu(split_rec.e_cpos) + | |
4909 | le16_to_cpu(split_rec.e_leaf_clusters); | |
4910 | rec_range = le32_to_cpu(rec.e_cpos) + | |
4911 | le16_to_cpu(rec.e_leaf_clusters); | |
4912 | ||
4913 | if (split_rec.e_cpos == rec.e_cpos) { | |
4914 | insert.ins_split = SPLIT_LEFT; | |
4915 | } else if (insert_range == rec_range) { | |
4916 | insert.ins_split = SPLIT_RIGHT; | |
4917 | } else { | |
4918 | /* | |
4919 | * Left/right split. We fake this as a right split | |
4920 | * first and then make a second pass as a left split. | |
4921 | */ | |
4922 | insert.ins_split = SPLIT_RIGHT; | |
4923 | ||
4924 | ocfs2_make_right_split_rec(inode->i_sb, &tmprec, insert_range, | |
4925 | &rec); | |
4926 | ||
4927 | split_rec = tmprec; | |
4928 | ||
4929 | BUG_ON(do_leftright); | |
4930 | do_leftright = 1; | |
4931 | } | |
4932 | ||
e7d4cb6b | 4933 | ret = ocfs2_do_insert_extent(inode, handle, et, &split_rec, &insert); |
328d5752 MF |
4934 | if (ret) { |
4935 | mlog_errno(ret); | |
4936 | goto out; | |
4937 | } | |
4938 | ||
4939 | if (do_leftright == 1) { | |
4940 | u32 cpos; | |
4941 | struct ocfs2_extent_list *el; | |
4942 | ||
4943 | do_leftright++; | |
4944 | split_rec = *orig_split_rec; | |
4945 | ||
4946 | ocfs2_reinit_path(path, 1); | |
4947 | ||
4948 | cpos = le32_to_cpu(split_rec.e_cpos); | |
4949 | ret = ocfs2_find_path(inode, path, cpos); | |
4950 | if (ret) { | |
4951 | mlog_errno(ret); | |
4952 | goto out; | |
4953 | } | |
4954 | ||
4955 | el = path_leaf_el(path); | |
4956 | split_index = ocfs2_search_extent_list(el, cpos); | |
4957 | goto leftright; | |
4958 | } | |
4959 | out: | |
4960 | ||
4961 | return ret; | |
4962 | } | |
4963 | ||
47be12e4 TM |
4964 | static int ocfs2_replace_extent_rec(struct inode *inode, |
4965 | handle_t *handle, | |
4966 | struct ocfs2_path *path, | |
4967 | struct ocfs2_extent_list *el, | |
4968 | int split_index, | |
4969 | struct ocfs2_extent_rec *split_rec) | |
4970 | { | |
4971 | int ret; | |
4972 | ||
4973 | ret = ocfs2_path_bh_journal_access(handle, inode, path, | |
4974 | path_num_items(path) - 1); | |
4975 | if (ret) { | |
4976 | mlog_errno(ret); | |
4977 | goto out; | |
4978 | } | |
4979 | ||
4980 | el->l_recs[split_index] = *split_rec; | |
4981 | ||
4982 | ocfs2_journal_dirty(handle, path_leaf_bh(path)); | |
4983 | out: | |
4984 | return ret; | |
4985 | } | |
4986 | ||
328d5752 MF |
4987 | /* |
4988 | * Mark part or all of the extent record at split_index in the leaf | |
4989 | * pointed to by path as written. This removes the unwritten | |
4990 | * extent flag. | |
4991 | * | |
4992 | * Care is taken to handle contiguousness so as to not grow the tree. | |
4993 | * | |
4994 | * meta_ac is not strictly necessary - we only truly need it if growth | |
4995 | * of the tree is required. All other cases will degrade into a less | |
4996 | * optimal tree layout. | |
4997 | * | |
e7d4cb6b TM |
4998 | * last_eb_bh should be the rightmost leaf block for any extent |
4999 | * btree. Since a split may grow the tree or a merge might shrink it, | |
5000 | * the caller cannot trust the contents of that buffer after this call. | |
328d5752 MF |
5001 | * |
5002 | * This code is optimized for readability - several passes might be | |
5003 | * made over certain portions of the tree. All of those blocks will | |
5004 | * have been brought into cache (and pinned via the journal), so the | |
5005 | * extra overhead is not expressed in terms of disk reads. | |
5006 | */ | |
5007 | static int __ocfs2_mark_extent_written(struct inode *inode, | |
e7d4cb6b | 5008 | struct ocfs2_extent_tree *et, |
328d5752 MF |
5009 | handle_t *handle, |
5010 | struct ocfs2_path *path, | |
5011 | int split_index, | |
5012 | struct ocfs2_extent_rec *split_rec, | |
5013 | struct ocfs2_alloc_context *meta_ac, | |
5014 | struct ocfs2_cached_dealloc_ctxt *dealloc) | |
5015 | { | |
5016 | int ret = 0; | |
5017 | struct ocfs2_extent_list *el = path_leaf_el(path); | |
e8aed345 | 5018 | struct buffer_head *last_eb_bh = NULL; |
328d5752 MF |
5019 | struct ocfs2_extent_rec *rec = &el->l_recs[split_index]; |
5020 | struct ocfs2_merge_ctxt ctxt; | |
5021 | struct ocfs2_extent_list *rightmost_el; | |
5022 | ||
3cf0c507 | 5023 | if (!(rec->e_flags & OCFS2_EXT_UNWRITTEN)) { |
328d5752 MF |
5024 | ret = -EIO; |
5025 | mlog_errno(ret); | |
5026 | goto out; | |
5027 | } | |
5028 | ||
5029 | if (le32_to_cpu(rec->e_cpos) > le32_to_cpu(split_rec->e_cpos) || | |
5030 | ((le32_to_cpu(rec->e_cpos) + le16_to_cpu(rec->e_leaf_clusters)) < | |
5031 | (le32_to_cpu(split_rec->e_cpos) + le16_to_cpu(split_rec->e_leaf_clusters)))) { | |
5032 | ret = -EIO; | |
5033 | mlog_errno(ret); | |
5034 | goto out; | |
5035 | } | |
5036 | ||
ad5a4d70 | 5037 | ctxt.c_contig_type = ocfs2_figure_merge_contig_type(inode, path, el, |
328d5752 MF |
5038 | split_index, |
5039 | split_rec); | |
5040 | ||
5041 | /* | |
5042 | * The core merge / split code wants to know how much room is | |
5043 | * left in this inode's allocation tree, so we pass the | |
5044 | * rightmost extent list. | |
5045 | */ | |
5046 | if (path->p_tree_depth) { | |
5047 | struct ocfs2_extent_block *eb; | |
328d5752 | 5048 | |
5e96581a JB |
5049 | ret = ocfs2_read_extent_block(inode, |
5050 | ocfs2_et_get_last_eb_blk(et), | |
5051 | &last_eb_bh); | |
328d5752 MF |
5052 | if (ret) { |
5053 | mlog_exit(ret); | |
5054 | goto out; | |
5055 | } | |
5056 | ||
5057 | eb = (struct ocfs2_extent_block *) last_eb_bh->b_data; | |
328d5752 MF |
5058 | rightmost_el = &eb->h_list; |
5059 | } else | |
5060 | rightmost_el = path_root_el(path); | |
5061 | ||
328d5752 MF |
5062 | if (rec->e_cpos == split_rec->e_cpos && |
5063 | rec->e_leaf_clusters == split_rec->e_leaf_clusters) | |
5064 | ctxt.c_split_covers_rec = 1; | |
5065 | else | |
5066 | ctxt.c_split_covers_rec = 0; | |
5067 | ||
5068 | ctxt.c_has_empty_extent = ocfs2_is_empty_extent(&el->l_recs[0]); | |
5069 | ||
015452b1 MF |
5070 | mlog(0, "index: %d, contig: %u, has_empty: %u, split_covers: %u\n", |
5071 | split_index, ctxt.c_contig_type, ctxt.c_has_empty_extent, | |
5072 | ctxt.c_split_covers_rec); | |
328d5752 MF |
5073 | |
5074 | if (ctxt.c_contig_type == CONTIG_NONE) { | |
5075 | if (ctxt.c_split_covers_rec) | |
47be12e4 TM |
5076 | ret = ocfs2_replace_extent_rec(inode, handle, |
5077 | path, el, | |
5078 | split_index, split_rec); | |
328d5752 | 5079 | else |
e7d4cb6b | 5080 | ret = ocfs2_split_and_insert(inode, handle, path, et, |
328d5752 MF |
5081 | &last_eb_bh, split_index, |
5082 | split_rec, meta_ac); | |
5083 | if (ret) | |
5084 | mlog_errno(ret); | |
5085 | } else { | |
5086 | ret = ocfs2_try_to_merge_extent(inode, handle, path, | |
5087 | split_index, split_rec, | |
e7d4cb6b | 5088 | dealloc, &ctxt, et); |
328d5752 MF |
5089 | if (ret) |
5090 | mlog_errno(ret); | |
5091 | } | |
5092 | ||
328d5752 MF |
5093 | out: |
5094 | brelse(last_eb_bh); | |
5095 | return ret; | |
5096 | } | |
5097 | ||
5098 | /* | |
5099 | * Mark the already-existing extent at cpos as written for len clusters. | |
5100 | * | |
5101 | * If the existing extent is larger than the request, initiate a | |
5102 | * split. An attempt will be made at merging with adjacent extents. | |
5103 | * | |
5104 | * The caller is responsible for passing down meta_ac if we'll need it. | |
5105 | */ | |
f99b9b7c JB |
5106 | int ocfs2_mark_extent_written(struct inode *inode, |
5107 | struct ocfs2_extent_tree *et, | |
328d5752 MF |
5108 | handle_t *handle, u32 cpos, u32 len, u32 phys, |
5109 | struct ocfs2_alloc_context *meta_ac, | |
f99b9b7c | 5110 | struct ocfs2_cached_dealloc_ctxt *dealloc) |
328d5752 MF |
5111 | { |
5112 | int ret, index; | |
5113 | u64 start_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys); | |
5114 | struct ocfs2_extent_rec split_rec; | |
5115 | struct ocfs2_path *left_path = NULL; | |
5116 | struct ocfs2_extent_list *el; | |
5117 | ||
5118 | mlog(0, "Inode %lu cpos %u, len %u, phys %u (%llu)\n", | |
5119 | inode->i_ino, cpos, len, phys, (unsigned long long)start_blkno); | |
5120 | ||
5121 | if (!ocfs2_writes_unwritten_extents(OCFS2_SB(inode->i_sb))) { | |
5122 | ocfs2_error(inode->i_sb, "Inode %llu has unwritten extents " | |
5123 | "that are being written to, but the feature bit " | |
5124 | "is not set in the super block.", | |
5125 | (unsigned long long)OCFS2_I(inode)->ip_blkno); | |
5126 | ret = -EROFS; | |
5127 | goto out; | |
5128 | } | |
5129 | ||
5130 | /* | |
5131 | * XXX: This should be fixed up so that we just re-insert the | |
5132 | * next extent records. | |
f99b9b7c JB |
5133 | * |
5134 | * XXX: This is a hack on the extent tree, maybe it should be | |
5135 | * an op? | |
328d5752 | 5136 | */ |
f99b9b7c | 5137 | if (et->et_ops == &ocfs2_dinode_et_ops) |
e7d4cb6b | 5138 | ocfs2_extent_map_trunc(inode, 0); |
328d5752 | 5139 | |
ffdd7a54 | 5140 | left_path = ocfs2_new_path_from_et(et); |
328d5752 MF |
5141 | if (!left_path) { |
5142 | ret = -ENOMEM; | |
5143 | mlog_errno(ret); | |
5144 | goto out; | |
5145 | } | |
5146 | ||
5147 | ret = ocfs2_find_path(inode, left_path, cpos); | |
5148 | if (ret) { | |
5149 | mlog_errno(ret); | |
5150 | goto out; | |
5151 | } | |
5152 | el = path_leaf_el(left_path); | |
5153 | ||
5154 | index = ocfs2_search_extent_list(el, cpos); | |
5155 | if (index == -1 || index >= le16_to_cpu(el->l_next_free_rec)) { | |
5156 | ocfs2_error(inode->i_sb, | |
5157 | "Inode %llu has an extent at cpos %u which can no " | |
5158 | "longer be found.\n", | |
5159 | (unsigned long long)OCFS2_I(inode)->ip_blkno, cpos); | |
5160 | ret = -EROFS; | |
5161 | goto out; | |
5162 | } | |
5163 | ||
5164 | memset(&split_rec, 0, sizeof(struct ocfs2_extent_rec)); | |
5165 | split_rec.e_cpos = cpu_to_le32(cpos); | |
5166 | split_rec.e_leaf_clusters = cpu_to_le16(len); | |
5167 | split_rec.e_blkno = cpu_to_le64(start_blkno); | |
5168 | split_rec.e_flags = path_leaf_el(left_path)->l_recs[index].e_flags; | |
5169 | split_rec.e_flags &= ~OCFS2_EXT_UNWRITTEN; | |
5170 | ||
f99b9b7c | 5171 | ret = __ocfs2_mark_extent_written(inode, et, handle, left_path, |
e7d4cb6b TM |
5172 | index, &split_rec, meta_ac, |
5173 | dealloc); | |
328d5752 MF |
5174 | if (ret) |
5175 | mlog_errno(ret); | |
5176 | ||
5177 | out: | |
5178 | ocfs2_free_path(left_path); | |
5179 | return ret; | |
5180 | } | |
5181 | ||
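/*
 * Illustrative caller sketch (editorial addition, not part of alloc.c):
 * converting an unwritten extent to written after I/O completes, using
 * the public entry point above.  Any needed split or merge is handled
 * internally.  The helper name and its context are assumptions.
 */
static int example_mark_written(struct inode *inode,
				struct ocfs2_extent_tree *et,
				handle_t *handle, u32 cpos, u32 len, u32 phys,
				struct ocfs2_alloc_context *meta_ac,
				struct ocfs2_cached_dealloc_ctxt *dealloc)
{
	int ret;

	ret = ocfs2_mark_extent_written(inode, et, handle, cpos, len,
					phys, meta_ac, dealloc);
	if (ret)
		mlog_errno(ret);

	return ret;
}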
e7d4cb6b | 5182 | static int ocfs2_split_tree(struct inode *inode, struct ocfs2_extent_tree *et, |
d0c7d708 MF |
5183 | handle_t *handle, struct ocfs2_path *path, |
5184 | int index, u32 new_range, | |
5185 | struct ocfs2_alloc_context *meta_ac) | |
5186 | { | |
5187 | int ret, depth, credits = handle->h_buffer_credits; | |
d0c7d708 MF |
5188 | struct buffer_head *last_eb_bh = NULL; |
5189 | struct ocfs2_extent_block *eb; | |
5190 | struct ocfs2_extent_list *rightmost_el, *el; | |
5191 | struct ocfs2_extent_rec split_rec; | |
5192 | struct ocfs2_extent_rec *rec; | |
5193 | struct ocfs2_insert_type insert; | |
5194 | ||
5195 | /* | |
5196 | * Setup the record to split before we grow the tree. | |
5197 | */ | |
5198 | el = path_leaf_el(path); | |
5199 | rec = &el->l_recs[index]; | |
5200 | ocfs2_make_right_split_rec(inode->i_sb, &split_rec, new_range, rec); | |
5201 | ||
5202 | depth = path->p_tree_depth; | |
5203 | if (depth > 0) { | |
5e96581a JB |
5204 | ret = ocfs2_read_extent_block(inode, |
5205 | ocfs2_et_get_last_eb_blk(et), | |
5206 | &last_eb_bh); | |
d0c7d708 MF |
5207 | if (ret < 0) { |
5208 | mlog_errno(ret); | |
5209 | goto out; | |
5210 | } | |
5211 | ||
5212 | eb = (struct ocfs2_extent_block *) last_eb_bh->b_data; | |
5213 | rightmost_el = &eb->h_list; | |
5214 | } else | |
5215 | rightmost_el = path_leaf_el(path); | |
5216 | ||
811f933d | 5217 | credits += path->p_tree_depth + |
ce1d9ea6 | 5218 | ocfs2_extend_meta_needed(et->et_root_el); |
d0c7d708 MF |
5219 | ret = ocfs2_extend_trans(handle, credits); |
5220 | if (ret) { | |
5221 | mlog_errno(ret); | |
5222 | goto out; | |
5223 | } | |
5224 | ||
5225 | if (le16_to_cpu(rightmost_el->l_next_free_rec) == | |
5226 | le16_to_cpu(rightmost_el->l_count)) { | |
e7d4cb6b | 5227 | ret = ocfs2_grow_tree(inode, handle, et, &depth, &last_eb_bh, |
d0c7d708 MF |
5228 | meta_ac); |
5229 | if (ret) { | |
5230 | mlog_errno(ret); | |
5231 | goto out; | |
5232 | } | |
d0c7d708 MF |
5233 | } |
5234 | ||
5235 | memset(&insert, 0, sizeof(struct ocfs2_insert_type)); | |
5236 | insert.ins_appending = APPEND_NONE; | |
5237 | insert.ins_contig = CONTIG_NONE; | |
5238 | insert.ins_split = SPLIT_RIGHT; | |
d0c7d708 MF |
5239 | insert.ins_tree_depth = depth; |
5240 | ||
e7d4cb6b | 5241 | ret = ocfs2_do_insert_extent(inode, handle, et, &split_rec, &insert); |
d0c7d708 MF |
5242 | if (ret) |
5243 | mlog_errno(ret); | |
5244 | ||
5245 | out: | |
5246 | brelse(last_eb_bh); | |
5247 | return ret; | |
5248 | } | |
5249 | ||
5250 | static int ocfs2_truncate_rec(struct inode *inode, handle_t *handle, | |
5251 | struct ocfs2_path *path, int index, | |
5252 | struct ocfs2_cached_dealloc_ctxt *dealloc, | |
e7d4cb6b TM |
5253 | u32 cpos, u32 len, |
5254 | struct ocfs2_extent_tree *et) | |
d0c7d708 MF |
5255 | { |
5256 | int ret; | |
5257 | u32 left_cpos, rec_range, trunc_range; | |
5258 | int wants_rotate = 0, is_rightmost_tree_rec = 0; | |
5259 | struct super_block *sb = inode->i_sb; | |
5260 | struct ocfs2_path *left_path = NULL; | |
5261 | struct ocfs2_extent_list *el = path_leaf_el(path); | |
5262 | struct ocfs2_extent_rec *rec; | |
5263 | struct ocfs2_extent_block *eb; | |
5264 | ||
5265 | if (ocfs2_is_empty_extent(&el->l_recs[0]) && index > 0) { | |
e7d4cb6b | 5266 | ret = ocfs2_rotate_tree_left(inode, handle, path, dealloc, et); |
d0c7d708 MF |
5267 | if (ret) { |
5268 | mlog_errno(ret); | |
5269 | goto out; | |
5270 | } | |
5271 | ||
5272 | index--; | |
5273 | } | |
5274 | ||
5275 | if (index == (le16_to_cpu(el->l_next_free_rec) - 1) && | |
5276 | path->p_tree_depth) { | |
5277 | /* | |
5278 | * Check whether this is the rightmost tree record. If | |
5279 | * we remove all of this record or part of its right | |
5280 | * edge then an update of the record lengths above it | |
5281 | * will be required. | |
5282 | */ | |
5283 | eb = (struct ocfs2_extent_block *)path_leaf_bh(path)->b_data; | |
5284 | if (eb->h_next_leaf_blk == 0) | |
5285 | is_rightmost_tree_rec = 1; | |
5286 | } | |
5287 | ||
5288 | rec = &el->l_recs[index]; | |
5289 | if (index == 0 && path->p_tree_depth && | |
5290 | le32_to_cpu(rec->e_cpos) == cpos) { | |
5291 | /* | |
5292 | * Changing the leftmost offset (via partial or whole | |
5293 | * record truncate) of an interior (or rightmost) path | |
5294 | * means we have to update the subtree that is formed | |
5295 | * by this leaf and the one to its left. | |
5296 | * | |
5297 | * There are two cases we can skip: | |
5298 | * 1) Path is the leftmost one in our inode tree. | |
5299 | * 2) The leaf is rightmost and will be empty after | |
5300 | * we remove the extent record - the rotate code | |
5301 | * knows how to update the newly formed edge. | |
5302 | */ | |
5303 | ||
5304 | ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb, path, | |
5305 | &left_cpos); | |
5306 | if (ret) { | |
5307 | mlog_errno(ret); | |
5308 | goto out; | |
5309 | } | |
5310 | ||
5311 | if (left_cpos && le16_to_cpu(el->l_next_free_rec) > 1) { | |
ffdd7a54 | 5312 | left_path = ocfs2_new_path_from_path(path); |
d0c7d708 MF |
5313 | if (!left_path) { |
5314 | ret = -ENOMEM; | |
5315 | mlog_errno(ret); | |
5316 | goto out; | |
5317 | } | |
5318 | ||
5319 | ret = ocfs2_find_path(inode, left_path, left_cpos); | |
5320 | if (ret) { | |
5321 | mlog_errno(ret); | |
5322 | goto out; | |
5323 | } | |
5324 | } | |
5325 | } | |
5326 | ||
5327 | ret = ocfs2_extend_rotate_transaction(handle, 0, | |
5328 | handle->h_buffer_credits, | |
5329 | path); | |
5330 | if (ret) { | |
5331 | mlog_errno(ret); | |
5332 | goto out; | |
5333 | } | |
5334 | ||
5335 | ret = ocfs2_journal_access_path(inode, handle, path); | |
5336 | if (ret) { | |
5337 | mlog_errno(ret); | |
5338 | goto out; | |
5339 | } | |
5340 | ||
5341 | ret = ocfs2_journal_access_path(inode, handle, left_path); | |
5342 | if (ret) { | |
5343 | mlog_errno(ret); | |
5344 | goto out; | |
5345 | } | |
5346 | ||
5347 | rec_range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec); | |
5348 | trunc_range = cpos + len; | |
5349 | ||
5350 | if (le32_to_cpu(rec->e_cpos) == cpos && rec_range == trunc_range) { | |
5351 | int next_free; | |
5352 | ||
5353 | memset(rec, 0, sizeof(*rec)); | |
5354 | ocfs2_cleanup_merge(el, index); | |
5355 | wants_rotate = 1; | |
5356 | ||
5357 | next_free = le16_to_cpu(el->l_next_free_rec); | |
5358 | if (is_rightmost_tree_rec && next_free > 1) { | |
5359 | /* | |
5360 | * We skip the edge update if this path will | |
5361 | * be deleted by the rotate code. | |
5362 | */ | |
5363 | rec = &el->l_recs[next_free - 1]; | |
5364 | ocfs2_adjust_rightmost_records(inode, handle, path, | |
5365 | rec); | |
5366 | } | |
5367 | } else if (le32_to_cpu(rec->e_cpos) == cpos) { | |
5368 | /* Remove leftmost portion of the record. */ | |
5369 | le32_add_cpu(&rec->e_cpos, len); | |
5370 | le64_add_cpu(&rec->e_blkno, ocfs2_clusters_to_blocks(sb, len)); | |
5371 | le16_add_cpu(&rec->e_leaf_clusters, -len); | |
5372 | } else if (rec_range == trunc_range) { | |
5373 | /* Remove rightmost portion of the record */ | |
5374 | le16_add_cpu(&rec->e_leaf_clusters, -len); | |
5375 | if (is_rightmost_tree_rec) | |
5376 | ocfs2_adjust_rightmost_records(inode, handle, path, rec); | |
5377 | } else { | |
5378 | /* Caller should have trapped this. */ | |
5379 | mlog(ML_ERROR, "Inode %llu: Invalid record truncate: (%u, %u) " | |
5380 | "(%u, %u)\n", (unsigned long long)OCFS2_I(inode)->ip_blkno, | |
5381 | le32_to_cpu(rec->e_cpos), | |
5382 | le16_to_cpu(rec->e_leaf_clusters), cpos, len); | |
5383 | BUG(); | |
5384 | } | |
5385 | ||
5386 | if (left_path) { | |
5387 | int subtree_index; | |
5388 | ||
5389 | subtree_index = ocfs2_find_subtree_root(inode, left_path, path); | |
5390 | ocfs2_complete_edge_insert(inode, handle, left_path, path, | |
5391 | subtree_index); | |
5392 | } | |
5393 | ||
5394 | ocfs2_journal_dirty(handle, path_leaf_bh(path)); | |
5395 | ||
e7d4cb6b | 5396 | ret = ocfs2_rotate_tree_left(inode, handle, path, dealloc, et); |
d0c7d708 MF |
5397 | if (ret) { |
5398 | mlog_errno(ret); | |
5399 | goto out; | |
5400 | } | |
5401 | ||
5402 | out: | |
5403 | ocfs2_free_path(left_path); | |
5404 | return ret; | |
5405 | } | |
5406 | ||
f99b9b7c JB |
5407 | int ocfs2_remove_extent(struct inode *inode, |
5408 | struct ocfs2_extent_tree *et, | |
063c4561 MF |
5409 | u32 cpos, u32 len, handle_t *handle, |
5410 | struct ocfs2_alloc_context *meta_ac, | |
f99b9b7c | 5411 | struct ocfs2_cached_dealloc_ctxt *dealloc) |
d0c7d708 MF |
5412 | { |
5413 | int ret, index; | |
5414 | u32 rec_range, trunc_range; | |
5415 | struct ocfs2_extent_rec *rec; | |
5416 | struct ocfs2_extent_list *el; | |
e7d4cb6b | 5417 | struct ocfs2_path *path = NULL; |
d0c7d708 MF |
5418 | |
5419 | ocfs2_extent_map_trunc(inode, 0); | |
5420 | ||
ffdd7a54 | 5421 | path = ocfs2_new_path_from_et(et); |
d0c7d708 MF |
5422 | if (!path) { |
5423 | ret = -ENOMEM; | |
5424 | mlog_errno(ret); | |
5425 | goto out; | |
5426 | } | |
5427 | ||
5428 | ret = ocfs2_find_path(inode, path, cpos); | |
5429 | if (ret) { | |
5430 | mlog_errno(ret); | |
5431 | goto out; | |
5432 | } | |
5433 | ||
5434 | el = path_leaf_el(path); | |
5435 | index = ocfs2_search_extent_list(el, cpos); | |
5436 | if (index == -1 || index >= le16_to_cpu(el->l_next_free_rec)) { | |
5437 | ocfs2_error(inode->i_sb, | |
5438 | "Inode %llu has an extent at cpos %u which can no " | |
5439 | "longer be found.\n", | |
5440 | (unsigned long long)OCFS2_I(inode)->ip_blkno, cpos); | |
5441 | ret = -EROFS; | |
5442 | goto out; | |
5443 | } | |
5444 | ||
5445 | /* | |
5446 | * We have 3 cases of extent removal: | |
5447 | * 1) Range covers the entire extent rec | |
5448 | * 2) Range begins or ends on one edge of the extent rec | |
5449 | * 3) Range is in the middle of the extent rec (no shared edges) | |
5450 | * | |
5451 | * For case 1 we remove the extent rec and left rotate to | |
5452 | * fill the hole. | |
5453 | * | |
5454 | * For case 2 we just shrink the existing extent rec, with a | |
5455 | * tree update if the shrinking edge is also the edge of an | |
5456 | * extent block. | |
5457 | * | |
5458 | * For case 3 we do a right split to turn the extent rec into | |
5459 | * something case 2 can handle. | |
5460 | */ | |
5461 | rec = &el->l_recs[index]; | |
5462 | rec_range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec); | |
5463 | trunc_range = cpos + len; | |
5464 | ||
5465 | BUG_ON(cpos < le32_to_cpu(rec->e_cpos) || trunc_range > rec_range); | |
5466 | ||
5467 | mlog(0, "Inode %llu, remove (cpos %u, len %u). Existing index %d " | |
5468 | "(cpos %u, len %u)\n", | |
5469 | (unsigned long long)OCFS2_I(inode)->ip_blkno, cpos, len, index, | |
5470 | le32_to_cpu(rec->e_cpos), ocfs2_rec_clusters(el, rec)); | |
5471 | ||
5472 | if (le32_to_cpu(rec->e_cpos) == cpos || rec_range == trunc_range) { | |
5473 | ret = ocfs2_truncate_rec(inode, handle, path, index, dealloc, | |
f99b9b7c | 5474 | cpos, len, et); |
d0c7d708 MF |
5475 | if (ret) { |
5476 | mlog_errno(ret); | |
5477 | goto out; | |
5478 | } | |
5479 | } else { | |
f99b9b7c | 5480 | ret = ocfs2_split_tree(inode, et, handle, path, index, |
d0c7d708 MF |
5481 | trunc_range, meta_ac); |
5482 | if (ret) { | |
5483 | mlog_errno(ret); | |
5484 | goto out; | |
5485 | } | |
5486 | ||
5487 | /* | |
5488 | * The split could have manipulated the tree enough to | |
5489 | * move the record location, so we have to look for it again. | |
5490 | */ | |
5491 | ocfs2_reinit_path(path, 1); | |
5492 | ||
5493 | ret = ocfs2_find_path(inode, path, cpos); | |
5494 | if (ret) { | |
5495 | mlog_errno(ret); | |
5496 | goto out; | |
5497 | } | |
5498 | ||
5499 | el = path_leaf_el(path); | |
5500 | index = ocfs2_search_extent_list(el, cpos); | |
5501 | if (index == -1 || index >= le16_to_cpu(el->l_next_free_rec)) { | |
5502 | ocfs2_error(inode->i_sb, | |
5503 | "Inode %llu: split at cpos %u lost record.", | |
5504 | (unsigned long long)OCFS2_I(inode)->ip_blkno, | |
5505 | cpos); | |
5506 | ret = -EROFS; | |
5507 | goto out; | |
5508 | } | |
5509 | ||
5510 | /* | |
5511 | * Double check our values here. If anything is fishy, | |
5512 | * it's easier to catch it at the top level. | |
5513 | */ | |
5514 | rec = &el->l_recs[index]; | |
5515 | rec_range = le32_to_cpu(rec->e_cpos) + | |
5516 | ocfs2_rec_clusters(el, rec); | |
5517 | if (rec_range != trunc_range) { | |
5518 | ocfs2_error(inode->i_sb, | |
5519 | "Inode %llu: error after split at cpos %u" | |
5520 | "trunc len %u, existing record is (%u,%u)", | |
5521 | (unsigned long long)OCFS2_I(inode)->ip_blkno, | |
5522 | cpos, len, le32_to_cpu(rec->e_cpos), | |
5523 | ocfs2_rec_clusters(el, rec)); | |
5524 | ret = -EROFS; | |
5525 | goto out; | |
5526 | } | |
5527 | ||
5528 | ret = ocfs2_truncate_rec(inode, handle, path, index, dealloc, | |
f99b9b7c | 5529 | cpos, len, et); |
d0c7d708 MF |
5530 | if (ret) { |
5531 | mlog_errno(ret); | |
5532 | goto out; | |
5533 | } | |
5534 | } | |
5535 | ||
5536 | out: | |
5537 | ocfs2_free_path(path); | |
5538 | return ret; | |
5539 | } | |
5540 | ||
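/*
 * Worked example (editorial addition) of the three removal cases
 * described in ocfs2_remove_extent() above, for a record at cpos 100
 * covering 50 clusters (rec_range == 150):
 *
 *   remove (100, 50): both edges match -> case 1, the record is
 *	emptied and the tree is left-rotated to fill the hole.
 *   remove (100, 10) or (140, 10): one shared edge -> case 2, the
 *	record is shrunk by ocfs2_truncate_rec().
 *   remove (120, 10): no shared edge -> case 3, ocfs2_split_tree()
 *	does a right split at trunc_range == 130 first, after which
 *	the re-found record shares its right edge and case 2 applies.
 */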
fecc0112 MF |
5541 | int ocfs2_remove_btree_range(struct inode *inode, |
5542 | struct ocfs2_extent_tree *et, | |
5543 | u32 cpos, u32 phys_cpos, u32 len, | |
5544 | struct ocfs2_cached_dealloc_ctxt *dealloc) | |
5545 | { | |
5546 | int ret; | |
5547 | u64 phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos); | |
5548 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); | |
5549 | struct inode *tl_inode = osb->osb_tl_inode; | |
5550 | handle_t *handle; | |
5551 | struct ocfs2_alloc_context *meta_ac = NULL; | |
5552 | ||
5553 | ret = ocfs2_lock_allocators(inode, et, 0, 1, NULL, &meta_ac); | |
5554 | if (ret) { | |
5555 | mlog_errno(ret); | |
5556 | return ret; | |
5557 | } | |
5558 | ||
5559 | mutex_lock(&tl_inode->i_mutex); | |
5560 | ||
5561 | if (ocfs2_truncate_log_needs_flush(osb)) { | |
5562 | ret = __ocfs2_flush_truncate_log(osb); | |
5563 | if (ret < 0) { | |
5564 | mlog_errno(ret); | |
5565 | goto out; | |
5566 | } | |
5567 | } | |
5568 | ||
a90714c1 | 5569 | handle = ocfs2_start_trans(osb, ocfs2_remove_extent_credits(osb->sb)); |
fecc0112 MF |
5570 | if (IS_ERR(handle)) { |
5571 | ret = PTR_ERR(handle); | |
5572 | mlog_errno(ret); | |
5573 | goto out; | |
5574 | } | |
5575 | ||
13723d00 JB |
5576 | ret = ocfs2_et_root_journal_access(handle, inode, et, |
5577 | OCFS2_JOURNAL_ACCESS_WRITE); | |
fecc0112 MF |
5578 | if (ret) { |
5579 | mlog_errno(ret); | |
5580 | goto out; | |
5581 | } | |
5582 | ||
fd4ef231 MF |
5583 | vfs_dq_free_space_nodirty(inode, |
5584 | ocfs2_clusters_to_bytes(inode->i_sb, len)); | |
5585 | ||
fecc0112 MF |
5586 | ret = ocfs2_remove_extent(inode, et, cpos, len, handle, meta_ac, |
5587 | dealloc); | |
5588 | if (ret) { | |
5589 | mlog_errno(ret); | |
5590 | goto out_commit; | |
5591 | } | |
5592 | ||
5593 | ocfs2_et_update_clusters(inode, et, -len); | |
5594 | ||
5595 | ret = ocfs2_journal_dirty(handle, et->et_root_bh); | |
5596 | if (ret) { | |
5597 | mlog_errno(ret); | |
5598 | goto out_commit; | |
5599 | } | |
5600 | ||
5601 | ret = ocfs2_truncate_log_append(osb, handle, phys_blkno, len); | |
5602 | if (ret) | |
5603 | mlog_errno(ret); | |
5604 | ||
5605 | out_commit: | |
5606 | ocfs2_commit_trans(osb, handle); | |
5607 | out: | |
5608 | mutex_unlock(&tl_inode->i_mutex); | |
5609 | ||
5610 | if (meta_ac) | |
5611 | ocfs2_free_alloc_context(meta_ac); | |
5612 | ||
5613 | return ret; | |
5614 | } | |
5615 | ||
063c4561 | 5616 | int ocfs2_truncate_log_needs_flush(struct ocfs2_super *osb) |
ccd979bd MF |
5617 | { |
5618 | struct buffer_head *tl_bh = osb->osb_tl_bh; | |
5619 | struct ocfs2_dinode *di; | |
5620 | struct ocfs2_truncate_log *tl; | |
5621 | ||
5622 | di = (struct ocfs2_dinode *) tl_bh->b_data; | |
5623 | tl = &di->id2.i_dealloc; | |
5624 | ||
5625 | mlog_bug_on_msg(le16_to_cpu(tl->tl_used) > le16_to_cpu(tl->tl_count), | |
5626 | "slot %d, invalid truncate log parameters: used = " | |
5627 | "%u, count = %u\n", osb->slot_num, | |
5628 | le16_to_cpu(tl->tl_used), le16_to_cpu(tl->tl_count)); | |
5629 | return le16_to_cpu(tl->tl_used) == le16_to_cpu(tl->tl_count); | |
5630 | } | |
5631 | ||
5632 | static int ocfs2_truncate_log_can_coalesce(struct ocfs2_truncate_log *tl, | |
5633 | unsigned int new_start) | |
5634 | { | |
5635 | unsigned int tail_index; | |
5636 | unsigned int current_tail; | |
5637 | ||
5638 | /* No records, nothing to coalesce */ | |
5639 | if (!le16_to_cpu(tl->tl_used)) | |
5640 | return 0; | |
5641 | ||
5642 | tail_index = le16_to_cpu(tl->tl_used) - 1; | |
5643 | current_tail = le32_to_cpu(tl->tl_recs[tail_index].t_start); | |
5644 | current_tail += le32_to_cpu(tl->tl_recs[tail_index].t_clusters); | |
5645 | ||
5646 | return current_tail == new_start; | |
5647 | } | |
5648 | ||
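/*
 * Worked example (editorial addition): if the tail record of the
 * truncate log has t_start == 200 and t_clusters == 16, then
 * current_tail == 216, so ocfs2_truncate_log_can_coalesce() returns
 * true only for a new range starting exactly at cluster 216; the
 * append path below then folds the new clusters into that record
 * instead of consuming a new slot.
 */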
063c4561 MF |
5649 | int ocfs2_truncate_log_append(struct ocfs2_super *osb, |
5650 | handle_t *handle, | |
5651 | u64 start_blk, | |
5652 | unsigned int num_clusters) | |
ccd979bd MF |
5653 | { |
5654 | int status, index; | |
5655 | unsigned int start_cluster, tl_count; | |
5656 | struct inode *tl_inode = osb->osb_tl_inode; | |
5657 | struct buffer_head *tl_bh = osb->osb_tl_bh; | |
5658 | struct ocfs2_dinode *di; | |
5659 | struct ocfs2_truncate_log *tl; | |
5660 | ||
b0697053 MF |
5661 | mlog_entry("start_blk = %llu, num_clusters = %u\n", |
5662 | (unsigned long long)start_blk, num_clusters); | |
ccd979bd | 5663 | |
1b1dcc1b | 5664 | BUG_ON(mutex_trylock(&tl_inode->i_mutex)); |
ccd979bd MF |
5665 | |
5666 | start_cluster = ocfs2_blocks_to_clusters(osb->sb, start_blk); | |
5667 | ||
5668 | di = (struct ocfs2_dinode *) tl_bh->b_data; | |
ccd979bd | 5669 | |
10995aa2 JB |
5670 | /* tl_bh is loaded from ocfs2_truncate_log_init(). It's validated |
5671 | * by the underlying call to ocfs2_read_inode_block(), so any | |
5672 | * corruption is a code bug */ | |
5673 | BUG_ON(!OCFS2_IS_VALID_DINODE(di)); | |
5674 | ||
5675 | tl = &di->id2.i_dealloc; | |
ccd979bd MF |
5676 | tl_count = le16_to_cpu(tl->tl_count); |
5677 | mlog_bug_on_msg(tl_count > ocfs2_truncate_recs_per_inode(osb->sb) || | |
5678 | tl_count == 0, | |
b0697053 MF |
5679 | "Truncate record count on #%llu invalid " |
5680 | "wanted %u, actual %u\n", | |
5681 | (unsigned long long)OCFS2_I(tl_inode)->ip_blkno, | |
ccd979bd MF |
5682 | ocfs2_truncate_recs_per_inode(osb->sb), |
5683 | le16_to_cpu(tl->tl_count)); | |
5684 | ||
5685 | /* Caller should have known to flush before calling us. */ | |
5686 | index = le16_to_cpu(tl->tl_used); | |
5687 | if (index >= tl_count) { | |
5688 | status = -ENOSPC; | |
5689 | mlog_errno(status); | |
5690 | goto bail; | |
5691 | } | |
5692 | ||
13723d00 JB |
5693 | status = ocfs2_journal_access_di(handle, tl_inode, tl_bh, |
5694 | OCFS2_JOURNAL_ACCESS_WRITE); | |
ccd979bd MF |
5695 | if (status < 0) { |
5696 | mlog_errno(status); | |
5697 | goto bail; | |
5698 | } | |
5699 | ||
5700 | mlog(0, "Log truncate of %u clusters starting at cluster %u to " | |
b0697053 MF |
5701 | "%llu (index = %d)\n", num_clusters, start_cluster, |
5702 | (unsigned long long)OCFS2_I(tl_inode)->ip_blkno, index); | |
ccd979bd MF |
5703 | |
5704 | if (ocfs2_truncate_log_can_coalesce(tl, start_cluster)) { | |
5705 | /* | |
5706 | * Move index back to the record we are coalescing with. | |
5707 | * ocfs2_truncate_log_can_coalesce() guarantees nonzero | |
5708 | */ | |
5709 | index--; | |
5710 | ||
5711 | num_clusters += le32_to_cpu(tl->tl_recs[index].t_clusters); | |
5712 | mlog(0, "Coalesce with index %u (start = %u, clusters = %u)\n", | |
5713 | index, le32_to_cpu(tl->tl_recs[index].t_start), | |
5714 | num_clusters); | |
5715 | } else { | |
5716 | tl->tl_recs[index].t_start = cpu_to_le32(start_cluster); | |
5717 | tl->tl_used = cpu_to_le16(index + 1); | |
5718 | } | |
5719 | tl->tl_recs[index].t_clusters = cpu_to_le32(num_clusters); | |
5720 | ||
5721 | status = ocfs2_journal_dirty(handle, tl_bh); | |
5722 | if (status < 0) { | |
5723 | mlog_errno(status); | |
5724 | goto bail; | |
5725 | } | |
5726 | ||
5727 | bail: | |
5728 | mlog_exit(status); | |
5729 | return status; | |
5730 | } | |
5731 | ||
5732 | static int ocfs2_replay_truncate_records(struct ocfs2_super *osb, | |
1fabe148 | 5733 | handle_t *handle, |
ccd979bd MF |
5734 | struct inode *data_alloc_inode, |
5735 | struct buffer_head *data_alloc_bh) | |
5736 | { | |
5737 | int status = 0; | |
5738 | int i; | |
5739 | unsigned int num_clusters; | |
5740 | u64 start_blk; | |
5741 | struct ocfs2_truncate_rec rec; | |
5742 | struct ocfs2_dinode *di; | |
5743 | struct ocfs2_truncate_log *tl; | |
5744 | struct inode *tl_inode = osb->osb_tl_inode; | |
5745 | struct buffer_head *tl_bh = osb->osb_tl_bh; | |
5746 | ||
5747 | mlog_entry_void(); | |
5748 | ||
5749 | di = (struct ocfs2_dinode *) tl_bh->b_data; | |
5750 | tl = &di->id2.i_dealloc; | |
5751 | i = le16_to_cpu(tl->tl_used) - 1; | |
5752 | while (i >= 0) { | |
5753 | /* Caller has given us at least enough credits to | |
5754 | * update the truncate log dinode */ | |
13723d00 JB |
5755 | status = ocfs2_journal_access_di(handle, tl_inode, tl_bh, |
5756 | OCFS2_JOURNAL_ACCESS_WRITE); | |
ccd979bd MF |
5757 | if (status < 0) { |
5758 | mlog_errno(status); | |
5759 | goto bail; | |
5760 | } | |
5761 | ||
5762 | tl->tl_used = cpu_to_le16(i); | |
5763 | ||
5764 | status = ocfs2_journal_dirty(handle, tl_bh); | |
5765 | if (status < 0) { | |
5766 | mlog_errno(status); | |
5767 | goto bail; | |
5768 | } | |
5769 | ||
5770 | /* TODO: Perhaps we can calculate the bulk of the | |
5771 | * credits up front rather than extending like | |
5772 | * this. */ | |
5773 | status = ocfs2_extend_trans(handle, | |
5774 | OCFS2_TRUNCATE_LOG_FLUSH_ONE_REC); | |
5775 | if (status < 0) { | |
5776 | mlog_errno(status); | |
5777 | goto bail; | |
5778 | } | |
5779 | ||
5780 | rec = tl->tl_recs[i]; | |
5781 | start_blk = ocfs2_clusters_to_blocks(data_alloc_inode->i_sb, | |
5782 | le32_to_cpu(rec.t_start)); | |
5783 | num_clusters = le32_to_cpu(rec.t_clusters); | |
5784 | ||
5785 | /* if start_blk is not set, we ignore the record as | |
5786 | * invalid. */ | |
5787 | if (start_blk) { | |
5788 | mlog(0, "free record %d, start = %u, clusters = %u\n", | |
5789 | i, le32_to_cpu(rec.t_start), num_clusters); | |
5790 | ||
5791 | status = ocfs2_free_clusters(handle, data_alloc_inode, | |
5792 | data_alloc_bh, start_blk, | |
5793 | num_clusters); | |
5794 | if (status < 0) { | |
5795 | mlog_errno(status); | |
5796 | goto bail; | |
5797 | } | |
5798 | } | |
5799 | i--; | |
5800 | } | |
5801 | ||
5802 | bail: | |
5803 | mlog_exit(status); | |
5804 | return status; | |
5805 | } | |
5806 | ||
1b1dcc1b | 5807 | /* Expects you to already be holding tl_inode->i_mutex */ |
063c4561 | 5808 | int __ocfs2_flush_truncate_log(struct ocfs2_super *osb) |
ccd979bd MF |
5809 | { |
5810 | int status; | |
5811 | unsigned int num_to_flush; | |
1fabe148 | 5812 | handle_t *handle; |
ccd979bd MF |
5813 | struct inode *tl_inode = osb->osb_tl_inode; |
5814 | struct inode *data_alloc_inode = NULL; | |
5815 | struct buffer_head *tl_bh = osb->osb_tl_bh; | |
5816 | struct buffer_head *data_alloc_bh = NULL; | |
5817 | struct ocfs2_dinode *di; | |
5818 | struct ocfs2_truncate_log *tl; | |
5819 | ||
5820 | mlog_entry_void(); | |
5821 | ||
1b1dcc1b | 5822 | BUG_ON(mutex_trylock(&tl_inode->i_mutex)); |
ccd979bd MF |
5823 | |
5824 | di = (struct ocfs2_dinode *) tl_bh->b_data; | |
ccd979bd | 5825 | |
10995aa2 JB |
5826 | /* tl_bh is loaded from ocfs2_truncate_log_init(). It's validated |
5827 | * by the underlying call to ocfs2_read_inode_block(), so any | |
5828 | * corruption is a code bug */ | |
5829 | BUG_ON(!OCFS2_IS_VALID_DINODE(di)); | |
5830 | ||
5831 | tl = &di->id2.i_dealloc; | |
ccd979bd | 5832 | num_to_flush = le16_to_cpu(tl->tl_used); |
b0697053 MF |
5833 | mlog(0, "Flush %u records from truncate log #%llu\n", |
5834 | num_to_flush, (unsigned long long)OCFS2_I(tl_inode)->ip_blkno); | |
ccd979bd MF |
5835 | if (!num_to_flush) { |
5836 | status = 0; | |
e08dc8b9 | 5837 | goto out; |
ccd979bd MF |
5838 | } |
5839 | ||
5840 | data_alloc_inode = ocfs2_get_system_file_inode(osb, | |
5841 | GLOBAL_BITMAP_SYSTEM_INODE, | |
5842 | OCFS2_INVALID_SLOT); | |
5843 | if (!data_alloc_inode) { | |
5844 | status = -EINVAL; | |
5845 | mlog(ML_ERROR, "Could not get bitmap inode!\n"); | |
e08dc8b9 | 5846 | goto out; |
ccd979bd MF |
5847 | } |
5848 | ||
e08dc8b9 MF |
5849 | mutex_lock(&data_alloc_inode->i_mutex); |
5850 | ||
e63aecb6 | 5851 | status = ocfs2_inode_lock(data_alloc_inode, &data_alloc_bh, 1); |
ccd979bd MF |
5852 | if (status < 0) { |
5853 | mlog_errno(status); | |
e08dc8b9 | 5854 | goto out_mutex; |
ccd979bd MF |
5855 | } |
5856 | ||
65eff9cc | 5857 | handle = ocfs2_start_trans(osb, OCFS2_TRUNCATE_LOG_UPDATE); |
ccd979bd MF |
5858 | if (IS_ERR(handle)) { |
5859 | status = PTR_ERR(handle); | |
ccd979bd | 5860 | mlog_errno(status); |
e08dc8b9 | 5861 | goto out_unlock; |
ccd979bd MF |
5862 | } |
5863 | ||
5864 | status = ocfs2_replay_truncate_records(osb, handle, data_alloc_inode, | |
5865 | data_alloc_bh); | |
e08dc8b9 | 5866 | if (status < 0) |
ccd979bd | 5867 | mlog_errno(status); |
ccd979bd | 5868 | |
02dc1af4 | 5869 | ocfs2_commit_trans(osb, handle); |
ccd979bd | 5870 | |
e08dc8b9 MF |
5871 | out_unlock: |
5872 | brelse(data_alloc_bh); | |
e63aecb6 | 5873 | ocfs2_inode_unlock(data_alloc_inode, 1); |
ccd979bd | 5874 | |
e08dc8b9 MF |
5875 | out_mutex: |
5876 | mutex_unlock(&data_alloc_inode->i_mutex); | |
5877 | iput(data_alloc_inode); | |
ccd979bd | 5878 | |
e08dc8b9 | 5879 | out: |
ccd979bd MF |
5880 | mlog_exit(status); |
5881 | return status; | |
5882 | } | |
5883 | ||
5884 | int ocfs2_flush_truncate_log(struct ocfs2_super *osb) | |
5885 | { | |
5886 | int status; | |
5887 | struct inode *tl_inode = osb->osb_tl_inode; | |
5888 | ||
1b1dcc1b | 5889 | mutex_lock(&tl_inode->i_mutex); |
ccd979bd | 5890 | status = __ocfs2_flush_truncate_log(osb); |
1b1dcc1b | 5891 | mutex_unlock(&tl_inode->i_mutex); |
ccd979bd MF |
5892 | |
5893 | return status; | |
5894 | } | |
5895 | ||
c4028958 | 5896 | static void ocfs2_truncate_log_worker(struct work_struct *work) |
ccd979bd MF |
5897 | { |
5898 | int status; | |
c4028958 DH |
5899 | struct ocfs2_super *osb = |
5900 | container_of(work, struct ocfs2_super, | |
5901 | osb_truncate_log_wq.work); | |
ccd979bd MF |
5902 | |
5903 | mlog_entry_void(); | |
5904 | ||
5905 | status = ocfs2_flush_truncate_log(osb); | |
5906 | if (status < 0) | |
5907 | mlog_errno(status); | |
4d0ddb2c TM |
5908 | else |
5909 | ocfs2_init_inode_steal_slot(osb); | |
ccd979bd MF |
5910 | |
5911 | mlog_exit(status); | |
5912 | } | |
5913 | ||
5914 | #define OCFS2_TRUNCATE_LOG_FLUSH_INTERVAL (2 * HZ) | |
5915 | void ocfs2_schedule_truncate_log_flush(struct ocfs2_super *osb, | |
5916 | int cancel) | |
5917 | { | |
5918 | if (osb->osb_tl_inode) { | |
5919 | /* We want to push off log flushes while truncates are | |
5920 | * still running. */ | |
5921 | if (cancel) | |
5922 | cancel_delayed_work(&osb->osb_truncate_log_wq); | |
5923 | ||
5924 | queue_delayed_work(ocfs2_wq, &osb->osb_truncate_log_wq, | |
5925 | OCFS2_TRUNCATE_LOG_FLUSH_INTERVAL); | |
5926 | } | |
5927 | } | |
5928 | ||
5929 | static int ocfs2_get_truncate_log_info(struct ocfs2_super *osb, | |
5930 | int slot_num, | |
5931 | struct inode **tl_inode, | |
5932 | struct buffer_head **tl_bh) | |
5933 | { | |
5934 | int status; | |
5935 | struct inode *inode = NULL; | |
5936 | struct buffer_head *bh = NULL; | |
5937 | ||
5938 | inode = ocfs2_get_system_file_inode(osb, | |
5939 | TRUNCATE_LOG_SYSTEM_INODE, | |
5940 | slot_num); | |
5941 | if (!inode) { | |
5942 | status = -EINVAL; | |
5943 | mlog(ML_ERROR, "Could not get load truncate log inode!\n"); | |
5944 | goto bail; | |
5945 | } | |
5946 | ||
b657c95c | 5947 | status = ocfs2_read_inode_block(inode, &bh); |
ccd979bd MF |
5948 | if (status < 0) { |
5949 | iput(inode); | |
5950 | mlog_errno(status); | |
5951 | goto bail; | |
5952 | } | |
5953 | ||
5954 | *tl_inode = inode; | |
5955 | *tl_bh = bh; | |
5956 | bail: | |
5957 | mlog_exit(status); | |
5958 | return status; | |
5959 | } | |
5960 | ||
5961 | /* Called during the 1st stage of node recovery. We stamp a clean | |
5962 | * truncate log and pass back a copy for processing later. If the | |
5963 | * truncate log does not require processing, *tl_copy is set to | |
5964 | * NULL. */ | |
5965 | int ocfs2_begin_truncate_log_recovery(struct ocfs2_super *osb, | |
5966 | int slot_num, | |
5967 | struct ocfs2_dinode **tl_copy) | |
5968 | { | |
5969 | int status; | |
5970 | struct inode *tl_inode = NULL; | |
5971 | struct buffer_head *tl_bh = NULL; | |
5972 | struct ocfs2_dinode *di; | |
5973 | struct ocfs2_truncate_log *tl; | |
5974 | ||
5975 | *tl_copy = NULL; | |
5976 | ||
5977 | mlog(0, "recover truncate log from slot %d\n", slot_num); | |
5978 | ||
5979 | status = ocfs2_get_truncate_log_info(osb, slot_num, &tl_inode, &tl_bh); | |
5980 | if (status < 0) { | |
5981 | mlog_errno(status); | |
5982 | goto bail; | |
5983 | } | |
5984 | ||
5985 | di = (struct ocfs2_dinode *) tl_bh->b_data; | |
ccd979bd | 5986 | |
10995aa2 JB |
5987 | /* tl_bh is loaded from ocfs2_get_truncate_log_info(). It's |
5988 | * validated by the underlying call to ocfs2_read_inode_block(), | |
5989 | * so any corruption is a code bug */ | |
5990 | BUG_ON(!OCFS2_IS_VALID_DINODE(di)); | |
5991 | ||
5992 | tl = &di->id2.i_dealloc; | |
ccd979bd MF |
5993 | if (le16_to_cpu(tl->tl_used)) { |
5994 | mlog(0, "We'll have %u logs to recover\n", | |
5995 | le16_to_cpu(tl->tl_used)); | |
5996 | ||
5997 | *tl_copy = kmalloc(tl_bh->b_size, GFP_KERNEL); | |
5998 | if (!(*tl_copy)) { | |
5999 | status = -ENOMEM; | |
6000 | mlog_errno(status); | |
6001 | goto bail; | |
6002 | } | |
6003 | ||
6004 | /* Assuming the write-out below goes well, this copy | |
6005 | * will be passed back to recovery for processing. */ | |
6006 | memcpy(*tl_copy, tl_bh->b_data, tl_bh->b_size); | |
6007 | ||
6008 | /* All we need to do to clear the truncate log is set | |
6009 | * tl_used. */ | |
6010 | tl->tl_used = 0; | |
6011 | ||
13723d00 | 6012 | ocfs2_compute_meta_ecc(osb->sb, tl_bh->b_data, &di->i_check); |
ccd979bd MF |
6013 | status = ocfs2_write_block(osb, tl_bh, tl_inode); |
6014 | if (status < 0) { | |
6015 | mlog_errno(status); | |
6016 | goto bail; | |
6017 | } | |
6018 | } | |
6019 | ||
6020 | bail: | |
6021 | if (tl_inode) | |
6022 | iput(tl_inode); | |
a81cb88b | 6023 | brelse(tl_bh); |
ccd979bd MF |
6024 | |
6025 | if (status < 0 && (*tl_copy)) { | |
6026 | kfree(*tl_copy); | |
6027 | *tl_copy = NULL; | |
6028 | } | |
6029 | ||
6030 | mlog_exit(status); | |
6031 | return status; | |
6032 | } | |
6033 | ||
6034 | int ocfs2_complete_truncate_log_recovery(struct ocfs2_super *osb, | |
6035 | struct ocfs2_dinode *tl_copy) | |
6036 | { | |
6037 | int status = 0; | |
6038 | int i; | |
6039 | unsigned int clusters, num_recs, start_cluster; | |
6040 | u64 start_blk; | |
1fabe148 | 6041 | handle_t *handle; |
ccd979bd MF |
6042 | struct inode *tl_inode = osb->osb_tl_inode; |
6043 | struct ocfs2_truncate_log *tl; | |
6044 | ||
6045 | mlog_entry_void(); | |
6046 | ||
6047 | if (OCFS2_I(tl_inode)->ip_blkno == le64_to_cpu(tl_copy->i_blkno)) { | |
6048 | mlog(ML_ERROR, "Asked to recover my own truncate log!\n"); | |
6049 | return -EINVAL; | |
6050 | } | |
6051 | ||
6052 | tl = &tl_copy->id2.i_dealloc; | |
6053 | num_recs = le16_to_cpu(tl->tl_used); | |
b0697053 | 6054 | mlog(0, "cleanup %u records from %llu\n", num_recs, |
1ca1a111 | 6055 | (unsigned long long)le64_to_cpu(tl_copy->i_blkno)); |
ccd979bd | 6056 | |
1b1dcc1b | 6057 | mutex_lock(&tl_inode->i_mutex); |
ccd979bd MF |
6058 | for(i = 0; i < num_recs; i++) { |
6059 | if (ocfs2_truncate_log_needs_flush(osb)) { | |
6060 | status = __ocfs2_flush_truncate_log(osb); | |
6061 | if (status < 0) { | |
6062 | mlog_errno(status); | |
6063 | goto bail_up; | |
6064 | } | |
6065 | } | |
6066 | ||
65eff9cc | 6067 | handle = ocfs2_start_trans(osb, OCFS2_TRUNCATE_LOG_UPDATE); |
ccd979bd MF |
6068 | if (IS_ERR(handle)) { |
6069 | status = PTR_ERR(handle); | |
6070 | mlog_errno(status); | |
6071 | goto bail_up; | |
6072 | } | |
6073 | ||
6074 | clusters = le32_to_cpu(tl->tl_recs[i].t_clusters); | |
6075 | start_cluster = le32_to_cpu(tl->tl_recs[i].t_start); | |
6076 | start_blk = ocfs2_clusters_to_blocks(osb->sb, start_cluster); | |
6077 | ||
6078 | status = ocfs2_truncate_log_append(osb, handle, | |
6079 | start_blk, clusters); | |
02dc1af4 | 6080 | ocfs2_commit_trans(osb, handle); |
ccd979bd MF |
6081 | if (status < 0) { |
6082 | mlog_errno(status); | |
6083 | goto bail_up; | |
6084 | } | |
6085 | } | |
6086 | ||
6087 | bail_up: | |
1b1dcc1b | 6088 | mutex_unlock(&tl_inode->i_mutex); |
ccd979bd MF |
6089 | |
6090 | mlog_exit(status); | |
6091 | return status; | |
6092 | } | |
6093 | ||
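/*
 * Illustrative recovery sketch (editorial addition, not part of alloc.c):
 * the begin/complete pair above used for one recovered slot.  The helper
 * name is assumed, as is the caller owning (and kfree()ing) the dinode
 * copy handed back by ocfs2_begin_truncate_log_recovery().
 */
static int example_recover_slot(struct ocfs2_super *osb, int slot_num)
{
	struct ocfs2_dinode *tl_copy = NULL;
	int status;

	status = ocfs2_begin_truncate_log_recovery(osb, slot_num, &tl_copy);
	if (status < 0) {
		mlog_errno(status);
		return status;
	}

	if (tl_copy) {
		status = ocfs2_complete_truncate_log_recovery(osb, tl_copy);
		if (status < 0)
			mlog_errno(status);
		kfree(tl_copy);
	}

	return status;
}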
6094 | void ocfs2_truncate_log_shutdown(struct ocfs2_super *osb) | |
6095 | { | |
6096 | int status; | |
6097 | struct inode *tl_inode = osb->osb_tl_inode; | |
6098 | ||
6099 | mlog_entry_void(); | |
6100 | ||
6101 | if (tl_inode) { | |
6102 | cancel_delayed_work(&osb->osb_truncate_log_wq); | |
6103 | flush_workqueue(ocfs2_wq); | |
6104 | ||
6105 | status = ocfs2_flush_truncate_log(osb); | |
6106 | if (status < 0) | |
6107 | mlog_errno(status); | |
6108 | ||
6109 | brelse(osb->osb_tl_bh); | |
6110 | iput(osb->osb_tl_inode); | |
6111 | } | |
6112 | ||
6113 | mlog_exit_void(); | |
6114 | } | |
6115 | ||
6116 | int ocfs2_truncate_log_init(struct ocfs2_super *osb) | |
6117 | { | |
6118 | int status; | |
6119 | struct inode *tl_inode = NULL; | |
6120 | struct buffer_head *tl_bh = NULL; | |
6121 | ||
6122 | mlog_entry_void(); | |
6123 | ||
6124 | status = ocfs2_get_truncate_log_info(osb, | |
6125 | osb->slot_num, | |
6126 | &tl_inode, | |
6127 | &tl_bh); | |
6128 | if (status < 0) | |
6129 | mlog_errno(status); | |
6130 | ||
6131 | /* ocfs2_truncate_log_shutdown keys on the existence of | |
6132 | * osb->osb_tl_inode so we don't set any of the osb variables | |
6133 | * until we're sure all is well. */ | |
c4028958 DH |
6134 | INIT_DELAYED_WORK(&osb->osb_truncate_log_wq, |
6135 | ocfs2_truncate_log_worker); | |
ccd979bd MF |
6136 | osb->osb_tl_bh = tl_bh; |
6137 | osb->osb_tl_inode = tl_inode; | |
6138 | ||
6139 | mlog_exit(status); | |
6140 | return status; | |
6141 | } | |
6142 | ||
2b604351 MF |
6143 | /* |
6144 | * Delayed de-allocation of suballocator blocks. | |
6145 | * | |
6146 | * Some sets of block de-allocations might involve multiple suballocator inodes. | |
6147 | * | |
6148 | * The locking for this can get extremely complicated, especially when | |
6149 | * the suballocator inodes to delete from aren't known until deep | |
6150 | * within an unrelated codepath. | |
6151 | * | |
6152 | * ocfs2_extent_block structures are a good example of this - an inode | |
6153 | * btree could have been grown by any number of nodes each allocating | |
6154 | * out of their own suballoc inode. | |
6155 | * | |
6156 | * These structures allow the delay of block de-allocation until a | |
6157 | * later time, when locking of multiple cluster inodes won't cause | |
6158 | * deadlock. | |
6159 | */ | |
6160 | ||
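/*
 * Illustrative usage sketch (editorial addition, not part of alloc.c):
 * the pattern implied by the comment above -- queue frees into a
 * context while other locks are held, then run them afterwards.
 * example_remove_and_free() is a hypothetical helper, and
 * ocfs2_init_dealloc_ctxt() is assumed to be the context initializer
 * declared in alloc.h.
 */
static int example_remove_and_free(struct inode *inode,
				   struct ocfs2_extent_tree *et,
				   u32 cpos, u32 phys_cpos, u32 len)
{
	struct ocfs2_cached_dealloc_ctxt dealloc;
	int ret, ret2;

	ocfs2_init_dealloc_ctxt(&dealloc);

	ret = ocfs2_remove_btree_range(inode, et, cpos, phys_cpos, len,
				       &dealloc);
	if (ret)
		mlog_errno(ret);

	/* Only now, with no allocator inodes locked, free the blocks. */
	ret2 = ocfs2_run_deallocs(OCFS2_SB(inode->i_sb), &dealloc);
	if (ret2)
		mlog_errno(ret2);

	return ret ? ret : ret2;
}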
6161 | /* | |
2891d290 TM |
6162 | * Describe a single bit freed from a suballocator. For the block |
6163 | * suballocators, it represents one block. For the global cluster | |
6164 | * allocator, it represents some clusters, and free_bit indicates | |
6165 | * the number of clusters. | |
2b604351 MF |
6166 | */ |
6167 | struct ocfs2_cached_block_free { | |
6168 | struct ocfs2_cached_block_free *free_next; | |
6169 | u64 free_blk; | |
6170 | unsigned int free_bit; | |
6171 | }; | |
6172 | ||
6173 | struct ocfs2_per_slot_free_list { | |
6174 | struct ocfs2_per_slot_free_list *f_next_suballocator; | |
6175 | int f_inode_type; | |
6176 | int f_slot; | |
6177 | struct ocfs2_cached_block_free *f_first; | |
6178 | }; | |
6179 | ||
2891d290 TM |
6180 | static int ocfs2_free_cached_blocks(struct ocfs2_super *osb, |
6181 | int sysfile_type, | |
6182 | int slot, | |
6183 | struct ocfs2_cached_block_free *head) | |
2b604351 MF |
6184 | { |
6185 | int ret; | |
6186 | u64 bg_blkno; | |
6187 | handle_t *handle; | |
6188 | struct inode *inode; | |
6189 | struct buffer_head *di_bh = NULL; | |
6190 | struct ocfs2_cached_block_free *tmp; | |
6191 | ||
6192 | inode = ocfs2_get_system_file_inode(osb, sysfile_type, slot); | |
6193 | if (!inode) { | |
6194 | ret = -EINVAL; | |
6195 | mlog_errno(ret); | |
6196 | goto out; | |
6197 | } | |
6198 | ||
6199 | mutex_lock(&inode->i_mutex); | |
6200 | ||
e63aecb6 | 6201 | ret = ocfs2_inode_lock(inode, &di_bh, 1); |
2b604351 MF |
6202 | if (ret) { |
6203 | mlog_errno(ret); | |
6204 | goto out_mutex; | |
6205 | } | |
6206 | ||
6207 | handle = ocfs2_start_trans(osb, OCFS2_SUBALLOC_FREE); | |
6208 | if (IS_ERR(handle)) { | |
6209 | ret = PTR_ERR(handle); | |
6210 | mlog_errno(ret); | |
6211 | goto out_unlock; | |
6212 | } | |
6213 | ||
6214 | while (head) { | |
6215 | bg_blkno = ocfs2_which_suballoc_group(head->free_blk, | |
6216 | head->free_bit); | |
6217 | mlog(0, "Free bit: (bit %u, blkno %llu)\n", | |
6218 | head->free_bit, (unsigned long long)head->free_blk); | |
6219 | ||
6220 | ret = ocfs2_free_suballoc_bits(handle, inode, di_bh, | |
6221 | head->free_bit, bg_blkno, 1); | |
6222 | if (ret) { | |
6223 | mlog_errno(ret); | |
6224 | goto out_journal; | |
6225 | } | |
6226 | ||
6227 | ret = ocfs2_extend_trans(handle, OCFS2_SUBALLOC_FREE); | |
6228 | if (ret) { | |
6229 | mlog_errno(ret); | |
6230 | goto out_journal; | |
6231 | } | |
6232 | ||
6233 | tmp = head; | |
6234 | head = head->free_next; | |
6235 | kfree(tmp); | |
6236 | } | |
6237 | ||
6238 | out_journal: | |
6239 | ocfs2_commit_trans(osb, handle); | |
6240 | ||
6241 | out_unlock: | |
e63aecb6 | 6242 | ocfs2_inode_unlock(inode, 1); |
2b604351 MF |
6243 | brelse(di_bh); |
6244 | out_mutex: | |
6245 | mutex_unlock(&inode->i_mutex); | |
6246 | iput(inode); | |
6247 | out: | |
6248 | while(head) { | |
6249 | /* Premature exit may have left some dangling items. */ | |
6250 | tmp = head; | |
6251 | head = head->free_next; | |
6252 | kfree(tmp); | |
6253 | } | |
6254 | ||
6255 | return ret; | |
6256 | } | |
6257 | ||
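/*
 * The per-item ocfs2_extend_trans() call above is the usual way to keep
 * a long-running handle inside its credit budget.  A generic sketch of
 * the same pattern (journal_one_update() is a placeholder, not a
 * function from this file):
 *
 *	handle = ocfs2_start_trans(osb, CREDITS_PER_ITEM);
 *	while (item) {
 *		journal_one_update(handle, item);
 *		if (ocfs2_extend_trans(handle, CREDITS_PER_ITEM))
 *			break;
 *		item = item->next;
 *	}
 *	ocfs2_commit_trans(osb, handle);
 */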
2891d290 TM |
6258 | int ocfs2_cache_cluster_dealloc(struct ocfs2_cached_dealloc_ctxt *ctxt, |
6259 | u64 blkno, unsigned int bit) | |
6260 | { | |
6261 | int ret = 0; | |
6262 | struct ocfs2_cached_block_free *item; | |
6263 | ||
6264 | item = kmalloc(sizeof(*item), GFP_NOFS); | |
6265 | if (item == NULL) { | |
6266 | ret = -ENOMEM; | |
6267 | mlog_errno(ret); | |
6268 | return ret; | |
6269 | } | |
6270 | ||
6271 | mlog(0, "Insert clusters: (bit %u, blk %llu)\n", | |
6272 | bit, (unsigned long long)blkno); | |
6273 | ||
6274 | item->free_blk = blkno; | |
6275 | item->free_bit = bit; | |
6276 | item->free_next = ctxt->c_global_allocator; | |
6277 | ||
6278 | ctxt->c_global_allocator = item; | |
6279 | return ret; | |
6280 | } | |
6281 | ||
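/*
 * The same cached item type serves both flavours of free described
 * above.  Illustrative calls (arguments are examples, not taken from a
 * specific caller):
 *
 *	cache one extent block freed back to a slot's extent allocator:
 *		ocfs2_cache_block_dealloc(ctxt, EXTENT_ALLOC_SYSTEM_INODE,
 *					  slot, blkno, bit);
 *
 *	cache a run of clusters for the global bitmap, where the "bit"
 *	field carries the cluster count:
 *		ocfs2_cache_cluster_dealloc(ctxt, blkno, num_clusters);
 */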
6282 | static int ocfs2_free_cached_clusters(struct ocfs2_super *osb, | |
6283 | struct ocfs2_cached_block_free *head) | |
6284 | { | |
6285 | struct ocfs2_cached_block_free *tmp; | |
6286 | struct inode *tl_inode = osb->osb_tl_inode; | |
6287 | handle_t *handle; | |
6288 | int ret = 0; | |
6289 | ||
6290 | mutex_lock(&tl_inode->i_mutex); | |
6291 | ||
6292 | while (head) { | |
6293 | if (ocfs2_truncate_log_needs_flush(osb)) { | |
6294 | ret = __ocfs2_flush_truncate_log(osb); | |
6295 | if (ret < 0) { | |
6296 | mlog_errno(ret); | |
6297 | break; | |
6298 | } | |
6299 | } | |
6300 | ||
6301 | handle = ocfs2_start_trans(osb, OCFS2_TRUNCATE_LOG_UPDATE); | |
6302 | if (IS_ERR(handle)) { | |
6303 | ret = PTR_ERR(handle); | |
6304 | mlog_errno(ret); | |
6305 | break; | |
6306 | } | |
6307 | ||
6308 | ret = ocfs2_truncate_log_append(osb, handle, head->free_blk, | |
6309 | head->free_bit); | |
6310 | ||
6311 | ocfs2_commit_trans(osb, handle); | |
6312 | tmp = head; | |
6313 | head = head->free_next; | |
6314 | kfree(tmp); | |
6315 | ||
6316 | if (ret < 0) { | |
6317 | mlog_errno(ret); | |
6318 | break; | |
6319 | } | |
6320 | } | |
6321 | ||
6322 | mutex_unlock(&tl_inode->i_mutex); | |
6323 | ||
6324 | while (head) { | |
6325 | /* Premature exit may have left some dangling items. */ | |
6326 | tmp = head; | |
6327 | head = head->free_next; | |
6328 | kfree(tmp); | |
6329 | } | |
6330 | ||
6331 | return ret; | |
6332 | } | |
6333 | ||
2b604351 MF |
6334 | int ocfs2_run_deallocs(struct ocfs2_super *osb, |
6335 | struct ocfs2_cached_dealloc_ctxt *ctxt) | |
6336 | { | |
6337 | int ret = 0, ret2; | |
6338 | struct ocfs2_per_slot_free_list *fl; | |
6339 | ||
6340 | if (!ctxt) | |
6341 | return 0; | |
6342 | ||
6343 | while (ctxt->c_first_suballocator) { | |
6344 | fl = ctxt->c_first_suballocator; | |
6345 | ||
6346 | if (fl->f_first) { | |
6347 | mlog(0, "Free items: (type %u, slot %d)\n", | |
6348 | fl->f_inode_type, fl->f_slot); | |
2891d290 TM |
6349 | ret2 = ocfs2_free_cached_blocks(osb, |
6350 | fl->f_inode_type, | |
6351 | fl->f_slot, | |
6352 | fl->f_first); | |
2b604351 MF |
6353 | if (ret2) |
6354 | mlog_errno(ret2); | |
6355 | if (!ret) | |
6356 | ret = ret2; | |
6357 | } | |
6358 | ||
6359 | ctxt->c_first_suballocator = fl->f_next_suballocator; | |
6360 | kfree(fl); | |
6361 | } | |
6362 | ||
2891d290 TM |
6363 | if (ctxt->c_global_allocator) { |
6364 | ret2 = ocfs2_free_cached_clusters(osb, | |
6365 | ctxt->c_global_allocator); | |
6366 | if (ret2) | |
6367 | mlog_errno(ret2); | |
6368 | if (!ret) | |
6369 | ret = ret2; | |
6370 | ||
6371 | ctxt->c_global_allocator = NULL; | |
6372 | } | |
6373 | ||
2b604351 MF |
6374 | return ret; |
6375 | } | |
6376 | ||
6377 | static struct ocfs2_per_slot_free_list * | |
6378 | ocfs2_find_per_slot_free_list(int type, | |
6379 | int slot, | |
6380 | struct ocfs2_cached_dealloc_ctxt *ctxt) | |
6381 | { | |
6382 | struct ocfs2_per_slot_free_list *fl = ctxt->c_first_suballocator; | |
6383 | ||
6384 | while (fl) { | |
6385 | if (fl->f_inode_type == type && fl->f_slot == slot) | |
6386 | return fl; | |
6387 | ||
6388 | fl = fl->f_next_suballocator; | |
6389 | } | |
6390 | ||
6391 | fl = kmalloc(sizeof(*fl), GFP_NOFS); | |
6392 | if (fl) { | |
6393 | fl->f_inode_type = type; | |
6394 | fl->f_slot = slot; | |
6395 | fl->f_first = NULL; | |
6396 | fl->f_next_suballocator = ctxt->c_first_suballocator; | |
6397 | ||
6398 | ctxt->c_first_suballocator = fl; | |
6399 | } | |
6400 | return fl; | |
6401 | } | |
6402 | ||
6403 | static int ocfs2_cache_block_dealloc(struct ocfs2_cached_dealloc_ctxt *ctxt, | |
6404 | int type, int slot, u64 blkno, | |
6405 | unsigned int bit) | |
6406 | { | |
6407 | int ret; | |
6408 | struct ocfs2_per_slot_free_list *fl; | |
6409 | struct ocfs2_cached_block_free *item; | |
6410 | ||
6411 | fl = ocfs2_find_per_slot_free_list(type, slot, ctxt); | |
6412 | if (fl == NULL) { | |
6413 | ret = -ENOMEM; | |
6414 | mlog_errno(ret); | |
6415 | goto out; | |
6416 | } | |
6417 | ||
6418 | item = kmalloc(sizeof(*item), GFP_NOFS); | |
6419 | if (item == NULL) { | |
6420 | ret = -ENOMEM; | |
6421 | mlog_errno(ret); | |
6422 | goto out; | |
6423 | } | |
6424 | ||
6425 | mlog(0, "Insert: (type %d, slot %u, bit %u, blk %llu)\n", | |
6426 | type, slot, bit, (unsigned long long)blkno); | |
6427 | ||
6428 | item->free_blk = blkno; | |
6429 | item->free_bit = bit; | |
6430 | item->free_next = fl->f_first; | |
6431 | ||
6432 | fl->f_first = item; | |
6433 | ||
6434 | ret = 0; | |
6435 | out: | |
6436 | return ret; | |
6437 | } | |
6438 | ||
59a5e416 MF |
6439 | static int ocfs2_cache_extent_block_free(struct ocfs2_cached_dealloc_ctxt *ctxt, |
6440 | struct ocfs2_extent_block *eb) | |
6441 | { | |
6442 | return ocfs2_cache_block_dealloc(ctxt, EXTENT_ALLOC_SYSTEM_INODE, | |
6443 | le16_to_cpu(eb->h_suballoc_slot), | |
6444 | le64_to_cpu(eb->h_blkno), | |
6445 | le16_to_cpu(eb->h_suballoc_bit)); | |
6446 | } | |
6447 | ||
ccd979bd MF |
6448 | /* This function will figure out whether the current last extent |
6449 | * block will be deleted, and if it will, what the new last extent | |
6450 | * block will be so we can update its h_next_leaf_blk field, as well | |
6451 | * as the dinode's i_last_eb_blk */ |
dcd0538f | 6452 | static int ocfs2_find_new_last_ext_blk(struct inode *inode, |
3a0782d0 | 6453 | unsigned int clusters_to_del, |
dcd0538f | 6454 | struct ocfs2_path *path, |
ccd979bd MF |
6455 | struct buffer_head **new_last_eb) |
6456 | { | |
3a0782d0 | 6457 | int next_free, ret = 0; |
dcd0538f | 6458 | u32 cpos; |
3a0782d0 | 6459 | struct ocfs2_extent_rec *rec; |
ccd979bd MF |
6460 | struct ocfs2_extent_block *eb; |
6461 | struct ocfs2_extent_list *el; | |
6462 | struct buffer_head *bh = NULL; | |
6463 | ||
6464 | *new_last_eb = NULL; | |
6465 | ||
ccd979bd | 6466 | /* we have no tree, so of course, no last_eb. */ |
dcd0538f MF |
6467 | if (!path->p_tree_depth) |
6468 | goto out; | |
ccd979bd MF |
6469 | |
6470 | /* trunc to zero special case - this makes tree_depth = 0 | |
6471 | * regardless of what it is. */ | |
3a0782d0 | 6472 | if (OCFS2_I(inode)->ip_clusters == clusters_to_del) |
dcd0538f | 6473 | goto out; |
ccd979bd | 6474 | |
dcd0538f | 6475 | el = path_leaf_el(path); |
ccd979bd MF |
6476 | BUG_ON(!el->l_next_free_rec); |
6477 | ||
3a0782d0 MF |
6478 | /* |
6479 | * Make sure that this extent list will actually be empty | |
6480 | * after we clear away the data. We can shortcut out if | |
6481 | * there's more than one non-empty extent in the | |
6482 | * list. Otherwise, a check of the remaining extent is | |
6483 | * necessary. | |
6484 | */ | |
6485 | next_free = le16_to_cpu(el->l_next_free_rec); | |
6486 | rec = NULL; | |
dcd0538f | 6487 | if (ocfs2_is_empty_extent(&el->l_recs[0])) { |
3a0782d0 | 6488 | if (next_free > 2) |
dcd0538f | 6489 | goto out; |
3a0782d0 MF |
6490 | |
6491 | /* We may have a valid extent in index 1, check it. */ | |
6492 | if (next_free == 2) | |
6493 | rec = &el->l_recs[1]; | |
6494 | ||
6495 | /* | |
6496 | * Fall through - no more nonempty extents, so we want | |
6497 | * to delete this leaf. | |
6498 | */ | |
6499 | } else { | |
6500 | if (next_free > 1) | |
6501 | goto out; | |
6502 | ||
6503 | rec = &el->l_recs[0]; | |
6504 | } | |
6505 | ||
6506 | if (rec) { | |
6507 | /* | |
6508 | * Check that we'll only be trimming off the end of this |
6509 | * cluster. | |
6510 | */ | |
e48edee2 | 6511 | if (le16_to_cpu(rec->e_leaf_clusters) > clusters_to_del) |
3a0782d0 MF |
6512 | goto out; |
6513 | } | |
ccd979bd | 6514 | |
dcd0538f MF |
6515 | ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb, path, &cpos); |
6516 | if (ret) { | |
6517 | mlog_errno(ret); | |
6518 | goto out; | |
6519 | } | |
ccd979bd | 6520 | |
dcd0538f MF |
6521 | ret = ocfs2_find_leaf(inode, path_root_el(path), cpos, &bh); |
6522 | if (ret) { | |
6523 | mlog_errno(ret); | |
6524 | goto out; | |
6525 | } | |
ccd979bd | 6526 | |
dcd0538f MF |
6527 | eb = (struct ocfs2_extent_block *) bh->b_data; |
6528 | el = &eb->h_list; | |
5e96581a JB |
6529 | |
6530 | /* ocfs2_find_leaf() gets the eb from ocfs2_read_extent_block(). | |
6531 | * Any corruption is a code bug. */ | |
6532 | BUG_ON(!OCFS2_IS_VALID_EXTENT_BLOCK(eb)); | |
ccd979bd MF |
6533 | |
6534 | *new_last_eb = bh; | |
6535 | get_bh(*new_last_eb); | |
dcd0538f MF |
6536 | mlog(0, "returning block %llu, (cpos: %u)\n", |
6537 | (unsigned long long)le64_to_cpu(eb->h_blkno), cpos); | |
6538 | out: | |
6539 | brelse(bh); | |
ccd979bd | 6540 | |
dcd0538f | 6541 | return ret; |
ccd979bd MF |
6542 | } |
6543 | ||
3a0782d0 MF |
6544 | /* |
6545 | * Trim some clusters off the rightmost edge of a tree. Only called | |
6546 | * during truncate. | |
6547 | * | |
6548 | * The caller needs to: | |
6549 | * - start journaling of each path component. | |
6550 | * - compute and fully set up any new last ext block | |
6551 | */ | |
6552 | static int ocfs2_trim_tree(struct inode *inode, struct ocfs2_path *path, | |
6553 | handle_t *handle, struct ocfs2_truncate_context *tc, | |
6554 | u32 clusters_to_del, u64 *delete_start) | |
6555 | { | |
6556 | int ret, i, index = path->p_tree_depth; | |
6557 | u32 new_edge = 0; | |
6558 | u64 deleted_eb = 0; | |
6559 | struct buffer_head *bh; | |
6560 | struct ocfs2_extent_list *el; | |
6561 | struct ocfs2_extent_rec *rec; | |
6562 | ||
6563 | *delete_start = 0; | |
6564 | ||
6565 | while (index >= 0) { | |
6566 | bh = path->p_node[index].bh; | |
6567 | el = path->p_node[index].el; | |
6568 | ||
6569 | mlog(0, "traveling tree (index = %d, block = %llu)\n", | |
6570 | index, (unsigned long long)bh->b_blocknr); | |
6571 | ||
6572 | BUG_ON(le16_to_cpu(el->l_next_free_rec) == 0); | |
6573 | ||
6574 | if (index != | |
6575 | (path->p_tree_depth - le16_to_cpu(el->l_tree_depth))) { | |
6576 | ocfs2_error(inode->i_sb, | |
6577 | "Inode %lu has invalid ext. block %llu", | |
6578 | inode->i_ino, | |
6579 | (unsigned long long)bh->b_blocknr); | |
6580 | ret = -EROFS; | |
6581 | goto out; | |
6582 | } | |
6583 | ||
6584 | find_tail_record: | |
6585 | i = le16_to_cpu(el->l_next_free_rec) - 1; | |
6586 | rec = &el->l_recs[i]; | |
6587 | ||
6588 | mlog(0, "Extent list before: record %d: (%u, %u, %llu), " | |
6589 | "next = %u\n", i, le32_to_cpu(rec->e_cpos), | |
e48edee2 | 6590 | ocfs2_rec_clusters(el, rec), |
3a0782d0 MF |
6591 | (unsigned long long)le64_to_cpu(rec->e_blkno), |
6592 | le16_to_cpu(el->l_next_free_rec)); | |
6593 | ||
e48edee2 | 6594 | BUG_ON(ocfs2_rec_clusters(el, rec) < clusters_to_del); |
3a0782d0 MF |
6595 | |
6596 | if (le16_to_cpu(el->l_tree_depth) == 0) { | |
6597 | /* | |
6598 | * If the leaf block contains a single empty | |
6599 | * extent and no records, we can just remove | |
6600 | * the block. | |
6601 | */ | |
6602 | if (i == 0 && ocfs2_is_empty_extent(rec)) { | |
6603 | memset(rec, 0, | |
6604 | sizeof(struct ocfs2_extent_rec)); | |
6605 | el->l_next_free_rec = cpu_to_le16(0); | |
6606 | ||
6607 | goto delete; | |
6608 | } | |
6609 | ||
6610 | /* | |
6611 | * Remove any empty extents by shifting things | |
6612 | * left. That should make life much easier on | |
6613 | * the code below. This condition is rare | |
6614 | * enough that we shouldn't see a performance | |
6615 | * hit. | |
6616 | */ | |
6617 | if (ocfs2_is_empty_extent(&el->l_recs[0])) { | |
6618 | le16_add_cpu(&el->l_next_free_rec, -1); | |
6619 | ||
6620 | for(i = 0; | |
6621 | i < le16_to_cpu(el->l_next_free_rec); i++) | |
6622 | el->l_recs[i] = el->l_recs[i + 1]; | |
6623 | ||
6624 | memset(&el->l_recs[i], 0, | |
6625 | sizeof(struct ocfs2_extent_rec)); | |
6626 | ||
6627 | /* | |
6628 | * We've modified our extent list. The | |
6629 | * simplest way to handle this change | |
6630 | * is to begin the search from the |
6631 | * start again. | |
6632 | */ | |
6633 | goto find_tail_record; | |
6634 | } | |
6635 | ||
e48edee2 | 6636 | le16_add_cpu(&rec->e_leaf_clusters, -clusters_to_del); |
3a0782d0 MF |
6637 | |
6638 | /* | |
6639 | * We'll use "new_edge" on our way back up the | |
6640 | * tree to know what our rightmost cpos is. | |
6641 | */ | |
e48edee2 | 6642 | new_edge = le16_to_cpu(rec->e_leaf_clusters); |
3a0782d0 MF |
6643 | new_edge += le32_to_cpu(rec->e_cpos); |
6644 | ||
6645 | /* | |
6646 | * The caller will use this to delete data blocks. | |
6647 | */ | |
6648 | *delete_start = le64_to_cpu(rec->e_blkno) | |
6649 | + ocfs2_clusters_to_blocks(inode->i_sb, | |
e48edee2 | 6650 | le16_to_cpu(rec->e_leaf_clusters)); |
3a0782d0 MF |
6651 | |
6652 | /* | |
6653 | * If it's now empty, remove this record. | |
6654 | */ | |
e48edee2 | 6655 | if (le16_to_cpu(rec->e_leaf_clusters) == 0) { |
3a0782d0 MF |
6656 | memset(rec, 0, |
6657 | sizeof(struct ocfs2_extent_rec)); | |
6658 | le16_add_cpu(&el->l_next_free_rec, -1); | |
6659 | } | |
6660 | } else { | |
6661 | if (le64_to_cpu(rec->e_blkno) == deleted_eb) { | |
6662 | memset(rec, 0, | |
6663 | sizeof(struct ocfs2_extent_rec)); | |
6664 | le16_add_cpu(&el->l_next_free_rec, -1); | |
6665 | ||
6666 | goto delete; | |
6667 | } | |
6668 | ||
6669 | /* Can this actually happen? */ | |
6670 | if (le16_to_cpu(el->l_next_free_rec) == 0) | |
6671 | goto delete; | |
6672 | ||
6673 | /* | |
6674 | * We never actually deleted any clusters | |
6675 | * because our leaf was empty. There's no | |
6676 | * reason to adjust the rightmost edge then. | |
6677 | */ | |
6678 | if (new_edge == 0) | |
6679 | goto delete; | |
6680 | ||
e48edee2 MF |
6681 | rec->e_int_clusters = cpu_to_le32(new_edge); |
6682 | le32_add_cpu(&rec->e_int_clusters, | |
3a0782d0 MF |
6683 | -le32_to_cpu(rec->e_cpos)); |
6684 | ||
6685 | /* | |
6686 | * A deleted child record should have been | |
6687 | * caught above. | |
6688 | */ | |
e48edee2 | 6689 | BUG_ON(le32_to_cpu(rec->e_int_clusters) == 0); |
3a0782d0 MF |
6690 | } |
6691 | ||
6692 | delete: | |
6693 | ret = ocfs2_journal_dirty(handle, bh); | |
6694 | if (ret) { | |
6695 | mlog_errno(ret); | |
6696 | goto out; | |
6697 | } | |
6698 | ||
6699 | mlog(0, "extent list container %llu, after: record %d: " | |
6700 | "(%u, %u, %llu), next = %u.\n", | |
6701 | (unsigned long long)bh->b_blocknr, i, | |
e48edee2 | 6702 | le32_to_cpu(rec->e_cpos), ocfs2_rec_clusters(el, rec), |
3a0782d0 MF |
6703 | (unsigned long long)le64_to_cpu(rec->e_blkno), |
6704 | le16_to_cpu(el->l_next_free_rec)); | |
6705 | ||
6706 | /* | |
6707 | * We must be careful to only attempt delete of an | |
6708 | * extent block (and not the root inode block). | |
6709 | */ | |
6710 | if (index > 0 && le16_to_cpu(el->l_next_free_rec) == 0) { | |
6711 | struct ocfs2_extent_block *eb = | |
6712 | (struct ocfs2_extent_block *)bh->b_data; | |
6713 | ||
6714 | /* | |
6715 | * Save this for use when processing the | |
6716 | * parent block. | |
6717 | */ | |
6718 | deleted_eb = le64_to_cpu(eb->h_blkno); | |
6719 | ||
6720 | mlog(0, "deleting this extent block.\n"); | |
6721 | ||
6722 | ocfs2_remove_from_cache(inode, bh); | |
6723 | ||
e48edee2 | 6724 | BUG_ON(ocfs2_rec_clusters(el, &el->l_recs[0])); |
3a0782d0 MF |
6725 | BUG_ON(le32_to_cpu(el->l_recs[0].e_cpos)); |
6726 | BUG_ON(le64_to_cpu(el->l_recs[0].e_blkno)); | |
6727 | ||
59a5e416 MF |
6728 | ret = ocfs2_cache_extent_block_free(&tc->tc_dealloc, eb); |
6729 | /* An error here is not fatal. */ | |
6730 | if (ret < 0) | |
6731 | mlog_errno(ret); | |
3a0782d0 MF |
6732 | } else { |
6733 | deleted_eb = 0; | |
6734 | } | |
6735 | ||
6736 | index--; | |
6737 | } | |
6738 | ||
6739 | ret = 0; | |
6740 | out: | |
6741 | return ret; | |
6742 | } | |
6743 | ||
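/*
 * Sketch of the caller obligations described above, roughly in the
 * order the truncate path below performs them (illustrative; error
 * handling omitted):
 *
 *	ocfs2_find_path(inode, path, UINT_MAX);
 *	ocfs2_find_new_last_ext_blk(inode, clusters_to_del, path,
 *				    &last_eb_bh);
 *	ocfs2_journal_access_path(inode, handle, path);
 *	ocfs2_trim_tree(inode, path, handle, tc, clusters_to_del,
 *			&delete_start);
 *	ocfs2_truncate_log_append(osb, handle, delete_start,
 *				  clusters_to_del);
 */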
ccd979bd MF |
6744 | static int ocfs2_do_truncate(struct ocfs2_super *osb, |
6745 | unsigned int clusters_to_del, | |
6746 | struct inode *inode, | |
6747 | struct buffer_head *fe_bh, | |
1fabe148 | 6748 | handle_t *handle, |
dcd0538f MF |
6749 | struct ocfs2_truncate_context *tc, |
6750 | struct ocfs2_path *path) | |
ccd979bd | 6751 | { |
3a0782d0 | 6752 | int status; |
ccd979bd | 6753 | struct ocfs2_dinode *fe; |
ccd979bd MF |
6754 | struct ocfs2_extent_block *last_eb = NULL; |
6755 | struct ocfs2_extent_list *el; | |
ccd979bd | 6756 | struct buffer_head *last_eb_bh = NULL; |
ccd979bd MF |
6757 | u64 delete_blk = 0; |
6758 | ||
6759 | fe = (struct ocfs2_dinode *) fe_bh->b_data; | |
6760 | ||
3a0782d0 | 6761 | status = ocfs2_find_new_last_ext_blk(inode, clusters_to_del, |
dcd0538f | 6762 | path, &last_eb_bh); |
ccd979bd MF |
6763 | if (status < 0) { |
6764 | mlog_errno(status); | |
6765 | goto bail; | |
6766 | } | |
dcd0538f MF |
6767 | |
6768 | /* | |
6769 | * Each component will be touched, so we might as well journal | |
6770 | * here to avoid having to handle errors later. | |
6771 | */ | |
3a0782d0 MF |
6772 | status = ocfs2_journal_access_path(inode, handle, path); |
6773 | if (status < 0) { | |
6774 | mlog_errno(status); | |
6775 | goto bail; | |
dcd0538f MF |
6776 | } |
6777 | ||
6778 | if (last_eb_bh) { | |
13723d00 JB |
6779 | status = ocfs2_journal_access_eb(handle, inode, last_eb_bh, |
6780 | OCFS2_JOURNAL_ACCESS_WRITE); | |
dcd0538f MF |
6781 | if (status < 0) { |
6782 | mlog_errno(status); | |
6783 | goto bail; | |
6784 | } | |
6785 | ||
ccd979bd | 6786 | last_eb = (struct ocfs2_extent_block *) last_eb_bh->b_data; |
dcd0538f | 6787 | } |
ccd979bd | 6788 | |
dcd0538f MF |
6789 | el = &(fe->id2.i_list); |
6790 | ||
6791 | /* | |
6792 | * Lower levels depend on this never happening, but it's best | |
6793 | * to check it up here before changing the tree. | |
6794 | */ | |
e48edee2 | 6795 | if (el->l_tree_depth && el->l_recs[0].e_int_clusters == 0) { |
dcd0538f MF |
6796 | ocfs2_error(inode->i_sb, |
6797 | "Inode %lu has an empty extent record, depth %u\n", | |
6798 | inode->i_ino, le16_to_cpu(el->l_tree_depth)); | |
3a0782d0 | 6799 | status = -EROFS; |
ccd979bd MF |
6800 | goto bail; |
6801 | } | |
ccd979bd | 6802 | |
a90714c1 JK |
6803 | vfs_dq_free_space_nodirty(inode, |
6804 | ocfs2_clusters_to_bytes(osb->sb, clusters_to_del)); | |
ccd979bd MF |
6805 | spin_lock(&OCFS2_I(inode)->ip_lock); |
6806 | OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters) - | |
6807 | clusters_to_del; | |
6808 | spin_unlock(&OCFS2_I(inode)->ip_lock); | |
6809 | le32_add_cpu(&fe->i_clusters, -clusters_to_del); | |
e535e2ef | 6810 | inode->i_blocks = ocfs2_inode_sector_count(inode); |
ccd979bd | 6811 | |
3a0782d0 MF |
6812 | status = ocfs2_trim_tree(inode, path, handle, tc, |
6813 | clusters_to_del, &delete_blk); | |
6814 | if (status) { | |
6815 | mlog_errno(status); | |
6816 | goto bail; | |
ccd979bd MF |
6817 | } |
6818 | ||
dcd0538f | 6819 | if (le32_to_cpu(fe->i_clusters) == 0) { |
ccd979bd MF |
6820 | /* trunc to zero is a special case. */ |
6821 | el->l_tree_depth = 0; | |
6822 | fe->i_last_eb_blk = 0; | |
6823 | } else if (last_eb) | |
6824 | fe->i_last_eb_blk = last_eb->h_blkno; | |
6825 | ||
6826 | status = ocfs2_journal_dirty(handle, fe_bh); | |
6827 | if (status < 0) { | |
6828 | mlog_errno(status); | |
6829 | goto bail; | |
6830 | } | |
6831 | ||
6832 | if (last_eb) { | |
6833 | /* If there will be a new last extent block, then by | |
6834 | * definition, there cannot be any leaves to the right of | |
6835 | * it. */ |
ccd979bd MF |
6836 | last_eb->h_next_leaf_blk = 0; |
6837 | status = ocfs2_journal_dirty(handle, last_eb_bh); | |
6838 | if (status < 0) { | |
6839 | mlog_errno(status); | |
6840 | goto bail; | |
6841 | } | |
6842 | } | |
6843 | ||
3a0782d0 MF |
6844 | if (delete_blk) { |
6845 | status = ocfs2_truncate_log_append(osb, handle, delete_blk, | |
6846 | clusters_to_del); | |
ccd979bd MF |
6847 | if (status < 0) { |
6848 | mlog_errno(status); | |
6849 | goto bail; | |
6850 | } | |
ccd979bd MF |
6851 | } |
6852 | status = 0; | |
6853 | bail: | |
60e2ec48 | 6854 | brelse(last_eb_bh); |
ccd979bd MF |
6855 | mlog_exit(status); |
6856 | return status; | |
6857 | } | |
6858 | ||
2b4e30fb | 6859 | static int ocfs2_zero_func(handle_t *handle, struct buffer_head *bh) |
60b11392 MF |
6860 | { |
6861 | set_buffer_uptodate(bh); | |
6862 | mark_buffer_dirty(bh); | |
6863 | return 0; | |
6864 | } | |
6865 | ||
1d410a6e MF |
6866 | static void ocfs2_map_and_dirty_page(struct inode *inode, handle_t *handle, |
6867 | unsigned int from, unsigned int to, | |
6868 | struct page *page, int zero, u64 *phys) | |
6869 | { | |
6870 | int ret, partial = 0; | |
6871 | ||
6872 | ret = ocfs2_map_page_blocks(page, phys, inode, from, to, 0); | |
6873 | if (ret) | |
6874 | mlog_errno(ret); | |
6875 | ||
6876 | if (zero) | |
eebd2aa3 | 6877 | zero_user_segment(page, from, to); |
1d410a6e MF |
6878 | |
6879 | /* | |
6880 | * Need to set the buffers we zero'd into uptodate | |
6881 | * here if they aren't - ocfs2_map_page_blocks() | |
6882 | * might've skipped some | |
6883 | */ | |
2b4e30fb JB |
6884 | ret = walk_page_buffers(handle, page_buffers(page), |
6885 | from, to, &partial, | |
6886 | ocfs2_zero_func); | |
6887 | if (ret < 0) | |
6888 | mlog_errno(ret); | |
6889 | else if (ocfs2_should_order_data(inode)) { | |
6890 | ret = ocfs2_jbd2_file_inode(handle, inode); | |
1d410a6e MF |
6891 | if (ret < 0) |
6892 | mlog_errno(ret); | |
6893 | } | |
6894 | ||
6895 | if (!partial) | |
6896 | SetPageUptodate(page); | |
6897 | ||
6898 | flush_dcache_page(page); | |
6899 | } | |
6900 | ||
35edec1d MF |
6901 | static void ocfs2_zero_cluster_pages(struct inode *inode, loff_t start, |
6902 | loff_t end, struct page **pages, | |
6903 | int numpages, u64 phys, handle_t *handle) | |
60b11392 | 6904 | { |
1d410a6e | 6905 | int i; |
60b11392 MF |
6906 | struct page *page; |
6907 | unsigned int from, to = PAGE_CACHE_SIZE; | |
6908 | struct super_block *sb = inode->i_sb; | |
6909 | ||
6910 | BUG_ON(!ocfs2_sparse_alloc(OCFS2_SB(sb))); | |
6911 | ||
6912 | if (numpages == 0) | |
6913 | goto out; | |
6914 | ||
35edec1d | 6915 | to = PAGE_CACHE_SIZE; |
60b11392 MF |
6916 | for(i = 0; i < numpages; i++) { |
6917 | page = pages[i]; | |
6918 | ||
35edec1d MF |
6919 | from = start & (PAGE_CACHE_SIZE - 1); |
6920 | if ((end >> PAGE_CACHE_SHIFT) == page->index) | |
6921 | to = end & (PAGE_CACHE_SIZE - 1); | |
6922 | ||
60b11392 MF |
6923 | BUG_ON(from > PAGE_CACHE_SIZE); |
6924 | BUG_ON(to > PAGE_CACHE_SIZE); | |
6925 | ||
1d410a6e MF |
6926 | ocfs2_map_and_dirty_page(inode, handle, from, to, page, 1, |
6927 | &phys); | |
60b11392 | 6928 | |
35edec1d | 6929 | start = (page->index + 1) << PAGE_CACHE_SHIFT; |
60b11392 MF |
6930 | } |
6931 | out: | |
1d410a6e MF |
6932 | if (pages) |
6933 | ocfs2_unlock_and_free_pages(pages, numpages); | |
60b11392 MF |
6934 | } |
6935 | ||
35edec1d | 6936 | static int ocfs2_grab_eof_pages(struct inode *inode, loff_t start, loff_t end, |
1d410a6e | 6937 | struct page **pages, int *num) |
60b11392 | 6938 | { |
1d410a6e | 6939 | int numpages, ret = 0; |
60b11392 MF |
6940 | struct super_block *sb = inode->i_sb; |
6941 | struct address_space *mapping = inode->i_mapping; | |
6942 | unsigned long index; | |
35edec1d | 6943 | loff_t last_page_bytes; |
60b11392 | 6944 | |
35edec1d | 6945 | BUG_ON(start > end); |
60b11392 | 6946 | |
35edec1d MF |
6947 | BUG_ON(start >> OCFS2_SB(sb)->s_clustersize_bits != |
6948 | (end - 1) >> OCFS2_SB(sb)->s_clustersize_bits); | |
6949 | ||
1d410a6e | 6950 | numpages = 0; |
35edec1d MF |
6951 | last_page_bytes = PAGE_ALIGN(end); |
6952 | index = start >> PAGE_CACHE_SHIFT; | |
60b11392 MF |
6953 | do { |
6954 | pages[numpages] = grab_cache_page(mapping, index); | |
6955 | if (!pages[numpages]) { | |
6956 | ret = -ENOMEM; | |
6957 | mlog_errno(ret); | |
6958 | goto out; | |
6959 | } | |
6960 | ||
6961 | numpages++; | |
6962 | index++; | |
35edec1d | 6963 | } while (index < (last_page_bytes >> PAGE_CACHE_SHIFT)); |
60b11392 MF |
6964 | |
6965 | out: | |
6966 | if (ret != 0) { | |
1d410a6e MF |
6967 | if (pages) |
6968 | ocfs2_unlock_and_free_pages(pages, numpages); | |
60b11392 MF |
6969 | numpages = 0; |
6970 | } | |
6971 | ||
6972 | *num = numpages; | |
6973 | ||
6974 | return ret; | |
6975 | } | |
6976 | ||
6977 | /* | |
6978 | * Zero the area past i_size but still within an allocated | |
6979 | * cluster. This avoids exposing nonzero data on subsequent file | |
6980 | * extends. | |
6981 | * | |
6982 | * We need to call this before i_size is updated on the inode because | |
6983 | * otherwise block_write_full_page() will skip writeout of pages past | |
6984 | * i_size. The new_i_size parameter is passed for this reason. | |
6985 | */ | |
35edec1d MF |
6986 | int ocfs2_zero_range_for_truncate(struct inode *inode, handle_t *handle, |
6987 | u64 range_start, u64 range_end) | |
60b11392 | 6988 | { |
1d410a6e | 6989 | int ret = 0, numpages; |
60b11392 MF |
6990 | struct page **pages = NULL; |
6991 | u64 phys; | |
1d410a6e MF |
6992 | unsigned int ext_flags; |
6993 | struct super_block *sb = inode->i_sb; | |
60b11392 MF |
6994 | |
6995 | /* | |
6996 | * File systems which don't support sparse files zero on every | |
6997 | * extend. | |
6998 | */ | |
1d410a6e | 6999 | if (!ocfs2_sparse_alloc(OCFS2_SB(sb))) |
60b11392 MF |
7000 | return 0; |
7001 | ||
1d410a6e | 7002 | pages = kcalloc(ocfs2_pages_per_cluster(sb), |
60b11392 MF |
7003 | sizeof(struct page *), GFP_NOFS); |
7004 | if (pages == NULL) { | |
7005 | ret = -ENOMEM; | |
7006 | mlog_errno(ret); | |
7007 | goto out; | |
7008 | } | |
7009 | ||
1d410a6e MF |
7010 | if (range_start == range_end) |
7011 | goto out; | |
7012 | ||
7013 | ret = ocfs2_extent_map_get_blocks(inode, | |
7014 | range_start >> sb->s_blocksize_bits, | |
7015 | &phys, NULL, &ext_flags); | |
60b11392 MF |
7016 | if (ret) { |
7017 | mlog_errno(ret); | |
7018 | goto out; | |
7019 | } | |
7020 | ||
1d410a6e MF |
7021 | /* |
7022 | * Tail is a hole, or is marked unwritten. In either case, we | |
7023 | * can count on read and write to return/push zeros. |
7024 | */ | |
7025 | if (phys == 0 || ext_flags & OCFS2_EXT_UNWRITTEN) | |
60b11392 MF |
7026 | goto out; |
7027 | ||
1d410a6e MF |
7028 | ret = ocfs2_grab_eof_pages(inode, range_start, range_end, pages, |
7029 | &numpages); | |
7030 | if (ret) { | |
7031 | mlog_errno(ret); | |
7032 | goto out; | |
7033 | } | |
7034 | ||
35edec1d MF |
7035 | ocfs2_zero_cluster_pages(inode, range_start, range_end, pages, |
7036 | numpages, phys, handle); | |
60b11392 MF |
7037 | |
7038 | /* | |
7039 | * Initiate writeout of the pages we zero'd here. We don't | |
7040 | * wait on them - the truncate_inode_pages() call later will | |
7041 | * do that for us. | |
7042 | */ | |
35edec1d MF |
7043 | ret = do_sync_mapping_range(inode->i_mapping, range_start, |
7044 | range_end - 1, SYNC_FILE_RANGE_WRITE); | |
60b11392 MF |
7045 | if (ret) |
7046 | mlog_errno(ret); | |
7047 | ||
7048 | out: | |
7049 | if (pages) | |
7050 | kfree(pages); | |
7051 | ||
7052 | return ret; | |
7053 | } | |
7054 | ||
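/*
 * A concrete example of the zeroing above (numbers illustrative): on a
 * sparse-capable volume with 32K clusters, truncating a 40960-byte file
 * down to 5000 bytes frees the second cluster but leaves the tail of
 * the first one allocated.  A caller would zero [5000, 32768), the
 * remainder of that allocated cluster, before publishing the new
 * i_size, so a later extend cannot expose the stale bytes:
 *
 *	range_start = 5000;
 *	range_end   = 32768;	(end of the still-allocated cluster)
 *	ocfs2_zero_range_for_truncate(inode, handle,
 *				      range_start, range_end);
 */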
fdd77704 TY |
7055 | static void ocfs2_zero_dinode_id2_with_xattr(struct inode *inode, |
7056 | struct ocfs2_dinode *di) | |
1afc32b9 MF |
7057 | { |
7058 | unsigned int blocksize = 1 << inode->i_sb->s_blocksize_bits; | |
fdd77704 | 7059 | unsigned int xattrsize = le16_to_cpu(di->i_xattr_inline_size); |
1afc32b9 | 7060 | |
fdd77704 TY |
7061 | if (le16_to_cpu(di->i_dyn_features) & OCFS2_INLINE_XATTR_FL) |
7062 | memset(&di->id2, 0, blocksize - | |
7063 | offsetof(struct ocfs2_dinode, id2) - | |
7064 | xattrsize); | |
7065 | else | |
7066 | memset(&di->id2, 0, blocksize - | |
7067 | offsetof(struct ocfs2_dinode, id2)); | |
1afc32b9 MF |
7068 | } |
7069 | ||
5b6a3a2b MF |
7070 | void ocfs2_dinode_new_extent_list(struct inode *inode, |
7071 | struct ocfs2_dinode *di) | |
7072 | { | |
fdd77704 | 7073 | ocfs2_zero_dinode_id2_with_xattr(inode, di); |
5b6a3a2b MF |
7074 | di->id2.i_list.l_tree_depth = 0; |
7075 | di->id2.i_list.l_next_free_rec = 0; | |
fdd77704 TY |
7076 | di->id2.i_list.l_count = cpu_to_le16( |
7077 | ocfs2_extent_recs_per_inode_with_xattr(inode->i_sb, di)); | |
5b6a3a2b MF |
7078 | } |
7079 | ||
1afc32b9 MF |
7080 | void ocfs2_set_inode_data_inline(struct inode *inode, struct ocfs2_dinode *di) |
7081 | { | |
7082 | struct ocfs2_inode_info *oi = OCFS2_I(inode); | |
7083 | struct ocfs2_inline_data *idata = &di->id2.i_data; | |
7084 | ||
7085 | spin_lock(&oi->ip_lock); | |
7086 | oi->ip_dyn_features |= OCFS2_INLINE_DATA_FL; | |
7087 | di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features); | |
7088 | spin_unlock(&oi->ip_lock); | |
7089 | ||
7090 | /* | |
7091 | * We clear the entire i_data structure here so that all | |
7092 | * fields can be properly initialized. | |
7093 | */ | |
fdd77704 | 7094 | ocfs2_zero_dinode_id2_with_xattr(inode, di); |
1afc32b9 | 7095 | |
fdd77704 TY |
7096 | idata->id_count = cpu_to_le16( |
7097 | ocfs2_max_inline_data_with_xattr(inode->i_sb, di)); | |
1afc32b9 MF |
7098 | } |
7099 | ||
7100 | int ocfs2_convert_inline_data_to_extents(struct inode *inode, | |
7101 | struct buffer_head *di_bh) | |
7102 | { | |
7103 | int ret, i, has_data, num_pages = 0; | |
7104 | handle_t *handle; | |
7105 | u64 uninitialized_var(block); | |
7106 | struct ocfs2_inode_info *oi = OCFS2_I(inode); | |
7107 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); | |
7108 | struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; | |
1afc32b9 MF |
7109 | struct ocfs2_alloc_context *data_ac = NULL; |
7110 | struct page **pages = NULL; | |
7111 | loff_t end = osb->s_clustersize; | |
f99b9b7c | 7112 | struct ocfs2_extent_tree et; |
a90714c1 | 7113 | int did_quota = 0; |
1afc32b9 MF |
7114 | |
7115 | has_data = i_size_read(inode) ? 1 : 0; | |
7116 | ||
7117 | if (has_data) { | |
7118 | pages = kcalloc(ocfs2_pages_per_cluster(osb->sb), | |
7119 | sizeof(struct page *), GFP_NOFS); | |
7120 | if (pages == NULL) { | |
7121 | ret = -ENOMEM; | |
7122 | mlog_errno(ret); | |
7123 | goto out; | |
7124 | } | |
7125 | ||
7126 | ret = ocfs2_reserve_clusters(osb, 1, &data_ac); | |
7127 | if (ret) { | |
7128 | mlog_errno(ret); | |
7129 | goto out; | |
7130 | } | |
7131 | } | |
7132 | ||
a90714c1 JK |
7133 | handle = ocfs2_start_trans(osb, |
7134 | ocfs2_inline_to_extents_credits(osb->sb)); | |
1afc32b9 MF |
7135 | if (IS_ERR(handle)) { |
7136 | ret = PTR_ERR(handle); | |
7137 | mlog_errno(ret); | |
7138 | goto out_unlock; | |
7139 | } | |
7140 | ||
13723d00 JB |
7141 | ret = ocfs2_journal_access_di(handle, inode, di_bh, |
7142 | OCFS2_JOURNAL_ACCESS_WRITE); | |
1afc32b9 MF |
7143 | if (ret) { |
7144 | mlog_errno(ret); | |
7145 | goto out_commit; | |
7146 | } | |
7147 | ||
7148 | if (has_data) { | |
7149 | u32 bit_off, num; | |
7150 | unsigned int page_end; | |
7151 | u64 phys; | |
7152 | ||
a90714c1 JK |
7153 | if (vfs_dq_alloc_space_nodirty(inode, |
7154 | ocfs2_clusters_to_bytes(osb->sb, 1))) { | |
7155 | ret = -EDQUOT; | |
7156 | goto out_commit; | |
7157 | } | |
7158 | did_quota = 1; | |
7159 | ||
1afc32b9 MF |
7160 | ret = ocfs2_claim_clusters(osb, handle, data_ac, 1, &bit_off, |
7161 | &num); | |
7162 | if (ret) { | |
7163 | mlog_errno(ret); | |
7164 | goto out_commit; | |
7165 | } | |
7166 | ||
7167 | /* | |
7168 | * Save two copies, one for insert, and one that can | |
7169 | * be changed by ocfs2_map_and_dirty_page() below. | |
7170 | */ | |
7171 | block = phys = ocfs2_clusters_to_blocks(inode->i_sb, bit_off); | |
7172 | ||
7173 | /* | |
7174 | * Non sparse file systems zero on extend, so no need | |
7175 | * to do that now. | |
7176 | */ | |
7177 | if (!ocfs2_sparse_alloc(osb) && | |
7178 | PAGE_CACHE_SIZE < osb->s_clustersize) | |
7179 | end = PAGE_CACHE_SIZE; | |
7180 | ||
7181 | ret = ocfs2_grab_eof_pages(inode, 0, end, pages, &num_pages); | |
7182 | if (ret) { | |
7183 | mlog_errno(ret); | |
7184 | goto out_commit; | |
7185 | } | |
7186 | ||
7187 | /* | |
7188 | * This should populate the 1st page for us and mark | |
7189 | * it up to date. | |
7190 | */ | |
7191 | ret = ocfs2_read_inline_data(inode, pages[0], di_bh); | |
7192 | if (ret) { | |
7193 | mlog_errno(ret); | |
7194 | goto out_commit; | |
7195 | } | |
7196 | ||
7197 | page_end = PAGE_CACHE_SIZE; | |
7198 | if (PAGE_CACHE_SIZE > osb->s_clustersize) | |
7199 | page_end = osb->s_clustersize; | |
7200 | ||
7201 | for (i = 0; i < num_pages; i++) | |
7202 | ocfs2_map_and_dirty_page(inode, handle, 0, page_end, | |
7203 | pages[i], i > 0, &phys); | |
7204 | } | |
7205 | ||
7206 | spin_lock(&oi->ip_lock); | |
7207 | oi->ip_dyn_features &= ~OCFS2_INLINE_DATA_FL; | |
7208 | di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features); | |
7209 | spin_unlock(&oi->ip_lock); | |
7210 | ||
5b6a3a2b | 7211 | ocfs2_dinode_new_extent_list(inode, di); |
1afc32b9 MF |
7212 | |
7213 | ocfs2_journal_dirty(handle, di_bh); | |
7214 | ||
7215 | if (has_data) { | |
7216 | /* | |
7217 | * An error at this point should be extremely rare. If | |
7218 | * this proves to be false, we could always re-build | |
7219 | * the in-inode data from our pages. | |
7220 | */ | |
8d6220d6 | 7221 | ocfs2_init_dinode_extent_tree(&et, inode, di_bh); |
f99b9b7c JB |
7222 | ret = ocfs2_insert_extent(osb, handle, inode, &et, |
7223 | 0, block, 1, 0, NULL); | |
1afc32b9 MF |
7224 | if (ret) { |
7225 | mlog_errno(ret); | |
7226 | goto out_commit; | |
7227 | } | |
7228 | ||
7229 | inode->i_blocks = ocfs2_inode_sector_count(inode); | |
7230 | } | |
7231 | ||
7232 | out_commit: | |
a90714c1 JK |
7233 | if (ret < 0 && did_quota) |
7234 | vfs_dq_free_space_nodirty(inode, | |
7235 | ocfs2_clusters_to_bytes(osb->sb, 1)); | |
7236 | ||
1afc32b9 MF |
7237 | ocfs2_commit_trans(osb, handle); |
7238 | ||
7239 | out_unlock: | |
7240 | if (data_ac) | |
7241 | ocfs2_free_alloc_context(data_ac); | |
7242 | ||
7243 | out: | |
7244 | if (pages) { | |
7245 | ocfs2_unlock_and_free_pages(pages, num_pages); | |
7246 | kfree(pages); | |
7247 | } | |
7248 | ||
7249 | return ret; | |
7250 | } | |
7251 | ||
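/*
 * Sketch of when the conversion above gets triggered (illustrative;
 * the exact check lives with the callers in the write path): once an
 * incoming write would overflow the inline id_data area, the inode has
 * to be switched to an extent list first:
 *
 *	if (pos + len > ocfs2_max_inline_data_with_xattr(inode->i_sb, di)) {
 *		ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
 *		if (ret)
 *			mlog_errno(ret);
 *	}
 */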
ccd979bd MF |
7252 | /* |
7253 | * It is expected, that by the time you call this function, | |
7254 | * inode->i_size and fe->i_size have been adjusted. | |
7255 | * | |
7256 | * WARNING: This will kfree the truncate context | |
7257 | */ | |
7258 | int ocfs2_commit_truncate(struct ocfs2_super *osb, | |
7259 | struct inode *inode, | |
7260 | struct buffer_head *fe_bh, | |
7261 | struct ocfs2_truncate_context *tc) | |
7262 | { | |
7263 | int status, i, credits, tl_sem = 0; | |
dcd0538f | 7264 | u32 clusters_to_del, new_highest_cpos, range; |
ccd979bd | 7265 | struct ocfs2_extent_list *el; |
1fabe148 | 7266 | handle_t *handle = NULL; |
ccd979bd | 7267 | struct inode *tl_inode = osb->osb_tl_inode; |
dcd0538f | 7268 | struct ocfs2_path *path = NULL; |
e7d4cb6b | 7269 | struct ocfs2_dinode *di = (struct ocfs2_dinode *)fe_bh->b_data; |
ccd979bd MF |
7270 | |
7271 | mlog_entry_void(); | |
7272 | ||
dcd0538f | 7273 | new_highest_cpos = ocfs2_clusters_for_bytes(osb->sb, |
ccd979bd MF |
7274 | i_size_read(inode)); |
7275 | ||
13723d00 JB |
7276 | path = ocfs2_new_path(fe_bh, &di->id2.i_list, |
7277 | ocfs2_journal_access_di); | |
dcd0538f MF |
7278 | if (!path) { |
7279 | status = -ENOMEM; | |
7280 | mlog_errno(status); | |
7281 | goto bail; | |
7282 | } | |
83418978 MF |
7283 | |
7284 | ocfs2_extent_map_trunc(inode, new_highest_cpos); | |
7285 | ||
ccd979bd | 7286 | start: |
3a0782d0 MF |
7287 | /* |
7288 | * Check that we still have allocation to delete. | |
7289 | */ | |
7290 | if (OCFS2_I(inode)->ip_clusters == 0) { | |
7291 | status = 0; | |
7292 | goto bail; | |
7293 | } | |
7294 | ||
dcd0538f MF |
7295 | /* |
7296 | * Truncate always works against the rightmost tree branch. | |
7297 | */ | |
7298 | status = ocfs2_find_path(inode, path, UINT_MAX); | |
7299 | if (status) { | |
7300 | mlog_errno(status); | |
7301 | goto bail; | |
ccd979bd MF |
7302 | } |
7303 | ||
dcd0538f MF |
7304 | mlog(0, "inode->ip_clusters = %u, tree_depth = %u\n", |
7305 | OCFS2_I(inode)->ip_clusters, path->p_tree_depth); | |
7306 | ||
7307 | /* | |
7308 | * By now, el will point to the extent list on the bottom most | |
7309 | * portion of this tree. Only the tail record is considered in | |
7310 | * each pass. | |
7311 | * | |
7312 | * We handle the following cases, in order: | |
7313 | * - empty extent: delete the remaining branch | |
7314 | * - remove the entire record | |
7315 | * - remove a partial record | |
7316 | * - no record needs to be removed (truncate has completed) | |
7317 | */ | |
7318 | el = path_leaf_el(path); | |
3a0782d0 MF |
7319 | if (le16_to_cpu(el->l_next_free_rec) == 0) { |
7320 | ocfs2_error(inode->i_sb, | |
7321 | "Inode %llu has empty extent block at %llu\n", | |
7322 | (unsigned long long)OCFS2_I(inode)->ip_blkno, | |
7323 | (unsigned long long)path_leaf_bh(path)->b_blocknr); | |
7324 | status = -EROFS; | |
7325 | goto bail; | |
7326 | } | |
7327 | ||
ccd979bd | 7328 | i = le16_to_cpu(el->l_next_free_rec) - 1; |
dcd0538f | 7329 | range = le32_to_cpu(el->l_recs[i].e_cpos) + |
e48edee2 | 7330 | ocfs2_rec_clusters(el, &el->l_recs[i]); |
dcd0538f MF |
7331 | if (i == 0 && ocfs2_is_empty_extent(&el->l_recs[i])) { |
7332 | clusters_to_del = 0; | |
7333 | } else if (le32_to_cpu(el->l_recs[i].e_cpos) >= new_highest_cpos) { | |
e48edee2 | 7334 | clusters_to_del = ocfs2_rec_clusters(el, &el->l_recs[i]); |
dcd0538f | 7335 | } else if (range > new_highest_cpos) { |
e48edee2 | 7336 | clusters_to_del = (ocfs2_rec_clusters(el, &el->l_recs[i]) + |
ccd979bd | 7337 | le32_to_cpu(el->l_recs[i].e_cpos)) - |
dcd0538f MF |
7338 | new_highest_cpos; |
7339 | } else { | |
7340 | status = 0; | |
7341 | goto bail; | |
7342 | } | |
ccd979bd | 7343 | |
dcd0538f MF |
7344 | mlog(0, "clusters_to_del = %u in this pass, tail blk=%llu\n", |
7345 | clusters_to_del, (unsigned long long)path_leaf_bh(path)->b_blocknr); | |
7346 | ||
1b1dcc1b | 7347 | mutex_lock(&tl_inode->i_mutex); |
ccd979bd MF |
7348 | tl_sem = 1; |
7349 | /* ocfs2_truncate_log_needs_flush guarantees us at least one | |
7350 | * record is free for use. If there isn't any, we flush to get | |
7351 | * an empty truncate log. */ | |
7352 | if (ocfs2_truncate_log_needs_flush(osb)) { | |
7353 | status = __ocfs2_flush_truncate_log(osb); | |
7354 | if (status < 0) { | |
7355 | mlog_errno(status); | |
7356 | goto bail; | |
7357 | } | |
7358 | } | |
7359 | ||
7360 | credits = ocfs2_calc_tree_trunc_credits(osb->sb, clusters_to_del, | |
dcd0538f MF |
7361 | (struct ocfs2_dinode *)fe_bh->b_data, |
7362 | el); | |
65eff9cc | 7363 | handle = ocfs2_start_trans(osb, credits); |
ccd979bd MF |
7364 | if (IS_ERR(handle)) { |
7365 | status = PTR_ERR(handle); | |
7366 | handle = NULL; | |
7367 | mlog_errno(status); | |
7368 | goto bail; | |
7369 | } | |
7370 | ||
dcd0538f MF |
7371 | status = ocfs2_do_truncate(osb, clusters_to_del, inode, fe_bh, handle, |
7372 | tc, path); | |
ccd979bd MF |
7373 | if (status < 0) { |
7374 | mlog_errno(status); | |
7375 | goto bail; | |
7376 | } | |
7377 | ||
1b1dcc1b | 7378 | mutex_unlock(&tl_inode->i_mutex); |
ccd979bd MF |
7379 | tl_sem = 0; |
7380 | ||
02dc1af4 | 7381 | ocfs2_commit_trans(osb, handle); |
ccd979bd MF |
7382 | handle = NULL; |
7383 | ||
dcd0538f MF |
7384 | ocfs2_reinit_path(path, 1); |
7385 | ||
7386 | /* | |
3a0782d0 MF |
7387 | * The check above will catch the case where we've truncated |
7388 | * away all allocation. | |
dcd0538f | 7389 | */ |
3a0782d0 MF |
7390 | goto start; |
7391 | ||
ccd979bd | 7392 | bail: |
ccd979bd MF |
7393 | |
7394 | ocfs2_schedule_truncate_log_flush(osb, 1); | |
7395 | ||
7396 | if (tl_sem) | |
1b1dcc1b | 7397 | mutex_unlock(&tl_inode->i_mutex); |
ccd979bd MF |
7398 | |
7399 | if (handle) | |
02dc1af4 | 7400 | ocfs2_commit_trans(osb, handle); |
ccd979bd | 7401 | |
59a5e416 MF |
7402 | ocfs2_run_deallocs(osb, &tc->tc_dealloc); |
7403 | ||
dcd0538f | 7404 | ocfs2_free_path(path); |
ccd979bd MF |
7405 | |
7406 | /* This will drop the ext_alloc cluster lock for us */ | |
7407 | ocfs2_free_truncate_context(tc); | |
7408 | ||
7409 | mlog_exit(status); | |
7410 | return status; | |
7411 | } | |
7412 | ||
ccd979bd | 7413 | /* |
59a5e416 | 7414 | * Expects the inode to already be locked. |
ccd979bd MF |
7415 | */ |
7416 | int ocfs2_prepare_truncate(struct ocfs2_super *osb, | |
7417 | struct inode *inode, | |
7418 | struct buffer_head *fe_bh, | |
7419 | struct ocfs2_truncate_context **tc) | |
7420 | { | |
59a5e416 | 7421 | int status; |
ccd979bd MF |
7422 | unsigned int new_i_clusters; |
7423 | struct ocfs2_dinode *fe; | |
7424 | struct ocfs2_extent_block *eb; | |
ccd979bd | 7425 | struct buffer_head *last_eb_bh = NULL; |
ccd979bd MF |
7426 | |
7427 | mlog_entry_void(); | |
7428 | ||
7429 | *tc = NULL; | |
7430 | ||
7431 | new_i_clusters = ocfs2_clusters_for_bytes(osb->sb, | |
7432 | i_size_read(inode)); | |
7433 | fe = (struct ocfs2_dinode *) fe_bh->b_data; | |
7434 | ||
7435 | mlog(0, "fe->i_clusters = %u, new_i_clusters = %u, fe->i_size =" | |
1ca1a111 MF |
7436 | "%llu\n", le32_to_cpu(fe->i_clusters), new_i_clusters, |
7437 | (unsigned long long)le64_to_cpu(fe->i_size)); | |
ccd979bd | 7438 | |
cd861280 | 7439 | *tc = kzalloc(sizeof(struct ocfs2_truncate_context), GFP_KERNEL); |
ccd979bd MF |
7440 | if (!(*tc)) { |
7441 | status = -ENOMEM; | |
7442 | mlog_errno(status); | |
7443 | goto bail; | |
7444 | } | |
59a5e416 | 7445 | ocfs2_init_dealloc_ctxt(&(*tc)->tc_dealloc); |
ccd979bd | 7446 | |
ccd979bd | 7447 | if (fe->id2.i_list.l_tree_depth) { |
5e96581a JB |
7448 | status = ocfs2_read_extent_block(inode, |
7449 | le64_to_cpu(fe->i_last_eb_blk), | |
7450 | &last_eb_bh); | |
ccd979bd MF |
7451 | if (status < 0) { |
7452 | mlog_errno(status); | |
7453 | goto bail; | |
7454 | } | |
7455 | eb = (struct ocfs2_extent_block *) last_eb_bh->b_data; | |
ccd979bd MF |
7456 | } |
7457 | ||
7458 | (*tc)->tc_last_eb_bh = last_eb_bh; | |
7459 | ||
ccd979bd MF |
7460 | status = 0; |
7461 | bail: | |
7462 | if (status < 0) { | |
7463 | if (*tc) | |
7464 | ocfs2_free_truncate_context(*tc); | |
7465 | *tc = NULL; | |
7466 | } | |
7467 | mlog_exit_void(); | |
7468 | return status; | |
7469 | } | |
7470 | ||
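/*
 * Putting the pieces together, truncating an extent-based inode roughly
 * follows this sequence (illustrative; the real caller lives in the
 * file code and adds locking, quota and error handling around it):
 *
 *	ocfs2_prepare_truncate(osb, inode, fe_bh, &tc);
 *	ocfs2_zero_range_for_truncate(inode, handle, range_start,
 *				      range_end);
 *	ocfs2_commit_truncate(osb, inode, fe_bh, tc);
 *
 * ocfs2_commit_truncate() frees the context, so tc must not be touched
 * afterwards.
 */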
1afc32b9 MF |
7471 | /* |
7472 | * 'start' is inclusive, 'end' is not. | |
7473 | */ | |
7474 | int ocfs2_truncate_inline(struct inode *inode, struct buffer_head *di_bh, | |
7475 | unsigned int start, unsigned int end, int trunc) | |
7476 | { | |
7477 | int ret; | |
7478 | unsigned int numbytes; | |
7479 | handle_t *handle; | |
7480 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); | |
7481 | struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; | |
7482 | struct ocfs2_inline_data *idata = &di->id2.i_data; | |
7483 | ||
7484 | if (end > i_size_read(inode)) | |
7485 | end = i_size_read(inode); | |
7486 | ||
7487 | BUG_ON(start >= end); | |
7488 | ||
7489 | if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) || | |
7490 | !(le16_to_cpu(di->i_dyn_features) & OCFS2_INLINE_DATA_FL) || | |
7491 | !ocfs2_supports_inline_data(osb)) { | |
7492 | ocfs2_error(inode->i_sb, | |
7493 | "Inline data flags for inode %llu don't agree! " | |
7494 | "Disk: 0x%x, Memory: 0x%x, Superblock: 0x%x\n", | |
7495 | (unsigned long long)OCFS2_I(inode)->ip_blkno, | |
7496 | le16_to_cpu(di->i_dyn_features), | |
7497 | OCFS2_I(inode)->ip_dyn_features, | |
7498 | osb->s_feature_incompat); | |
7499 | ret = -EROFS; | |
7500 | goto out; | |
7501 | } | |
7502 | ||
7503 | handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); | |
7504 | if (IS_ERR(handle)) { | |
7505 | ret = PTR_ERR(handle); | |
7506 | mlog_errno(ret); | |
7507 | goto out; | |
7508 | } | |
7509 | ||
13723d00 JB |
7510 | ret = ocfs2_journal_access_di(handle, inode, di_bh, |
7511 | OCFS2_JOURNAL_ACCESS_WRITE); | |
1afc32b9 MF |
7512 | if (ret) { |
7513 | mlog_errno(ret); | |
7514 | goto out_commit; | |
7515 | } | |
7516 | ||
7517 | numbytes = end - start; | |
7518 | memset(idata->id_data + start, 0, numbytes); | |
7519 | ||
7520 | /* | |
7521 | * No need to worry about the data page here - it's been | |
7522 | * truncated already and inline data doesn't need it for | |
7523 | * pushing zeros to disk, so we'll let readpage pick it up | |
7524 | * later. | |
7525 | */ | |
7526 | if (trunc) { | |
7527 | i_size_write(inode, start); | |
7528 | di->i_size = cpu_to_le64(start); | |
7529 | } | |
7530 | ||
7531 | inode->i_blocks = ocfs2_inode_sector_count(inode); | |
7532 | inode->i_ctime = inode->i_mtime = CURRENT_TIME; | |
7533 | ||
7534 | di->i_ctime = di->i_mtime = cpu_to_le64(inode->i_ctime.tv_sec); | |
7535 | di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec); | |
7536 | ||
7537 | ocfs2_journal_dirty(handle, di_bh); | |
7538 | ||
7539 | out_commit: | |
7540 | ocfs2_commit_trans(osb, handle); | |
7541 | ||
7542 | out: | |
7543 | return ret; | |
7544 | } | |
7545 | ||
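/*
 * Example use of the inline truncate above (illustrative): shrinking an
 * inline-data inode from old_size down to new_size zeroes the dropped
 * bytes and publishes the new size in one short transaction:
 *
 *	ret = ocfs2_truncate_inline(inode, di_bh, new_size, old_size, 1);
 *	if (ret)
 *		mlog_errno(ret);
 *
 * With trunc == 0 only the byte range [start, end) is cleared and
 * i_size is left alone.
 */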
ccd979bd MF |
7546 | static void ocfs2_free_truncate_context(struct ocfs2_truncate_context *tc) |
7547 | { | |
59a5e416 MF |
7548 | /* |
7549 | * The caller is responsible for completing deallocation | |
7550 | * before freeing the context. | |
7551 | */ | |
7552 | if (tc->tc_dealloc.c_first_suballocator != NULL) | |
7553 | mlog(ML_NOTICE, | |
7554 | "Truncate completion has non-empty dealloc context\n"); | |
ccd979bd | 7555 | |
a81cb88b | 7556 | brelse(tc->tc_last_eb_bh); |
ccd979bd MF |
7557 | |
7558 | kfree(tc); | |
7559 | } |