/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * alloc.c
 *
 * Extent allocs and frees
 *
 * Copyright (C) 2002, 2004 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/swap.h>

#define MLOG_MASK_PREFIX ML_DISK_ALLOC
#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "aops.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "inode.h"
#include "journal.h"
#include "localalloc.h"
#include "suballoc.h"
#include "sysfile.h"
#include "file.h"
#include "super.h"
#include "uptodate.h"

#include "buffer_head_io.h"

static void ocfs2_free_truncate_context(struct ocfs2_truncate_context *tc);
static int ocfs2_cache_extent_block_free(struct ocfs2_cached_dealloc_ctxt *ctxt,
					 struct ocfs2_extent_block *eb);

/*
 * Structures which describe a path through a btree, and functions to
 * manipulate them.
 *
 * The idea here is to be as generic as possible with the tree
 * manipulation code.
 */
struct ocfs2_path_item {
	struct buffer_head		*bh;
	struct ocfs2_extent_list	*el;
};

#define OCFS2_MAX_PATH_DEPTH	5

struct ocfs2_path {
	int			p_tree_depth;
	struct ocfs2_path_item	p_node[OCFS2_MAX_PATH_DEPTH];
};

#define path_root_bh(_path) ((_path)->p_node[0].bh)
#define path_root_el(_path) ((_path)->p_node[0].el)
#define path_leaf_bh(_path) ((_path)->p_node[(_path)->p_tree_depth].bh)
#define path_leaf_el(_path) ((_path)->p_node[(_path)->p_tree_depth].el)
#define path_num_items(_path) ((_path)->p_tree_depth + 1)

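/*
 * Illustrative note (not from the original source): for a path built
 * against a tree of depth 2, p_node[0] holds the root (the dinode's
 * extent list), p_node[1] an interior extent block and p_node[2] the
 * leaf, so path_leaf_el(path) is path->p_node[2].el and
 * path_num_items() evaluates to 3.
 */
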
/*
 * Reset the actual path elements so that we can re-use the structure
 * to build another path. Generally, this involves freeing the buffer
 * heads.
 */
static void ocfs2_reinit_path(struct ocfs2_path *path, int keep_root)
{
	int i, start = 0, depth = 0;
	struct ocfs2_path_item *node;

	if (keep_root)
		start = 1;

	for(i = start; i < path_num_items(path); i++) {
		node = &path->p_node[i];

		brelse(node->bh);
		node->bh = NULL;
		node->el = NULL;
	}

	/*
	 * Tree depth may change during truncate, or insert. If we're
	 * keeping the root extent list, then make sure that our path
	 * structure reflects the proper depth.
	 */
	if (keep_root)
		depth = le16_to_cpu(path_root_el(path)->l_tree_depth);

	path->p_tree_depth = depth;
}

static void ocfs2_free_path(struct ocfs2_path *path)
{
	if (path) {
		ocfs2_reinit_path(path, 0);
		kfree(path);
	}
}

/*
 * Copy all the elements of src into dest. After this call, src could
 * be freed without affecting dest.
 *
 * Both paths should have the same root. Any non-root elements of dest
 * will be freed.
 */
static void ocfs2_cp_path(struct ocfs2_path *dest, struct ocfs2_path *src)
{
	int i;

	BUG_ON(path_root_bh(dest) != path_root_bh(src));
	BUG_ON(path_root_el(dest) != path_root_el(src));

	ocfs2_reinit_path(dest, 1);

	for(i = 1; i < OCFS2_MAX_PATH_DEPTH; i++) {
		dest->p_node[i].bh = src->p_node[i].bh;
		dest->p_node[i].el = src->p_node[i].el;

		if (dest->p_node[i].bh)
			get_bh(dest->p_node[i].bh);
	}
}

/*
 * Make the *dest path the same as src and re-initialize src path to
 * have a root only.
 */
static void ocfs2_mv_path(struct ocfs2_path *dest, struct ocfs2_path *src)
{
	int i;

	BUG_ON(path_root_bh(dest) != path_root_bh(src));

	for(i = 1; i < OCFS2_MAX_PATH_DEPTH; i++) {
		brelse(dest->p_node[i].bh);

		dest->p_node[i].bh = src->p_node[i].bh;
		dest->p_node[i].el = src->p_node[i].el;

		src->p_node[i].bh = NULL;
		src->p_node[i].el = NULL;
	}
}

/*
 * Insert an extent block at given index.
 *
 * This will not take an additional reference on eb_bh.
 */
static inline void ocfs2_path_insert_eb(struct ocfs2_path *path, int index,
					struct buffer_head *eb_bh)
{
	struct ocfs2_extent_block *eb = (struct ocfs2_extent_block *)eb_bh->b_data;

	/*
	 * Right now, no root bh is an extent block, so this helps
	 * catch code errors with dinode trees. The assertion can be
	 * safely removed if we ever need to insert extent block
	 * structures at the root.
	 */
	BUG_ON(index == 0);

	path->p_node[index].bh = eb_bh;
	path->p_node[index].el = &eb->h_list;
}

static struct ocfs2_path *ocfs2_new_path(struct buffer_head *root_bh,
					 struct ocfs2_extent_list *root_el)
{
	struct ocfs2_path *path;

	BUG_ON(le16_to_cpu(root_el->l_tree_depth) >= OCFS2_MAX_PATH_DEPTH);

	path = kzalloc(sizeof(*path), GFP_NOFS);
	if (path) {
		path->p_tree_depth = le16_to_cpu(root_el->l_tree_depth);
		get_bh(root_bh);
		path_root_bh(path) = root_bh;
		path_root_el(path) = root_el;
	}

	return path;
}

/*
 * Allocate and initialize a new path based on a disk inode tree.
 */
static struct ocfs2_path *ocfs2_new_inode_path(struct buffer_head *di_bh)
{
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
	struct ocfs2_extent_list *el = &di->id2.i_list;

	return ocfs2_new_path(di_bh, el);
}

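/*
 * Illustrative sketch (not from the original source): callers that
 * only need a path temporarily pair the constructor with
 * ocfs2_free_path(), which drops the extra root reference taken in
 * ocfs2_new_path():
 *
 *	struct ocfs2_path *path = ocfs2_new_inode_path(di_bh);
 *	if (!path)
 *		return -ENOMEM;
 *	... use the path ...
 *	ocfs2_free_path(path);
 */
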
/*
 * Convenience function to journal all components in a path.
 */
static int ocfs2_journal_access_path(struct inode *inode, handle_t *handle,
				     struct ocfs2_path *path)
{
	int i, ret = 0;

	if (!path)
		goto out;

	for(i = 0; i < path_num_items(path); i++) {
		ret = ocfs2_journal_access(handle, inode, path->p_node[i].bh,
					   OCFS2_JOURNAL_ACCESS_WRITE);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}
	}

out:
	return ret;
}

/*
 * Return the index of the extent record which contains cluster #v_cluster.
 * -1 is returned if it was not found.
 *
 * Should work fine on interior and exterior nodes.
 */
int ocfs2_search_extent_list(struct ocfs2_extent_list *el, u32 v_cluster)
{
	int ret = -1;
	int i;
	struct ocfs2_extent_rec *rec;
	u32 rec_end, rec_start, clusters;

	for(i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) {
		rec = &el->l_recs[i];

		rec_start = le32_to_cpu(rec->e_cpos);
		clusters = ocfs2_rec_clusters(el, rec);

		rec_end = rec_start + clusters;

		if (v_cluster >= rec_start && v_cluster < rec_end) {
			ret = i;
			break;
		}
	}

	return ret;
}

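/*
 * Worked example (illustrative, not from the original source): if
 * l_recs[2] has e_cpos == 100 and covers 8 clusters, then
 * ocfs2_search_extent_list(el, v) returns 2 for v between 100 and 107
 * inclusive; v == 108 falls outside the half-open range test above
 * and the scan continues, returning -1 if no later record covers it.
 */
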
enum ocfs2_contig_type {
	CONTIG_NONE = 0,
	CONTIG_LEFT,
	CONTIG_RIGHT,
	CONTIG_LEFTRIGHT,
};


/*
 * NOTE: ocfs2_block_extent_contig(), ocfs2_extents_adjacent() and
 * ocfs2_extent_contig only work properly against leaf nodes!
 */
static int ocfs2_block_extent_contig(struct super_block *sb,
				     struct ocfs2_extent_rec *ext,
				     u64 blkno)
{
	u64 blk_end = le64_to_cpu(ext->e_blkno);

	blk_end += ocfs2_clusters_to_blocks(sb,
					    le16_to_cpu(ext->e_leaf_clusters));

	return blkno == blk_end;
}

static int ocfs2_extents_adjacent(struct ocfs2_extent_rec *left,
				  struct ocfs2_extent_rec *right)
{
	u32 left_range;

	left_range = le32_to_cpu(left->e_cpos) +
		le16_to_cpu(left->e_leaf_clusters);

	return (left_range == le32_to_cpu(right->e_cpos));
}

static enum ocfs2_contig_type
	ocfs2_extent_contig(struct inode *inode,
			    struct ocfs2_extent_rec *ext,
			    struct ocfs2_extent_rec *insert_rec)
{
	u64 blkno = le64_to_cpu(insert_rec->e_blkno);

	/*
	 * Refuse to coalesce extent records with different flag
	 * fields - we don't want to mix unwritten extents with user
	 * data.
	 */
	if (ext->e_flags != insert_rec->e_flags)
		return CONTIG_NONE;

	if (ocfs2_extents_adjacent(ext, insert_rec) &&
	    ocfs2_block_extent_contig(inode->i_sb, ext, blkno))
		return CONTIG_RIGHT;

	blkno = le64_to_cpu(ext->e_blkno);
	if (ocfs2_extents_adjacent(insert_rec, ext) &&
	    ocfs2_block_extent_contig(inode->i_sb, insert_rec, blkno))
		return CONTIG_LEFT;

	return CONTIG_NONE;
}

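/*
 * Illustrative example (not from the original source): if ext covers
 * 5 clusters starting at cpos 10 and block B, then an insert_rec at
 * cpos 15 whose e_blkno equals B + ocfs2_clusters_to_blocks(sb, 5) is
 * both logically adjacent and physically contiguous, so this returns
 * CONTIG_RIGHT; with the roles reversed it returns CONTIG_LEFT.
 */
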
/*
 * NOTE: We can have pretty much any combination of contiguousness and
 * appending.
 *
 * The usefulness of APPEND_TAIL is more in that it lets us know that
 * we'll have to update the path to that leaf.
 */
enum ocfs2_append_type {
	APPEND_NONE = 0,
	APPEND_TAIL,
};

enum ocfs2_split_type {
	SPLIT_NONE = 0,
	SPLIT_LEFT,
	SPLIT_RIGHT,
};

struct ocfs2_insert_type {
	enum ocfs2_split_type	ins_split;
	enum ocfs2_append_type	ins_appending;
	enum ocfs2_contig_type	ins_contig;
	int			ins_contig_index;
	int			ins_tree_depth;
};

struct ocfs2_merge_ctxt {
	enum ocfs2_contig_type	c_contig_type;
	int			c_has_empty_extent;
	int			c_split_covers_rec;
};

/*
 * How many free extents have we got before we need more meta data?
 */
int ocfs2_num_free_extents(struct ocfs2_super *osb,
			   struct inode *inode,
			   struct ocfs2_dinode *fe)
{
	int retval;
	struct ocfs2_extent_list *el;
	struct ocfs2_extent_block *eb;
	struct buffer_head *eb_bh = NULL;

	mlog_entry_void();

	if (!OCFS2_IS_VALID_DINODE(fe)) {
		OCFS2_RO_ON_INVALID_DINODE(inode->i_sb, fe);
		retval = -EIO;
		goto bail;
	}

	if (fe->i_last_eb_blk) {
		retval = ocfs2_read_block(osb, le64_to_cpu(fe->i_last_eb_blk),
					  &eb_bh, OCFS2_BH_CACHED, inode);
		if (retval < 0) {
			mlog_errno(retval);
			goto bail;
		}
		eb = (struct ocfs2_extent_block *) eb_bh->b_data;
		el = &eb->h_list;
	} else
		el = &fe->id2.i_list;

	BUG_ON(el->l_tree_depth != 0);

	retval = le16_to_cpu(el->l_count) - le16_to_cpu(el->l_next_free_rec);
bail:
	if (eb_bh)
		brelse(eb_bh);

	mlog_exit(retval);
	return retval;
}

/* expects array to already be allocated
 *
 * sets h_signature, h_blkno, h_suballoc_bit, h_suballoc_slot, and
 * l_count for you
 */
static int ocfs2_create_new_meta_bhs(struct ocfs2_super *osb,
				     handle_t *handle,
				     struct inode *inode,
				     int wanted,
				     struct ocfs2_alloc_context *meta_ac,
				     struct buffer_head *bhs[])
{
	int count, status, i;
	u16 suballoc_bit_start;
	u32 num_got;
	u64 first_blkno;
	struct ocfs2_extent_block *eb;

	mlog_entry_void();

	count = 0;
	while (count < wanted) {
		status = ocfs2_claim_metadata(osb,
					      handle,
					      meta_ac,
					      wanted - count,
					      &suballoc_bit_start,
					      &num_got,
					      &first_blkno);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}

		for(i = count; i < (num_got + count); i++) {
			bhs[i] = sb_getblk(osb->sb, first_blkno);
			if (bhs[i] == NULL) {
				status = -EIO;
				mlog_errno(status);
				goto bail;
			}
			ocfs2_set_new_buffer_uptodate(inode, bhs[i]);

			status = ocfs2_journal_access(handle, inode, bhs[i],
						      OCFS2_JOURNAL_ACCESS_CREATE);
			if (status < 0) {
				mlog_errno(status);
				goto bail;
			}

			memset(bhs[i]->b_data, 0, osb->sb->s_blocksize);
			eb = (struct ocfs2_extent_block *) bhs[i]->b_data;
			/* Ok, setup the minimal stuff here. */
			strcpy(eb->h_signature, OCFS2_EXTENT_BLOCK_SIGNATURE);
			eb->h_blkno = cpu_to_le64(first_blkno);
			eb->h_fs_generation = cpu_to_le32(osb->fs_generation);
			eb->h_suballoc_slot = cpu_to_le16(osb->slot_num);
			eb->h_suballoc_bit = cpu_to_le16(suballoc_bit_start);
			eb->h_list.l_count =
				cpu_to_le16(ocfs2_extent_recs_per_eb(osb->sb));

			suballoc_bit_start++;
			first_blkno++;

			/* We'll also be dirtied by the caller, so
			 * this isn't absolutely necessary. */
			status = ocfs2_journal_dirty(handle, bhs[i]);
			if (status < 0) {
				mlog_errno(status);
				goto bail;
			}
		}

		count += num_got;
	}

	status = 0;
bail:
	if (status < 0) {
		for(i = 0; i < wanted; i++) {
			if (bhs[i])
				brelse(bhs[i]);
			bhs[i] = NULL;
		}
	}
	mlog_exit(status);
	return status;
}

/*
 * Helper function for ocfs2_add_branch() and ocfs2_shift_tree_depth().
 *
 * Returns the sum of the rightmost extent rec logical offset and
 * cluster count.
 *
 * ocfs2_add_branch() uses this to determine what logical cluster
 * value should be populated into the leftmost new branch records.
 *
 * ocfs2_shift_tree_depth() uses this to determine the # clusters
 * value for the new topmost tree record.
 */
static inline u32 ocfs2_sum_rightmost_rec(struct ocfs2_extent_list *el)
{
	int i;

	i = le16_to_cpu(el->l_next_free_rec) - 1;

	return le32_to_cpu(el->l_recs[i].e_cpos) +
		ocfs2_rec_clusters(el, &el->l_recs[i]);
}

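/*
 * Illustrative example (not from the original source): if the
 * rightmost record of el starts at cpos 200 and covers 50 clusters,
 * this returns 250 - the first logical cluster past the current
 * rightmost allocation, which is exactly the cpos that a new empty
 * branch record should carry.
 */
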
/*
 * Add an entire tree branch to our inode. eb_bh is the extent block
 * to start at, if we don't want to start the branch at the dinode
 * structure.
 *
 * last_eb_bh is required as we have to update its next_leaf pointer
 * for the new last extent block.
 *
 * the new branch will be 'empty' in the sense that every block will
 * contain a single record with cluster count == 0.
 */
static int ocfs2_add_branch(struct ocfs2_super *osb,
			    handle_t *handle,
			    struct inode *inode,
			    struct buffer_head *fe_bh,
			    struct buffer_head *eb_bh,
			    struct buffer_head **last_eb_bh,
			    struct ocfs2_alloc_context *meta_ac)
{
	int status, new_blocks, i;
	u64 next_blkno, new_last_eb_blk;
	struct buffer_head *bh;
	struct buffer_head **new_eb_bhs = NULL;
	struct ocfs2_dinode *fe;
	struct ocfs2_extent_block *eb;
	struct ocfs2_extent_list *eb_el;
	struct ocfs2_extent_list *el;
	u32 new_cpos;

	mlog_entry_void();

	BUG_ON(!last_eb_bh || !*last_eb_bh);

	fe = (struct ocfs2_dinode *) fe_bh->b_data;

	if (eb_bh) {
		eb = (struct ocfs2_extent_block *) eb_bh->b_data;
		el = &eb->h_list;
	} else
		el = &fe->id2.i_list;

	/* we never add a branch to a leaf. */
	BUG_ON(!el->l_tree_depth);

	new_blocks = le16_to_cpu(el->l_tree_depth);

	/* allocate the number of new eb blocks we need */
	new_eb_bhs = kcalloc(new_blocks, sizeof(struct buffer_head *),
			     GFP_KERNEL);
	if (!new_eb_bhs) {
		status = -ENOMEM;
		mlog_errno(status);
		goto bail;
	}

	status = ocfs2_create_new_meta_bhs(osb, handle, inode, new_blocks,
					   meta_ac, new_eb_bhs);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	eb = (struct ocfs2_extent_block *)(*last_eb_bh)->b_data;
	new_cpos = ocfs2_sum_rightmost_rec(&eb->h_list);

	/* Note: new_eb_bhs[new_blocks - 1] is the guy which will be
	 * linked with the rest of the tree.
	 * conversely, new_eb_bhs[0] is the new bottommost leaf.
	 *
	 * when we leave the loop, new_last_eb_blk will point to the
	 * newest leaf, and next_blkno will point to the topmost extent
	 * block. */
	next_blkno = new_last_eb_blk = 0;
	for(i = 0; i < new_blocks; i++) {
		bh = new_eb_bhs[i];
		eb = (struct ocfs2_extent_block *) bh->b_data;
		if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) {
			OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb);
			status = -EIO;
			goto bail;
		}
		eb_el = &eb->h_list;

		status = ocfs2_journal_access(handle, inode, bh,
					      OCFS2_JOURNAL_ACCESS_CREATE);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}

		eb->h_next_leaf_blk = 0;
		eb_el->l_tree_depth = cpu_to_le16(i);
		eb_el->l_next_free_rec = cpu_to_le16(1);
		/*
		 * This actually counts as an empty extent as
		 * c_clusters == 0
		 */
		eb_el->l_recs[0].e_cpos = cpu_to_le32(new_cpos);
		eb_el->l_recs[0].e_blkno = cpu_to_le64(next_blkno);
		/*
		 * eb_el isn't always an interior node, but even leaf
		 * nodes want a zero'd flags and reserved field so
		 * this gets the whole 32 bits regardless of use.
		 */
		eb_el->l_recs[0].e_int_clusters = cpu_to_le32(0);
		if (!eb_el->l_tree_depth)
			new_last_eb_blk = le64_to_cpu(eb->h_blkno);

		status = ocfs2_journal_dirty(handle, bh);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}

		next_blkno = le64_to_cpu(eb->h_blkno);
	}

	/* This is a bit hairy. We want to update up to three blocks
	 * here without leaving any of them in an inconsistent state
	 * in case of error. We don't have to worry about
	 * journal_dirty erroring as it won't unless we've aborted the
	 * handle (in which case we would never be here) so reserving
	 * the write with journal_access is all we need to do. */
	status = ocfs2_journal_access(handle, inode, *last_eb_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}
	status = ocfs2_journal_access(handle, inode, fe_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}
	if (eb_bh) {
		status = ocfs2_journal_access(handle, inode, eb_bh,
					      OCFS2_JOURNAL_ACCESS_WRITE);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}
	}

	/* Link the new branch into the rest of the tree (el will
	 * either be on the fe, or the extent block passed in. */
	i = le16_to_cpu(el->l_next_free_rec);
	el->l_recs[i].e_blkno = cpu_to_le64(next_blkno);
	el->l_recs[i].e_cpos = cpu_to_le32(new_cpos);
	el->l_recs[i].e_int_clusters = 0;
	le16_add_cpu(&el->l_next_free_rec, 1);

	/* fe needs a new last extent block pointer, as does the
	 * next_leaf on the previously last-extent-block. */
	fe->i_last_eb_blk = cpu_to_le64(new_last_eb_blk);

	eb = (struct ocfs2_extent_block *) (*last_eb_bh)->b_data;
	eb->h_next_leaf_blk = cpu_to_le64(new_last_eb_blk);

	status = ocfs2_journal_dirty(handle, *last_eb_bh);
	if (status < 0)
		mlog_errno(status);
	status = ocfs2_journal_dirty(handle, fe_bh);
	if (status < 0)
		mlog_errno(status);
	if (eb_bh) {
		status = ocfs2_journal_dirty(handle, eb_bh);
		if (status < 0)
			mlog_errno(status);
	}

	/*
	 * Some callers want to track the rightmost leaf so pass it
	 * back here.
	 */
	brelse(*last_eb_bh);
	get_bh(new_eb_bhs[0]);
	*last_eb_bh = new_eb_bhs[0];

	status = 0;
bail:
	if (new_eb_bhs) {
		for (i = 0; i < new_blocks; i++)
			if (new_eb_bhs[i])
				brelse(new_eb_bhs[i]);
		kfree(new_eb_bhs);
	}

	mlog_exit(status);
	return status;
}

/*
 * adds another level to the allocation tree.
 * returns back the new extent block so you can add a branch to it
 * after this call.
 */
static int ocfs2_shift_tree_depth(struct ocfs2_super *osb,
				  handle_t *handle,
				  struct inode *inode,
				  struct buffer_head *fe_bh,
				  struct ocfs2_alloc_context *meta_ac,
				  struct buffer_head **ret_new_eb_bh)
{
	int status, i;
	u32 new_clusters;
	struct buffer_head *new_eb_bh = NULL;
	struct ocfs2_dinode *fe;
	struct ocfs2_extent_block *eb;
	struct ocfs2_extent_list *fe_el;
	struct ocfs2_extent_list *eb_el;

	mlog_entry_void();

	status = ocfs2_create_new_meta_bhs(osb, handle, inode, 1, meta_ac,
					   &new_eb_bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	eb = (struct ocfs2_extent_block *) new_eb_bh->b_data;
	if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) {
		OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb);
		status = -EIO;
		goto bail;
	}

	eb_el = &eb->h_list;
	fe = (struct ocfs2_dinode *) fe_bh->b_data;
	fe_el = &fe->id2.i_list;

	status = ocfs2_journal_access(handle, inode, new_eb_bh,
				      OCFS2_JOURNAL_ACCESS_CREATE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	/* copy the fe data into the new extent block */
	eb_el->l_tree_depth = fe_el->l_tree_depth;
	eb_el->l_next_free_rec = fe_el->l_next_free_rec;
	for(i = 0; i < le16_to_cpu(fe_el->l_next_free_rec); i++)
		eb_el->l_recs[i] = fe_el->l_recs[i];

	status = ocfs2_journal_dirty(handle, new_eb_bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	status = ocfs2_journal_access(handle, inode, fe_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	new_clusters = ocfs2_sum_rightmost_rec(eb_el);

	/* update fe now */
	le16_add_cpu(&fe_el->l_tree_depth, 1);
	fe_el->l_recs[0].e_cpos = 0;
	fe_el->l_recs[0].e_blkno = eb->h_blkno;
	fe_el->l_recs[0].e_int_clusters = cpu_to_le32(new_clusters);
	for(i = 1; i < le16_to_cpu(fe_el->l_next_free_rec); i++)
		memset(&fe_el->l_recs[i], 0, sizeof(struct ocfs2_extent_rec));
	fe_el->l_next_free_rec = cpu_to_le16(1);

	/* If this is our 1st tree depth shift, then last_eb_blk
	 * becomes the allocated extent block */
	if (fe_el->l_tree_depth == cpu_to_le16(1))
		fe->i_last_eb_blk = eb->h_blkno;

	status = ocfs2_journal_dirty(handle, fe_bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	*ret_new_eb_bh = new_eb_bh;
	new_eb_bh = NULL;
	status = 0;
bail:
	if (new_eb_bh)
		brelse(new_eb_bh);

	mlog_exit(status);
	return status;
}

/*
 * Should only be called when there is no space left in any of the
 * leaf nodes. What we want to do is find the lowest tree depth
 * non-leaf extent block with room for new records. There are three
 * valid results of this search:
 *
 * 1) a lowest extent block is found, then we pass it back in
 *    *lowest_eb_bh and return '0'
 *
 * 2) the search fails to find anything, but the dinode has room. We
 *    pass NULL back in *lowest_eb_bh, but still return '0'
 *
 * 3) the search fails to find anything AND the dinode is full, in
 *    which case we return > 0
 *
 * return status < 0 indicates an error.
 */
static int ocfs2_find_branch_target(struct ocfs2_super *osb,
				    struct inode *inode,
				    struct buffer_head *fe_bh,
				    struct buffer_head **target_bh)
{
	int status = 0, i;
	u64 blkno;
	struct ocfs2_dinode *fe;
	struct ocfs2_extent_block *eb;
	struct ocfs2_extent_list *el;
	struct buffer_head *bh = NULL;
	struct buffer_head *lowest_bh = NULL;

	mlog_entry_void();

	*target_bh = NULL;

	fe = (struct ocfs2_dinode *) fe_bh->b_data;
	el = &fe->id2.i_list;

	while(le16_to_cpu(el->l_tree_depth) > 1) {
		if (le16_to_cpu(el->l_next_free_rec) == 0) {
			ocfs2_error(inode->i_sb, "Dinode %llu has empty "
				    "extent list (next_free_rec == 0)",
				    (unsigned long long)OCFS2_I(inode)->ip_blkno);
			status = -EIO;
			goto bail;
		}
		i = le16_to_cpu(el->l_next_free_rec) - 1;
		blkno = le64_to_cpu(el->l_recs[i].e_blkno);
		if (!blkno) {
			ocfs2_error(inode->i_sb, "Dinode %llu has extent "
				    "list where extent # %d has no physical "
				    "block start",
				    (unsigned long long)OCFS2_I(inode)->ip_blkno, i);
			status = -EIO;
			goto bail;
		}

		if (bh) {
			brelse(bh);
			bh = NULL;
		}

		status = ocfs2_read_block(osb, blkno, &bh, OCFS2_BH_CACHED,
					  inode);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}

		eb = (struct ocfs2_extent_block *) bh->b_data;
		if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) {
			OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb);
			status = -EIO;
			goto bail;
		}
		el = &eb->h_list;

		if (le16_to_cpu(el->l_next_free_rec) <
		    le16_to_cpu(el->l_count)) {
			if (lowest_bh)
				brelse(lowest_bh);
			lowest_bh = bh;
			get_bh(lowest_bh);
		}
	}

	/* If we didn't find one and the fe doesn't have any room,
	 * then return '1' */
	if (!lowest_bh
	    && (fe->id2.i_list.l_next_free_rec == fe->id2.i_list.l_count))
		status = 1;

	*target_bh = lowest_bh;
bail:
	if (bh)
		brelse(bh);

	mlog_exit(status);
	return status;
}

/*
 * Grow a b-tree so that it has more records.
 *
 * We might shift the tree depth in which case existing paths should
 * be considered invalid.
 *
 * Tree depth after the grow is returned via *final_depth.
 *
 * *last_eb_bh will be updated by ocfs2_add_branch().
 */
static int ocfs2_grow_tree(struct inode *inode, handle_t *handle,
			   struct buffer_head *di_bh, int *final_depth,
			   struct buffer_head **last_eb_bh,
			   struct ocfs2_alloc_context *meta_ac)
{
	int ret, shift;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
	int depth = le16_to_cpu(di->id2.i_list.l_tree_depth);
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct buffer_head *bh = NULL;

	BUG_ON(meta_ac == NULL);

	shift = ocfs2_find_branch_target(osb, inode, di_bh, &bh);
	if (shift < 0) {
		ret = shift;
		mlog_errno(ret);
		goto out;
	}

	/* We traveled all the way to the bottom of the allocation tree
	 * and didn't find room for any more extents - we need to add
	 * another tree level */
	if (shift) {
		BUG_ON(bh);
		mlog(0, "need to shift tree depth (current = %d)\n", depth);

		/* ocfs2_shift_tree_depth will return us a buffer with
		 * the new extent block (so we can pass that to
		 * ocfs2_add_branch). */
		ret = ocfs2_shift_tree_depth(osb, handle, inode, di_bh,
					     meta_ac, &bh);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}
		depth++;
		if (depth == 1) {
			/*
			 * Special case: we have room now if we shifted from
			 * tree_depth 0, so no more work needs to be done.
			 *
			 * We won't be calling add_branch, so pass
			 * back *last_eb_bh as the new leaf. At depth
			 * zero, it should always be null so there's
			 * no reason to brelse.
			 */
			BUG_ON(*last_eb_bh);
			get_bh(bh);
			*last_eb_bh = bh;
			goto out;
		}
	}

	/* call ocfs2_add_branch to add the final part of the tree with
	 * the new data. */
	mlog(0, "add branch. bh = %p\n", bh);
	ret = ocfs2_add_branch(osb, handle, inode, di_bh, bh, last_eb_bh,
			       meta_ac);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

out:
	if (final_depth)
		*final_depth = depth;
	brelse(bh);
	return ret;
}

/*
 * This is only valid for leaf nodes, which are the only ones that can
 * have empty extents anyway.
 */
static inline int ocfs2_is_empty_extent(struct ocfs2_extent_rec *rec)
{
	return !rec->e_leaf_clusters;
}

/*
 * This function will discard the rightmost extent record.
 */
static void ocfs2_shift_records_right(struct ocfs2_extent_list *el)
{
	int next_free = le16_to_cpu(el->l_next_free_rec);
	int count = le16_to_cpu(el->l_count);
	unsigned int num_bytes;

	BUG_ON(!next_free);
	/* This will cause us to go off the end of our extent list. */
	BUG_ON(next_free >= count);

	num_bytes = sizeof(struct ocfs2_extent_rec) * next_free;

	memmove(&el->l_recs[1], &el->l_recs[0], num_bytes);
}

static void ocfs2_rotate_leaf(struct ocfs2_extent_list *el,
			      struct ocfs2_extent_rec *insert_rec)
{
	int i, insert_index, next_free, has_empty, num_bytes;
	u32 insert_cpos = le32_to_cpu(insert_rec->e_cpos);
	struct ocfs2_extent_rec *rec;

	next_free = le16_to_cpu(el->l_next_free_rec);
	has_empty = ocfs2_is_empty_extent(&el->l_recs[0]);

	BUG_ON(!next_free);

	/* The tree code before us didn't allow enough room in the leaf. */
	BUG_ON(el->l_next_free_rec == el->l_count && !has_empty);

	/*
	 * The easiest way to approach this is to just remove the
	 * empty extent and temporarily decrement next_free.
	 */
	if (has_empty) {
		/*
		 * If next_free was 1 (only an empty extent), this
		 * loop won't execute, which is fine. We still want
		 * the decrement above to happen.
		 */
		for(i = 0; i < (next_free - 1); i++)
			el->l_recs[i] = el->l_recs[i+1];

		next_free--;
	}

	/*
	 * Figure out what the new record index should be.
	 */
	for(i = 0; i < next_free; i++) {
		rec = &el->l_recs[i];

		if (insert_cpos < le32_to_cpu(rec->e_cpos))
			break;
	}
	insert_index = i;

	mlog(0, "ins %u: index %d, has_empty %d, next_free %d, count %d\n",
	     insert_cpos, insert_index, has_empty, next_free, le16_to_cpu(el->l_count));

	BUG_ON(insert_index < 0);
	BUG_ON(insert_index >= le16_to_cpu(el->l_count));
	BUG_ON(insert_index > next_free);

	/*
	 * No need to memmove if we're just adding to the tail.
	 */
	if (insert_index != next_free) {
		BUG_ON(next_free >= le16_to_cpu(el->l_count));

		num_bytes = next_free - insert_index;
		num_bytes *= sizeof(struct ocfs2_extent_rec);
		memmove(&el->l_recs[insert_index + 1],
			&el->l_recs[insert_index],
			num_bytes);
	}

	/*
	 * Either we had an empty extent, and need to re-increment or
	 * there was no empty extent on a non full rightmost leaf node,
	 * in which case we still need to increment.
	 */
	next_free++;
	el->l_next_free_rec = cpu_to_le16(next_free);
	/*
	 * Make sure none of the math above just messed up our tree.
	 */
	BUG_ON(le16_to_cpu(el->l_next_free_rec) > le16_to_cpu(el->l_count));

	el->l_recs[insert_index] = *insert_rec;

}

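/*
 * Illustrative example (not from the original source): with
 * next_free == 3 and records at cpos 0 (empty), 100 and 300,
 * inserting a record at cpos 200 drops the empty slot, shifts the
 * cpos-300 record right and leaves the leaf as {100, 200, 300} with
 * next_free still 3.
 */
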
static void ocfs2_remove_empty_extent(struct ocfs2_extent_list *el)
{
	int size, num_recs = le16_to_cpu(el->l_next_free_rec);

	BUG_ON(num_recs == 0);

	if (ocfs2_is_empty_extent(&el->l_recs[0])) {
		num_recs--;
		size = num_recs * sizeof(struct ocfs2_extent_rec);
		memmove(&el->l_recs[0], &el->l_recs[1], size);
		memset(&el->l_recs[num_recs], 0,
		       sizeof(struct ocfs2_extent_rec));
		el->l_next_free_rec = cpu_to_le16(num_recs);
	}
}

/*
 * Create an empty extent record.
 *
 * l_next_free_rec may be updated.
 *
 * If an empty extent already exists do nothing.
 */
static void ocfs2_create_empty_extent(struct ocfs2_extent_list *el)
{
	int next_free = le16_to_cpu(el->l_next_free_rec);

	BUG_ON(le16_to_cpu(el->l_tree_depth) != 0);

	if (next_free == 0)
		goto set_and_inc;

	if (ocfs2_is_empty_extent(&el->l_recs[0]))
		return;

	mlog_bug_on_msg(el->l_count == el->l_next_free_rec,
			"Asked to create an empty extent in a full list:\n"
			"count = %u, tree depth = %u",
			le16_to_cpu(el->l_count),
			le16_to_cpu(el->l_tree_depth));

	ocfs2_shift_records_right(el);

set_and_inc:
	le16_add_cpu(&el->l_next_free_rec, 1);
	memset(&el->l_recs[0], 0, sizeof(struct ocfs2_extent_rec));
}

/*
 * For a rotation which involves two leaf nodes, the "root node" is
 * the lowest level tree node which contains a path to both leafs. This
 * resulting set of information can be used to form a complete "subtree"
 *
 * This function is passed two full paths from the dinode down to a
 * pair of adjacent leaves. Its task is to figure out which path
 * index contains the subtree root - this can be the root index itself
 * in a worst-case rotation.
 *
 * The array index of the subtree root is passed back.
 */
static int ocfs2_find_subtree_root(struct inode *inode,
				   struct ocfs2_path *left,
				   struct ocfs2_path *right)
{
	int i = 0;

	/*
	 * Check that the caller passed in two paths from the same tree.
	 */
	BUG_ON(path_root_bh(left) != path_root_bh(right));

	do {
		i++;

		/*
		 * The caller didn't pass two adjacent paths.
		 */
		mlog_bug_on_msg(i > left->p_tree_depth,
				"Inode %lu, left depth %u, right depth %u\n"
				"left leaf blk %llu, right leaf blk %llu\n",
				inode->i_ino, left->p_tree_depth,
				right->p_tree_depth,
				(unsigned long long)path_leaf_bh(left)->b_blocknr,
				(unsigned long long)path_leaf_bh(right)->b_blocknr);
	} while (left->p_node[i].bh->b_blocknr ==
		 right->p_node[i].bh->b_blocknr);

	return i - 1;
}

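/*
 * Illustrative example (not from the original source): for two
 * adjacent depth-3 paths which share the root and the node at index 1
 * but diverge at index 2, the loop above exits with i == 2 and the
 * function returns 1 - the index of the deepest node still common to
 * both paths, i.e. the subtree root.
 */
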
typedef void (path_insert_t)(void *, struct buffer_head *);

/*
 * Traverse a btree path in search of cpos, starting at root_el.
 *
 * This code can be called with a cpos larger than the tree, in which
 * case it will return the rightmost path.
 */
static int __ocfs2_find_path(struct inode *inode,
			     struct ocfs2_extent_list *root_el, u32 cpos,
			     path_insert_t *func, void *data)
{
	int i, ret = 0;
	u32 range;
	u64 blkno;
	struct buffer_head *bh = NULL;
	struct ocfs2_extent_block *eb;
	struct ocfs2_extent_list *el;
	struct ocfs2_extent_rec *rec;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	el = root_el;
	while (el->l_tree_depth) {
		if (le16_to_cpu(el->l_next_free_rec) == 0) {
			ocfs2_error(inode->i_sb,
				    "Inode %llu has empty extent list at "
				    "depth %u\n",
				    (unsigned long long)oi->ip_blkno,
				    le16_to_cpu(el->l_tree_depth));
			ret = -EROFS;
			goto out;

		}

		for(i = 0; i < le16_to_cpu(el->l_next_free_rec) - 1; i++) {
			rec = &el->l_recs[i];

			/*
			 * In the case that cpos is off the allocation
			 * tree, this should just wind up returning the
			 * rightmost record.
			 */
			range = le32_to_cpu(rec->e_cpos) +
				ocfs2_rec_clusters(el, rec);
			if (cpos >= le32_to_cpu(rec->e_cpos) && cpos < range)
				break;
		}

		blkno = le64_to_cpu(el->l_recs[i].e_blkno);
		if (blkno == 0) {
			ocfs2_error(inode->i_sb,
				    "Inode %llu has bad blkno in extent list "
				    "at depth %u (index %d)\n",
				    (unsigned long long)oi->ip_blkno,
				    le16_to_cpu(el->l_tree_depth), i);
			ret = -EROFS;
			goto out;
		}

		brelse(bh);
		bh = NULL;
		ret = ocfs2_read_block(OCFS2_SB(inode->i_sb), blkno,
				       &bh, OCFS2_BH_CACHED, inode);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		eb = (struct ocfs2_extent_block *) bh->b_data;
		el = &eb->h_list;
		if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) {
			OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb);
			ret = -EIO;
			goto out;
		}

		if (le16_to_cpu(el->l_next_free_rec) >
		    le16_to_cpu(el->l_count)) {
			ocfs2_error(inode->i_sb,
				    "Inode %llu has bad count in extent list "
				    "at block %llu (next free=%u, count=%u)\n",
				    (unsigned long long)oi->ip_blkno,
				    (unsigned long long)bh->b_blocknr,
				    le16_to_cpu(el->l_next_free_rec),
				    le16_to_cpu(el->l_count));
			ret = -EROFS;
			goto out;
		}

		if (func)
			func(data, bh);
	}

out:
	/*
	 * Catch any trailing bh that the loop didn't handle.
	 */
	brelse(bh);

	return ret;
}

/*
 * Given an initialized path (that is, it has a valid root extent
 * list), this function will traverse the btree in search of the path
 * which would contain cpos.
 *
 * The path traveled is recorded in the path structure.
 *
 * Note that this will not do any comparisons on leaf node extent
 * records, so it will work fine in the case that we just added a tree
 * branch.
 */
struct find_path_data {
	int index;
	struct ocfs2_path *path;
};
static void find_path_ins(void *data, struct buffer_head *bh)
{
	struct find_path_data *fp = data;

	get_bh(bh);
	ocfs2_path_insert_eb(fp->path, fp->index, bh);
	fp->index++;
}
static int ocfs2_find_path(struct inode *inode, struct ocfs2_path *path,
			   u32 cpos)
{
	struct find_path_data data;

	data.index = 1;
	data.path = path;
	return __ocfs2_find_path(inode, path_root_el(path), cpos,
				 find_path_ins, &data);
}

static void find_leaf_ins(void *data, struct buffer_head *bh)
{
	struct ocfs2_extent_block *eb = (struct ocfs2_extent_block *)bh->b_data;
	struct ocfs2_extent_list *el = &eb->h_list;
	struct buffer_head **ret = data;

	/* We want to retain only the leaf block. */
	if (le16_to_cpu(el->l_tree_depth) == 0) {
		get_bh(bh);
		*ret = bh;
	}
}
/*
 * Find the leaf block in the tree which would contain cpos. No
 * checking of the actual leaf is done.
 *
 * Some paths want to call this instead of allocating a path structure
 * and calling ocfs2_find_path().
 *
 * This function doesn't handle non btree extent lists.
 */
int ocfs2_find_leaf(struct inode *inode, struct ocfs2_extent_list *root_el,
		    u32 cpos, struct buffer_head **leaf_bh)
{
	int ret;
	struct buffer_head *bh = NULL;

	ret = __ocfs2_find_path(inode, root_el, cpos, find_leaf_ins, &bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	*leaf_bh = bh;
out:
	return ret;
}

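/*
 * Illustrative sketch (not from the original source): a typical walk
 * from an already-read inode buffer down to the leaf covering a given
 * cpos combines the helpers above:
 *
 *	struct ocfs2_path *path = ocfs2_new_inode_path(di_bh);
 *	if (!path)
 *		return -ENOMEM;
 *	ret = ocfs2_find_path(inode, path, cpos);
 *	if (ret == 0)
 *		el = path_leaf_el(path);
 *	...
 *	ocfs2_free_path(path);
 */
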
/*
 * Adjust the adjacent records (left_rec, right_rec) involved in a rotation.
 *
 * Basically, we've moved stuff around at the bottom of the tree and
 * we need to fix up the extent records above the changes to reflect
 * the new changes.
 *
 * left_rec: the record on the left.
 * left_child_el: is the child list pointed to by left_rec
 * right_rec: the record to the right of left_rec
 * right_child_el: is the child list pointed to by right_rec
 *
 * By definition, this only works on interior nodes.
 */
static void ocfs2_adjust_adjacent_records(struct ocfs2_extent_rec *left_rec,
					  struct ocfs2_extent_list *left_child_el,
					  struct ocfs2_extent_rec *right_rec,
					  struct ocfs2_extent_list *right_child_el)
{
	u32 left_clusters, right_end;

	/*
	 * Interior nodes never have holes. Their cpos is the cpos of
	 * the leftmost record in their child list. Their cluster
	 * count covers the full theoretical range of their child list
	 * - the range between their cpos and the cpos of the record
	 * immediately to their right.
	 */
	left_clusters = le32_to_cpu(right_child_el->l_recs[0].e_cpos);
	if (ocfs2_is_empty_extent(&right_child_el->l_recs[0])) {
		BUG_ON(le16_to_cpu(right_child_el->l_next_free_rec) <= 1);
		left_clusters = le32_to_cpu(right_child_el->l_recs[1].e_cpos);
	}
	left_clusters -= le32_to_cpu(left_rec->e_cpos);
	left_rec->e_int_clusters = cpu_to_le32(left_clusters);

	/*
	 * Calculate the rightmost cluster count boundary before
	 * moving cpos - we will need to adjust clusters after
	 * updating e_cpos to keep the same highest cluster count.
	 */
	right_end = le32_to_cpu(right_rec->e_cpos);
	right_end += le32_to_cpu(right_rec->e_int_clusters);

	right_rec->e_cpos = left_rec->e_cpos;
	le32_add_cpu(&right_rec->e_cpos, left_clusters);

	right_end -= le32_to_cpu(right_rec->e_cpos);
	right_rec->e_int_clusters = cpu_to_le32(right_end);
}

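/*
 * Worked example (illustrative, not from the original source):
 * suppose left_rec starts at cpos 0 and right_rec covers cpos 100
 * with 100 clusters, and after a leaf rotation the right child's
 * first non-empty record now begins at cpos 80. Then left_clusters
 * becomes 80, right_rec->e_cpos moves back to 80, and its
 * e_int_clusters grows to (100 + 100) - 80 = 120, so the two records
 * still cover cpos 0-199 with no hole between them.
 */
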
/*
 * Adjust the adjacent root node records involved in a
 * rotation. left_el_blkno is passed in as a key so that we can easily
 * find its index in the root list.
 */
static void ocfs2_adjust_root_records(struct ocfs2_extent_list *root_el,
				      struct ocfs2_extent_list *left_el,
				      struct ocfs2_extent_list *right_el,
				      u64 left_el_blkno)
{
	int i;

	BUG_ON(le16_to_cpu(root_el->l_tree_depth) <=
	       le16_to_cpu(left_el->l_tree_depth));

	for(i = 0; i < le16_to_cpu(root_el->l_next_free_rec) - 1; i++) {
		if (le64_to_cpu(root_el->l_recs[i].e_blkno) == left_el_blkno)
			break;
	}

	/*
	 * The path walking code should have never returned a root and
	 * two paths which are not adjacent.
	 */
	BUG_ON(i >= (le16_to_cpu(root_el->l_next_free_rec) - 1));

	ocfs2_adjust_adjacent_records(&root_el->l_recs[i], left_el,
				      &root_el->l_recs[i + 1], right_el);
}

/*
 * We've changed a leaf block (in right_path) and need to reflect that
 * change back up the subtree.
 *
 * This happens in multiple places:
 *   - When we've moved an extent record from the left path leaf to the right
 *     path leaf to make room for an empty extent in the left path leaf.
 *   - When our insert into the right path leaf is at the leftmost edge
 *     and requires an update of the path immediately to its left. This
 *     can occur at the end of some types of rotation and appending inserts.
 *   - When we've adjusted the last extent record in the left path leaf and the
 *     1st extent record in the right path leaf during cross extent block merge.
 */
static void ocfs2_complete_edge_insert(struct inode *inode, handle_t *handle,
				       struct ocfs2_path *left_path,
				       struct ocfs2_path *right_path,
				       int subtree_index)
{
	int ret, i, idx;
	struct ocfs2_extent_list *el, *left_el, *right_el;
	struct ocfs2_extent_rec *left_rec, *right_rec;
	struct buffer_head *root_bh = left_path->p_node[subtree_index].bh;

	/*
	 * Update the counts and position values within all the
	 * interior nodes to reflect the leaf rotation we just did.
	 *
	 * The root node is handled below the loop.
	 *
	 * We begin the loop with right_el and left_el pointing to the
	 * leaf lists and work our way up.
	 *
	 * NOTE: within this loop, left_el and right_el always refer
	 * to the *child* lists.
	 */
	left_el = path_leaf_el(left_path);
	right_el = path_leaf_el(right_path);
	for(i = left_path->p_tree_depth - 1; i > subtree_index; i--) {
		mlog(0, "Adjust records at index %u\n", i);

		/*
		 * One nice property of knowing that all of these
		 * nodes are below the root is that we only deal with
		 * the leftmost right node record and the rightmost
		 * left node record.
		 */
		el = left_path->p_node[i].el;
		idx = le16_to_cpu(left_el->l_next_free_rec) - 1;
		left_rec = &el->l_recs[idx];

		el = right_path->p_node[i].el;
		right_rec = &el->l_recs[0];

		ocfs2_adjust_adjacent_records(left_rec, left_el, right_rec,
					      right_el);

		ret = ocfs2_journal_dirty(handle, left_path->p_node[i].bh);
		if (ret)
			mlog_errno(ret);

		ret = ocfs2_journal_dirty(handle, right_path->p_node[i].bh);
		if (ret)
			mlog_errno(ret);

		/*
		 * Setup our list pointers now so that the current
		 * parents become children in the next iteration.
		 */
		left_el = left_path->p_node[i].el;
		right_el = right_path->p_node[i].el;
	}

	/*
	 * At the root node, adjust the two adjacent records which
	 * begin our path to the leaves.
	 */

	el = left_path->p_node[subtree_index].el;
	left_el = left_path->p_node[subtree_index + 1].el;
	right_el = right_path->p_node[subtree_index + 1].el;

	ocfs2_adjust_root_records(el, left_el, right_el,
				  left_path->p_node[subtree_index + 1].bh->b_blocknr);

	root_bh = left_path->p_node[subtree_index].bh;

	ret = ocfs2_journal_dirty(handle, root_bh);
	if (ret)
		mlog_errno(ret);
}

1533 | static int ocfs2_rotate_subtree_right(struct inode *inode, | |
1534 | handle_t *handle, | |
1535 | struct ocfs2_path *left_path, | |
1536 | struct ocfs2_path *right_path, | |
1537 | int subtree_index) | |
1538 | { | |
1539 | int ret, i; | |
1540 | struct buffer_head *right_leaf_bh; | |
1541 | struct buffer_head *left_leaf_bh = NULL; | |
1542 | struct buffer_head *root_bh; | |
1543 | struct ocfs2_extent_list *right_el, *left_el; | |
1544 | struct ocfs2_extent_rec move_rec; | |
1545 | ||
1546 | left_leaf_bh = path_leaf_bh(left_path); | |
1547 | left_el = path_leaf_el(left_path); | |
1548 | ||
1549 | if (left_el->l_next_free_rec != left_el->l_count) { | |
1550 | ocfs2_error(inode->i_sb, | |
1551 | "Inode %llu has non-full interior leaf node %llu" | |
1552 | "(next free = %u)", | |
1553 | (unsigned long long)OCFS2_I(inode)->ip_blkno, | |
1554 | (unsigned long long)left_leaf_bh->b_blocknr, | |
1555 | le16_to_cpu(left_el->l_next_free_rec)); | |
1556 | return -EROFS; | |
1557 | } | |
1558 | ||
1559 | /* | |
1560 | * This extent block may already have an empty record, so we | |
1561 | * return early if so. | |
1562 | */ | |
1563 | if (ocfs2_is_empty_extent(&left_el->l_recs[0])) | |
1564 | return 0; | |
1565 | ||
1566 | root_bh = left_path->p_node[subtree_index].bh; | |
1567 | BUG_ON(root_bh != right_path->p_node[subtree_index].bh); | |
1568 | ||
1569 | ret = ocfs2_journal_access(handle, inode, root_bh, | |
1570 | OCFS2_JOURNAL_ACCESS_WRITE); | |
1571 | if (ret) { | |
1572 | mlog_errno(ret); | |
1573 | goto out; | |
1574 | } | |
1575 | ||
1576 | for(i = subtree_index + 1; i < path_num_items(right_path); i++) { | |
1577 | ret = ocfs2_journal_access(handle, inode, | |
1578 | right_path->p_node[i].bh, | |
1579 | OCFS2_JOURNAL_ACCESS_WRITE); | |
1580 | if (ret) { | |
1581 | mlog_errno(ret); | |
1582 | goto out; | |
1583 | } | |
1584 | ||
1585 | ret = ocfs2_journal_access(handle, inode, | |
1586 | left_path->p_node[i].bh, | |
1587 | OCFS2_JOURNAL_ACCESS_WRITE); | |
1588 | if (ret) { | |
1589 | mlog_errno(ret); | |
1590 | goto out; | |
1591 | } | |
1592 | } | |
1593 | ||
1594 | right_leaf_bh = path_leaf_bh(right_path); | |
1595 | right_el = path_leaf_el(right_path); | |
1596 | ||
1597 | /* This is a code error, not a disk corruption. */ | |
1598 | mlog_bug_on_msg(!right_el->l_next_free_rec, "Inode %llu: Rotate fails " | |
1599 | "because rightmost leaf block %llu is empty\n", | |
1600 | (unsigned long long)OCFS2_I(inode)->ip_blkno, | |
1601 | (unsigned long long)right_leaf_bh->b_blocknr); | |
1602 | ||
1603 | ocfs2_create_empty_extent(right_el); | |
1604 | ||
1605 | ret = ocfs2_journal_dirty(handle, right_leaf_bh); | |
1606 | if (ret) { | |
1607 | mlog_errno(ret); | |
1608 | goto out; | |
1609 | } | |
1610 | ||
1611 | /* Do the copy now. */ | |
1612 | i = le16_to_cpu(left_el->l_next_free_rec) - 1; | |
1613 | move_rec = left_el->l_recs[i]; | |
1614 | right_el->l_recs[0] = move_rec; | |
1615 | ||
1616 | /* | |
1617 | * Clear out the record we just copied and shift everything | |
1618 | * over, leaving an empty extent in the left leaf. | |
1619 | * | |
1620 | * We temporarily subtract from next_free_rec so that the | |
1621 | * shift will lose the tail record (which is now defunct). | |
1622 | */ | |
1623 | le16_add_cpu(&left_el->l_next_free_rec, -1); | |
1624 | ocfs2_shift_records_right(left_el); | |
1625 | memset(&left_el->l_recs[0], 0, sizeof(struct ocfs2_extent_rec)); | |
1626 | le16_add_cpu(&left_el->l_next_free_rec, 1); | |
1627 | ||
1628 | ret = ocfs2_journal_dirty(handle, left_leaf_bh); | |
1629 | if (ret) { | |
1630 | mlog_errno(ret); | |
1631 | goto out; | |
1632 | } | |
1633 | ||
1634 | ocfs2_complete_edge_insert(inode, handle, left_path, right_path, | |
1635 | subtree_index); | |
1636 | ||
1637 | out: | |
1638 | return ret; | |
1639 | } | |
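
The heart of the subtree rotation above is a small record shuffle on the two leaves: the left leaf's tail record becomes slot 0 of the right leaf (which was just given an empty slot by ocfs2_create_empty_extent()), and the left leaf is shifted so that its own slot 0 holds the empty extent. A minimal host-side sketch of that shuffle, with a simplified record type standing in for ocfs2_extent_rec and no journaling or endianness handling; names and sizes are illustrative only.

#include <stdio.h>
#include <string.h>

struct rec { unsigned int cpos; unsigned int clusters; };

struct leaf {
	int next_free;			/* stands in for l_next_free_rec */
	struct rec recs[4];		/* stands in for l_recs[], l_count == 4 */
};

/* Move the tail record of a full 'left' leaf into slot 0 of 'right'
 * (assumed to hold a freshly created empty slot), then shift 'left' so
 * that its slot 0 becomes the empty record. */
static void rotate_leaf_right(struct leaf *left, struct leaf *right)
{
	int i = left->next_free - 1;

	right->recs[0] = left->recs[i];

	/* shift left's remaining records up one, dropping the defunct tail */
	memmove(&left->recs[1], &left->recs[0], i * sizeof(struct rec));
	memset(&left->recs[0], 0, sizeof(struct rec));
}

int main(void)
{
	struct leaf left  = { 4, { {0, 8}, {8, 8}, {16, 8}, {24, 8} } };
	struct leaf right = { 2, { {0, 0}, {32, 8} } };	/* slot 0 is empty */

	rotate_leaf_right(&left, &right);
	printf("right[0].cpos=%u left[1].cpos=%u\n",
	       right.recs[0].cpos, left.recs[1].cpos);	/* prints 24 and 0 */
	return 0;
}
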
1640 | ||
1641 | /* | |
1642 | * Given a full path, determine what cpos value would return us a path | |
1643 | * containing the leaf immediately to the left of the current one. | |
1644 | * | |
1645 | * Will return zero if the path passed in is already the leftmost path. | |
1646 | */ | |
1647 | static int ocfs2_find_cpos_for_left_leaf(struct super_block *sb, | |
1648 | struct ocfs2_path *path, u32 *cpos) | |
1649 | { | |
1650 | int i, j, ret = 0; | |
1651 | u64 blkno; | |
1652 | struct ocfs2_extent_list *el; | |
1653 | ||
e48edee2 MF |
1654 | BUG_ON(path->p_tree_depth == 0); |
1655 | ||
dcd0538f MF |
1656 | *cpos = 0; |
1657 | ||
1658 | blkno = path_leaf_bh(path)->b_blocknr; | |
1659 | ||
1660 | /* Start at the tree node just above the leaf and work our way up. */ | |
1661 | i = path->p_tree_depth - 1; | |
1662 | while (i >= 0) { | |
1663 | el = path->p_node[i].el; | |
1664 | ||
1665 | /* | |
1666 | * Find the extent record just before the one in our | |
1667 | * path. | |
1668 | */ | |
1669 | for(j = 0; j < le16_to_cpu(el->l_next_free_rec); j++) { | |
1670 | if (le64_to_cpu(el->l_recs[j].e_blkno) == blkno) { | |
1671 | if (j == 0) { | |
1672 | if (i == 0) { | |
1673 | /* | |
1674 | * We've determined that the | |
1675 | * path specified is already | |
1676 | * the leftmost one - return a | |
1677 | * cpos of zero. | |
1678 | */ | |
1679 | goto out; | |
1680 | } | |
1681 | /* | |
1682 | * The leftmost record points to our | |
1683 | * leaf - we need to travel up the | |
1684 | * tree one level. | |
1685 | */ | |
1686 | goto next_node; | |
1687 | } | |
1688 | ||
1689 | *cpos = le32_to_cpu(el->l_recs[j - 1].e_cpos); | |
e48edee2 MF |
1690 | *cpos = *cpos + ocfs2_rec_clusters(el, |
1691 | &el->l_recs[j - 1]); | |
1692 | *cpos = *cpos - 1; | |
dcd0538f MF |
1693 | goto out; |
1694 | } | |
1695 | } | |
1696 | ||
1697 | /* | |
1698 | * If we got here, we never found a valid node where | |
1699 | * the tree indicated one should be. | |
1700 | */ | |
1701 | ocfs2_error(sb, | |
1702 | "Invalid extent tree at extent block %llu\n", | |
1703 | (unsigned long long)blkno); | |
1704 | ret = -EROFS; | |
1705 | goto out; | |
1706 | ||
1707 | next_node: | |
1708 | blkno = path->p_node[i].bh->b_blocknr; | |
1709 | i--; | |
1710 | } | |
1711 | ||
1712 | out: | |
1713 | return ret; | |
1714 | } | |
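
Once the walk above finds a level where our block is not the leftmost child, the left neighbour is named by a single computation: the last cluster covered by the record just before ours. A tiny host-side sketch of that arithmetic, with plain integers standing in for the little-endian on-disk fields and an invented function name:

#include <stdio.h>

/* cpos of the last cluster covered by the record preceding ours at this
 * level; asking ocfs2_find_path() for this cpos lands in the left leaf. */
static unsigned int left_leaf_cpos(unsigned int prev_cpos,
				   unsigned int prev_clusters)
{
	return prev_cpos + prev_clusters - 1;
}

int main(void)
{
	/* the previous sibling covers clusters [100, 116) */
	printf("%u\n", left_leaf_cpos(100, 16));	/* prints 115 */
	return 0;
}
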
1715 | ||
328d5752 MF |
1716 | /* |
1717 | * Extend the transaction by enough credits to complete the rotation, | |
1718 | * and still leave at least the original number of credits allocated | |
1719 | * to this transaction. | |
1720 | */ | |
dcd0538f | 1721 | static int ocfs2_extend_rotate_transaction(handle_t *handle, int subtree_depth, |
328d5752 | 1722 | int op_credits, |
dcd0538f MF |
1723 | struct ocfs2_path *path) |
1724 | { | |
328d5752 | 1725 | int credits = (path->p_tree_depth - subtree_depth) * 2 + 1 + op_credits; |
dcd0538f MF |
1726 | |
1727 | if (handle->h_buffer_credits < credits) | |
1728 | return ocfs2_extend_trans(handle, credits); | |
1729 | ||
1730 | return 0; | |
1731 | } | |
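
The credit estimate is mechanical: two buffers (one per path) for every level below the subtree root, one for the subtree root itself, plus whatever the caller's own operation still needs. A host-side sketch of just that arithmetic; no journal is involved and the names are invented:

#include <stdio.h>

static int rotate_credits(int tree_depth, int subtree_depth, int op_credits)
{
	return (tree_depth - subtree_depth) * 2 + 1 + op_credits;
}

int main(void)
{
	/* depth-3 tree, subtree root at level 1, caller reserves 4 credits */
	printf("%d\n", rotate_credits(3, 1, 4));	/* prints 9 */
	return 0;
}
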
1732 | ||
1733 | /* | |
1734 | * Trap the case where we're inserting into the theoretical range past | |
1735 | * the _actual_ left leaf range. Otherwise, we'll rotate a record | |
1736 | * whose cpos is less than ours into the right leaf. | |
1737 | * | |
1738 | * It's only necessary to look at the rightmost record of the left | |
1739 | * leaf because the logic that calls us should ensure that the | |
1740 | * theoretical ranges in the path components above the leaves are | |
1741 | * correct. | |
1742 | */ | |
1743 | static int ocfs2_rotate_requires_path_adjustment(struct ocfs2_path *left_path, | |
1744 | u32 insert_cpos) | |
1745 | { | |
1746 | struct ocfs2_extent_list *left_el; | |
1747 | struct ocfs2_extent_rec *rec; | |
1748 | int next_free; | |
1749 | ||
1750 | left_el = path_leaf_el(left_path); | |
1751 | next_free = le16_to_cpu(left_el->l_next_free_rec); | |
1752 | rec = &left_el->l_recs[next_free - 1]; | |
1753 | ||
1754 | if (insert_cpos > le32_to_cpu(rec->e_cpos)) | |
1755 | return 1; | |
1756 | return 0; | |
1757 | } | |
1758 | ||
328d5752 MF |
1759 | static int ocfs2_leftmost_rec_contains(struct ocfs2_extent_list *el, u32 cpos) |
1760 | { | |
1761 | int next_free = le16_to_cpu(el->l_next_free_rec); | |
1762 | unsigned int range; | |
1763 | struct ocfs2_extent_rec *rec; | |
1764 | ||
1765 | if (next_free == 0) | |
1766 | return 0; | |
1767 | ||
1768 | rec = &el->l_recs[0]; | |
1769 | if (ocfs2_is_empty_extent(rec)) { | |
1770 | /* Empty list. */ | |
1771 | if (next_free == 1) | |
1772 | return 0; | |
1773 | rec = &el->l_recs[1]; | |
1774 | } | |
1775 | ||
1776 | range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec); | |
1777 | if (cpos >= le32_to_cpu(rec->e_cpos) && cpos < range) | |
1778 | return 1; | |
1779 | return 0; | |
1780 | } | |
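
The containment test above is a half-open interval check against the first non-empty record: cpos is covered when e_cpos <= cpos < e_cpos + clusters. A trivial host-side sketch of the same predicate, with plain integers and an invented name:

#include <stdio.h>

static int rec_contains(unsigned int rec_cpos, unsigned int rec_clusters,
			unsigned int cpos)
{
	return cpos >= rec_cpos && cpos < rec_cpos + rec_clusters;
}

int main(void)
{
	printf("%d %d %d\n",
	       rec_contains(32, 8, 32),		/* 1: first cluster    */
	       rec_contains(32, 8, 39),		/* 1: last cluster     */
	       rec_contains(32, 8, 40));	/* 0: one past the end */
	return 0;
}
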
1781 | ||
dcd0538f MF |
1782 | /* |
1783 | * Rotate all the records in a btree right one record, starting at insert_cpos. | |
1784 | * | |
1785 | * The path to the rightmost leaf should be passed in. | |
1786 | * | |
1787 | * The array is assumed to be large enough to hold an entire path (tree depth). | |
1788 | * | |
1789 | * Upon successful return from this function: | 
1790 | * | |
1791 | * - The 'right_path' array will contain a path to the leaf block | |
1792 | * whose range contains insert_cpos. | 
1793 | * - That leaf block will have a single empty extent in list index 0. | |
1794 | * - In the case that the rotation requires a post-insert update, | |
1795 | * *ret_left_path will contain a valid path which can be passed to | |
1796 | * ocfs2_insert_path(). | |
1797 | */ | |
1798 | static int ocfs2_rotate_tree_right(struct inode *inode, | |
1799 | handle_t *handle, | |
328d5752 | 1800 | enum ocfs2_split_type split, |
dcd0538f MF |
1801 | u32 insert_cpos, |
1802 | struct ocfs2_path *right_path, | |
1803 | struct ocfs2_path **ret_left_path) | |
1804 | { | |
328d5752 | 1805 | int ret, start, orig_credits = handle->h_buffer_credits; |
dcd0538f MF |
1806 | u32 cpos; |
1807 | struct ocfs2_path *left_path = NULL; | |
1808 | ||
1809 | *ret_left_path = NULL; | |
1810 | ||
1811 | left_path = ocfs2_new_path(path_root_bh(right_path), | |
1812 | path_root_el(right_path)); | |
1813 | if (!left_path) { | |
1814 | ret = -ENOMEM; | |
1815 | mlog_errno(ret); | |
1816 | goto out; | |
1817 | } | |
1818 | ||
1819 | ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb, right_path, &cpos); | |
1820 | if (ret) { | |
1821 | mlog_errno(ret); | |
1822 | goto out; | |
1823 | } | |
1824 | ||
1825 | mlog(0, "Insert: %u, first left path cpos: %u\n", insert_cpos, cpos); | |
1826 | ||
1827 | /* | |
1828 | * What we want to do here is: | |
1829 | * | |
1830 | * 1) Start with the rightmost path. | |
1831 | * | |
1832 | * 2) Determine a path to the leaf block directly to the left | |
1833 | * of that leaf. | |
1834 | * | |
1835 | * 3) Determine the 'subtree root' - the lowest level tree node | |
1836 | * which contains a path to both leaves. | |
1837 | * | |
1838 | * 4) Rotate the subtree. | |
1839 | * | |
1840 | * 5) Find the next subtree by considering the left path to be | |
1841 | * the new right path. | |
1842 | * | |
1843 | * The check at the top of this while loop also accepts | |
1844 | * insert_cpos == cpos because cpos is only a _theoretical_ | |
1845 | * value to get us the left path - insert_cpos might very well | |
1846 | * be filling that hole. | |
1847 | * | |
1848 | * Stop at a cpos of '0' because we either started at the | |
1849 | * leftmost branch (i.e., a tree with one branch and a | |
1850 | * rotation inside of it), or we've gone as far as we can in | |
1851 | * rotating subtrees. | |
1852 | */ | |
1853 | while (cpos && insert_cpos <= cpos) { | |
1854 | mlog(0, "Rotating a tree: ins. cpos: %u, left path cpos: %u\n", | |
1855 | insert_cpos, cpos); | |
1856 | ||
1857 | ret = ocfs2_find_path(inode, left_path, cpos); | |
1858 | if (ret) { | |
1859 | mlog_errno(ret); | |
1860 | goto out; | |
1861 | } | |
1862 | ||
1863 | mlog_bug_on_msg(path_leaf_bh(left_path) == | |
1864 | path_leaf_bh(right_path), | |
1865 | "Inode %lu: error during insert of %u " | |
1866 | "(left path cpos %u) results in two identical " | |
1867 | "paths ending at %llu\n", | |
1868 | inode->i_ino, insert_cpos, cpos, | |
1869 | (unsigned long long) | |
1870 | path_leaf_bh(left_path)->b_blocknr); | |
1871 | ||
328d5752 MF |
1872 | if (split == SPLIT_NONE && |
1873 | ocfs2_rotate_requires_path_adjustment(left_path, | |
dcd0538f | 1874 | insert_cpos)) { |
dcd0538f MF |
1875 | |
1876 | /* | |
1877 | * We've rotated the tree as much as we | |
1878 | * should. The rest is up to | |
1879 | * ocfs2_insert_path() to complete, after the | |
1880 | * record insertion. We indicate this | |
1881 | * situation by returning the left path. | |
1882 | * | |
1883 | * The reason we don't adjust the records here | |
1884 | * before the record insert is that an error | |
1885 | * later might break the rule where a parent | |
1886 | * record e_cpos will reflect the actual | |
1887 | * e_cpos of the 1st nonempty record of the | |
1888 | * child list. | |
1889 | */ | |
1890 | *ret_left_path = left_path; | |
1891 | goto out_ret_path; | |
1892 | } | |
1893 | ||
1894 | start = ocfs2_find_subtree_root(inode, left_path, right_path); | |
1895 | ||
1896 | mlog(0, "Subtree root at index %d (blk %llu, depth %d)\n", | |
1897 | start, | |
1898 | (unsigned long long) right_path->p_node[start].bh->b_blocknr, | |
1899 | right_path->p_tree_depth); | |
1900 | ||
1901 | ret = ocfs2_extend_rotate_transaction(handle, start, | |
328d5752 | 1902 | orig_credits, right_path); |
dcd0538f MF |
1903 | if (ret) { |
1904 | mlog_errno(ret); | |
1905 | goto out; | |
1906 | } | |
1907 | ||
1908 | ret = ocfs2_rotate_subtree_right(inode, handle, left_path, | |
1909 | right_path, start); | |
1910 | if (ret) { | |
1911 | mlog_errno(ret); | |
1912 | goto out; | |
1913 | } | |
1914 | ||
328d5752 MF |
1915 | if (split != SPLIT_NONE && |
1916 | ocfs2_leftmost_rec_contains(path_leaf_el(right_path), | |
1917 | insert_cpos)) { | |
1918 | /* | |
1919 | * A rotate moves the rightmost left leaf | |
1920 | * record over to the leftmost right leaf | |
1921 | * slot. If we're doing an extent split | |
1922 | * instead of a real insert, then we have to | |
1923 | * check that the extent to be split wasn't | |
1924 | * just moved over. If it was, then we can | |
1925 | * exit here, passing left_path back - | |
1926 | * ocfs2_split_extent() is smart enough to | |
1927 | * search both leaves. | |
1928 | */ | |
1929 | *ret_left_path = left_path; | |
1930 | goto out_ret_path; | |
1931 | } | |
1932 | ||
dcd0538f MF |
1933 | /* |
1934 | * There is no need to re-read the next right path | |
1935 | * as we know that it'll be our current left | |
1936 | * path. Optimize by copying values instead. | |
1937 | */ | |
1938 | ocfs2_mv_path(right_path, left_path); | |
1939 | ||
1940 | ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb, right_path, | |
1941 | &cpos); | |
1942 | if (ret) { | |
1943 | mlog_errno(ret); | |
1944 | goto out; | |
1945 | } | |
1946 | } | |
1947 | ||
1948 | out: | |
1949 | ocfs2_free_path(left_path); | |
1950 | ||
1951 | out_ret_path: | |
1952 | return ret; | |
1953 | } | |
1954 | ||
328d5752 MF |
1955 | static void ocfs2_update_edge_lengths(struct inode *inode, handle_t *handle, |
1956 | struct ocfs2_path *path) | |
dcd0538f | 1957 | { |
328d5752 | 1958 | int i, idx; |
dcd0538f | 1959 | struct ocfs2_extent_rec *rec; |
328d5752 MF |
1960 | struct ocfs2_extent_list *el; |
1961 | struct ocfs2_extent_block *eb; | |
1962 | u32 range; | |
dcd0538f | 1963 | |
328d5752 MF |
1964 | /* Path should always be rightmost. */ |
1965 | eb = (struct ocfs2_extent_block *)path_leaf_bh(path)->b_data; | |
1966 | BUG_ON(eb->h_next_leaf_blk != 0ULL); | |
dcd0538f | 1967 | |
328d5752 MF |
1968 | el = &eb->h_list; |
1969 | BUG_ON(le16_to_cpu(el->l_next_free_rec) == 0); | |
1970 | idx = le16_to_cpu(el->l_next_free_rec) - 1; | |
1971 | rec = &el->l_recs[idx]; | |
1972 | range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec); | |
dcd0538f | 1973 | |
328d5752 MF |
1974 | for (i = 0; i < path->p_tree_depth; i++) { |
1975 | el = path->p_node[i].el; | |
1976 | idx = le16_to_cpu(el->l_next_free_rec) - 1; | |
1977 | rec = &el->l_recs[idx]; | |
dcd0538f | 1978 | |
328d5752 MF |
1979 | rec->e_int_clusters = cpu_to_le32(range); |
1980 | le32_add_cpu(&rec->e_int_clusters, -le32_to_cpu(rec->e_cpos)); | |
dcd0538f | 1981 | |
328d5752 | 1982 | ocfs2_journal_dirty(handle, path->p_node[i].bh); |
dcd0538f | 1983 | } |
dcd0538f MF |
1984 | } |
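
After the rightmost branch changes, every interior record along the new rightmost edge has to cover exactly up to the end of the last leaf record, so its length becomes (range - e_cpos), where range is that leaf record's end. A small host-side sketch of the length fix-up, with simplified fields and no journaling:

#include <stdio.h>

/* New e_int_clusters for an interior edge record whose subtree now ends
 * at 'range' (end cpos of the last record in the rightmost leaf). */
static unsigned int edge_length(unsigned int rec_cpos, unsigned int range)
{
	return range - rec_cpos;
}

int main(void)
{
	/* last leaf record covers clusters [96, 104); the interior record
	 * on this level starts at cpos 64 */
	unsigned int range = 96 + 8;

	printf("%u\n", edge_length(64, range));		/* prints 40 */
	return 0;
}
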
1985 | ||
328d5752 MF |
1986 | static void ocfs2_unlink_path(struct inode *inode, handle_t *handle, |
1987 | struct ocfs2_cached_dealloc_ctxt *dealloc, | |
1988 | struct ocfs2_path *path, int unlink_start) | |
dcd0538f | 1989 | { |
328d5752 MF |
1990 | int ret, i; |
1991 | struct ocfs2_extent_block *eb; | |
1992 | struct ocfs2_extent_list *el; | |
1993 | struct buffer_head *bh; | |
1994 | ||
1995 | for(i = unlink_start; i < path_num_items(path); i++) { | |
1996 | bh = path->p_node[i].bh; | |
1997 | ||
1998 | eb = (struct ocfs2_extent_block *)bh->b_data; | |
1999 | /* | |
2000 | * Not all nodes might have had their final count | |
2001 | * decremented by the caller - handle this here. | |
2002 | */ | |
2003 | el = &eb->h_list; | |
2004 | if (le16_to_cpu(el->l_next_free_rec) > 1) { | |
2005 | mlog(ML_ERROR, | |
2006 | "Inode %llu, attempted to remove extent block " | |
2007 | "%llu with %u records\n", | |
2008 | (unsigned long long)OCFS2_I(inode)->ip_blkno, | |
2009 | (unsigned long long)le64_to_cpu(eb->h_blkno), | |
2010 | le16_to_cpu(el->l_next_free_rec)); | |
2011 | ||
2012 | ocfs2_journal_dirty(handle, bh); | |
2013 | ocfs2_remove_from_cache(inode, bh); | |
2014 | continue; | |
2015 | } | |
2016 | ||
2017 | el->l_next_free_rec = 0; | |
2018 | memset(&el->l_recs[0], 0, sizeof(struct ocfs2_extent_rec)); | |
2019 | ||
2020 | ocfs2_journal_dirty(handle, bh); | |
2021 | ||
2022 | ret = ocfs2_cache_extent_block_free(dealloc, eb); | |
2023 | if (ret) | |
2024 | mlog_errno(ret); | |
2025 | ||
2026 | ocfs2_remove_from_cache(inode, bh); | |
2027 | } | |
dcd0538f MF |
2028 | } |
2029 | ||
328d5752 MF |
2030 | static void ocfs2_unlink_subtree(struct inode *inode, handle_t *handle, |
2031 | struct ocfs2_path *left_path, | |
2032 | struct ocfs2_path *right_path, | |
2033 | int subtree_index, | |
2034 | struct ocfs2_cached_dealloc_ctxt *dealloc) | |
dcd0538f | 2035 | { |
328d5752 MF |
2036 | int i; |
2037 | struct buffer_head *root_bh = left_path->p_node[subtree_index].bh; | |
2038 | struct ocfs2_extent_list *root_el = left_path->p_node[subtree_index].el; | |
dcd0538f | 2039 | struct ocfs2_extent_list *el; |
328d5752 | 2040 | struct ocfs2_extent_block *eb; |
dcd0538f | 2041 | |
328d5752 | 2042 | el = path_leaf_el(left_path); |
dcd0538f | 2043 | |
328d5752 | 2044 | eb = (struct ocfs2_extent_block *)right_path->p_node[subtree_index + 1].bh->b_data; |
e48edee2 | 2045 | |
328d5752 MF |
2046 | for(i = 1; i < le16_to_cpu(root_el->l_next_free_rec); i++) |
2047 | if (root_el->l_recs[i].e_blkno == eb->h_blkno) | |
2048 | break; | |
dcd0538f | 2049 | |
328d5752 | 2050 | BUG_ON(i >= le16_to_cpu(root_el->l_next_free_rec)); |
dcd0538f | 2051 | |
328d5752 MF |
2052 | memset(&root_el->l_recs[i], 0, sizeof(struct ocfs2_extent_rec)); |
2053 | le16_add_cpu(&root_el->l_next_free_rec, -1); | |
2054 | ||
2055 | eb = (struct ocfs2_extent_block *)path_leaf_bh(left_path)->b_data; | |
2056 | eb->h_next_leaf_blk = 0; | |
2057 | ||
2058 | ocfs2_journal_dirty(handle, root_bh); | |
2059 | ocfs2_journal_dirty(handle, path_leaf_bh(left_path)); | |
2060 | ||
2061 | ocfs2_unlink_path(inode, handle, dealloc, right_path, | |
2062 | subtree_index + 1); | |
2063 | } | |
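
At the subtree root, unlinking the right branch is a record removal on the interior list: find the record whose blkno points at the right child, zero it, and drop l_next_free_rec by one; the branch below is then handed to ocfs2_unlink_path(), and the real code also clears h_next_leaf_blk and journals the buffers. A host-side sketch of that removal on a simplified list; names and sizes are illustrative:

#include <stdio.h>
#include <string.h>

struct rec { unsigned int cpos; unsigned long long blkno; };

struct list { int next_free; struct rec recs[4]; };

/* The search starts at index 1 because the right child always has a left
 * sibling in this parent. In this context the removed child is the last
 * record, so zero-and-shrink leaves no hole. */
static int unlink_child(struct list *el, unsigned long long child_blkno)
{
	for (int i = 1; i < el->next_free; i++) {
		if (el->recs[i].blkno == child_blkno) {
			memset(&el->recs[i], 0, sizeof(struct rec));
			el->next_free--;
			return 0;
		}
	}
	return -1;		/* not found: a BUG in the real code */
}

int main(void)
{
	struct list root = { 3, { {0, 100}, {64, 200}, {128, 300} } };

	unlink_child(&root, 300);
	printf("next_free=%d recs[2].blkno=%llu\n",
	       root.next_free, root.recs[2].blkno);	/* prints 2 and 0 */
	return 0;
}
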
2064 | ||
2065 | static int ocfs2_rotate_subtree_left(struct inode *inode, handle_t *handle, | |
2066 | struct ocfs2_path *left_path, | |
2067 | struct ocfs2_path *right_path, | |
2068 | int subtree_index, | |
2069 | struct ocfs2_cached_dealloc_ctxt *dealloc, | |
2070 | int *deleted) | |
2071 | { | |
2072 | int ret, i, del_right_subtree = 0, right_has_empty = 0; | |
2073 | struct buffer_head *root_bh, *di_bh = path_root_bh(right_path); | |
2074 | struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; | |
2075 | struct ocfs2_extent_list *right_leaf_el, *left_leaf_el; | |
2076 | struct ocfs2_extent_block *eb; | |
2077 | ||
2078 | *deleted = 0; | |
2079 | ||
2080 | right_leaf_el = path_leaf_el(right_path); | |
2081 | left_leaf_el = path_leaf_el(left_path); | |
2082 | root_bh = left_path->p_node[subtree_index].bh; | |
2083 | BUG_ON(root_bh != right_path->p_node[subtree_index].bh); | |
2084 | ||
2085 | if (!ocfs2_is_empty_extent(&left_leaf_el->l_recs[0])) | |
2086 | return 0; | |
dcd0538f | 2087 | |
328d5752 MF |
2088 | eb = (struct ocfs2_extent_block *)path_leaf_bh(right_path)->b_data; |
2089 | if (ocfs2_is_empty_extent(&right_leaf_el->l_recs[0])) { | |
dcd0538f | 2090 | /* |
328d5752 MF |
2091 | * It's legal for us to proceed if the right leaf is |
2092 | * the rightmost one and it has an empty extent. There | |
2093 | * are two cases to handle - whether the leaf will be | |
2094 | * empty after removal or not. If the leaf isn't empty | |
2095 | * then just remove the empty extent up front. The | |
2096 | * next block will handle empty leaves by flagging | |
2097 | * them for unlink. | |
2098 | * | |
2099 | * Non-rightmost leaves will return -EAGAIN and the | 
2100 | * caller can manually move the subtree and retry. | |
dcd0538f | 2101 | */ |
dcd0538f | 2102 | |
328d5752 MF |
2103 | if (eb->h_next_leaf_blk != 0ULL) |
2104 | return -EAGAIN; | |
2105 | ||
2106 | if (le16_to_cpu(right_leaf_el->l_next_free_rec) > 1) { | |
2107 | ret = ocfs2_journal_access(handle, inode, | |
2108 | path_leaf_bh(right_path), | |
2109 | OCFS2_JOURNAL_ACCESS_WRITE); | |
dcd0538f MF |
2110 | if (ret) { |
2111 | mlog_errno(ret); | |
2112 | goto out; | |
2113 | } | |
2114 | ||
328d5752 MF |
2115 | ocfs2_remove_empty_extent(right_leaf_el); |
2116 | } else | |
2117 | right_has_empty = 1; | |
dcd0538f MF |
2118 | } |
2119 | ||
328d5752 MF |
2120 | if (eb->h_next_leaf_blk == 0ULL && |
2121 | le16_to_cpu(right_leaf_el->l_next_free_rec) == 1) { | |
2122 | /* | |
2123 | * We have to update i_last_eb_blk during the meta | |
2124 | * data delete. | |
2125 | */ | |
2126 | ret = ocfs2_journal_access(handle, inode, di_bh, | |
2127 | OCFS2_JOURNAL_ACCESS_WRITE); | |
2128 | if (ret) { | |
2129 | mlog_errno(ret); | |
2130 | goto out; | |
2131 | } | |
2132 | ||
2133 | del_right_subtree = 1; | |
2134 | } | |
2135 | ||
2136 | /* | |
2137 | * Getting here with an empty extent in the right path implies | |
2138 | * that it's the rightmost path and will be deleted. | |
2139 | */ | |
2140 | BUG_ON(right_has_empty && !del_right_subtree); | |
2141 | ||
2142 | ret = ocfs2_journal_access(handle, inode, root_bh, | |
2143 | OCFS2_JOURNAL_ACCESS_WRITE); | |
2144 | if (ret) { | |
2145 | mlog_errno(ret); | |
2146 | goto out; | |
2147 | } | |
2148 | ||
2149 | for(i = subtree_index + 1; i < path_num_items(right_path); i++) { | |
2150 | ret = ocfs2_journal_access(handle, inode, | |
2151 | right_path->p_node[i].bh, | |
2152 | OCFS2_JOURNAL_ACCESS_WRITE); | |
2153 | if (ret) { | |
2154 | mlog_errno(ret); | |
2155 | goto out; | |
2156 | } | |
2157 | ||
2158 | ret = ocfs2_journal_access(handle, inode, | |
2159 | left_path->p_node[i].bh, | |
2160 | OCFS2_JOURNAL_ACCESS_WRITE); | |
2161 | if (ret) { | |
2162 | mlog_errno(ret); | |
2163 | goto out; | |
2164 | } | |
2165 | } | |
2166 | ||
2167 | if (!right_has_empty) { | |
2168 | /* | |
2169 | * Only do this if we're moving a real | |
2170 | * record. Otherwise, the action is delayed until | |
2171 | * after removal of the right path in which case we | |
2172 | * can do a simple shift to remove the empty extent. | |
2173 | */ | |
2174 | ocfs2_rotate_leaf(left_leaf_el, &right_leaf_el->l_recs[0]); | |
2175 | memset(&right_leaf_el->l_recs[0], 0, | |
2176 | sizeof(struct ocfs2_extent_rec)); | |
2177 | } | |
2178 | if (eb->h_next_leaf_blk == 0ULL) { | |
2179 | /* | |
2180 | * Move recs over to get rid of empty extent, decrease | |
2181 | * next_free. This is allowed to remove the last | |
2182 | * extent in our leaf (setting l_next_free_rec to | |
2183 | * zero) - the delete code below won't care. | |
2184 | */ | |
2185 | ocfs2_remove_empty_extent(right_leaf_el); | |
2186 | } | |
2187 | ||
2188 | ret = ocfs2_journal_dirty(handle, path_leaf_bh(left_path)); | |
2189 | if (ret) | |
2190 | mlog_errno(ret); | |
2191 | ret = ocfs2_journal_dirty(handle, path_leaf_bh(right_path)); | |
2192 | if (ret) | |
2193 | mlog_errno(ret); | |
2194 | ||
2195 | if (del_right_subtree) { | |
2196 | ocfs2_unlink_subtree(inode, handle, left_path, right_path, | |
2197 | subtree_index, dealloc); | |
2198 | ocfs2_update_edge_lengths(inode, handle, left_path); | |
2199 | ||
2200 | eb = (struct ocfs2_extent_block *)path_leaf_bh(left_path)->b_data; | |
2201 | di->i_last_eb_blk = eb->h_blkno; | |
2202 | ||
2203 | /* | |
2204 | * Removal of the extent in the left leaf was skipped | |
2205 | * above so we could delete the right path | |
2206 | * 1st. | |
2207 | */ | |
2208 | if (right_has_empty) | |
2209 | ocfs2_remove_empty_extent(left_leaf_el); | |
2210 | ||
2211 | ret = ocfs2_journal_dirty(handle, di_bh); | |
2212 | if (ret) | |
2213 | mlog_errno(ret); | |
2214 | ||
2215 | *deleted = 1; | |
2216 | } else | |
2217 | ocfs2_complete_edge_insert(inode, handle, left_path, right_path, | |
2218 | subtree_index); | |
2219 | ||
2220 | out: | |
2221 | return ret; | |
2222 | } | |
2223 | ||
2224 | /* | |
2225 | * Given a full path, determine what cpos value would return us a path | |
2226 | * containing the leaf immediately to the right of the current one. | |
2227 | * | |
2228 | * Will return zero if the path passed in is already the rightmost path. | |
2229 | * | |
2230 | * This looks similar, but is subtly different to | |
2231 | * ocfs2_find_cpos_for_left_leaf(). | |
2232 | */ | |
2233 | static int ocfs2_find_cpos_for_right_leaf(struct super_block *sb, | |
2234 | struct ocfs2_path *path, u32 *cpos) | |
2235 | { | |
2236 | int i, j, ret = 0; | |
2237 | u64 blkno; | |
2238 | struct ocfs2_extent_list *el; | |
2239 | ||
2240 | *cpos = 0; | |
2241 | ||
2242 | if (path->p_tree_depth == 0) | |
2243 | return 0; | |
2244 | ||
2245 | blkno = path_leaf_bh(path)->b_blocknr; | |
2246 | ||
2247 | /* Start at the tree node just above the leaf and work our way up. */ | |
2248 | i = path->p_tree_depth - 1; | |
2249 | while (i >= 0) { | |
2250 | int next_free; | |
2251 | ||
2252 | el = path->p_node[i].el; | |
2253 | ||
2254 | /* | |
2255 | * Find the extent record just after the one in our | |
2256 | * path. | |
2257 | */ | |
2258 | next_free = le16_to_cpu(el->l_next_free_rec); | |
2259 | for(j = 0; j < le16_to_cpu(el->l_next_free_rec); j++) { | |
2260 | if (le64_to_cpu(el->l_recs[j].e_blkno) == blkno) { | |
2261 | if (j == (next_free - 1)) { | |
2262 | if (i == 0) { | |
2263 | /* | |
2264 | * We've determined that the | |
2265 | * path specified is already | |
2266 | * the rightmost one - return a | |
2267 | * cpos of zero. | |
2268 | */ | |
2269 | goto out; | |
2270 | } | |
2271 | /* | |
2272 | * The rightmost record points to our | |
2273 | * leaf - we need to travel up the | |
2274 | * tree one level. | |
2275 | */ | |
2276 | goto next_node; | |
2277 | } | |
2278 | ||
2279 | *cpos = le32_to_cpu(el->l_recs[j + 1].e_cpos); | |
2280 | goto out; | |
2281 | } | |
2282 | } | |
2283 | ||
2284 | /* | |
2285 | * If we got here, we never found a valid node where | |
2286 | * the tree indicated one should be. | |
2287 | */ | |
2288 | ocfs2_error(sb, | |
2289 | "Invalid extent tree at extent block %llu\n", | |
2290 | (unsigned long long)blkno); | |
2291 | ret = -EROFS; | |
2292 | goto out; | |
2293 | ||
2294 | next_node: | |
2295 | blkno = path->p_node[i].bh->b_blocknr; | |
2296 | i--; | |
2297 | } | |
2298 | ||
2299 | out: | |
2300 | return ret; | |
2301 | } | |
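
The right-neighbour lookup mirrors ocfs2_find_cpos_for_left_leaf() but is simpler: once a level is found where our block is not the last child, the answer is just the e_cpos of the following record. A host-side sketch of one level of that search, with simplified records and invented names:

#include <stdio.h>

struct rec { unsigned int cpos; unsigned long long blkno; };

/* Report where the right sibling of 'blkno' starts at this level, or 0
 * if 'blkno' is already the last child (the real code then walks up). */
static unsigned int right_sibling_cpos(const struct rec *recs, int nr,
				       unsigned long long blkno)
{
	for (int j = 0; j < nr; j++)
		if (recs[j].blkno == blkno)
			return (j == nr - 1) ? 0 : recs[j + 1].cpos;
	return 0;
}

int main(void)
{
	struct rec level[3] = { {0, 100}, {64, 200}, {128, 300} };

	printf("%u\n", right_sibling_cpos(level, 3, 200));	/* 128 */
	printf("%u\n", right_sibling_cpos(level, 3, 300));	/* 0: rightmost */
	return 0;
}
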
2302 | ||
2303 | static int ocfs2_rotate_rightmost_leaf_left(struct inode *inode, | |
2304 | handle_t *handle, | |
2305 | struct buffer_head *bh, | |
2306 | struct ocfs2_extent_list *el) | |
2307 | { | |
2308 | int ret; | |
2309 | ||
2310 | if (!ocfs2_is_empty_extent(&el->l_recs[0])) | |
2311 | return 0; | |
2312 | ||
2313 | ret = ocfs2_journal_access(handle, inode, bh, | |
2314 | OCFS2_JOURNAL_ACCESS_WRITE); | |
2315 | if (ret) { | |
2316 | mlog_errno(ret); | |
2317 | goto out; | |
2318 | } | |
2319 | ||
2320 | ocfs2_remove_empty_extent(el); | |
2321 | ||
2322 | ret = ocfs2_journal_dirty(handle, bh); | |
2323 | if (ret) | |
2324 | mlog_errno(ret); | |
2325 | ||
2326 | out: | |
2327 | return ret; | |
2328 | } | |
2329 | ||
2330 | static int __ocfs2_rotate_tree_left(struct inode *inode, | |
2331 | handle_t *handle, int orig_credits, | |
2332 | struct ocfs2_path *path, | |
2333 | struct ocfs2_cached_dealloc_ctxt *dealloc, | |
2334 | struct ocfs2_path **empty_extent_path) | |
2335 | { | |
2336 | int ret, subtree_root, deleted; | |
2337 | u32 right_cpos; | |
2338 | struct ocfs2_path *left_path = NULL; | |
2339 | struct ocfs2_path *right_path = NULL; | |
2340 | ||
2341 | BUG_ON(!ocfs2_is_empty_extent(&(path_leaf_el(path)->l_recs[0]))); | |
2342 | ||
2343 | *empty_extent_path = NULL; | |
2344 | ||
2345 | ret = ocfs2_find_cpos_for_right_leaf(inode->i_sb, path, | |
2346 | &right_cpos); | |
2347 | if (ret) { | |
2348 | mlog_errno(ret); | |
2349 | goto out; | |
2350 | } | |
2351 | ||
2352 | left_path = ocfs2_new_path(path_root_bh(path), | |
2353 | path_root_el(path)); | |
2354 | if (!left_path) { | |
2355 | ret = -ENOMEM; | |
2356 | mlog_errno(ret); | |
2357 | goto out; | |
2358 | } | |
2359 | ||
2360 | ocfs2_cp_path(left_path, path); | |
2361 | ||
2362 | right_path = ocfs2_new_path(path_root_bh(path), | |
2363 | path_root_el(path)); | |
2364 | if (!right_path) { | |
2365 | ret = -ENOMEM; | |
2366 | mlog_errno(ret); | |
2367 | goto out; | |
2368 | } | |
2369 | ||
2370 | while (right_cpos) { | |
2371 | ret = ocfs2_find_path(inode, right_path, right_cpos); | |
2372 | if (ret) { | |
2373 | mlog_errno(ret); | |
2374 | goto out; | |
2375 | } | |
2376 | ||
2377 | subtree_root = ocfs2_find_subtree_root(inode, left_path, | |
2378 | right_path); | |
2379 | ||
2380 | mlog(0, "Subtree root at index %d (blk %llu, depth %d)\n", | |
2381 | subtree_root, | |
2382 | (unsigned long long) | |
2383 | right_path->p_node[subtree_root].bh->b_blocknr, | |
2384 | right_path->p_tree_depth); | |
2385 | ||
2386 | ret = ocfs2_extend_rotate_transaction(handle, subtree_root, | |
2387 | orig_credits, left_path); | |
2388 | if (ret) { | |
2389 | mlog_errno(ret); | |
2390 | goto out; | |
2391 | } | |
2392 | ||
e8aed345 MF |
2393 | /* |
2394 | * Caller might still want to make changes to the | |
2395 | * tree root, so re-add it to the journal here. | |
2396 | */ | |
2397 | ret = ocfs2_journal_access(handle, inode, | |
2398 | path_root_bh(left_path), | |
2399 | OCFS2_JOURNAL_ACCESS_WRITE); | |
2400 | if (ret) { | |
2401 | mlog_errno(ret); | |
2402 | goto out; | |
2403 | } | |
2404 | ||
328d5752 MF |
2405 | ret = ocfs2_rotate_subtree_left(inode, handle, left_path, |
2406 | right_path, subtree_root, | |
2407 | dealloc, &deleted); | |
2408 | if (ret == -EAGAIN) { | |
2409 | /* | |
2410 | * The rotation has to temporarily stop due to | |
2411 | * the right subtree having an empty | |
2412 | * extent. Pass it back to the caller for a | |
2413 | * fixup. | |
2414 | */ | |
2415 | *empty_extent_path = right_path; | |
2416 | right_path = NULL; | |
2417 | goto out; | |
2418 | } | |
2419 | if (ret) { | |
2420 | mlog_errno(ret); | |
2421 | goto out; | |
2422 | } | |
2423 | ||
2424 | /* | |
2425 | * The subtree rotate might have removed records on | |
2426 | * the rightmost edge. If so, then rotation is | |
2427 | * complete. | |
2428 | */ | |
2429 | if (deleted) | |
2430 | break; | |
2431 | ||
2432 | ocfs2_mv_path(left_path, right_path); | |
2433 | ||
2434 | ret = ocfs2_find_cpos_for_right_leaf(inode->i_sb, left_path, | |
2435 | &right_cpos); | |
2436 | if (ret) { | |
2437 | mlog_errno(ret); | |
2438 | goto out; | |
2439 | } | |
2440 | } | |
2441 | ||
2442 | out: | |
2443 | ocfs2_free_path(right_path); | |
2444 | ocfs2_free_path(left_path); | |
2445 | ||
2446 | return ret; | |
2447 | } | |
2448 | ||
2449 | static int ocfs2_remove_rightmost_path(struct inode *inode, handle_t *handle, | |
2450 | struct ocfs2_path *path, | |
2451 | struct ocfs2_cached_dealloc_ctxt *dealloc) | |
2452 | { | |
2453 | int ret, subtree_index; | |
2454 | u32 cpos; | |
2455 | struct ocfs2_path *left_path = NULL; | |
2456 | struct ocfs2_dinode *di; | |
2457 | struct ocfs2_extent_block *eb; | |
2458 | struct ocfs2_extent_list *el; | |
2459 | ||
2460 | /* | |
2461 | * XXX: This code assumes that the root is an inode, which is | |
2462 | * true for now but may change as tree code gets generic. | |
2463 | */ | |
2464 | di = (struct ocfs2_dinode *)path_root_bh(path)->b_data; | |
2465 | if (!OCFS2_IS_VALID_DINODE(di)) { | |
2466 | ret = -EIO; | |
2467 | ocfs2_error(inode->i_sb, | |
2468 | "Inode %llu has invalid path root", | |
2469 | (unsigned long long)OCFS2_I(inode)->ip_blkno); | |
2470 | goto out; | |
2471 | } | |
2472 | ||
2473 | /* | |
2474 | * There are two ways we handle this, depending on | 
2475 | * whether path is the only existing one. | |
2476 | */ | |
2477 | ret = ocfs2_extend_rotate_transaction(handle, 0, | |
2478 | handle->h_buffer_credits, | |
2479 | path); | |
2480 | if (ret) { | |
2481 | mlog_errno(ret); | |
2482 | goto out; | |
2483 | } | |
2484 | ||
2485 | ret = ocfs2_journal_access_path(inode, handle, path); | |
2486 | if (ret) { | |
2487 | mlog_errno(ret); | |
2488 | goto out; | |
2489 | } | |
2490 | ||
2491 | ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb, path, &cpos); | |
2492 | if (ret) { | |
2493 | mlog_errno(ret); | |
2494 | goto out; | |
2495 | } | |
2496 | ||
2497 | if (cpos) { | |
2498 | /* | |
2499 | * We have a path to the left of this one - it needs | |
2500 | * an update too. | |
2501 | */ | |
2502 | left_path = ocfs2_new_path(path_root_bh(path), | |
2503 | path_root_el(path)); | |
2504 | if (!left_path) { | |
2505 | ret = -ENOMEM; | |
2506 | mlog_errno(ret); | |
2507 | goto out; | |
2508 | } | |
2509 | ||
2510 | ret = ocfs2_find_path(inode, left_path, cpos); | |
2511 | if (ret) { | |
2512 | mlog_errno(ret); | |
2513 | goto out; | |
2514 | } | |
2515 | ||
2516 | ret = ocfs2_journal_access_path(inode, handle, left_path); | |
2517 | if (ret) { | |
2518 | mlog_errno(ret); | |
2519 | goto out; | |
2520 | } | |
2521 | ||
2522 | subtree_index = ocfs2_find_subtree_root(inode, left_path, path); | |
2523 | ||
2524 | ocfs2_unlink_subtree(inode, handle, left_path, path, | |
2525 | subtree_index, dealloc); | |
2526 | ocfs2_update_edge_lengths(inode, handle, left_path); | |
2527 | ||
2528 | eb = (struct ocfs2_extent_block *)path_leaf_bh(left_path)->b_data; | |
2529 | di->i_last_eb_blk = eb->h_blkno; | |
2530 | } else { | |
2531 | /* | |
2532 | * 'path' is also the leftmost path which | |
2533 | * means it must be the only one. This gets | |
2534 | * handled differently because we want to | |
2535 | * revert the inode back to having extents | |
2536 | * in-line. | |
2537 | */ | |
2538 | ocfs2_unlink_path(inode, handle, dealloc, path, 1); | |
2539 | ||
2540 | el = &di->id2.i_list; | |
2541 | el->l_tree_depth = 0; | |
2542 | el->l_next_free_rec = 0; | |
2543 | memset(&el->l_recs[0], 0, sizeof(struct ocfs2_extent_rec)); | |
2544 | ||
2545 | di->i_last_eb_blk = 0; | |
2546 | } | |
2547 | ||
2548 | ocfs2_journal_dirty(handle, path_root_bh(path)); | |
2549 | ||
2550 | out: | |
2551 | ocfs2_free_path(left_path); | |
2552 | return ret; | |
2553 | } | |
2554 | ||
2555 | /* | |
2556 | * Left rotation of btree records. | |
2557 | * | |
2558 | * In many ways, this is (unsurprisingly) the opposite of right | |
2559 | * rotation. We start at some non-rightmost path containing an empty | |
2560 | * extent in the leaf block. The code works its way to the rightmost | |
2561 | * path by rotating records to the left in every subtree. | |
2562 | * | |
2563 | * This is used by any code which reduces the number of extent records | |
2564 | * in a leaf. After removal, an empty record should be placed in the | |
2565 | * leftmost list position. | |
2566 | * | |
2567 | * This won't handle a length update of the rightmost path records if | |
2568 | * the rightmost tree leaf record is removed so the caller is | |
2569 | * responsible for detecting and correcting that. | |
2570 | */ | |
2571 | static int ocfs2_rotate_tree_left(struct inode *inode, handle_t *handle, | |
2572 | struct ocfs2_path *path, | |
2573 | struct ocfs2_cached_dealloc_ctxt *dealloc) | |
2574 | { | |
2575 | int ret, orig_credits = handle->h_buffer_credits; | |
2576 | struct ocfs2_path *tmp_path = NULL, *restart_path = NULL; | |
2577 | struct ocfs2_extent_block *eb; | |
2578 | struct ocfs2_extent_list *el; | |
2579 | ||
2580 | el = path_leaf_el(path); | |
2581 | if (!ocfs2_is_empty_extent(&el->l_recs[0])) | |
2582 | return 0; | |
2583 | ||
2584 | if (path->p_tree_depth == 0) { | |
2585 | rightmost_no_delete: | |
2586 | /* | |
2587 | * In-inode extents. This is trivially handled, so do | |
2588 | * it up front. | |
2589 | */ | |
2590 | ret = ocfs2_rotate_rightmost_leaf_left(inode, handle, | |
2591 | path_leaf_bh(path), | |
2592 | path_leaf_el(path)); | |
2593 | if (ret) | |
2594 | mlog_errno(ret); | |
2595 | goto out; | |
2596 | } | |
2597 | ||
2598 | /* | |
2599 | * Handle the rightmost branch now. There are several cases: | 
2600 | * 1) simple rotation leaving records in there. That's trivial. | |
2601 | * 2) rotation requiring a branch delete - there's no more | |
2602 | * records left. Two cases of this: | |
2603 | * a) There are branches to the left. | |
2604 | * b) This is also the leftmost (the only) branch. | |
2605 | * | |
2606 | * 1) is handled via ocfs2_rotate_rightmost_leaf_left() | |
2607 | * 2a) we need the left branch so that we can update it with the unlink | |
2608 | * 2b) we need to bring the inode back to inline extents. | |
2609 | */ | |
2610 | ||
2611 | eb = (struct ocfs2_extent_block *)path_leaf_bh(path)->b_data; | |
2612 | el = &eb->h_list; | |
2613 | if (eb->h_next_leaf_blk == 0) { | |
2614 | /* | |
2615 | * This gets a bit tricky if we're going to delete the | |
2616 | * rightmost path. Get the other cases out of the way | |
2617 | * 1st. | |
2618 | */ | |
2619 | if (le16_to_cpu(el->l_next_free_rec) > 1) | |
2620 | goto rightmost_no_delete; | |
2621 | ||
2622 | if (le16_to_cpu(el->l_next_free_rec) == 0) { | |
2623 | ret = -EIO; | |
2624 | ocfs2_error(inode->i_sb, | |
2625 | "Inode %llu has empty extent block at %llu", | |
2626 | (unsigned long long)OCFS2_I(inode)->ip_blkno, | |
2627 | (unsigned long long)le64_to_cpu(eb->h_blkno)); | |
2628 | goto out; | |
2629 | } | |
2630 | ||
2631 | /* | |
2632 | * XXX: The caller cannot trust "path" any more after | 
2633 | * this as it will have been deleted. What do we do? | |
2634 | * | |
2635 | * In theory the rotate-for-merge code will never get | |
2636 | * here because it'll always ask for a rotate in a | |
2637 | * nonempty list. | |
2638 | */ | |
2639 | ||
2640 | ret = ocfs2_remove_rightmost_path(inode, handle, path, | |
2641 | dealloc); | |
2642 | if (ret) | |
2643 | mlog_errno(ret); | |
2644 | goto out; | |
2645 | } | |
2646 | ||
2647 | /* | |
2648 | * Now we can loop, remembering the path we get from -EAGAIN | |
2649 | * and restarting from there. | |
2650 | */ | |
2651 | try_rotate: | |
2652 | ret = __ocfs2_rotate_tree_left(inode, handle, orig_credits, path, | |
2653 | dealloc, &restart_path); | |
2654 | if (ret && ret != -EAGAIN) { | |
2655 | mlog_errno(ret); | |
2656 | goto out; | |
2657 | } | |
2658 | ||
2659 | while (ret == -EAGAIN) { | |
2660 | tmp_path = restart_path; | |
2661 | restart_path = NULL; | |
2662 | ||
2663 | ret = __ocfs2_rotate_tree_left(inode, handle, orig_credits, | |
2664 | tmp_path, dealloc, | |
2665 | &restart_path); | |
2666 | if (ret && ret != -EAGAIN) { | |
2667 | mlog_errno(ret); | |
2668 | goto out; | |
2669 | } | |
2670 | ||
2671 | ocfs2_free_path(tmp_path); | |
2672 | tmp_path = NULL; | |
2673 | ||
2674 | if (ret == 0) | |
2675 | goto try_rotate; | |
2676 | } | |
2677 | ||
2678 | out: | |
2679 | ocfs2_free_path(tmp_path); | |
2680 | ocfs2_free_path(restart_path); | |
2681 | return ret; | |
2682 | } | |
2683 | ||
2684 | static void ocfs2_cleanup_merge(struct ocfs2_extent_list *el, | |
2685 | int index) | |
2686 | { | |
2687 | struct ocfs2_extent_rec *rec = &el->l_recs[index]; | |
2688 | unsigned int size; | |
2689 | ||
2690 | if (rec->e_leaf_clusters == 0) { | |
2691 | /* | |
2692 | * We consumed all of the merged-from record. An empty | |
2693 | * extent cannot exist anywhere but the 1st array | |
2694 | * position, so move things over if the merged-from | |
2695 | * record doesn't occupy that position. | |
2696 | * | |
2697 | * This creates a new empty extent so the caller | |
2698 | * should be smart enough to have removed any existing | |
2699 | * ones. | |
2700 | */ | |
2701 | if (index > 0) { | |
2702 | BUG_ON(ocfs2_is_empty_extent(&el->l_recs[0])); | |
2703 | size = index * sizeof(struct ocfs2_extent_rec); | |
2704 | memmove(&el->l_recs[1], &el->l_recs[0], size); | |
2705 | } | |
2706 | ||
2707 | /* | |
2708 | * Always memset - the caller doesn't check whether it | |
2709 | * created an empty extent, so there could be junk in | |
2710 | * the other fields. | |
2711 | */ | |
2712 | memset(&el->l_recs[0], 0, sizeof(struct ocfs2_extent_rec)); | |
2713 | } | |
2714 | } | |
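
The invariant restored here is that an empty extent may only live in slot 0. When the merged-from record at 'index' has been consumed (zero clusters left), the records before it are shifted up one slot and slot 0 is zeroed. A host-side sketch of that shuffle with a simplified record type; illustrative only:

#include <stdio.h>
#include <string.h>

struct rec { unsigned int cpos; unsigned int clusters; };

static void cleanup_merge(struct rec *recs, int index)
{
	if (recs[index].clusters != 0)
		return;			/* record still has content */

	if (index > 0)			/* move the emptied slot to the front */
		memmove(&recs[1], &recs[0], index * sizeof(struct rec));
	memset(&recs[0], 0, sizeof(struct rec));
}

int main(void)
{
	struct rec recs[3] = { {0, 8}, {8, 0}, {8, 16} };	/* [1] consumed */

	cleanup_merge(recs, 1);
	printf("%u/%u %u/%u %u/%u\n",
	       recs[0].cpos, recs[0].clusters,	/* 0/0: the empty extent */
	       recs[1].cpos, recs[1].clusters,	/* 0/8: old slot 0       */
	       recs[2].cpos, recs[2].clusters);	/* 8/16: untouched       */
	return 0;
}
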
2715 | ||
677b9752 TM |
2716 | static int ocfs2_get_right_path(struct inode *inode, |
2717 | struct ocfs2_path *left_path, | |
2718 | struct ocfs2_path **ret_right_path) | |
2719 | { | |
2720 | int ret; | |
2721 | u32 right_cpos; | |
2722 | struct ocfs2_path *right_path = NULL; | |
2723 | struct ocfs2_extent_list *left_el; | |
2724 | ||
2725 | *ret_right_path = NULL; | |
2726 | ||
2727 | /* This function shouldn't be called for non-trees. */ | |
2728 | BUG_ON(left_path->p_tree_depth == 0); | |
2729 | ||
2730 | left_el = path_leaf_el(left_path); | |
2731 | BUG_ON(left_el->l_next_free_rec != left_el->l_count); | |
2732 | ||
2733 | ret = ocfs2_find_cpos_for_right_leaf(inode->i_sb, left_path, | |
2734 | &right_cpos); | |
2735 | if (ret) { | |
2736 | mlog_errno(ret); | |
2737 | goto out; | |
2738 | } | |
2739 | ||
2740 | /* This function shouldn't be called for the rightmost leaf. */ | |
2741 | BUG_ON(right_cpos == 0); | |
2742 | ||
2743 | right_path = ocfs2_new_path(path_root_bh(left_path), | |
2744 | path_root_el(left_path)); | |
2745 | if (!right_path) { | |
2746 | ret = -ENOMEM; | |
2747 | mlog_errno(ret); | |
2748 | goto out; | |
2749 | } | |
2750 | ||
2751 | ret = ocfs2_find_path(inode, right_path, right_cpos); | |
2752 | if (ret) { | |
2753 | mlog_errno(ret); | |
2754 | goto out; | |
2755 | } | |
2756 | ||
2757 | *ret_right_path = right_path; | |
2758 | out: | |
2759 | if (ret) | |
2760 | ocfs2_free_path(right_path); | |
2761 | return ret; | |
2762 | } | |
2763 | ||
328d5752 MF |
2764 | /* |
2765 | * Remove split_rec clusters from the record at index and merge them | |
677b9752 TM |
2766 | * onto the beginning of the record "next" to it. |
2767 | * For index < l_count - 1, the "next" means the extent rec at index + 1. | 
2768 | * For index == l_count - 1, the "next" means the 1st extent rec of the | |
2769 | * next extent block. | |
328d5752 | 2770 | */ |
677b9752 TM |
2771 | static int ocfs2_merge_rec_right(struct inode *inode, |
2772 | struct ocfs2_path *left_path, | |
2773 | handle_t *handle, | |
2774 | struct ocfs2_extent_rec *split_rec, | |
2775 | int index) | |
328d5752 | 2776 | { |
677b9752 | 2777 | int ret, next_free, i; |
328d5752 MF |
2778 | unsigned int split_clusters = le16_to_cpu(split_rec->e_leaf_clusters); |
2779 | struct ocfs2_extent_rec *left_rec; | |
2780 | struct ocfs2_extent_rec *right_rec; | |
677b9752 TM |
2781 | struct ocfs2_extent_list *right_el; |
2782 | struct ocfs2_path *right_path = NULL; | |
2783 | int subtree_index = 0; | |
2784 | struct ocfs2_extent_list *el = path_leaf_el(left_path); | |
2785 | struct buffer_head *bh = path_leaf_bh(left_path); | |
2786 | struct buffer_head *root_bh = NULL; | |
328d5752 MF |
2787 | |
2788 | BUG_ON(index >= le16_to_cpu(el->l_next_free_rec)); | |
328d5752 | 2789 | left_rec = &el->l_recs[index]; |
677b9752 | 2790 | |
9d8df6aa | 2791 | if (index == le16_to_cpu(el->l_next_free_rec) - 1 && |
677b9752 TM |
2792 | le16_to_cpu(el->l_next_free_rec) == le16_to_cpu(el->l_count)) { |
2793 | /* We have a cross-extent-block merge. */ | 
2794 | ret = ocfs2_get_right_path(inode, left_path, &right_path); | |
2795 | if (ret) { | |
2796 | mlog_errno(ret); | |
2797 | goto out; | |
2798 | } | |
2799 | ||
2800 | right_el = path_leaf_el(right_path); | |
2801 | next_free = le16_to_cpu(right_el->l_next_free_rec); | |
2802 | BUG_ON(next_free <= 0); | |
2803 | right_rec = &right_el->l_recs[0]; | |
2804 | if (ocfs2_is_empty_extent(right_rec)) { | |
9d8df6aa | 2805 | BUG_ON(next_free <= 1); |
677b9752 TM |
2806 | right_rec = &right_el->l_recs[1]; |
2807 | } | |
2808 | ||
2809 | BUG_ON(le32_to_cpu(left_rec->e_cpos) + | |
2810 | le16_to_cpu(left_rec->e_leaf_clusters) != | |
2811 | le32_to_cpu(right_rec->e_cpos)); | |
2812 | ||
2813 | subtree_index = ocfs2_find_subtree_root(inode, | |
2814 | left_path, right_path); | |
2815 | ||
2816 | ret = ocfs2_extend_rotate_transaction(handle, subtree_index, | |
2817 | handle->h_buffer_credits, | |
2818 | right_path); | |
2819 | if (ret) { | |
2820 | mlog_errno(ret); | |
2821 | goto out; | |
2822 | } | |
2823 | ||
2824 | root_bh = left_path->p_node[subtree_index].bh; | |
2825 | BUG_ON(root_bh != right_path->p_node[subtree_index].bh); | |
2826 | ||
2827 | ret = ocfs2_journal_access(handle, inode, root_bh, | |
2828 | OCFS2_JOURNAL_ACCESS_WRITE); | |
2829 | if (ret) { | |
2830 | mlog_errno(ret); | |
2831 | goto out; | |
2832 | } | |
2833 | ||
2834 | for (i = subtree_index + 1; | |
2835 | i < path_num_items(right_path); i++) { | |
2836 | ret = ocfs2_journal_access(handle, inode, | |
2837 | right_path->p_node[i].bh, | |
2838 | OCFS2_JOURNAL_ACCESS_WRITE); | |
2839 | if (ret) { | |
2840 | mlog_errno(ret); | |
2841 | goto out; | |
2842 | } | |
2843 | ||
2844 | ret = ocfs2_journal_access(handle, inode, | |
2845 | left_path->p_node[i].bh, | |
2846 | OCFS2_JOURNAL_ACCESS_WRITE); | |
2847 | if (ret) { | |
2848 | mlog_errno(ret); | |
2849 | goto out; | |
2850 | } | |
2851 | } | |
2852 | ||
2853 | } else { | |
2854 | BUG_ON(index == le16_to_cpu(el->l_next_free_rec) - 1); | |
2855 | right_rec = &el->l_recs[index + 1]; | |
2856 | } | |
328d5752 MF |
2857 | |
2858 | ret = ocfs2_journal_access(handle, inode, bh, | |
2859 | OCFS2_JOURNAL_ACCESS_WRITE); | |
2860 | if (ret) { | |
2861 | mlog_errno(ret); | |
2862 | goto out; | |
2863 | } | |
2864 | ||
2865 | le16_add_cpu(&left_rec->e_leaf_clusters, -split_clusters); | |
2866 | ||
2867 | le32_add_cpu(&right_rec->e_cpos, -split_clusters); | |
2868 | le64_add_cpu(&right_rec->e_blkno, | |
2869 | -ocfs2_clusters_to_blocks(inode->i_sb, split_clusters)); | |
2870 | le16_add_cpu(&right_rec->e_leaf_clusters, split_clusters); | |
2871 | ||
2872 | ocfs2_cleanup_merge(el, index); | |
2873 | ||
2874 | ret = ocfs2_journal_dirty(handle, bh); | |
2875 | if (ret) | |
2876 | mlog_errno(ret); | |
2877 | ||
677b9752 TM |
2878 | if (right_path) { |
2879 | ret = ocfs2_journal_dirty(handle, path_leaf_bh(right_path)); | |
2880 | if (ret) | |
2881 | mlog_errno(ret); | |
2882 | ||
2883 | ocfs2_complete_edge_insert(inode, handle, left_path, | |
2884 | right_path, subtree_index); | |
2885 | } | |
2886 | out: | |
2887 | if (right_path) | |
2888 | ocfs2_free_path(right_path); | |
2889 | return ret; | |
2890 | } | |
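
Past the journaling, the merge itself is four adjustments: the left record shrinks by the split length and the right record grows downwards by the same amount (its cpos and blkno move back while its length grows), which keeps both the logical and the physical ranges contiguous. A host-side sketch of that arithmetic, with an assumed 8-blocks-per-cluster conversion standing in for ocfs2_clusters_to_blocks():

#include <stdio.h>

struct rec { unsigned int cpos; unsigned long long blkno; unsigned int clusters; };

#define BLOCKS_PER_CLUSTER 8		/* assumption for the example */

/* Move 'split' clusters off the tail of 'left' onto the head of 'right';
 * only valid when the two ranges are contiguous, as checked above. */
static void merge_right(struct rec *left, struct rec *right, unsigned int split)
{
	left->clusters  -= split;
	right->cpos     -= split;
	right->blkno    -= (unsigned long long)split * BLOCKS_PER_CLUSTER;
	right->clusters += split;
}

int main(void)
{
	struct rec left  = { 0,  1000, 16 };	/* clusters [0, 16)  */
	struct rec right = { 16, 1128,  8 };	/* clusters [16, 24) */

	merge_right(&left, &right, 4);
	printf("left: %u+%u  right: %u+%u @%llu\n",
	       left.cpos, left.clusters,
	       right.cpos, right.clusters, right.blkno);
	/* prints: left: 0+12  right: 12+12 @1096 */
	return 0;
}
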
2891 | ||
2892 | static int ocfs2_get_left_path(struct inode *inode, | |
2893 | struct ocfs2_path *right_path, | |
2894 | struct ocfs2_path **ret_left_path) | |
2895 | { | |
2896 | int ret; | |
2897 | u32 left_cpos; | |
2898 | struct ocfs2_path *left_path = NULL; | |
2899 | ||
2900 | *ret_left_path = NULL; | |
2901 | ||
2902 | /* This function shouldn't be called for non-trees. */ | |
2903 | BUG_ON(right_path->p_tree_depth == 0); | |
2904 | ||
2905 | ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb, | |
2906 | right_path, &left_cpos); | |
2907 | if (ret) { | |
2908 | mlog_errno(ret); | |
2909 | goto out; | |
2910 | } | |
2911 | ||
2912 | /* This function shouldn't be called for the leftmost leaf. */ | |
2913 | BUG_ON(left_cpos == 0); | |
2914 | ||
2915 | left_path = ocfs2_new_path(path_root_bh(right_path), | |
2916 | path_root_el(right_path)); | |
2917 | if (!left_path) { | |
2918 | ret = -ENOMEM; | |
2919 | mlog_errno(ret); | |
2920 | goto out; | |
2921 | } | |
2922 | ||
2923 | ret = ocfs2_find_path(inode, left_path, left_cpos); | |
2924 | if (ret) { | |
2925 | mlog_errno(ret); | |
2926 | goto out; | |
2927 | } | |
2928 | ||
2929 | *ret_left_path = left_path; | |
328d5752 | 2930 | out: |
677b9752 TM |
2931 | if (ret) |
2932 | ocfs2_free_path(left_path); | |
328d5752 MF |
2933 | return ret; |
2934 | } | |
2935 | ||
2936 | /* | |
2937 | * Remove split_rec clusters from the record at index and merge them | |
677b9752 TM |
2938 | * onto the tail of the record "before" it. |
2939 | * For index > 0, the "before" means the extent rec at index - 1. | |
2940 | * | |
2941 | * For index == 0, the "before" means the last record of the previous | |
2942 | * extent block. And there is also a situation that we may need to | |
2943 | * remove the rightmost leaf extent block in the right_path and change | |
2944 | * the right path to indicate the new rightmost path. | |
328d5752 | 2945 | */ |
677b9752 TM |
2946 | static int ocfs2_merge_rec_left(struct inode *inode, |
2947 | struct ocfs2_path *right_path, | |
328d5752 MF |
2948 | handle_t *handle, |
2949 | struct ocfs2_extent_rec *split_rec, | |
677b9752 TM |
2950 | struct ocfs2_cached_dealloc_ctxt *dealloc, |
2951 | int index) | |
328d5752 | 2952 | { |
677b9752 | 2953 | int ret, i, subtree_index = 0, has_empty_extent = 0; |
328d5752 MF |
2954 | unsigned int split_clusters = le16_to_cpu(split_rec->e_leaf_clusters); |
2955 | struct ocfs2_extent_rec *left_rec; | |
2956 | struct ocfs2_extent_rec *right_rec; | |
677b9752 TM |
2957 | struct ocfs2_extent_list *el = path_leaf_el(right_path); |
2958 | struct buffer_head *bh = path_leaf_bh(right_path); | |
2959 | struct buffer_head *root_bh = NULL; | |
2960 | struct ocfs2_path *left_path = NULL; | |
2961 | struct ocfs2_extent_list *left_el; | |
328d5752 | 2962 | |
677b9752 | 2963 | BUG_ON(index < 0); |
328d5752 | 2964 | |
328d5752 | 2965 | right_rec = &el->l_recs[index]; |
677b9752 TM |
2966 | if (index == 0) { |
2967 | /* We have a cross-extent-block merge. */ | 
2968 | ret = ocfs2_get_left_path(inode, right_path, &left_path); | |
2969 | if (ret) { | |
2970 | mlog_errno(ret); | |
2971 | goto out; | |
2972 | } | |
2973 | ||
2974 | left_el = path_leaf_el(left_path); | |
2975 | BUG_ON(le16_to_cpu(left_el->l_next_free_rec) != | |
2976 | le16_to_cpu(left_el->l_count)); | |
2977 | ||
2978 | left_rec = &left_el->l_recs[ | |
2979 | le16_to_cpu(left_el->l_next_free_rec) - 1]; | |
2980 | BUG_ON(le32_to_cpu(left_rec->e_cpos) + | |
2981 | le16_to_cpu(left_rec->e_leaf_clusters) != | |
2982 | le32_to_cpu(split_rec->e_cpos)); | |
2983 | ||
2984 | subtree_index = ocfs2_find_subtree_root(inode, | |
2985 | left_path, right_path); | |
2986 | ||
2987 | ret = ocfs2_extend_rotate_transaction(handle, subtree_index, | |
2988 | handle->h_buffer_credits, | |
2989 | left_path); | |
2990 | if (ret) { | |
2991 | mlog_errno(ret); | |
2992 | goto out; | |
2993 | } | |
2994 | ||
2995 | root_bh = left_path->p_node[subtree_index].bh; | |
2996 | BUG_ON(root_bh != right_path->p_node[subtree_index].bh); | |
2997 | ||
2998 | ret = ocfs2_journal_access(handle, inode, root_bh, | |
2999 | OCFS2_JOURNAL_ACCESS_WRITE); | |
3000 | if (ret) { | |
3001 | mlog_errno(ret); | |
3002 | goto out; | |
3003 | } | |
3004 | ||
3005 | for (i = subtree_index + 1; | |
3006 | i < path_num_items(right_path); i++) { | |
3007 | ret = ocfs2_journal_access(handle, inode, | |
3008 | right_path->p_node[i].bh, | |
3009 | OCFS2_JOURNAL_ACCESS_WRITE); | |
3010 | if (ret) { | |
3011 | mlog_errno(ret); | |
3012 | goto out; | |
3013 | } | |
3014 | ||
3015 | ret = ocfs2_journal_access(handle, inode, | |
3016 | left_path->p_node[i].bh, | |
3017 | OCFS2_JOURNAL_ACCESS_WRITE); | |
3018 | if (ret) { | |
3019 | mlog_errno(ret); | |
3020 | goto out; | |
3021 | } | |
3022 | } | |
3023 | } else { | |
3024 | left_rec = &el->l_recs[index - 1]; | |
3025 | if (ocfs2_is_empty_extent(&el->l_recs[0])) | |
3026 | has_empty_extent = 1; | |
3027 | } | |
328d5752 MF |
3028 | |
3029 | ret = ocfs2_journal_access(handle, inode, bh, | |
3030 | OCFS2_JOURNAL_ACCESS_WRITE); | |
3031 | if (ret) { | |
3032 | mlog_errno(ret); | |
3033 | goto out; | |
3034 | } | |
3035 | ||
3036 | if (has_empty_extent && index == 1) { | |
3037 | /* | |
3038 | * The easy case - we can just plop the record right in. | |
3039 | */ | |
3040 | *left_rec = *split_rec; | |
3041 | ||
3042 | has_empty_extent = 0; | |
677b9752 | 3043 | } else |
328d5752 | 3044 | le16_add_cpu(&left_rec->e_leaf_clusters, split_clusters); |
328d5752 MF |
3045 | |
3046 | le32_add_cpu(&right_rec->e_cpos, split_clusters); | |
3047 | le64_add_cpu(&right_rec->e_blkno, | |
3048 | ocfs2_clusters_to_blocks(inode->i_sb, split_clusters)); | |
3049 | le16_add_cpu(&right_rec->e_leaf_clusters, -split_clusters); | |
3050 | ||
3051 | ocfs2_cleanup_merge(el, index); | |
3052 | ||
3053 | ret = ocfs2_journal_dirty(handle, bh); | |
3054 | if (ret) | |
3055 | mlog_errno(ret); | |
3056 | ||
677b9752 TM |
3057 | if (left_path) { |
3058 | ret = ocfs2_journal_dirty(handle, path_leaf_bh(left_path)); | |
3059 | if (ret) | |
3060 | mlog_errno(ret); | |
3061 | ||
3062 | /* | |
3063 | * If the right_rec is empty and the extent block is also | 
3064 | * empty, ocfs2_complete_edge_insert() can't handle | 
3065 | * it and we need to delete the right extent block. | |
3066 | */ | |
3067 | if (le16_to_cpu(right_rec->e_leaf_clusters) == 0 && | |
3068 | le16_to_cpu(el->l_next_free_rec) == 1) { | |
3069 | ||
3070 | ret = ocfs2_remove_rightmost_path(inode, handle, | |
3071 | right_path, dealloc); | |
3072 | if (ret) { | |
3073 | mlog_errno(ret); | |
3074 | goto out; | |
3075 | } | |
3076 | ||
3077 | /* Now the rightmost extent block has been deleted. | |
3078 | * So we use the new rightmost path. | |
3079 | */ | |
3080 | ocfs2_mv_path(right_path, left_path); | |
3081 | left_path = NULL; | |
3082 | } else | |
3083 | ocfs2_complete_edge_insert(inode, handle, left_path, | |
3084 | right_path, subtree_index); | |
3085 | } | |
328d5752 | 3086 | out: |
677b9752 TM |
3087 | if (left_path) |
3088 | ocfs2_free_path(left_path); | |
328d5752 MF |
3089 | return ret; |
3090 | } | |
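
ocfs2_merge_rec_left() is the mirror image: the right record shrinks from its head (cpos and blkno move forward) while the left one either grows at its tail or, in the "easy case" where it is the empty extent, simply takes on the split record wholesale. A host-side sketch along the same lines as the previous example; the split parameter stands in for split_rec and the 8-blocks-per-cluster conversion is again an assumption:

#include <stdio.h>

struct rec { unsigned int cpos; unsigned long long blkno; unsigned int clusters; };

#define BLOCKS_PER_CLUSTER 8		/* assumption for the example */

static void merge_left(struct rec *left, struct rec *right,
		       const struct rec *split, int left_is_empty)
{
	unsigned int n = split->clusters;

	if (left_is_empty)
		*left = *split;		/* "easy case": drop it straight in */
	else
		left->clusters += n;

	right->cpos     += n;
	right->blkno    += (unsigned long long)n * BLOCKS_PER_CLUSTER;
	right->clusters -= n;
}

int main(void)
{
	struct rec left  = { 0,  1000, 16 };	/* clusters [0, 16)  */
	struct rec right = { 16, 1128,  8 };	/* clusters [16, 24) */
	struct rec split = { 16, 1128,  4 };	/* head of 'right'   */

	merge_left(&left, &right, &split, 0);
	printf("left: %u+%u  right: %u+%u @%llu\n",
	       left.cpos, left.clusters,
	       right.cpos, right.clusters, right.blkno);
	/* prints: left: 0+20  right: 20+4 @1160 */
	return 0;
}
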
3091 | ||
3092 | static int ocfs2_try_to_merge_extent(struct inode *inode, | |
3093 | handle_t *handle, | |
677b9752 | 3094 | struct ocfs2_path *path, |
328d5752 MF |
3095 | int split_index, |
3096 | struct ocfs2_extent_rec *split_rec, | |
3097 | struct ocfs2_cached_dealloc_ctxt *dealloc, | |
3098 | struct ocfs2_merge_ctxt *ctxt) | |
3099 | ||
3100 | { | |
518d7269 | 3101 | int ret = 0; |
677b9752 | 3102 | struct ocfs2_extent_list *el = path_leaf_el(path); |
328d5752 MF |
3103 | struct ocfs2_extent_rec *rec = &el->l_recs[split_index]; |
3104 | ||
3105 | BUG_ON(ctxt->c_contig_type == CONTIG_NONE); | |
3106 | ||
518d7269 TM |
3107 | if (ctxt->c_split_covers_rec && ctxt->c_has_empty_extent) { |
3108 | /* | |
3109 | * The merge code will need to create an empty | |
3110 | * extent to take the place of the newly | |
3111 | * emptied slot. Remove any pre-existing empty | |
3112 | * extents - having more than one in a leaf is | |
3113 | * illegal. | |
3114 | */ | |
677b9752 | 3115 | ret = ocfs2_rotate_tree_left(inode, handle, path, |
518d7269 TM |
3116 | dealloc); |
3117 | if (ret) { | |
3118 | mlog_errno(ret); | |
3119 | goto out; | |
328d5752 | 3120 | } |
518d7269 TM |
3121 | split_index--; |
3122 | rec = &el->l_recs[split_index]; | |
328d5752 MF |
3123 | } |
3124 | ||
3125 | if (ctxt->c_contig_type == CONTIG_LEFTRIGHT) { | |
3126 | /* | |
3127 | * Left-right contig implies this. | |
3128 | */ | |
3129 | BUG_ON(!ctxt->c_split_covers_rec); | |
328d5752 MF |
3130 | |
3131 | /* | |
3132 | * Since the leftright insert always covers the entire | |
3133 | * extent, this call will delete the insert record | |
3134 | * entirely, resulting in an empty extent record added to | |
3135 | * the extent block. | |
3136 | * | |
3137 | * Since the adding of an empty extent shifts | |
3138 | * everything back to the right, there's no need to | |
3139 | * update split_index here. | |
677b9752 TM |
3140 | * |
3141 | * When the split_index is zero, we need to merge it to the | |
3142 | * previous extent block. It is more efficient and easier | 
3143 | * if we do merge_right first and merge_left later. | |
328d5752 | 3144 | */ |
677b9752 TM |
3145 | ret = ocfs2_merge_rec_right(inode, path, |
3146 | handle, split_rec, | |
3147 | split_index); | |
328d5752 MF |
3148 | if (ret) { |
3149 | mlog_errno(ret); | |
3150 | goto out; | |
3151 | } | |
3152 | ||
3153 | /* | |
3154 | * We can only get this from logic error above. | |
3155 | */ | |
3156 | BUG_ON(!ocfs2_is_empty_extent(&el->l_recs[0])); | |
3157 | ||
677b9752 TM |
3158 | /* The merge left us with an empty extent, remove it. */ |
3159 | ret = ocfs2_rotate_tree_left(inode, handle, path, dealloc); | |
328d5752 MF |
3160 | if (ret) { |
3161 | mlog_errno(ret); | |
3162 | goto out; | |
3163 | } | |
677b9752 | 3164 | |
328d5752 MF |
3165 | rec = &el->l_recs[split_index]; |
3166 | ||
3167 | /* | |
3168 | * Note that we don't pass split_rec here on purpose - | |
677b9752 | 3169 | * we've merged it into the rec already. |
328d5752 | 3170 | */ |
677b9752 TM |
3171 | ret = ocfs2_merge_rec_left(inode, path, |
3172 | handle, rec, | |
3173 | dealloc, | |
3174 | split_index); | |
3175 | ||
328d5752 MF |
3176 | if (ret) { |
3177 | mlog_errno(ret); | |
3178 | goto out; | |
3179 | } | |
3180 | ||
677b9752 | 3181 | ret = ocfs2_rotate_tree_left(inode, handle, path, |
328d5752 MF |
3182 | dealloc); |
3183 | /* | |
3184 | * Error from this last rotate is not critical, so | |
3185 | * print but don't bubble it up. | |
3186 | */ | |
3187 | if (ret) | |
3188 | mlog_errno(ret); | |
3189 | ret = 0; | |
3190 | } else { | |
3191 | /* | |
3192 | * Merge a record to the left or right. | |
3193 | * | |
3194 | * 'contig_type' is relative to the existing record, | |
3195 | * so for example, if we're "right contig", it's to | |
3196 | * the record on the left (hence the left merge). | |
3197 | */ | |
3198 | if (ctxt->c_contig_type == CONTIG_RIGHT) { | |
3199 | ret = ocfs2_merge_rec_left(inode, | |
677b9752 TM |
3200 | path, |
3201 | handle, split_rec, | |
3202 | dealloc, | |
328d5752 MF |
3203 | split_index); |
3204 | if (ret) { | |
3205 | mlog_errno(ret); | |
3206 | goto out; | |
3207 | } | |
3208 | } else { | |
3209 | ret = ocfs2_merge_rec_right(inode, | |
677b9752 TM |
3210 | path, |
3211 | handle, split_rec, | |
328d5752 MF |
3212 | split_index); |
3213 | if (ret) { | |
3214 | mlog_errno(ret); | |
3215 | goto out; | |
3216 | } | |
3217 | } | |
3218 | ||
3219 | if (ctxt->c_split_covers_rec) { | |
3220 | /* | |
3221 | * The merge may have left an empty extent in | |
3222 | * our leaf. Try to rotate it away. | |
3223 | */ | |
677b9752 | 3224 | ret = ocfs2_rotate_tree_left(inode, handle, path, |
328d5752 MF |
3225 | dealloc); |
3226 | if (ret) | |
3227 | mlog_errno(ret); | |
3228 | ret = 0; | |
3229 | } | |
3230 | } | |
3231 | ||
3232 | out: | |
3233 | return ret; | |
3234 | } | |
3235 | ||
3236 | static void ocfs2_subtract_from_rec(struct super_block *sb, | |
3237 | enum ocfs2_split_type split, | |
3238 | struct ocfs2_extent_rec *rec, | |
3239 | struct ocfs2_extent_rec *split_rec) | |
3240 | { | |
3241 | u64 len_blocks; | |
3242 | ||
3243 | len_blocks = ocfs2_clusters_to_blocks(sb, | |
3244 | le16_to_cpu(split_rec->e_leaf_clusters)); | |
3245 | ||
3246 | if (split == SPLIT_LEFT) { | |
3247 | /* | |
3248 | * Region is on the left edge of the existing | |
3249 | * record. | |
3250 | */ | |
3251 | le32_add_cpu(&rec->e_cpos, | |
3252 | le16_to_cpu(split_rec->e_leaf_clusters)); | |
3253 | le64_add_cpu(&rec->e_blkno, len_blocks); | |
3254 | le16_add_cpu(&rec->e_leaf_clusters, | |
3255 | -le16_to_cpu(split_rec->e_leaf_clusters)); | |
3256 | } else { | |
3257 | /* | |
3258 | * Region is on the right edge of the existing | |
3259 | * record. | |
3260 | */ | |
3261 | le16_add_cpu(&rec->e_leaf_clusters, | |
3262 | -le16_to_cpu(split_rec->e_leaf_clusters)); | |
3263 | } | |
3264 | } | |
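/*
 * A hypothetical example of the arithmetic above (illustrative numbers
 * only): with rec = { e_cpos = 100, e_leaf_clusters = 8 } and
 * split_rec = { e_cpos = 100, e_leaf_clusters = 3 }, a SPLIT_LEFT
 * leaves rec = { e_cpos = 103, e_leaf_clusters = 5 } with e_blkno
 * advanced by ocfs2_clusters_to_blocks(sb, 3). A SPLIT_RIGHT with
 * split_rec = { e_cpos = 105, e_leaf_clusters = 3 } only shortens the
 * record, leaving rec = { e_cpos = 100, e_leaf_clusters = 5 }.
 */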
3265 | ||
3266 | /* | |
3267 | * Do the final bits of extent record insertion at the target leaf | |
3268 | * list. If this leaf is part of an allocation tree, it is assumed | |
3269 | * that the tree above has been prepared. | |
3270 | */ | |
3271 | static void ocfs2_insert_at_leaf(struct ocfs2_extent_rec *insert_rec, | |
3272 | struct ocfs2_extent_list *el, | |
3273 | struct ocfs2_insert_type *insert, | |
3274 | struct inode *inode) | |
3275 | { | |
3276 | int i = insert->ins_contig_index; | |
3277 | unsigned int range; | |
3278 | struct ocfs2_extent_rec *rec; | |
3279 | ||
3280 | BUG_ON(le16_to_cpu(el->l_tree_depth) != 0); | |
3281 | ||
3282 | if (insert->ins_split != SPLIT_NONE) { | |
3283 | i = ocfs2_search_extent_list(el, le32_to_cpu(insert_rec->e_cpos)); | |
3284 | BUG_ON(i == -1); | |
3285 | rec = &el->l_recs[i]; | |
3286 | ocfs2_subtract_from_rec(inode->i_sb, insert->ins_split, rec, | |
3287 | insert_rec); | |
3288 | goto rotate; | |
3289 | } | |
3290 | ||
3291 | /* | |
3292 | * Contiguous insert - either left or right. | |
3293 | */ | |
3294 | if (insert->ins_contig != CONTIG_NONE) { | |
3295 | rec = &el->l_recs[i]; | |
3296 | if (insert->ins_contig == CONTIG_LEFT) { | |
3297 | rec->e_blkno = insert_rec->e_blkno; | |
3298 | rec->e_cpos = insert_rec->e_cpos; | |
3299 | } | |
3300 | le16_add_cpu(&rec->e_leaf_clusters, | |
3301 | le16_to_cpu(insert_rec->e_leaf_clusters)); | |
3302 | return; | |
3303 | } | |
3304 | ||
3305 | /* | |
3306 | * Handle insert into an empty leaf. | |
3307 | */ | |
3308 | if (le16_to_cpu(el->l_next_free_rec) == 0 || | |
3309 | ((le16_to_cpu(el->l_next_free_rec) == 1) && | |
3310 | ocfs2_is_empty_extent(&el->l_recs[0]))) { | |
3311 | el->l_recs[0] = *insert_rec; | |
3312 | el->l_next_free_rec = cpu_to_le16(1); | |
3313 | return; | |
3314 | } | |
3315 | ||
3316 | /* | |
3317 | * Appending insert. | |
3318 | */ | |
3319 | if (insert->ins_appending == APPEND_TAIL) { | |
3320 | i = le16_to_cpu(el->l_next_free_rec) - 1; | |
3321 | rec = &el->l_recs[i]; | |
3322 | range = le32_to_cpu(rec->e_cpos) | |
3323 | + le16_to_cpu(rec->e_leaf_clusters); | |
3324 | BUG_ON(le32_to_cpu(insert_rec->e_cpos) < range); | |
3325 | ||
3326 | mlog_bug_on_msg(le16_to_cpu(el->l_next_free_rec) >= | |
3327 | le16_to_cpu(el->l_count), | |
3328 | "inode %lu, depth %u, count %u, next free %u, " | |
3329 | "rec.cpos %u, rec.clusters %u, " | |
3330 | "insert.cpos %u, insert.clusters %u\n", | |
3331 | inode->i_ino, | |
3332 | le16_to_cpu(el->l_tree_depth), | |
3333 | le16_to_cpu(el->l_count), | |
3334 | le16_to_cpu(el->l_next_free_rec), | |
3335 | le32_to_cpu(el->l_recs[i].e_cpos), | |
3336 | le16_to_cpu(el->l_recs[i].e_leaf_clusters), | |
3337 | le32_to_cpu(insert_rec->e_cpos), | |
3338 | le16_to_cpu(insert_rec->e_leaf_clusters)); | |
3339 | i++; | |
3340 | el->l_recs[i] = *insert_rec; | |
3341 | le16_add_cpu(&el->l_next_free_rec, 1); | |
3342 | return; | |
3343 | } | |
3344 | ||
3345 | rotate: | |
3346 | /* | |
3347 | * Ok, we have to rotate. | |
3348 | * | |
3349 | * At this point, it is safe to assume that inserting into an | |
3350 | * empty leaf and appending to a leaf have both been handled | |
3351 | * above. | |
3352 | * | |
3353 | * This leaf needs to have space, either by the empty 1st | |
3354 | * extent record, or by virtue of l_next_free_rec < l_count. | |
3355 | */ | |
3356 | ocfs2_rotate_leaf(el, insert_rec); | |
3357 | } | |
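/*
 * A hypothetical example of the contiguous case above: if the leaf
 * holds rec = { e_cpos = 104, e_leaf_clusters = 8 } and we insert
 * insert_rec = { e_cpos = 100, e_leaf_clusters = 4 } with
 * ins_contig == CONTIG_LEFT, the existing record simply absorbs it,
 * becoming { e_cpos = 100, e_leaf_clusters = 12 } with e_cpos and
 * e_blkno taken from insert_rec. A CONTIG_RIGHT insert only grows
 * e_leaf_clusters.
 */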
3358 | ||
3359 | static inline void ocfs2_update_dinode_clusters(struct inode *inode, | |
3360 | struct ocfs2_dinode *di, | |
3361 | u32 clusters) | |
3362 | { | |
3363 | le32_add_cpu(&di->i_clusters, clusters); | |
3364 | spin_lock(&OCFS2_I(inode)->ip_lock); | |
3365 | OCFS2_I(inode)->ip_clusters = le32_to_cpu(di->i_clusters); | |
3366 | spin_unlock(&OCFS2_I(inode)->ip_lock); | |
3367 | } | |
3368 | ||
3369 | static void ocfs2_adjust_rightmost_records(struct inode *inode, | |
3370 | handle_t *handle, | |
3371 | struct ocfs2_path *path, | |
3372 | struct ocfs2_extent_rec *insert_rec) | |
3373 | { | |
3374 | int ret, i, next_free; | |
3375 | struct buffer_head *bh; | |
3376 | struct ocfs2_extent_list *el; | |
3377 | struct ocfs2_extent_rec *rec; | |
3378 | ||
3379 | /* | |
3380 | * Update everything except the leaf block. | |
3381 | */ | |
3382 | for (i = 0; i < path->p_tree_depth; i++) { | |
3383 | bh = path->p_node[i].bh; | |
3384 | el = path->p_node[i].el; | |
3385 | ||
dcd0538f MF |
3386 | next_free = le16_to_cpu(el->l_next_free_rec); |
3387 | if (next_free == 0) { | |
3388 | ocfs2_error(inode->i_sb, | |
3389 | "Dinode %llu has a bad extent list", | |
3390 | (unsigned long long)OCFS2_I(inode)->ip_blkno); | |
3391 | ret = -EIO; | |
328d5752 MF |
3392 | return; |
3393 | } | |
3394 | ||
3395 | rec = &el->l_recs[next_free - 1]; | |
3396 | ||
3397 | rec->e_int_clusters = insert_rec->e_cpos; | |
3398 | le32_add_cpu(&rec->e_int_clusters, | |
3399 | le16_to_cpu(insert_rec->e_leaf_clusters)); | |
3400 | le32_add_cpu(&rec->e_int_clusters, | |
3401 | -le32_to_cpu(rec->e_cpos)); | |
3402 | ||
3403 | ret = ocfs2_journal_dirty(handle, bh); | |
3404 | if (ret) | |
3405 | mlog_errno(ret); | |
3406 | ||
3407 | } | |
3408 | } | |
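/*
 * A hypothetical example: an interior record with e_cpos = 0 that
 * currently covers 100 clusters (e_int_clusters = 100) is extended by
 * an appending insert_rec = { e_cpos = 100, e_leaf_clusters = 8 }.
 * The computation above sets e_int_clusters to 100 + 8 - 0 = 108, so
 * each record along the rightmost edge grows to cover the new range.
 */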
3409 | ||
3410 | static int ocfs2_append_rec_to_path(struct inode *inode, handle_t *handle, | |
3411 | struct ocfs2_extent_rec *insert_rec, | |
3412 | struct ocfs2_path *right_path, | |
3413 | struct ocfs2_path **ret_left_path) | |
3414 | { | |
3415 | int ret, next_free; | |
3416 | struct ocfs2_extent_list *el; | |
3417 | struct ocfs2_path *left_path = NULL; | |
3418 | ||
3419 | *ret_left_path = NULL; | |
3420 | ||
3421 | /* | |
3422 | * This shouldn't happen for non-trees. The extent rec cluster | |
3423 | * count manipulation below only works for interior nodes. | |
3424 | */ | |
3425 | BUG_ON(right_path->p_tree_depth == 0); | |
3426 | ||
3427 | /* | |
3428 | * If our appending insert is at the leftmost edge of a leaf, | |
3429 | * then we might need to update the rightmost records of the | |
3430 | * neighboring path. | |
3431 | */ | |
3432 | el = path_leaf_el(right_path); | |
3433 | next_free = le16_to_cpu(el->l_next_free_rec); | |
3434 | if (next_free == 0 || | |
3435 | (next_free == 1 && ocfs2_is_empty_extent(&el->l_recs[0]))) { | |
3436 | u32 left_cpos; | |
3437 | ||
3438 | ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb, right_path, | |
3439 | &left_cpos); | |
3440 | if (ret) { | |
3441 | mlog_errno(ret); | |
dcd0538f MF |
3442 | goto out; |
3443 | } | |
3444 | ||
328d5752 MF |
3445 | mlog(0, "Append may need a left path update. cpos: %u, " |
3446 | "left_cpos: %u\n", le32_to_cpu(insert_rec->e_cpos), | |
3447 | left_cpos); | |
e48edee2 | 3448 | |
328d5752 MF |
3449 | /* |
3450 | * No need to worry if the append is already in the | |
3451 | * leftmost leaf. | |
3452 | */ | |
3453 | if (left_cpos) { | |
3454 | left_path = ocfs2_new_path(path_root_bh(right_path), | |
3455 | path_root_el(right_path)); | |
3456 | if (!left_path) { | |
3457 | ret = -ENOMEM; | |
3458 | mlog_errno(ret); | |
3459 | goto out; | |
3460 | } | |
dcd0538f | 3461 | |
328d5752 MF |
3462 | ret = ocfs2_find_path(inode, left_path, left_cpos); |
3463 | if (ret) { | |
3464 | mlog_errno(ret); | |
3465 | goto out; | |
3466 | } | |
dcd0538f | 3467 | |
328d5752 MF |
3468 | /* |
3469 | * ocfs2_insert_path() will pass the left_path to the | |
3470 | * journal for us. | |
3471 | */ | |
3472 | } | |
3473 | } | |
dcd0538f | 3474 | |
328d5752 MF |
3475 | ret = ocfs2_journal_access_path(inode, handle, right_path); |
3476 | if (ret) { | |
3477 | mlog_errno(ret); | |
3478 | goto out; | |
dcd0538f MF |
3479 | } |
3480 | ||
328d5752 MF |
3481 | ocfs2_adjust_rightmost_records(inode, handle, right_path, insert_rec); |
3482 | ||
dcd0538f MF |
3483 | *ret_left_path = left_path; |
3484 | ret = 0; | |
3485 | out: | |
3486 | if (ret != 0) | |
3487 | ocfs2_free_path(left_path); | |
3488 | ||
3489 | return ret; | |
3490 | } | |
3491 | ||
328d5752 MF |
3492 | static void ocfs2_split_record(struct inode *inode, |
3493 | struct ocfs2_path *left_path, | |
3494 | struct ocfs2_path *right_path, | |
3495 | struct ocfs2_extent_rec *split_rec, | |
3496 | enum ocfs2_split_type split) | |
3497 | { | |
3498 | int index; | |
3499 | u32 cpos = le32_to_cpu(split_rec->e_cpos); | |
3500 | struct ocfs2_extent_list *left_el = NULL, *right_el, *insert_el, *el; | |
3501 | struct ocfs2_extent_rec *rec, *tmprec; | |
3502 | ||
3503 | right_el = path_leaf_el(right_path); |
3504 | if (left_path) | |
3505 | left_el = path_leaf_el(left_path); | |
3506 | ||
3507 | el = right_el; | |
3508 | insert_el = right_el; | |
3509 | index = ocfs2_search_extent_list(el, cpos); | |
3510 | if (index != -1) { | |
3511 | if (index == 0 && left_path) { | |
3512 | BUG_ON(ocfs2_is_empty_extent(&el->l_recs[0])); | |
3513 | ||
3514 | /* | |
3515 | * This typically means that the record | |
3516 | * started in the left path but moved to the | |
3517 | * right as a result of rotation. We either | |
3518 | * move the existing record to the left, or we | |
3519 | * do the later insert there. | |
3520 | * | |
3521 | * In this case, the left path should always | |
3522 | * exist as the rotate code will have passed | |
3523 | * it back for a post-insert update. | |
3524 | */ | |
3525 | ||
3526 | if (split == SPLIT_LEFT) { | |
3527 | /* | |
3528 | * It's a left split. Since we know | |
3529 | * that the rotate code gave us an | |
3530 | * empty extent in the left path, we | |
3531 | * can just do the insert there. | |
3532 | */ | |
3533 | insert_el = left_el; | |
3534 | } else { | |
3535 | /* | |
3536 | * Right split - we have to move the | |
3537 | * existing record over to the left | |
3538 | * leaf. The insert will be into the | |
3539 | * newly created empty extent in the | |
3540 | * right leaf. | |
3541 | */ | |
3542 | tmprec = &right_el->l_recs[index]; | |
3543 | ocfs2_rotate_leaf(left_el, tmprec); | |
3544 | el = left_el; | |
3545 | ||
3546 | memset(tmprec, 0, sizeof(*tmprec)); | |
3547 | index = ocfs2_search_extent_list(left_el, cpos); | |
3548 | BUG_ON(index == -1); | |
3549 | } | |
3550 | } | |
3551 | } else { | |
3552 | BUG_ON(!left_path); | |
3553 | BUG_ON(!ocfs2_is_empty_extent(&left_el->l_recs[0])); | |
3554 | /* | |
3555 | * Left path is easy - we can just allow the insert to | |
3556 | * happen. | |
3557 | */ | |
3558 | el = left_el; | |
3559 | insert_el = left_el; | |
3560 | index = ocfs2_search_extent_list(el, cpos); | |
3561 | BUG_ON(index == -1); | |
3562 | } | |
3563 | ||
3564 | rec = &el->l_recs[index]; | |
3565 | ocfs2_subtract_from_rec(inode->i_sb, split, rec, split_rec); | |
3566 | ocfs2_rotate_leaf(insert_el, split_rec); | |
3567 | } | |
3568 | ||
dcd0538f MF |
3569 | /* |
3570 | * This function only does inserts on an allocation b-tree. For dinode | |
3571 | * lists, ocfs2_insert_at_leaf() is called directly. | |
3572 | * | |
3573 | * right_path is the path we want to do the actual insert | |
3574 | * in. left_path should only be passed in if we need to update that | |
3575 | * portion of the tree after an edge insert. | |
3576 | */ | |
3577 | static int ocfs2_insert_path(struct inode *inode, | |
3578 | handle_t *handle, | |
3579 | struct ocfs2_path *left_path, | |
3580 | struct ocfs2_path *right_path, | |
3581 | struct ocfs2_extent_rec *insert_rec, | |
3582 | struct ocfs2_insert_type *insert) | |
3583 | { | |
3584 | int ret, subtree_index; | |
3585 | struct buffer_head *leaf_bh = path_leaf_bh(right_path); | |
dcd0538f | 3586 | |
dcd0538f MF |
3587 | if (left_path) { |
3588 | int credits = handle->h_buffer_credits; | |
3589 | ||
3590 | /* | |
3591 | * There's a chance that left_path got passed back to | |
3592 | * us without being accounted for in the | |
3593 | * journal. Extend our transaction here to be sure we | |
3594 | * can change those blocks. | |
3595 | */ | |
3596 | credits += left_path->p_tree_depth; | |
3597 | ||
3598 | ret = ocfs2_extend_trans(handle, credits); | |
3599 | if (ret < 0) { | |
3600 | mlog_errno(ret); | |
3601 | goto out; | |
3602 | } | |
3603 | ||
3604 | ret = ocfs2_journal_access_path(inode, handle, left_path); | |
3605 | if (ret < 0) { | |
3606 | mlog_errno(ret); | |
3607 | goto out; | |
3608 | } | |
3609 | } | |
3610 | ||
e8aed345 MF |
3611 | /* |
3612 | * Pass both paths to the journal. The majority of inserts | |
3613 | * will be touching all components anyway. | |
3614 | */ | |
3615 | ret = ocfs2_journal_access_path(inode, handle, right_path); | |
3616 | if (ret < 0) { | |
3617 | mlog_errno(ret); | |
3618 | goto out; | |
3619 | } | |
3620 | ||
328d5752 MF |
3621 | if (insert->ins_split != SPLIT_NONE) { |
3622 | /* | |
3623 | * We could call ocfs2_insert_at_leaf() for some types | |
c78bad11 | 3624 | * of splits, but it's easier to just let one separate |
328d5752 MF |
3625 | * function sort it all out. |
3626 | */ | |
3627 | ocfs2_split_record(inode, left_path, right_path, | |
3628 | insert_rec, insert->ins_split); | |
e8aed345 MF |
3629 | |
3630 | /* | |
3631 | * Split might have modified either leaf and we don't | |
3632 | * have a guarantee that the later edge insert will | |
3633 | * dirty this for us. | |
3634 | */ | |
3635 | if (left_path) | |
3636 | ret = ocfs2_journal_dirty(handle, | |
3637 | path_leaf_bh(left_path)); | |
3638 | if (ret) | |
3639 | mlog_errno(ret); | |
328d5752 MF |
3640 | } else |
3641 | ocfs2_insert_at_leaf(insert_rec, path_leaf_el(right_path), | |
3642 | insert, inode); | |
dcd0538f | 3643 | |
dcd0538f MF |
3644 | ret = ocfs2_journal_dirty(handle, leaf_bh); |
3645 | if (ret) | |
3646 | mlog_errno(ret); | |
3647 | ||
3648 | if (left_path) { | |
3649 | /* | |
3650 | * The rotate code has indicated that we need to fix | |
3651 | * up portions of the tree after the insert. | |
3652 | * | |
3653 | * XXX: Should we extend the transaction here? | |
3654 | */ | |
3655 | subtree_index = ocfs2_find_subtree_root(inode, left_path, | |
3656 | right_path); | |
3657 | ocfs2_complete_edge_insert(inode, handle, left_path, | |
3658 | right_path, subtree_index); | |
3659 | } | |
3660 | ||
3661 | ret = 0; | |
3662 | out: | |
3663 | return ret; | |
3664 | } | |
3665 | ||
3666 | static int ocfs2_do_insert_extent(struct inode *inode, | |
3667 | handle_t *handle, | |
3668 | struct buffer_head *di_bh, | |
3669 | struct ocfs2_extent_rec *insert_rec, | |
3670 | struct ocfs2_insert_type *type) | |
3671 | { | |
3672 | int ret, rotate = 0; | |
3673 | u32 cpos; | |
3674 | struct ocfs2_path *right_path = NULL; | |
3675 | struct ocfs2_path *left_path = NULL; | |
3676 | struct ocfs2_dinode *di; | |
3677 | struct ocfs2_extent_list *el; | |
3678 | ||
3679 | di = (struct ocfs2_dinode *) di_bh->b_data; | |
3680 | el = &di->id2.i_list; | |
3681 | ||
3682 | ret = ocfs2_journal_access(handle, inode, di_bh, | |
3683 | OCFS2_JOURNAL_ACCESS_WRITE); | |
3684 | if (ret) { | |
3685 | mlog_errno(ret); | |
3686 | goto out; | |
3687 | } | |
3688 | ||
3689 | if (le16_to_cpu(el->l_tree_depth) == 0) { | |
3690 | ocfs2_insert_at_leaf(insert_rec, el, type, inode); | |
3691 | goto out_update_clusters; | |
3692 | } | |
3693 | ||
3694 | right_path = ocfs2_new_inode_path(di_bh); | |
3695 | if (!right_path) { | |
3696 | ret = -ENOMEM; | |
3697 | mlog_errno(ret); | |
3698 | goto out; | |
3699 | } | |
3700 | ||
3701 | /* | |
3702 | * Determine the path to start with. Rotations need the | |
3703 | * rightmost path, everything else can go directly to the | |
3704 | * target leaf. | |
3705 | */ | |
3706 | cpos = le32_to_cpu(insert_rec->e_cpos); | |
3707 | if (type->ins_appending == APPEND_NONE && | |
3708 | type->ins_contig == CONTIG_NONE) { | |
3709 | rotate = 1; | |
3710 | cpos = UINT_MAX; | |
3711 | } | |
3712 | ||
3713 | ret = ocfs2_find_path(inode, right_path, cpos); | |
3714 | if (ret) { | |
3715 | mlog_errno(ret); | |
3716 | goto out; | |
3717 | } | |
3718 | ||
3719 | /* | |
3720 | * Rotations and appends need special treatment - they modify | |
3721 | * parts of the tree above them. | |
3722 | * | |
3723 | * Both might pass back a path immediately to the left of the | |
3724 | * one being inserted into. This will cause | |
3725 | * ocfs2_insert_path() to modify the rightmost records of | |
3726 | * left_path to account for an edge insert. | |
3727 | * | |
3728 | * XXX: When modifying this code, keep in mind that an insert | |
3729 | * can wind up skipping both of these two special cases... | |
3730 | */ | |
3731 | if (rotate) { | |
328d5752 | 3732 | ret = ocfs2_rotate_tree_right(inode, handle, type->ins_split, |
dcd0538f MF |
3733 | le32_to_cpu(insert_rec->e_cpos), |
3734 | right_path, &left_path); | |
3735 | if (ret) { | |
3736 | mlog_errno(ret); | |
3737 | goto out; | |
3738 | } | |
e8aed345 MF |
3739 | |
3740 | /* | |
3741 | * ocfs2_rotate_tree_right() might have extended the | |
3742 | * transaction without re-journaling our tree root. | |
3743 | */ | |
3744 | ret = ocfs2_journal_access(handle, inode, di_bh, | |
3745 | OCFS2_JOURNAL_ACCESS_WRITE); | |
3746 | if (ret) { | |
3747 | mlog_errno(ret); | |
3748 | goto out; | |
3749 | } | |
dcd0538f MF |
3750 | } else if (type->ins_appending == APPEND_TAIL |
3751 | && type->ins_contig != CONTIG_LEFT) { | |
3752 | ret = ocfs2_append_rec_to_path(inode, handle, insert_rec, | |
3753 | right_path, &left_path); | |
3754 | if (ret) { | |
3755 | mlog_errno(ret); | |
3756 | goto out; | |
3757 | } | |
3758 | } | |
3759 | ||
3760 | ret = ocfs2_insert_path(inode, handle, left_path, right_path, | |
3761 | insert_rec, type); | |
3762 | if (ret) { | |
3763 | mlog_errno(ret); | |
3764 | goto out; | |
3765 | } | |
3766 | ||
3767 | out_update_clusters: | |
328d5752 MF |
3768 | if (type->ins_split == SPLIT_NONE) |
3769 | ocfs2_update_dinode_clusters(inode, di, | |
3770 | le16_to_cpu(insert_rec->e_leaf_clusters)); | |
dcd0538f MF |
3771 | |
3772 | ret = ocfs2_journal_dirty(handle, di_bh); | |
3773 | if (ret) | |
3774 | mlog_errno(ret); | |
3775 | ||
3776 | out: | |
3777 | ocfs2_free_path(left_path); | |
3778 | ocfs2_free_path(right_path); | |
3779 | ||
3780 | return ret; | |
3781 | } | |
3782 | ||
328d5752 | 3783 | static enum ocfs2_contig_type |
ad5a4d70 | 3784 | ocfs2_figure_merge_contig_type(struct inode *inode, struct ocfs2_path *path, |
328d5752 MF |
3785 | struct ocfs2_extent_list *el, int index, |
3786 | struct ocfs2_extent_rec *split_rec) | |
3787 | { | |
ad5a4d70 | 3788 | int status; |
328d5752 | 3789 | enum ocfs2_contig_type ret = CONTIG_NONE; |
ad5a4d70 TM |
3790 | u32 left_cpos, right_cpos; |
3791 | struct ocfs2_extent_rec *rec = NULL; | |
3792 | struct ocfs2_extent_list *new_el; | |
3793 | struct ocfs2_path *left_path = NULL, *right_path = NULL; | |
3794 | struct buffer_head *bh; | |
3795 | struct ocfs2_extent_block *eb; | |
3796 | ||
3797 | if (index > 0) { | |
3798 | rec = &el->l_recs[index - 1]; | |
3799 | } else if (path->p_tree_depth > 0) { | |
3800 | status = ocfs2_find_cpos_for_left_leaf(inode->i_sb, | |
3801 | path, &left_cpos); | |
3802 | if (status) | |
3803 | goto out; | |
3804 | ||
3805 | if (left_cpos != 0) { | |
3806 | left_path = ocfs2_new_path(path_root_bh(path), | |
3807 | path_root_el(path)); | |
3808 | if (!left_path) | |
3809 | goto out; | |
3810 | ||
3811 | status = ocfs2_find_path(inode, left_path, left_cpos); | |
3812 | if (status) | |
3813 | goto out; | |
3814 | ||
3815 | new_el = path_leaf_el(left_path); | |
3816 | ||
3817 | if (le16_to_cpu(new_el->l_next_free_rec) != | |
3818 | le16_to_cpu(new_el->l_count)) { | |
3819 | bh = path_leaf_bh(left_path); | |
3820 | eb = (struct ocfs2_extent_block *)bh->b_data; | |
3821 | OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, | |
3822 | eb); | |
3823 | goto out; | |
3824 | } | |
3825 | rec = &new_el->l_recs[ | |
3826 | le16_to_cpu(new_el->l_next_free_rec) - 1]; | |
3827 | } | |
3828 | } | |
328d5752 MF |
3829 | |
3830 | /* | |
3831 | * We're careful to check for an empty extent record here - | |
3832 | * the merge code will know what to do if it sees one. | |
3833 | */ | |
ad5a4d70 | 3834 | if (rec) { |
328d5752 MF |
3835 | if (index == 1 && ocfs2_is_empty_extent(rec)) { |
3836 | if (split_rec->e_cpos == el->l_recs[index].e_cpos) | |
3837 | ret = CONTIG_RIGHT; | |
3838 | } else { | |
3839 | ret = ocfs2_extent_contig(inode, rec, split_rec); | |
3840 | } | |
3841 | } | |
3842 | ||
ad5a4d70 TM |
3843 | rec = NULL; |
3844 | if (index < (le16_to_cpu(el->l_next_free_rec) - 1)) | |
3845 | rec = &el->l_recs[index + 1]; | |
3846 | else if (le16_to_cpu(el->l_next_free_rec) == le16_to_cpu(el->l_count) && | |
3847 | path->p_tree_depth > 0) { | |
3848 | status = ocfs2_find_cpos_for_right_leaf(inode->i_sb, | |
3849 | path, &right_cpos); | |
3850 | if (status) | |
3851 | goto out; | |
3852 | ||
3853 | if (right_cpos == 0) | |
3854 | goto out; | |
3855 | ||
3856 | right_path = ocfs2_new_path(path_root_bh(path), | |
3857 | path_root_el(path)); | |
3858 | if (!right_path) | |
3859 | goto out; | |
3860 | ||
3861 | status = ocfs2_find_path(inode, right_path, right_cpos); | |
3862 | if (status) | |
3863 | goto out; | |
3864 | ||
3865 | new_el = path_leaf_el(right_path); | |
3866 | rec = &new_el->l_recs[0]; | |
3867 | if (ocfs2_is_empty_extent(rec)) { | |
3868 | if (le16_to_cpu(new_el->l_next_free_rec) <= 1) { | |
3869 | bh = path_leaf_bh(right_path); | |
3870 | eb = (struct ocfs2_extent_block *)bh->b_data; | |
3871 | OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, | |
3872 | eb); | |
3873 | goto out; | |
3874 | } | |
3875 | rec = &new_el->l_recs[1]; | |
3876 | } | |
3877 | } | |
3878 | ||
3879 | if (rec) { | |
328d5752 MF |
3880 | enum ocfs2_contig_type contig_type; |
3881 | ||
328d5752 MF |
3882 | contig_type = ocfs2_extent_contig(inode, rec, split_rec); |
3883 | ||
3884 | if (contig_type == CONTIG_LEFT && ret == CONTIG_RIGHT) | |
3885 | ret = CONTIG_LEFTRIGHT; | |
3886 | else if (ret == CONTIG_NONE) | |
3887 | ret = contig_type; | |
3888 | } | |
3889 | ||
ad5a4d70 TM |
3890 | out: |
3891 | if (left_path) | |
3892 | ocfs2_free_path(left_path); | |
3893 | if (right_path) | |
3894 | ocfs2_free_path(right_path); | |
3895 | ||
328d5752 MF |
3896 | return ret; |
3897 | } | |
3898 | ||
dcd0538f MF |
3899 | static void ocfs2_figure_contig_type(struct inode *inode, |
3900 | struct ocfs2_insert_type *insert, | |
3901 | struct ocfs2_extent_list *el, | |
3902 | struct ocfs2_extent_rec *insert_rec) | |
3903 | { | |
3904 | int i; | |
3905 | enum ocfs2_contig_type contig_type = CONTIG_NONE; | |
3906 | ||
e48edee2 MF |
3907 | BUG_ON(le16_to_cpu(el->l_tree_depth) != 0); |
3908 | ||
dcd0538f MF |
3909 | for(i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) { |
3910 | contig_type = ocfs2_extent_contig(inode, &el->l_recs[i], | |
3911 | insert_rec); | |
3912 | if (contig_type != CONTIG_NONE) { | |
3913 | insert->ins_contig_index = i; | |
3914 | break; | |
3915 | } | |
3916 | } | |
3917 | insert->ins_contig = contig_type; | |
3918 | } | |
3919 | ||
3920 | /* | |
3921 | * This should only be called against the rightmost leaf extent list. | |
3922 | * | |
3923 | * ocfs2_figure_appending_type() will figure out whether we'll have to | |
3924 | * insert at the tail of the rightmost leaf. | |
3925 | * | |
3926 | * This should also work against the dinode list for trees with 0 | |
3927 | * depth. If we consider the dinode list to be the rightmost leaf node | |
3928 | * then the logic here makes sense. | |
3929 | */ | |
3930 | static void ocfs2_figure_appending_type(struct ocfs2_insert_type *insert, | |
3931 | struct ocfs2_extent_list *el, | |
3932 | struct ocfs2_extent_rec *insert_rec) | |
3933 | { | |
3934 | int i; | |
3935 | u32 cpos = le32_to_cpu(insert_rec->e_cpos); | |
3936 | struct ocfs2_extent_rec *rec; | |
3937 | ||
3938 | insert->ins_appending = APPEND_NONE; | |
3939 | ||
e48edee2 | 3940 | BUG_ON(le16_to_cpu(el->l_tree_depth) != 0); |
dcd0538f MF |
3941 | |
3942 | if (!el->l_next_free_rec) | |
3943 | goto set_tail_append; | |
3944 | ||
3945 | if (ocfs2_is_empty_extent(&el->l_recs[0])) { | |
3946 | /* Were all records empty? */ | |
3947 | if (le16_to_cpu(el->l_next_free_rec) == 1) | |
3948 | goto set_tail_append; | |
3949 | } | |
3950 | ||
3951 | i = le16_to_cpu(el->l_next_free_rec) - 1; | |
3952 | rec = &el->l_recs[i]; | |
3953 | ||
e48edee2 MF |
3954 | if (cpos >= |
3955 | (le32_to_cpu(rec->e_cpos) + le16_to_cpu(rec->e_leaf_clusters))) | |
dcd0538f MF |
3956 | goto set_tail_append; |
3957 | ||
3958 | return; | |
3959 | ||
3960 | set_tail_append: | |
3961 | insert->ins_appending = APPEND_TAIL; | |
3962 | } | |
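/*
 * A hypothetical example: if the rightmost leaf's last record covers
 * clusters [100, 108), an insert at cpos 108 or beyond (the sparse
 * file case) is flagged APPEND_TAIL, while an insert at cpos 50 - a
 * hole somewhere inside the already-covered range - stays
 * APPEND_NONE. An empty leaf is always treated as a tail append.
 */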
3963 | ||
3964 | /* | |
3965 | * Helper function called at the beginning of an insert. | |
3966 | * | |
3967 | * This computes a few things that are commonly used in the process of | |
3968 | * inserting into the btree: | |
3969 | * - Whether the new extent is contiguous with an existing one. | |
3970 | * - The current tree depth. | |
3971 | * - Whether the insert is an appending one. | |
3972 | * - The total # of free records in the tree. | |
3973 | * | |
3974 | * All of the information is stored on the ocfs2_insert_type | |
3975 | * structure. | |
3976 | */ | |
3977 | static int ocfs2_figure_insert_type(struct inode *inode, | |
3978 | struct buffer_head *di_bh, | |
3979 | struct buffer_head **last_eb_bh, | |
3980 | struct ocfs2_extent_rec *insert_rec, | |
c77534f6 | 3981 | int *free_records, |
dcd0538f MF |
3982 | struct ocfs2_insert_type *insert) |
3983 | { | |
3984 | int ret; | |
3985 | struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; | |
3986 | struct ocfs2_extent_block *eb; | |
3987 | struct ocfs2_extent_list *el; | |
3988 | struct ocfs2_path *path = NULL; | |
3989 | struct buffer_head *bh = NULL; | |
3990 | ||
328d5752 MF |
3991 | insert->ins_split = SPLIT_NONE; |
3992 | ||
dcd0538f MF |
3993 | el = &di->id2.i_list; |
3994 | insert->ins_tree_depth = le16_to_cpu(el->l_tree_depth); | |
3995 | ||
3996 | if (el->l_tree_depth) { | |
3997 | /* | |
3998 | * If we have tree depth, we read in the | |
3999 | * rightmost extent block ahead of time as | |
4000 | * ocfs2_figure_insert_type() and ocfs2_add_branch() | |
4001 | * may want it later. | |
4002 | */ | |
4003 | ret = ocfs2_read_block(OCFS2_SB(inode->i_sb), | |
4004 | le64_to_cpu(di->i_last_eb_blk), &bh, | |
4005 | OCFS2_BH_CACHED, inode); | |
4006 | if (ret) { | |
4007 | mlog_exit(ret); | |
4008 | goto out; | |
4009 | } | |
ccd979bd | 4010 | eb = (struct ocfs2_extent_block *) bh->b_data; |
ccd979bd | 4011 | el = &eb->h_list; |
dcd0538f | 4012 | } |
ccd979bd | 4013 | |
dcd0538f MF |
4014 | /* |
4015 | * Unless we have a contiguous insert, we'll need to know if | |
4016 | * there is room left in our allocation tree for another | |
4017 | * extent record. | |
4018 | * | |
4019 | * XXX: This test is simplistic, we can search for empty | |
4020 | * extent records too. | |
4021 | */ | |
c77534f6 | 4022 | *free_records = le16_to_cpu(el->l_count) - |
dcd0538f MF |
4023 | le16_to_cpu(el->l_next_free_rec); |
4024 | ||
4025 | if (!insert->ins_tree_depth) { | |
4026 | ocfs2_figure_contig_type(inode, insert, el, insert_rec); | |
4027 | ocfs2_figure_appending_type(insert, el, insert_rec); | |
4028 | return 0; | |
ccd979bd MF |
4029 | } |
4030 | ||
dcd0538f MF |
4031 | path = ocfs2_new_inode_path(di_bh); |
4032 | if (!path) { | |
4033 | ret = -ENOMEM; | |
4034 | mlog_errno(ret); | |
4035 | goto out; | |
4036 | } | |
ccd979bd | 4037 | |
dcd0538f MF |
4038 | /* |
4039 | * In the case that we're inserting past what the tree | |
4040 | * currently accounts for, ocfs2_find_path() will return for | |
4041 | * us the rightmost tree path. This is accounted for below in | |
4042 | * the appending code. | |
4043 | */ | |
4044 | ret = ocfs2_find_path(inode, path, le32_to_cpu(insert_rec->e_cpos)); | |
4045 | if (ret) { | |
4046 | mlog_errno(ret); | |
4047 | goto out; | |
4048 | } | |
ccd979bd | 4049 | |
dcd0538f MF |
4050 | el = path_leaf_el(path); |
4051 | ||
4052 | /* | |
4053 | * Now that we have the path, there's two things we want to determine: | |
4054 | * 1) Contiguousness (also set contig_index if this is so) | |
4055 | * | |
4056 | * 2) Are we doing an append? We can trivially break this up | |
4057 | * into two types of appends: simple record append, or a | |
4058 | * rotate inside the tail leaf. | |
4059 | */ | |
4060 | ocfs2_figure_contig_type(inode, insert, el, insert_rec); | |
4061 | ||
4062 | /* | |
4063 | * The insert code isn't quite ready to deal with all cases of | |
4064 | * left contiguousness. Specifically, if it's an insert into | |
4065 | * the 1st record in a leaf, it will require the adjustment of | |
e48edee2 | 4066 | * cluster count on the last record of the path directly to its |
dcd0538f MF |
4067 | * left. For now, just catch that case and fool the layers |
4068 | * above us. This works just fine for tree_depth == 0, which | |
4069 | * is why we allow that above. | |
4070 | */ | |
4071 | if (insert->ins_contig == CONTIG_LEFT && | |
4072 | insert->ins_contig_index == 0) | |
4073 | insert->ins_contig = CONTIG_NONE; | |
4074 | ||
4075 | /* | |
4076 | * Ok, so we can simply compare against last_eb to figure out | |
4077 | * whether the path doesn't exist. This will only happen in | |
4078 | * the case that we're doing a tail append, so maybe we can | |
4079 | * take advantage of that information somehow. | |
4080 | */ | |
4081 | if (le64_to_cpu(di->i_last_eb_blk) == path_leaf_bh(path)->b_blocknr) { | |
4082 | /* | |
4083 | * Ok, ocfs2_find_path() returned us the rightmost | |
4084 | * tree path. This might be an appending insert. There are | |
4085 | * two cases: | |
4086 | * 1) We're doing a true append at the tail: | |
4087 | * -This might even be off the end of the leaf | |
4088 | * 2) We're "appending" by rotating in the tail | |
4089 | */ | |
4090 | ocfs2_figure_appending_type(insert, el, insert_rec); | |
4091 | } | |
4092 | ||
4093 | out: | |
4094 | ocfs2_free_path(path); | |
4095 | ||
4096 | if (ret == 0) | |
4097 | *last_eb_bh = bh; | |
4098 | else | |
4099 | brelse(bh); | |
4100 | return ret; | |
ccd979bd MF |
4101 | } |
4102 | ||
dcd0538f MF |
4103 | /* |
4104 | * Insert an extent into an inode btree. | |
4105 | * | |
4106 | * The caller needs to update fe->i_clusters | |
4107 | */ | |
ccd979bd | 4108 | int ocfs2_insert_extent(struct ocfs2_super *osb, |
1fabe148 | 4109 | handle_t *handle, |
ccd979bd MF |
4110 | struct inode *inode, |
4111 | struct buffer_head *fe_bh, | |
dcd0538f | 4112 | u32 cpos, |
ccd979bd MF |
4113 | u64 start_blk, |
4114 | u32 new_clusters, | |
2ae99a60 | 4115 | u8 flags, |
ccd979bd MF |
4116 | struct ocfs2_alloc_context *meta_ac) |
4117 | { | |
c3afcbb3 | 4118 | int status; |
c77534f6 | 4119 | int uninitialized_var(free_records); |
ccd979bd | 4120 | struct buffer_head *last_eb_bh = NULL; |
dcd0538f MF |
4121 | struct ocfs2_insert_type insert = {0, }; |
4122 | struct ocfs2_extent_rec rec; | |
4123 | ||
1afc32b9 MF |
4124 | BUG_ON(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL); |
4125 | ||
dcd0538f MF |
4126 | mlog(0, "add %u clusters at position %u to inode %llu\n", |
4127 | new_clusters, cpos, (unsigned long long)OCFS2_I(inode)->ip_blkno); | |
4128 | ||
4129 | mlog_bug_on_msg(!ocfs2_sparse_alloc(osb) && | |
4130 | (OCFS2_I(inode)->ip_clusters != cpos), | |
4131 | "Device %s, asking for sparse allocation: inode %llu, " | |
4132 | "cpos %u, clusters %u\n", | |
4133 | osb->dev_str, | |
4134 | (unsigned long long)OCFS2_I(inode)->ip_blkno, cpos, | |
4135 | OCFS2_I(inode)->ip_clusters); | |
4136 | ||
e48edee2 | 4137 | memset(&rec, 0, sizeof(rec)); |
dcd0538f MF |
4138 | rec.e_cpos = cpu_to_le32(cpos); |
4139 | rec.e_blkno = cpu_to_le64(start_blk); | |
e48edee2 | 4140 | rec.e_leaf_clusters = cpu_to_le16(new_clusters); |
2ae99a60 | 4141 | rec.e_flags = flags; |
dcd0538f MF |
4142 | |
4143 | status = ocfs2_figure_insert_type(inode, fe_bh, &last_eb_bh, &rec, | |
c77534f6 | 4144 | &free_records, &insert); |
dcd0538f MF |
4145 | if (status < 0) { |
4146 | mlog_errno(status); | |
4147 | goto bail; | |
ccd979bd MF |
4148 | } |
4149 | ||
dcd0538f MF |
4150 | mlog(0, "Insert.appending: %u, Insert.Contig: %u, " |
4151 | "Insert.contig_index: %d, Insert.free_records: %d, " | |
4152 | "Insert.tree_depth: %d\n", | |
4153 | insert.ins_appending, insert.ins_contig, insert.ins_contig_index, | |
c77534f6 | 4154 | free_records, insert.ins_tree_depth); |
ccd979bd | 4155 | |
c77534f6 | 4156 | if (insert.ins_contig == CONTIG_NONE && free_records == 0) { |
c3afcbb3 | 4157 | status = ocfs2_grow_tree(inode, handle, fe_bh, |
328d5752 | 4158 | &insert.ins_tree_depth, &last_eb_bh, |
c3afcbb3 MF |
4159 | meta_ac); |
4160 | if (status) { | |
ccd979bd MF |
4161 | mlog_errno(status); |
4162 | goto bail; | |
4163 | } | |
ccd979bd MF |
4164 | } |
4165 | ||
dcd0538f MF |
4166 | /* Finally, we can add clusters. This might rotate the tree for us. */ |
4167 | status = ocfs2_do_insert_extent(inode, handle, fe_bh, &rec, &insert); | |
ccd979bd MF |
4168 | if (status < 0) |
4169 | mlog_errno(status); | |
83418978 MF |
4170 | else |
4171 | ocfs2_extent_map_insert_rec(inode, &rec); | |
ccd979bd MF |
4172 | |
4173 | bail: | |
ccd979bd MF |
4174 | if (last_eb_bh) |
4175 | brelse(last_eb_bh); | |
4176 | ||
4177 | mlog_exit(status); | |
4178 | return status; | |
4179 | } | |
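/*
 * A minimal usage sketch (hypothetical caller - transaction setup,
 * cluster allocation and error handling are elided): to add
 * "new_clusters" clusters whose first block is "start_blk" at logical
 * cluster "cpos", a caller already holding a transaction would do:
 *
 *	status = ocfs2_insert_extent(osb, handle, inode, fe_bh,
 *				     cpos, start_blk, new_clusters,
 *				     0, meta_ac);
 *	if (status < 0)
 *		mlog_errno(status);
 *
 * A non-zero flags value (e.g. OCFS2_EXT_UNWRITTEN) would mark the
 * new extent accordingly.
 */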
4180 | ||
328d5752 MF |
4181 | static void ocfs2_make_right_split_rec(struct super_block *sb, |
4182 | struct ocfs2_extent_rec *split_rec, | |
4183 | u32 cpos, | |
4184 | struct ocfs2_extent_rec *rec) | |
4185 | { | |
4186 | u32 rec_cpos = le32_to_cpu(rec->e_cpos); | |
4187 | u32 rec_range = rec_cpos + le16_to_cpu(rec->e_leaf_clusters); | |
4188 | ||
4189 | memset(split_rec, 0, sizeof(struct ocfs2_extent_rec)); | |
4190 | ||
4191 | split_rec->e_cpos = cpu_to_le32(cpos); | |
4192 | split_rec->e_leaf_clusters = cpu_to_le16(rec_range - cpos); | |
4193 | ||
4194 | split_rec->e_blkno = rec->e_blkno; | |
4195 | le64_add_cpu(&split_rec->e_blkno, | |
4196 | ocfs2_clusters_to_blocks(sb, cpos - rec_cpos)); | |
4197 | ||
4198 | split_rec->e_flags = rec->e_flags; | |
4199 | } | |
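/*
 * A hypothetical example: splitting rec = { e_cpos = 100,
 * e_leaf_clusters = 8 } at cpos 103 yields
 * split_rec = { e_cpos = 103, e_leaf_clusters = 5 }, with e_blkno
 * advanced by ocfs2_clusters_to_blocks(sb, 3) and e_flags copied from
 * the original record.
 */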
4200 | ||
4201 | static int ocfs2_split_and_insert(struct inode *inode, | |
4202 | handle_t *handle, | |
4203 | struct ocfs2_path *path, | |
4204 | struct buffer_head *di_bh, | |
4205 | struct buffer_head **last_eb_bh, | |
4206 | int split_index, | |
4207 | struct ocfs2_extent_rec *orig_split_rec, | |
4208 | struct ocfs2_alloc_context *meta_ac) | |
4209 | { | |
4210 | int ret = 0, depth; | |
4211 | unsigned int insert_range, rec_range, do_leftright = 0; | |
4212 | struct ocfs2_extent_rec tmprec; | |
4213 | struct ocfs2_extent_list *rightmost_el; | |
4214 | struct ocfs2_extent_rec rec; | |
4215 | struct ocfs2_extent_rec split_rec = *orig_split_rec; | |
4216 | struct ocfs2_insert_type insert; | |
4217 | struct ocfs2_extent_block *eb; | |
4218 | struct ocfs2_dinode *di; | |
4219 | ||
4220 | leftright: | |
4221 | /* | |
4222 | * Store a copy of the record on the stack - it might move | |
4223 | * around as the tree is manipulated below. | |
4224 | */ | |
4225 | rec = path_leaf_el(path)->l_recs[split_index]; | |
4226 | ||
4227 | di = (struct ocfs2_dinode *)di_bh->b_data; | |
4228 | rightmost_el = &di->id2.i_list; | |
4229 | ||
4230 | depth = le16_to_cpu(rightmost_el->l_tree_depth); | |
4231 | if (depth) { | |
4232 | BUG_ON(!(*last_eb_bh)); | |
4233 | eb = (struct ocfs2_extent_block *) (*last_eb_bh)->b_data; | |
4234 | rightmost_el = &eb->h_list; | |
4235 | } | |
4236 | ||
4237 | if (le16_to_cpu(rightmost_el->l_next_free_rec) == | |
4238 | le16_to_cpu(rightmost_el->l_count)) { | |
328d5752 MF |
4239 | ret = ocfs2_grow_tree(inode, handle, di_bh, &depth, last_eb_bh, |
4240 | meta_ac); | |
4241 | if (ret) { | |
4242 | mlog_errno(ret); | |
4243 | goto out; | |
4244 | } | |
328d5752 MF |
4245 | } |
4246 | ||
4247 | memset(&insert, 0, sizeof(struct ocfs2_insert_type)); | |
4248 | insert.ins_appending = APPEND_NONE; | |
4249 | insert.ins_contig = CONTIG_NONE; | |
328d5752 MF |
4250 | insert.ins_tree_depth = depth; |
4251 | ||
4252 | insert_range = le32_to_cpu(split_rec.e_cpos) + | |
4253 | le16_to_cpu(split_rec.e_leaf_clusters); | |
4254 | rec_range = le32_to_cpu(rec.e_cpos) + | |
4255 | le16_to_cpu(rec.e_leaf_clusters); | |
4256 | ||
4257 | if (split_rec.e_cpos == rec.e_cpos) { | |
4258 | insert.ins_split = SPLIT_LEFT; | |
4259 | } else if (insert_range == rec_range) { | |
4260 | insert.ins_split = SPLIT_RIGHT; | |
4261 | } else { | |
4262 | /* | |
4263 | * Left/right split. We fake this as a right split | |
4264 | * first and then make a second pass as a left split. | |
4265 | */ | |
4266 | insert.ins_split = SPLIT_RIGHT; | |
4267 | ||
4268 | ocfs2_make_right_split_rec(inode->i_sb, &tmprec, insert_range, | |
4269 | &rec); | |
4270 | ||
4271 | split_rec = tmprec; | |
4272 | ||
4273 | BUG_ON(do_leftright); | |
4274 | do_leftright = 1; | |
4275 | } | |
4276 | ||
4277 | ret = ocfs2_do_insert_extent(inode, handle, di_bh, &split_rec, | |
4278 | &insert); | |
4279 | if (ret) { | |
4280 | mlog_errno(ret); | |
4281 | goto out; | |
4282 | } | |
4283 | ||
4284 | if (do_leftright == 1) { | |
4285 | u32 cpos; | |
4286 | struct ocfs2_extent_list *el; | |
4287 | ||
4288 | do_leftright++; | |
4289 | split_rec = *orig_split_rec; | |
4290 | ||
4291 | ocfs2_reinit_path(path, 1); | |
4292 | ||
4293 | cpos = le32_to_cpu(split_rec.e_cpos); | |
4294 | ret = ocfs2_find_path(inode, path, cpos); | |
4295 | if (ret) { | |
4296 | mlog_errno(ret); | |
4297 | goto out; | |
4298 | } | |
4299 | ||
4300 | el = path_leaf_el(path); | |
4301 | split_index = ocfs2_search_extent_list(el, cpos); | |
4302 | goto leftright; | |
4303 | } | |
4304 | out: | |
4305 | ||
4306 | return ret; | |
4307 | } | |
4308 | ||
4309 | /* | |
4310 | * Mark part or all of the extent record at split_index in the leaf | |
4311 | * pointed to by path as written. This removes the unwritten | |
4312 | * extent flag. | |
4313 | * | |
4314 | * Care is taken to handle contiguousness so as to not grow the tree. | |
4315 | * | |
4316 | * meta_ac is not strictly necessary - we only truly need it if growth | |
4317 | * of the tree is required. All other cases will degrade into a less | |
4318 | * optimal tree layout. | |
4319 | * | |
4320 | * last_eb_bh should be the rightmost leaf block for any inode with a | |
4321 | * btree. Since a split may grow the tree or a merge might shrink it, the caller cannot trust the contents of that buffer after this call. | |
4322 | * | |
4323 | * This code is optimized for readability - several passes might be | |
4324 | * made over certain portions of the tree. All of those blocks will | |
4325 | * have been brought into cache (and pinned via the journal), so the | |
4326 | * extra overhead is not expressed in terms of disk reads. | |
4327 | */ | |
4328 | static int __ocfs2_mark_extent_written(struct inode *inode, | |
4329 | struct buffer_head *di_bh, | |
4330 | handle_t *handle, | |
4331 | struct ocfs2_path *path, | |
4332 | int split_index, | |
4333 | struct ocfs2_extent_rec *split_rec, | |
4334 | struct ocfs2_alloc_context *meta_ac, | |
4335 | struct ocfs2_cached_dealloc_ctxt *dealloc) | |
4336 | { | |
4337 | int ret = 0; | |
4338 | struct ocfs2_extent_list *el = path_leaf_el(path); | |
e8aed345 | 4339 | struct buffer_head *last_eb_bh = NULL; |
328d5752 MF |
4340 | struct ocfs2_extent_rec *rec = &el->l_recs[split_index]; |
4341 | struct ocfs2_merge_ctxt ctxt; | |
4342 | struct ocfs2_extent_list *rightmost_el; | |
4343 | ||
3cf0c507 | 4344 | if (!(rec->e_flags & OCFS2_EXT_UNWRITTEN)) { |
328d5752 MF |
4345 | ret = -EIO; |
4346 | mlog_errno(ret); | |
4347 | goto out; | |
4348 | } | |
4349 | ||
4350 | if (le32_to_cpu(rec->e_cpos) > le32_to_cpu(split_rec->e_cpos) || | |
4351 | ((le32_to_cpu(rec->e_cpos) + le16_to_cpu(rec->e_leaf_clusters)) < | |
4352 | (le32_to_cpu(split_rec->e_cpos) + le16_to_cpu(split_rec->e_leaf_clusters)))) { | |
4353 | ret = -EIO; | |
4354 | mlog_errno(ret); | |
4355 | goto out; | |
4356 | } | |
4357 | ||
ad5a4d70 | 4358 | ctxt.c_contig_type = ocfs2_figure_merge_contig_type(inode, path, el, |
328d5752 MF |
4359 | split_index, |
4360 | split_rec); | |
4361 | ||
4362 | /* | |
4363 | * The core merge / split code wants to know how much room is | |
4364 | * left in this inode's allocation tree, so we pass the | |
4365 | * rightmost extent list. | |
4366 | */ | |
4367 | if (path->p_tree_depth) { | |
4368 | struct ocfs2_extent_block *eb; | |
4369 | struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; | |
4370 | ||
4371 | ret = ocfs2_read_block(OCFS2_SB(inode->i_sb), | |
4372 | le64_to_cpu(di->i_last_eb_blk), | |
4373 | &last_eb_bh, OCFS2_BH_CACHED, inode); | |
4374 | if (ret) { | |
4375 | mlog_exit(ret); | |
4376 | goto out; | |
4377 | } | |
4378 | ||
4379 | eb = (struct ocfs2_extent_block *) last_eb_bh->b_data; | |
4380 | if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) { | |
4381 | OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb); | |
4382 | ret = -EROFS; | |
4383 | goto out; | |
4384 | } | |
4385 | ||
4386 | rightmost_el = &eb->h_list; | |
4387 | } else | |
4388 | rightmost_el = path_root_el(path); | |
4389 | ||
328d5752 MF |
4390 | if (rec->e_cpos == split_rec->e_cpos && |
4391 | rec->e_leaf_clusters == split_rec->e_leaf_clusters) | |
4392 | ctxt.c_split_covers_rec = 1; | |
4393 | else | |
4394 | ctxt.c_split_covers_rec = 0; | |
4395 | ||
4396 | ctxt.c_has_empty_extent = ocfs2_is_empty_extent(&el->l_recs[0]); | |
4397 | ||
015452b1 MF |
4398 | mlog(0, "index: %d, contig: %u, has_empty: %u, split_covers: %u\n", |
4399 | split_index, ctxt.c_contig_type, ctxt.c_has_empty_extent, | |
4400 | ctxt.c_split_covers_rec); | |
328d5752 MF |
4401 | |
4402 | if (ctxt.c_contig_type == CONTIG_NONE) { | |
4403 | if (ctxt.c_split_covers_rec) | |
4404 | el->l_recs[split_index] = *split_rec; | |
4405 | else | |
4406 | ret = ocfs2_split_and_insert(inode, handle, path, di_bh, | |
4407 | &last_eb_bh, split_index, | |
4408 | split_rec, meta_ac); | |
4409 | if (ret) | |
4410 | mlog_errno(ret); | |
4411 | } else { | |
4412 | ret = ocfs2_try_to_merge_extent(inode, handle, path, | |
4413 | split_index, split_rec, | |
4414 | dealloc, &ctxt); | |
4415 | if (ret) | |
4416 | mlog_errno(ret); | |
4417 | } | |
4418 | ||
328d5752 MF |
4419 | out: |
4420 | brelse(last_eb_bh); | |
4421 | return ret; | |
4422 | } | |
4423 | ||
4424 | /* | |
4425 | * Mark the already-existing extent at cpos as written for len clusters. | |
4426 | * | |
4427 | * If the existing extent is larger than the request, initiate a | |
4428 | * split. An attempt will be made at merging with adjacent extents. | |
4429 | * | |
4430 | * The caller is responsible for passing down meta_ac if we'll need it. | |
4431 | */ | |
4432 | int ocfs2_mark_extent_written(struct inode *inode, struct buffer_head *di_bh, | |
4433 | handle_t *handle, u32 cpos, u32 len, u32 phys, | |
4434 | struct ocfs2_alloc_context *meta_ac, | |
4435 | struct ocfs2_cached_dealloc_ctxt *dealloc) | |
4436 | { | |
4437 | int ret, index; | |
4438 | u64 start_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys); | |
4439 | struct ocfs2_extent_rec split_rec; | |
4440 | struct ocfs2_path *left_path = NULL; | |
4441 | struct ocfs2_extent_list *el; | |
4442 | ||
4443 | mlog(0, "Inode %lu cpos %u, len %u, phys %u (%llu)\n", | |
4444 | inode->i_ino, cpos, len, phys, (unsigned long long)start_blkno); | |
4445 | ||
4446 | if (!ocfs2_writes_unwritten_extents(OCFS2_SB(inode->i_sb))) { | |
4447 | ocfs2_error(inode->i_sb, "Inode %llu has unwritten extents " | |
4448 | "that are being written to, but the feature bit " | |
4449 | "is not set in the super block.", | |
4450 | (unsigned long long)OCFS2_I(inode)->ip_blkno); | |
4451 | ret = -EROFS; | |
4452 | goto out; | |
4453 | } | |
4454 | ||
4455 | /* | |
4456 | * XXX: This should be fixed up so that we just re-insert the | |
4457 | * next extent records. | |
4458 | */ | |
4459 | ocfs2_extent_map_trunc(inode, 0); | |
4460 | ||
4461 | left_path = ocfs2_new_inode_path(di_bh); | |
4462 | if (!left_path) { | |
4463 | ret = -ENOMEM; | |
4464 | mlog_errno(ret); | |
4465 | goto out; | |
4466 | } | |
4467 | ||
4468 | ret = ocfs2_find_path(inode, left_path, cpos); | |
4469 | if (ret) { | |
4470 | mlog_errno(ret); | |
4471 | goto out; | |
4472 | } | |
4473 | el = path_leaf_el(left_path); | |
4474 | ||
4475 | index = ocfs2_search_extent_list(el, cpos); | |
4476 | if (index == -1 || index >= le16_to_cpu(el->l_next_free_rec)) { | |
4477 | ocfs2_error(inode->i_sb, | |
4478 | "Inode %llu has an extent at cpos %u which can no " | |
4479 | "longer be found.\n", | |
4480 | (unsigned long long)OCFS2_I(inode)->ip_blkno, cpos); | |
4481 | ret = -EROFS; | |
4482 | goto out; | |
4483 | } | |
4484 | ||
4485 | memset(&split_rec, 0, sizeof(struct ocfs2_extent_rec)); | |
4486 | split_rec.e_cpos = cpu_to_le32(cpos); | |
4487 | split_rec.e_leaf_clusters = cpu_to_le16(len); | |
4488 | split_rec.e_blkno = cpu_to_le64(start_blkno); | |
4489 | split_rec.e_flags = path_leaf_el(left_path)->l_recs[index].e_flags; | |
4490 | split_rec.e_flags &= ~OCFS2_EXT_UNWRITTEN; | |
4491 | ||
4492 | ret = __ocfs2_mark_extent_written(inode, di_bh, handle, left_path, | |
4493 | index, &split_rec, meta_ac, dealloc); | |
4494 | if (ret) | |
4495 | mlog_errno(ret); | |
4496 | ||
4497 | out: | |
4498 | ocfs2_free_path(left_path); | |
4499 | return ret; | |
4500 | } | |
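/*
 * A minimal usage sketch (hypothetical caller): once the data for
 * clusters [cpos, cpos + len) backed by physical cluster "phys" has
 * actually been written, the unwritten flag can be cleared with:
 *
 *	ret = ocfs2_mark_extent_written(inode, di_bh, handle, cpos,
 *					len, phys, meta_ac, &dealloc);
 *
 * where dealloc is an ocfs2_cached_dealloc_ctxt collecting any extent
 * blocks freed by a merge, and meta_ac covers possible tree growth
 * from a split.
 */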
4501 | ||
d0c7d708 MF |
4502 | static int ocfs2_split_tree(struct inode *inode, struct buffer_head *di_bh, |
4503 | handle_t *handle, struct ocfs2_path *path, | |
4504 | int index, u32 new_range, | |
4505 | struct ocfs2_alloc_context *meta_ac) | |
4506 | { | |
4507 | int ret, depth, credits = handle->h_buffer_credits; | |
4508 | struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; | |
4509 | struct buffer_head *last_eb_bh = NULL; | |
4510 | struct ocfs2_extent_block *eb; | |
4511 | struct ocfs2_extent_list *rightmost_el, *el; | |
4512 | struct ocfs2_extent_rec split_rec; | |
4513 | struct ocfs2_extent_rec *rec; | |
4514 | struct ocfs2_insert_type insert; | |
4515 | ||
4516 | /* | |
4517 | * Setup the record to split before we grow the tree. | |
4518 | */ | |
4519 | el = path_leaf_el(path); | |
4520 | rec = &el->l_recs[index]; | |
4521 | ocfs2_make_right_split_rec(inode->i_sb, &split_rec, new_range, rec); | |
4522 | ||
4523 | depth = path->p_tree_depth; | |
4524 | if (depth > 0) { | |
4525 | ret = ocfs2_read_block(OCFS2_SB(inode->i_sb), | |
4526 | le64_to_cpu(di->i_last_eb_blk), | |
4527 | &last_eb_bh, OCFS2_BH_CACHED, inode); | |
4528 | if (ret < 0) { | |
4529 | mlog_errno(ret); | |
4530 | goto out; | |
4531 | } | |
4532 | ||
4533 | eb = (struct ocfs2_extent_block *) last_eb_bh->b_data; | |
4534 | rightmost_el = &eb->h_list; | |
4535 | } else | |
4536 | rightmost_el = path_leaf_el(path); | |
4537 | ||
4538 | credits += path->p_tree_depth + ocfs2_extend_meta_needed(di); | |
4539 | ret = ocfs2_extend_trans(handle, credits); | |
4540 | if (ret) { | |
4541 | mlog_errno(ret); | |
4542 | goto out; | |
4543 | } | |
4544 | ||
4545 | if (le16_to_cpu(rightmost_el->l_next_free_rec) == | |
4546 | le16_to_cpu(rightmost_el->l_count)) { | |
d0c7d708 MF |
4547 | ret = ocfs2_grow_tree(inode, handle, di_bh, &depth, &last_eb_bh, |
4548 | meta_ac); | |
4549 | if (ret) { | |
4550 | mlog_errno(ret); | |
4551 | goto out; | |
4552 | } | |
d0c7d708 MF |
4553 | } |
4554 | ||
4555 | memset(&insert, 0, sizeof(struct ocfs2_insert_type)); | |
4556 | insert.ins_appending = APPEND_NONE; | |
4557 | insert.ins_contig = CONTIG_NONE; | |
4558 | insert.ins_split = SPLIT_RIGHT; | |
d0c7d708 MF |
4559 | insert.ins_tree_depth = depth; |
4560 | ||
4561 | ret = ocfs2_do_insert_extent(inode, handle, di_bh, &split_rec, &insert); | |
4562 | if (ret) | |
4563 | mlog_errno(ret); | |
4564 | ||
4565 | out: | |
4566 | brelse(last_eb_bh); | |
4567 | return ret; | |
4568 | } | |
4569 | ||
4570 | static int ocfs2_truncate_rec(struct inode *inode, handle_t *handle, | |
4571 | struct ocfs2_path *path, int index, | |
4572 | struct ocfs2_cached_dealloc_ctxt *dealloc, | |
4573 | u32 cpos, u32 len) | |
4574 | { | |
4575 | int ret; | |
4576 | u32 left_cpos, rec_range, trunc_range; | |
4577 | int wants_rotate = 0, is_rightmost_tree_rec = 0; | |
4578 | struct super_block *sb = inode->i_sb; | |
4579 | struct ocfs2_path *left_path = NULL; | |
4580 | struct ocfs2_extent_list *el = path_leaf_el(path); | |
4581 | struct ocfs2_extent_rec *rec; | |
4582 | struct ocfs2_extent_block *eb; | |
4583 | ||
4584 | if (ocfs2_is_empty_extent(&el->l_recs[0]) && index > 0) { | |
4585 | ret = ocfs2_rotate_tree_left(inode, handle, path, dealloc); | |
4586 | if (ret) { | |
4587 | mlog_errno(ret); | |
4588 | goto out; | |
4589 | } | |
4590 | ||
4591 | index--; | |
4592 | } | |
4593 | ||
4594 | if (index == (le16_to_cpu(el->l_next_free_rec) - 1) && | |
4595 | path->p_tree_depth) { | |
4596 | /* | |
4597 | * Check whether this is the rightmost tree record. If | |
4598 | * we remove all of this record or part of its right | |
4599 | * edge then an update of the record lengths above it | |
4600 | * will be required. | |
4601 | */ | |
4602 | eb = (struct ocfs2_extent_block *)path_leaf_bh(path)->b_data; | |
4603 | if (eb->h_next_leaf_blk == 0) | |
4604 | is_rightmost_tree_rec = 1; | |
4605 | } | |
4606 | ||
4607 | rec = &el->l_recs[index]; | |
4608 | if (index == 0 && path->p_tree_depth && | |
4609 | le32_to_cpu(rec->e_cpos) == cpos) { | |
4610 | /* | |
4611 | * Changing the leftmost offset (via partial or whole | |
4612 | * record truncate) of an interior (or rightmost) path | |
4613 | * means we have to update the subtree that is formed | |
4614 | * by this leaf and the one to its left. | |
4615 | * | |
4616 | * There are two cases we can skip: | |
4617 | * 1) Path is the leftmost one in our inode tree. | |
4618 | * 2) The leaf is rightmost and will be empty after | |
4619 | * we remove the extent record - the rotate code | |
4620 | * knows how to update the newly formed edge. | |
4621 | */ | |
4622 | ||
4623 | ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb, path, | |
4624 | &left_cpos); | |
4625 | if (ret) { | |
4626 | mlog_errno(ret); | |
4627 | goto out; | |
4628 | } | |
4629 | ||
4630 | if (left_cpos && le16_to_cpu(el->l_next_free_rec) > 1) { | |
4631 | left_path = ocfs2_new_path(path_root_bh(path), | |
4632 | path_root_el(path)); | |
4633 | if (!left_path) { | |
4634 | ret = -ENOMEM; | |
4635 | mlog_errno(ret); | |
4636 | goto out; | |
4637 | } | |
4638 | ||
4639 | ret = ocfs2_find_path(inode, left_path, left_cpos); | |
4640 | if (ret) { | |
4641 | mlog_errno(ret); | |
4642 | goto out; | |
4643 | } | |
4644 | } | |
4645 | } | |
4646 | ||
4647 | ret = ocfs2_extend_rotate_transaction(handle, 0, | |
4648 | handle->h_buffer_credits, | |
4649 | path); | |
4650 | if (ret) { | |
4651 | mlog_errno(ret); | |
4652 | goto out; | |
4653 | } | |
4654 | ||
4655 | ret = ocfs2_journal_access_path(inode, handle, path); | |
4656 | if (ret) { | |
4657 | mlog_errno(ret); | |
4658 | goto out; | |
4659 | } | |
4660 | ||
4661 | ret = ocfs2_journal_access_path(inode, handle, left_path); | |
4662 | if (ret) { | |
4663 | mlog_errno(ret); | |
4664 | goto out; | |
4665 | } | |
4666 | ||
4667 | rec_range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec); | |
4668 | trunc_range = cpos + len; | |
4669 | ||
4670 | if (le32_to_cpu(rec->e_cpos) == cpos && rec_range == trunc_range) { | |
4671 | int next_free; | |
4672 | ||
4673 | memset(rec, 0, sizeof(*rec)); | |
4674 | ocfs2_cleanup_merge(el, index); | |
4675 | wants_rotate = 1; | |
4676 | ||
4677 | next_free = le16_to_cpu(el->l_next_free_rec); | |
4678 | if (is_rightmost_tree_rec && next_free > 1) { | |
4679 | /* | |
4680 | * We skip the edge update if this path will | |
4681 | * be deleted by the rotate code. | |
4682 | */ | |
4683 | rec = &el->l_recs[next_free - 1]; | |
4684 | ocfs2_adjust_rightmost_records(inode, handle, path, | |
4685 | rec); | |
4686 | } | |
4687 | } else if (le32_to_cpu(rec->e_cpos) == cpos) { | |
4688 | /* Remove leftmost portion of the record. */ | |
4689 | le32_add_cpu(&rec->e_cpos, len); | |
4690 | le64_add_cpu(&rec->e_blkno, ocfs2_clusters_to_blocks(sb, len)); | |
4691 | le16_add_cpu(&rec->e_leaf_clusters, -len); | |
4692 | } else if (rec_range == trunc_range) { | |
4693 | /* Remove rightmost portion of the record */ | |
4694 | le16_add_cpu(&rec->e_leaf_clusters, -len); | |
4695 | if (is_rightmost_tree_rec) | |
4696 | ocfs2_adjust_rightmost_records(inode, handle, path, rec); | |
4697 | } else { | |
4698 | /* Caller should have trapped this. */ | |
4699 | mlog(ML_ERROR, "Inode %llu: Invalid record truncate: (%u, %u) " | |
4700 | "(%u, %u)\n", (unsigned long long)OCFS2_I(inode)->ip_blkno, | |
4701 | le32_to_cpu(rec->e_cpos), | |
4702 | le16_to_cpu(rec->e_leaf_clusters), cpos, len); | |
4703 | BUG(); | |
4704 | } | |
4705 | ||
4706 | if (left_path) { | |
4707 | int subtree_index; | |
4708 | ||
4709 | subtree_index = ocfs2_find_subtree_root(inode, left_path, path); | |
4710 | ocfs2_complete_edge_insert(inode, handle, left_path, path, | |
4711 | subtree_index); | |
4712 | } | |
4713 | ||
4714 | ocfs2_journal_dirty(handle, path_leaf_bh(path)); | |
4715 | ||
4716 | ret = ocfs2_rotate_tree_left(inode, handle, path, dealloc); | |
4717 | if (ret) { | |
4718 | mlog_errno(ret); | |
4719 | goto out; | |
4720 | } | |
4721 | ||
4722 | out: | |
4723 | ocfs2_free_path(left_path); | |
4724 | return ret; | |
4725 | } | |
4726 | ||
063c4561 MF |
4727 | int ocfs2_remove_extent(struct inode *inode, struct buffer_head *di_bh, |
4728 | u32 cpos, u32 len, handle_t *handle, | |
4729 | struct ocfs2_alloc_context *meta_ac, | |
4730 | struct ocfs2_cached_dealloc_ctxt *dealloc) | |
d0c7d708 MF |
4731 | { |
4732 | int ret, index; | |
4733 | u32 rec_range, trunc_range; | |
4734 | struct ocfs2_extent_rec *rec; | |
4735 | struct ocfs2_extent_list *el; | |
4736 | struct ocfs2_path *path; | |
4737 | ||
4738 | ocfs2_extent_map_trunc(inode, 0); | |
4739 | ||
4740 | path = ocfs2_new_inode_path(di_bh); | |
4741 | if (!path) { | |
4742 | ret = -ENOMEM; | |
4743 | mlog_errno(ret); | |
4744 | goto out; | |
4745 | } | |
4746 | ||
4747 | ret = ocfs2_find_path(inode, path, cpos); | |
4748 | if (ret) { | |
4749 | mlog_errno(ret); | |
4750 | goto out; | |
4751 | } | |
4752 | ||
4753 | el = path_leaf_el(path); | |
4754 | index = ocfs2_search_extent_list(el, cpos); | |
4755 | if (index == -1 || index >= le16_to_cpu(el->l_next_free_rec)) { | |
4756 | ocfs2_error(inode->i_sb, | |
4757 | "Inode %llu has an extent at cpos %u which can no " | |
4758 | "longer be found.\n", | |
4759 | (unsigned long long)OCFS2_I(inode)->ip_blkno, cpos); | |
4760 | ret = -EROFS; | |
4761 | goto out; | |
4762 | } | |
4763 | ||
4764 | /* | |
4765 | * We have 3 cases of extent removal: | |
4766 | * 1) Range covers the entire extent rec | |
4767 | * 2) Range begins or ends on one edge of the extent rec | |
4768 | * 3) Range is in the middle of the extent rec (no shared edges) | |
4769 | * | |
4770 | * For case 1 we remove the extent rec and left rotate to | |
4771 | * fill the hole. | |
4772 | * | |
4773 | * For case 2 we just shrink the existing extent rec, with a | |
4774 | * tree update if the shrinking edge is also the edge of an | |
4775 | * extent block. | |
4776 | * | |
4777 | * For case 3 we do a right split to turn the extent rec into | |
4778 | * something case 2 can handle. | |
4779 | */ | |
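/*
 * A hypothetical example: with an extent record covering clusters
 * [100, 108), removing (cpos 100, len 8) is case 1, removing
 * (cpos 100, len 3) or (cpos 105, len 3) is case 2, and removing
 * (cpos 102, len 3) is case 3 - the right split at cluster 105 leaves
 * records [100, 105) and [105, 108), after which the request becomes
 * a case 2 truncate of the right edge of the first record.
 */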
4780 | rec = &el->l_recs[index]; | |
4781 | rec_range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec); | |
4782 | trunc_range = cpos + len; | |
4783 | ||
4784 | BUG_ON(cpos < le32_to_cpu(rec->e_cpos) || trunc_range > rec_range); | |
4785 | ||
4786 | mlog(0, "Inode %llu, remove (cpos %u, len %u). Existing index %d " | |
4787 | "(cpos %u, len %u)\n", | |
4788 | (unsigned long long)OCFS2_I(inode)->ip_blkno, cpos, len, index, | |
4789 | le32_to_cpu(rec->e_cpos), ocfs2_rec_clusters(el, rec)); | |
4790 | ||
4791 | if (le32_to_cpu(rec->e_cpos) == cpos || rec_range == trunc_range) { | |
4792 | ret = ocfs2_truncate_rec(inode, handle, path, index, dealloc, | |
4793 | cpos, len); | |
4794 | if (ret) { | |
4795 | mlog_errno(ret); | |
4796 | goto out; | |
4797 | } | |
4798 | } else { | |
4799 | ret = ocfs2_split_tree(inode, di_bh, handle, path, index, | |
4800 | trunc_range, meta_ac); | |
4801 | if (ret) { | |
4802 | mlog_errno(ret); | |
4803 | goto out; | |
4804 | } | |
4805 | ||
4806 | /* | |
4807 | * The split could have manipulated the tree enough to | |
4808 | * move the record location, so we have to look for it again. | |
4809 | */ | |
4810 | ocfs2_reinit_path(path, 1); | |
4811 | ||
4812 | ret = ocfs2_find_path(inode, path, cpos); | |
4813 | if (ret) { | |
4814 | mlog_errno(ret); | |
4815 | goto out; | |
4816 | } | |
4817 | ||
4818 | el = path_leaf_el(path); | |
4819 | index = ocfs2_search_extent_list(el, cpos); | |
4820 | if (index == -1 || index >= le16_to_cpu(el->l_next_free_rec)) { | |
4821 | ocfs2_error(inode->i_sb, | |
4822 | "Inode %llu: split at cpos %u lost record.", | |
4823 | (unsigned long long)OCFS2_I(inode)->ip_blkno, | |
4824 | cpos); | |
4825 | ret = -EROFS; | |
4826 | goto out; | |
4827 | } | |
4828 | ||
4829 | /* | |
4830 | * Double check our values here. If anything is fishy, | |
4831 | * it's easier to catch it at the top level. | |
4832 | */ | |
4833 | rec = &el->l_recs[index]; | |
4834 | rec_range = le32_to_cpu(rec->e_cpos) + | |
4835 | ocfs2_rec_clusters(el, rec); | |
4836 | if (rec_range != trunc_range) { | |
4837 | ocfs2_error(inode->i_sb, | |
4838 | "Inode %llu: error after split at cpos %u" | |
4839 | "trunc len %u, existing record is (%u,%u)", | |
4840 | (unsigned long long)OCFS2_I(inode)->ip_blkno, | |
4841 | cpos, len, le32_to_cpu(rec->e_cpos), | |
4842 | ocfs2_rec_clusters(el, rec)); | |
4843 | ret = -EROFS; | |
4844 | goto out; | |
4845 | } | |
4846 | ||
4847 | ret = ocfs2_truncate_rec(inode, handle, path, index, dealloc, | |
4848 | cpos, len); | |
4849 | if (ret) { | |
4850 | mlog_errno(ret); | |
4851 | goto out; | |
4852 | } | |
4853 | } | |
4854 | ||
4855 | out: | |
4856 | ocfs2_free_path(path); | |
4857 | return ret; | |
4858 | } | |
4859 | ||
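/*
 * Editor's sketch -- not part of alloc.c.  It restates, with plain
 * host-endian integers, how a removal range (cpos, len) classifies
 * against an existing record per the three cases documented in
 * ocfs2_remove_extent() above.  The enum and function name are
 * hypothetical and exist only for illustration.
 */
enum remove_case { WHOLE_RECORD, SHARED_EDGE, MIDDLE_SPLIT };

static enum remove_case classify_removal(unsigned int rec_cpos,
                                          unsigned int rec_clusters,
                                          unsigned int cpos,
                                          unsigned int len)
{
        unsigned int rec_range = rec_cpos + rec_clusters;
        unsigned int trunc_range = cpos + len;

        /* ocfs2_remove_extent() asserts the range lies inside the record. */
        if (cpos == rec_cpos && trunc_range == rec_range)
                return WHOLE_RECORD;    /* case 1: delete rec, rotate left */
        if (cpos == rec_cpos || trunc_range == rec_range)
                return SHARED_EDGE;     /* case 2: shrink the record */
        return MIDDLE_SPLIT;            /* case 3: split, retry as case 2 */
}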
063c4561 | 4860 | int ocfs2_truncate_log_needs_flush(struct ocfs2_super *osb) |
ccd979bd MF |
4861 | { |
4862 | struct buffer_head *tl_bh = osb->osb_tl_bh; | |
4863 | struct ocfs2_dinode *di; | |
4864 | struct ocfs2_truncate_log *tl; | |
4865 | ||
4866 | di = (struct ocfs2_dinode *) tl_bh->b_data; | |
4867 | tl = &di->id2.i_dealloc; | |
4868 | ||
4869 | mlog_bug_on_msg(le16_to_cpu(tl->tl_used) > le16_to_cpu(tl->tl_count), | |
4870 | "slot %d, invalid truncate log parameters: used = " | |
4871 | "%u, count = %u\n", osb->slot_num, | |
4872 | le16_to_cpu(tl->tl_used), le16_to_cpu(tl->tl_count)); | |
4873 | return le16_to_cpu(tl->tl_used) == le16_to_cpu(tl->tl_count); | |
4874 | } | |
4875 | ||
4876 | static int ocfs2_truncate_log_can_coalesce(struct ocfs2_truncate_log *tl, | |
4877 | unsigned int new_start) | |
4878 | { | |
4879 | unsigned int tail_index; | |
4880 | unsigned int current_tail; | |
4881 | ||
4882 | /* No records, nothing to coalesce */ | |
4883 | if (!le16_to_cpu(tl->tl_used)) | |
4884 | return 0; | |
4885 | ||
4886 | tail_index = le16_to_cpu(tl->tl_used) - 1; | |
4887 | current_tail = le32_to_cpu(tl->tl_recs[tail_index].t_start); | |
4888 | current_tail += le32_to_cpu(tl->tl_recs[tail_index].t_clusters); | |
4889 | ||
4890 | return current_tail == new_start; | |
4891 | } | |
4892 | ||
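/*
 * Editor's sketch -- not part of alloc.c.  Same coalescing rule as
 * ocfs2_truncate_log_can_coalesce() above, but on plain host-endian
 * records: a new freed range can be merged into the tail record only
 * when it starts exactly where the tail ends.  The struct and function
 * names are hypothetical.
 */
struct tl_rec_sketch {
        unsigned int start;     /* first cluster of the freed range */
        unsigned int clusters;  /* number of clusters in the range */
};

static int can_coalesce_sketch(const struct tl_rec_sketch *recs,
                               unsigned int used, unsigned int new_start)
{
        if (used == 0)
                return 0;       /* empty log: nothing to merge with */
        return recs[used - 1].start + recs[used - 1].clusters == new_start;
}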
063c4561 MF |
4893 | int ocfs2_truncate_log_append(struct ocfs2_super *osb, |
4894 | handle_t *handle, | |
4895 | u64 start_blk, | |
4896 | unsigned int num_clusters) | |
ccd979bd MF |
4897 | { |
4898 | int status, index; | |
4899 | unsigned int start_cluster, tl_count; | |
4900 | struct inode *tl_inode = osb->osb_tl_inode; | |
4901 | struct buffer_head *tl_bh = osb->osb_tl_bh; | |
4902 | struct ocfs2_dinode *di; | |
4903 | struct ocfs2_truncate_log *tl; | |
4904 | ||
b0697053 MF |
4905 | mlog_entry("start_blk = %llu, num_clusters = %u\n", |
4906 | (unsigned long long)start_blk, num_clusters); | |
ccd979bd | 4907 | |
1b1dcc1b | 4908 | BUG_ON(mutex_trylock(&tl_inode->i_mutex)); |
ccd979bd MF |
4909 | |
4910 | start_cluster = ocfs2_blocks_to_clusters(osb->sb, start_blk); | |
4911 | ||
4912 | di = (struct ocfs2_dinode *) tl_bh->b_data; | |
4913 | tl = &di->id2.i_dealloc; | |
4914 | if (!OCFS2_IS_VALID_DINODE(di)) { | |
4915 | OCFS2_RO_ON_INVALID_DINODE(osb->sb, di); | |
4916 | status = -EIO; | |
4917 | goto bail; | |
4918 | } | |
4919 | ||
4920 | tl_count = le16_to_cpu(tl->tl_count); | |
4921 | mlog_bug_on_msg(tl_count > ocfs2_truncate_recs_per_inode(osb->sb) || | |
4922 | tl_count == 0, | |
b0697053 MF |
4923 | "Truncate record count on #%llu invalid " |
4924 | "wanted %u, actual %u\n", | |
4925 | (unsigned long long)OCFS2_I(tl_inode)->ip_blkno, | |
ccd979bd MF |
4926 | ocfs2_truncate_recs_per_inode(osb->sb), |
4927 | le16_to_cpu(tl->tl_count)); | |
4928 | ||
4929 | /* Caller should have known to flush before calling us. */ | |
4930 | index = le16_to_cpu(tl->tl_used); | |
4931 | if (index >= tl_count) { | |
4932 | status = -ENOSPC; | |
4933 | mlog_errno(status); | |
4934 | goto bail; | |
4935 | } | |
4936 | ||
4937 | status = ocfs2_journal_access(handle, tl_inode, tl_bh, | |
4938 | OCFS2_JOURNAL_ACCESS_WRITE); | |
4939 | if (status < 0) { | |
4940 | mlog_errno(status); | |
4941 | goto bail; | |
4942 | } | |
4943 | ||
4944 | mlog(0, "Log truncate of %u clusters starting at cluster %u to " | |
b0697053 MF |
4945 | "%llu (index = %d)\n", num_clusters, start_cluster, |
4946 | (unsigned long long)OCFS2_I(tl_inode)->ip_blkno, index); | |
ccd979bd MF |
4947 | |
4948 | if (ocfs2_truncate_log_can_coalesce(tl, start_cluster)) { | |
4949 | /* | |
4950 | * Move index back to the record we are coalescing with. | |
4951 | * ocfs2_truncate_log_can_coalesce() guarantees nonzero | |
4952 | */ | |
4953 | index--; | |
4954 | ||
4955 | num_clusters += le32_to_cpu(tl->tl_recs[index].t_clusters); | |
4956 | mlog(0, "Coalesce with index %u (start = %u, clusters = %u)\n", | |
4957 | index, le32_to_cpu(tl->tl_recs[index].t_start), | |
4958 | num_clusters); | |
4959 | } else { | |
4960 | tl->tl_recs[index].t_start = cpu_to_le32(start_cluster); | |
4961 | tl->tl_used = cpu_to_le16(index + 1); | |
4962 | } | |
4963 | tl->tl_recs[index].t_clusters = cpu_to_le32(num_clusters); | |
4964 | ||
4965 | status = ocfs2_journal_dirty(handle, tl_bh); | |
4966 | if (status < 0) { | |
4967 | mlog_errno(status); | |
4968 | goto bail; | |
4969 | } | |
4970 | ||
4971 | bail: | |
4972 | mlog_exit(status); | |
4973 | return status; | |
4974 | } | |
4975 | ||
4976 | static int ocfs2_replay_truncate_records(struct ocfs2_super *osb, | |
1fabe148 | 4977 | handle_t *handle, |
ccd979bd MF |
4978 | struct inode *data_alloc_inode, |
4979 | struct buffer_head *data_alloc_bh) | |
4980 | { | |
4981 | int status = 0; | |
4982 | int i; | |
4983 | unsigned int num_clusters; | |
4984 | u64 start_blk; | |
4985 | struct ocfs2_truncate_rec rec; | |
4986 | struct ocfs2_dinode *di; | |
4987 | struct ocfs2_truncate_log *tl; | |
4988 | struct inode *tl_inode = osb->osb_tl_inode; | |
4989 | struct buffer_head *tl_bh = osb->osb_tl_bh; | |
4990 | ||
4991 | mlog_entry_void(); | |
4992 | ||
4993 | di = (struct ocfs2_dinode *) tl_bh->b_data; | |
4994 | tl = &di->id2.i_dealloc; | |
4995 | i = le16_to_cpu(tl->tl_used) - 1; | |
4996 | while (i >= 0) { | |
4997 | /* Caller has given us at least enough credits to | |
4998 | * update the truncate log dinode */ | |
4999 | status = ocfs2_journal_access(handle, tl_inode, tl_bh, | |
5000 | OCFS2_JOURNAL_ACCESS_WRITE); | |
5001 | if (status < 0) { | |
5002 | mlog_errno(status); | |
5003 | goto bail; | |
5004 | } | |
5005 | ||
5006 | tl->tl_used = cpu_to_le16(i); | |
5007 | ||
5008 | status = ocfs2_journal_dirty(handle, tl_bh); | |
5009 | if (status < 0) { | |
5010 | mlog_errno(status); | |
5011 | goto bail; | |
5012 | } | |
5013 | ||
5014 | /* TODO: Perhaps we can calculate the bulk of the | |
5015 | * credits up front rather than extending like | |
5016 | * this. */ | |
5017 | status = ocfs2_extend_trans(handle, | |
5018 | OCFS2_TRUNCATE_LOG_FLUSH_ONE_REC); | |
5019 | if (status < 0) { | |
5020 | mlog_errno(status); | |
5021 | goto bail; | |
5022 | } | |
5023 | ||
5024 | rec = tl->tl_recs[i]; | |
5025 | start_blk = ocfs2_clusters_to_blocks(data_alloc_inode->i_sb, | |
5026 | le32_to_cpu(rec.t_start)); | |
5027 | num_clusters = le32_to_cpu(rec.t_clusters); | |
5028 | ||
5029 | /* if start_blk is not set, we ignore the record as | |
5030 | * invalid. */ | |
5031 | if (start_blk) { | |
5032 | mlog(0, "free record %d, start = %u, clusters = %u\n", | |
5033 | i, le32_to_cpu(rec.t_start), num_clusters); | |
5034 | ||
5035 | status = ocfs2_free_clusters(handle, data_alloc_inode, | |
5036 | data_alloc_bh, start_blk, | |
5037 | num_clusters); | |
5038 | if (status < 0) { | |
5039 | mlog_errno(status); | |
5040 | goto bail; | |
5041 | } | |
5042 | } | |
5043 | i--; | |
5044 | } | |
5045 | ||
5046 | bail: | |
5047 | mlog_exit(status); | |
5048 | return status; | |
5049 | } | |
5050 | ||
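/*
 * Editor's sketch -- not part of alloc.c.  It shows the shape of the
 * replay loop in ocfs2_replay_truncate_records() above: records are
 * consumed from the tail down, and the 'used' count is trimmed before
 * the clusters of each record are handed back, so the log never points
 * at space that has already been freed.  It reuses the hypothetical
 * tl_rec_sketch records from the earlier sketch; free_fn() stands in
 * for ocfs2_free_clusters().
 */
static int replay_log_sketch(const struct tl_rec_sketch *recs,
                             unsigned int *used,
                             int (*free_fn)(unsigned int start,
                                            unsigned int clusters))
{
        int ret = 0;

        while (*used) {
                unsigned int i = *used - 1;

                *used = i;                      /* drop the tail record... */
                ret = free_fn(recs[i].start,    /* ...then free its space */
                              recs[i].clusters);
                if (ret)
                        break;
        }
        return ret;
}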
1b1dcc1b | 5051 | /* Expects you to already be holding tl_inode->i_mutex */ |
063c4561 | 5052 | int __ocfs2_flush_truncate_log(struct ocfs2_super *osb) |
ccd979bd MF |
5053 | { |
5054 | int status; | |
5055 | unsigned int num_to_flush; | |
1fabe148 | 5056 | handle_t *handle; |
ccd979bd MF |
5057 | struct inode *tl_inode = osb->osb_tl_inode; |
5058 | struct inode *data_alloc_inode = NULL; | |
5059 | struct buffer_head *tl_bh = osb->osb_tl_bh; | |
5060 | struct buffer_head *data_alloc_bh = NULL; | |
5061 | struct ocfs2_dinode *di; | |
5062 | struct ocfs2_truncate_log *tl; | |
5063 | ||
5064 | mlog_entry_void(); | |
5065 | ||
1b1dcc1b | 5066 | BUG_ON(mutex_trylock(&tl_inode->i_mutex)); |
ccd979bd MF |
5067 | |
5068 | di = (struct ocfs2_dinode *) tl_bh->b_data; | |
5069 | tl = &di->id2.i_dealloc; | |
5070 | if (!OCFS2_IS_VALID_DINODE(di)) { | |
5071 | OCFS2_RO_ON_INVALID_DINODE(osb->sb, di); | |
5072 | status = -EIO; | |
e08dc8b9 | 5073 | goto out; |
ccd979bd MF |
5074 | } |
5075 | ||
5076 | num_to_flush = le16_to_cpu(tl->tl_used); | |
b0697053 MF |
5077 | mlog(0, "Flush %u records from truncate log #%llu\n", |
5078 | num_to_flush, (unsigned long long)OCFS2_I(tl_inode)->ip_blkno); | |
ccd979bd MF |
5079 | if (!num_to_flush) { |
5080 | status = 0; | |
e08dc8b9 | 5081 | goto out; |
ccd979bd MF |
5082 | } |
5083 | ||
5084 | data_alloc_inode = ocfs2_get_system_file_inode(osb, | |
5085 | GLOBAL_BITMAP_SYSTEM_INODE, | |
5086 | OCFS2_INVALID_SLOT); | |
5087 | if (!data_alloc_inode) { | |
5088 | status = -EINVAL; | |
5089 | mlog(ML_ERROR, "Could not get bitmap inode!\n"); | |
e08dc8b9 | 5090 | goto out; |
ccd979bd MF |
5091 | } |
5092 | ||
e08dc8b9 MF |
5093 | mutex_lock(&data_alloc_inode->i_mutex); |
5094 | ||
e63aecb6 | 5095 | status = ocfs2_inode_lock(data_alloc_inode, &data_alloc_bh, 1); |
ccd979bd MF |
5096 | if (status < 0) { |
5097 | mlog_errno(status); | |
e08dc8b9 | 5098 | goto out_mutex; |
ccd979bd MF |
5099 | } |
5100 | ||
65eff9cc | 5101 | handle = ocfs2_start_trans(osb, OCFS2_TRUNCATE_LOG_UPDATE); |
ccd979bd MF |
5102 | if (IS_ERR(handle)) { |
5103 | status = PTR_ERR(handle); | |
ccd979bd | 5104 | mlog_errno(status); |
e08dc8b9 | 5105 | goto out_unlock; |
ccd979bd MF |
5106 | } |
5107 | ||
5108 | status = ocfs2_replay_truncate_records(osb, handle, data_alloc_inode, | |
5109 | data_alloc_bh); | |
e08dc8b9 | 5110 | if (status < 0) |
ccd979bd | 5111 | mlog_errno(status); |
ccd979bd | 5112 | |
02dc1af4 | 5113 | ocfs2_commit_trans(osb, handle); |
ccd979bd | 5114 | |
e08dc8b9 MF |
5115 | out_unlock: |
5116 | brelse(data_alloc_bh); | |
e63aecb6 | 5117 | ocfs2_inode_unlock(data_alloc_inode, 1); |
ccd979bd | 5118 | |
e08dc8b9 MF |
5119 | out_mutex: |
5120 | mutex_unlock(&data_alloc_inode->i_mutex); | |
5121 | iput(data_alloc_inode); | |
ccd979bd | 5122 | |
e08dc8b9 | 5123 | out: |
ccd979bd MF |
5124 | mlog_exit(status); |
5125 | return status; | |
5126 | } | |
5127 | ||
5128 | int ocfs2_flush_truncate_log(struct ocfs2_super *osb) | |
5129 | { | |
5130 | int status; | |
5131 | struct inode *tl_inode = osb->osb_tl_inode; | |
5132 | ||
1b1dcc1b | 5133 | mutex_lock(&tl_inode->i_mutex); |
ccd979bd | 5134 | status = __ocfs2_flush_truncate_log(osb); |
1b1dcc1b | 5135 | mutex_unlock(&tl_inode->i_mutex); |
ccd979bd MF |
5136 | |
5137 | return status; | |
5138 | } | |
5139 | ||
c4028958 | 5140 | static void ocfs2_truncate_log_worker(struct work_struct *work) |
ccd979bd MF |
5141 | { |
5142 | int status; | |
c4028958 DH |
5143 | struct ocfs2_super *osb = |
5144 | container_of(work, struct ocfs2_super, | |
5145 | osb_truncate_log_wq.work); | |
ccd979bd MF |
5146 | |
5147 | mlog_entry_void(); | |
5148 | ||
5149 | status = ocfs2_flush_truncate_log(osb); | |
5150 | if (status < 0) | |
5151 | mlog_errno(status); | |
4d0ddb2c TM |
5152 | else |
5153 | ocfs2_init_inode_steal_slot(osb); | |
ccd979bd MF |
5154 | |
5155 | mlog_exit(status); | |
5156 | } | |
5157 | ||
5158 | #define OCFS2_TRUNCATE_LOG_FLUSH_INTERVAL (2 * HZ) | |
5159 | void ocfs2_schedule_truncate_log_flush(struct ocfs2_super *osb, | |
5160 | int cancel) | |
5161 | { | |
5162 | if (osb->osb_tl_inode) { | |
5163 | /* We want to push off log flushes while truncates are | |
5164 | * still running. */ | |
5165 | if (cancel) | |
5166 | cancel_delayed_work(&osb->osb_truncate_log_wq); | |
5167 | ||
5168 | queue_delayed_work(ocfs2_wq, &osb->osb_truncate_log_wq, | |
5169 | OCFS2_TRUNCATE_LOG_FLUSH_INTERVAL); | |
5170 | } | |
5171 | } | |
5172 | ||
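/*
 * Editor's sketch -- not part of alloc.c.  It isolates the
 * cancel-then-requeue pattern used by
 * ocfs2_schedule_truncate_log_flush() above: passing 'cancel' pushes an
 * already-scheduled flush further into the future, so a burst of
 * truncates coalesces into a single flush one interval after the last
 * one.  my_wq, my_flush_work and FLUSH_INTERVAL are hypothetical.
 */
#define FLUSH_INTERVAL (2 * HZ)

static struct workqueue_struct *my_wq;
static struct delayed_work my_flush_work;

static void schedule_flush_sketch(int cancel)
{
        if (cancel)
                cancel_delayed_work(&my_flush_work);

        queue_delayed_work(my_wq, &my_flush_work, FLUSH_INTERVAL);
}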
5173 | static int ocfs2_get_truncate_log_info(struct ocfs2_super *osb, | |
5174 | int slot_num, | |
5175 | struct inode **tl_inode, | |
5176 | struct buffer_head **tl_bh) | |
5177 | { | |
5178 | int status; | |
5179 | struct inode *inode = NULL; | |
5180 | struct buffer_head *bh = NULL; | |
5181 | ||
5182 | inode = ocfs2_get_system_file_inode(osb, | |
5183 | TRUNCATE_LOG_SYSTEM_INODE, | |
5184 | slot_num); | |
5185 | if (!inode) { | |
5186 | status = -EINVAL; | |
5187 | mlog(ML_ERROR, "Could not get truncate log inode!\n"); | |
5188 | goto bail; | |
5189 | } | |
5190 | ||
5191 | status = ocfs2_read_block(osb, OCFS2_I(inode)->ip_blkno, &bh, | |
5192 | OCFS2_BH_CACHED, inode); | |
5193 | if (status < 0) { | |
5194 | iput(inode); | |
5195 | mlog_errno(status); | |
5196 | goto bail; | |
5197 | } | |
5198 | ||
5199 | *tl_inode = inode; | |
5200 | *tl_bh = bh; | |
5201 | bail: | |
5202 | mlog_exit(status); | |
5203 | return status; | |
5204 | } | |
5205 | ||
5206 | /* called during the 1st stage of node recovery. we stamp a clean | |
5207 | * truncate log and pass back a copy for processing later. if the | |
5208 | * truncate log does not require processing, *tl_copy is set to | |
5209 | * NULL. */ | |
5210 | int ocfs2_begin_truncate_log_recovery(struct ocfs2_super *osb, | |
5211 | int slot_num, | |
5212 | struct ocfs2_dinode **tl_copy) | |
5213 | { | |
5214 | int status; | |
5215 | struct inode *tl_inode = NULL; | |
5216 | struct buffer_head *tl_bh = NULL; | |
5217 | struct ocfs2_dinode *di; | |
5218 | struct ocfs2_truncate_log *tl; | |
5219 | ||
5220 | *tl_copy = NULL; | |
5221 | ||
5222 | mlog(0, "recover truncate log from slot %d\n", slot_num); | |
5223 | ||
5224 | status = ocfs2_get_truncate_log_info(osb, slot_num, &tl_inode, &tl_bh); | |
5225 | if (status < 0) { | |
5226 | mlog_errno(status); | |
5227 | goto bail; | |
5228 | } | |
5229 | ||
5230 | di = (struct ocfs2_dinode *) tl_bh->b_data; | |
5231 | tl = &di->id2.i_dealloc; | |
5232 | if (!OCFS2_IS_VALID_DINODE(di)) { | |
5233 | OCFS2_RO_ON_INVALID_DINODE(tl_inode->i_sb, di); | |
5234 | status = -EIO; | |
5235 | goto bail; | |
5236 | } | |
5237 | ||
5238 | if (le16_to_cpu(tl->tl_used)) { | |
5239 | mlog(0, "We'll have %u logs to recover\n", | |
5240 | le16_to_cpu(tl->tl_used)); | |
5241 | ||
5242 | *tl_copy = kmalloc(tl_bh->b_size, GFP_KERNEL); | |
5243 | if (!(*tl_copy)) { | |
5244 | status = -ENOMEM; | |
5245 | mlog_errno(status); | |
5246 | goto bail; | |
5247 | } | |
5248 | ||
5249 | /* Assuming the write-out below goes well, this copy | |
5250 | * will be passed back to recovery for processing. */ | |
5251 | memcpy(*tl_copy, tl_bh->b_data, tl_bh->b_size); | |
5252 | ||
5253 | /* All we need to do to clear the truncate log is set | |
5254 | * tl_used. */ | |
5255 | tl->tl_used = 0; | |
5256 | ||
5257 | status = ocfs2_write_block(osb, tl_bh, tl_inode); | |
5258 | if (status < 0) { | |
5259 | mlog_errno(status); | |
5260 | goto bail; | |
5261 | } | |
5262 | } | |
5263 | ||
5264 | bail: | |
5265 | if (tl_inode) | |
5266 | iput(tl_inode); | |
5267 | if (tl_bh) | |
5268 | brelse(tl_bh); | |
5269 | ||
5270 | if (status < 0 && (*tl_copy)) { | |
5271 | kfree(*tl_copy); | |
5272 | *tl_copy = NULL; | |
5273 | } | |
5274 | ||
5275 | mlog_exit(status); | |
5276 | return status; | |
5277 | } | |
5278 | ||
5279 | int ocfs2_complete_truncate_log_recovery(struct ocfs2_super *osb, | |
5280 | struct ocfs2_dinode *tl_copy) | |
5281 | { | |
5282 | int status = 0; | |
5283 | int i; | |
5284 | unsigned int clusters, num_recs, start_cluster; | |
5285 | u64 start_blk; | |
1fabe148 | 5286 | handle_t *handle; |
ccd979bd MF |
5287 | struct inode *tl_inode = osb->osb_tl_inode; |
5288 | struct ocfs2_truncate_log *tl; | |
5289 | ||
5290 | mlog_entry_void(); | |
5291 | ||
5292 | if (OCFS2_I(tl_inode)->ip_blkno == le64_to_cpu(tl_copy->i_blkno)) { | |
5293 | mlog(ML_ERROR, "Asked to recover my own truncate log!\n"); | |
5294 | return -EINVAL; | |
5295 | } | |
5296 | ||
5297 | tl = &tl_copy->id2.i_dealloc; | |
5298 | num_recs = le16_to_cpu(tl->tl_used); | |
b0697053 | 5299 | mlog(0, "cleanup %u records from %llu\n", num_recs, |
1ca1a111 | 5300 | (unsigned long long)le64_to_cpu(tl_copy->i_blkno)); |
ccd979bd | 5301 | |
1b1dcc1b | 5302 | mutex_lock(&tl_inode->i_mutex); |
ccd979bd MF |
5303 | for(i = 0; i < num_recs; i++) { |
5304 | if (ocfs2_truncate_log_needs_flush(osb)) { | |
5305 | status = __ocfs2_flush_truncate_log(osb); | |
5306 | if (status < 0) { | |
5307 | mlog_errno(status); | |
5308 | goto bail_up; | |
5309 | } | |
5310 | } | |
5311 | ||
65eff9cc | 5312 | handle = ocfs2_start_trans(osb, OCFS2_TRUNCATE_LOG_UPDATE); |
ccd979bd MF |
5313 | if (IS_ERR(handle)) { |
5314 | status = PTR_ERR(handle); | |
5315 | mlog_errno(status); | |
5316 | goto bail_up; | |
5317 | } | |
5318 | ||
5319 | clusters = le32_to_cpu(tl->tl_recs[i].t_clusters); | |
5320 | start_cluster = le32_to_cpu(tl->tl_recs[i].t_start); | |
5321 | start_blk = ocfs2_clusters_to_blocks(osb->sb, start_cluster); | |
5322 | ||
5323 | status = ocfs2_truncate_log_append(osb, handle, | |
5324 | start_blk, clusters); | |
02dc1af4 | 5325 | ocfs2_commit_trans(osb, handle); |
ccd979bd MF |
5326 | if (status < 0) { |
5327 | mlog_errno(status); | |
5328 | goto bail_up; | |
5329 | } | |
5330 | } | |
5331 | ||
5332 | bail_up: | |
1b1dcc1b | 5333 | mutex_unlock(&tl_inode->i_mutex); |
ccd979bd MF |
5334 | |
5335 | mlog_exit(status); | |
5336 | return status; | |
5337 | } | |
5338 | ||
5339 | void ocfs2_truncate_log_shutdown(struct ocfs2_super *osb) | |
5340 | { | |
5341 | int status; | |
5342 | struct inode *tl_inode = osb->osb_tl_inode; | |
5343 | ||
5344 | mlog_entry_void(); | |
5345 | ||
5346 | if (tl_inode) { | |
5347 | cancel_delayed_work(&osb->osb_truncate_log_wq); | |
5348 | flush_workqueue(ocfs2_wq); | |
5349 | ||
5350 | status = ocfs2_flush_truncate_log(osb); | |
5351 | if (status < 0) | |
5352 | mlog_errno(status); | |
5353 | ||
5354 | brelse(osb->osb_tl_bh); | |
5355 | iput(osb->osb_tl_inode); | |
5356 | } | |
5357 | ||
5358 | mlog_exit_void(); | |
5359 | } | |
5360 | ||
5361 | int ocfs2_truncate_log_init(struct ocfs2_super *osb) | |
5362 | { | |
5363 | int status; | |
5364 | struct inode *tl_inode = NULL; | |
5365 | struct buffer_head *tl_bh = NULL; | |
5366 | ||
5367 | mlog_entry_void(); | |
5368 | ||
5369 | status = ocfs2_get_truncate_log_info(osb, | |
5370 | osb->slot_num, | |
5371 | &tl_inode, | |
5372 | &tl_bh); | |
5373 | if (status < 0) | |
5374 | mlog_errno(status); | |
5375 | ||
5376 | /* ocfs2_truncate_log_shutdown keys on the existence of | |
5377 | * osb->osb_tl_inode so we don't set any of the osb variables | |
5378 | * until we're sure all is well. */ | |
c4028958 DH |
5379 | INIT_DELAYED_WORK(&osb->osb_truncate_log_wq, |
5380 | ocfs2_truncate_log_worker); | |
ccd979bd MF |
5381 | osb->osb_tl_bh = tl_bh; |
5382 | osb->osb_tl_inode = tl_inode; | |
5383 | ||
5384 | mlog_exit(status); | |
5385 | return status; | |
5386 | } | |
5387 | ||
2b604351 MF |
5388 | /* |
5389 | * Delayed de-allocation of suballocator blocks. | |
5390 | * | |
5391 | * Some sets of block de-allocations might involve multiple suballocator inodes. | |
5392 | * | |
5393 | * The locking for this can get extremely complicated, especially when | |
5394 | * the suballocator inodes to delete from aren't known until deep | |
5395 | * within an unrelated codepath. | |
5396 | * | |
5397 | * ocfs2_extent_block structures are a good example of this - an inode | |
5398 | * btree could have been grown by any number of nodes each allocating | |
5399 | * out of their own suballoc inode. | |
5400 | * | |
5401 | * These structures allow the delay of block de-allocation until a | |
5402 | * later time, when locking of multiple cluster inodes won't cause | |
5403 | * deadlock. | |
5404 | */ | |
5405 | ||
5406 | /* | |
5407 | * Describes a single block free from a suballocator | |
5408 | */ | |
5409 | struct ocfs2_cached_block_free { | |
5410 | struct ocfs2_cached_block_free *free_next; | |
5411 | u64 free_blk; | |
5412 | unsigned int free_bit; | |
5413 | }; | |
5414 | ||
5415 | struct ocfs2_per_slot_free_list { | |
5416 | struct ocfs2_per_slot_free_list *f_next_suballocator; | |
5417 | int f_inode_type; | |
5418 | int f_slot; | |
5419 | struct ocfs2_cached_block_free *f_first; | |
5420 | }; | |
5421 | ||
5422 | static int ocfs2_free_cached_items(struct ocfs2_super *osb, | |
5423 | int sysfile_type, | |
5424 | int slot, | |
5425 | struct ocfs2_cached_block_free *head) | |
5426 | { | |
5427 | int ret; | |
5428 | u64 bg_blkno; | |
5429 | handle_t *handle; | |
5430 | struct inode *inode; | |
5431 | struct buffer_head *di_bh = NULL; | |
5432 | struct ocfs2_cached_block_free *tmp; | |
5433 | ||
5434 | inode = ocfs2_get_system_file_inode(osb, sysfile_type, slot); | |
5435 | if (!inode) { | |
5436 | ret = -EINVAL; | |
5437 | mlog_errno(ret); | |
5438 | goto out; | |
5439 | } | |
5440 | ||
5441 | mutex_lock(&inode->i_mutex); | |
5442 | ||
e63aecb6 | 5443 | ret = ocfs2_inode_lock(inode, &di_bh, 1); |
2b604351 MF |
5444 | if (ret) { |
5445 | mlog_errno(ret); | |
5446 | goto out_mutex; | |
5447 | } | |
5448 | ||
5449 | handle = ocfs2_start_trans(osb, OCFS2_SUBALLOC_FREE); | |
5450 | if (IS_ERR(handle)) { | |
5451 | ret = PTR_ERR(handle); | |
5452 | mlog_errno(ret); | |
5453 | goto out_unlock; | |
5454 | } | |
5455 | ||
5456 | while (head) { | |
5457 | bg_blkno = ocfs2_which_suballoc_group(head->free_blk, | |
5458 | head->free_bit); | |
5459 | mlog(0, "Free bit: (bit %u, blkno %llu)\n", | |
5460 | head->free_bit, (unsigned long long)head->free_blk); | |
5461 | ||
5462 | ret = ocfs2_free_suballoc_bits(handle, inode, di_bh, | |
5463 | head->free_bit, bg_blkno, 1); | |
5464 | if (ret) { | |
5465 | mlog_errno(ret); | |
5466 | goto out_journal; | |
5467 | } | |
5468 | ||
5469 | ret = ocfs2_extend_trans(handle, OCFS2_SUBALLOC_FREE); | |
5470 | if (ret) { | |
5471 | mlog_errno(ret); | |
5472 | goto out_journal; | |
5473 | } | |
5474 | ||
5475 | tmp = head; | |
5476 | head = head->free_next; | |
5477 | kfree(tmp); | |
5478 | } | |
5479 | ||
5480 | out_journal: | |
5481 | ocfs2_commit_trans(osb, handle); | |
5482 | ||
5483 | out_unlock: | |
e63aecb6 | 5484 | ocfs2_inode_unlock(inode, 1); |
2b604351 MF |
5485 | brelse(di_bh); |
5486 | out_mutex: | |
5487 | mutex_unlock(&inode->i_mutex); | |
5488 | iput(inode); | |
5489 | out: | |
5490 | while(head) { | |
5491 | /* Premature exit may have left some dangling items. */ | |
5492 | tmp = head; | |
5493 | head = head->free_next; | |
5494 | kfree(tmp); | |
5495 | } | |
5496 | ||
5497 | return ret; | |
5498 | } | |
5499 | ||
5500 | int ocfs2_run_deallocs(struct ocfs2_super *osb, | |
5501 | struct ocfs2_cached_dealloc_ctxt *ctxt) | |
5502 | { | |
5503 | int ret = 0, ret2; | |
5504 | struct ocfs2_per_slot_free_list *fl; | |
5505 | ||
5506 | if (!ctxt) | |
5507 | return 0; | |
5508 | ||
5509 | while (ctxt->c_first_suballocator) { | |
5510 | fl = ctxt->c_first_suballocator; | |
5511 | ||
5512 | if (fl->f_first) { | |
5513 | mlog(0, "Free items: (type %u, slot %d)\n", | |
5514 | fl->f_inode_type, fl->f_slot); | |
5515 | ret2 = ocfs2_free_cached_items(osb, fl->f_inode_type, | |
5516 | fl->f_slot, fl->f_first); | |
5517 | if (ret2) | |
5518 | mlog_errno(ret2); | |
5519 | if (!ret) | |
5520 | ret = ret2; | |
5521 | } | |
5522 | ||
5523 | ctxt->c_first_suballocator = fl->f_next_suballocator; | |
5524 | kfree(fl); | |
5525 | } | |
5526 | ||
5527 | return ret; | |
5528 | } | |
5529 | ||
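/*
 * Editor's sketch -- not part of alloc.c.  It shows the calling
 * pattern the structures and ocfs2_run_deallocs() above are built for:
 * frees discovered deep inside a tree operation are only queued, per
 * suballocator slot, and are replayed once every cluster lock has been
 * dropped.  ocfs2_init_dealloc_ctxt() is assumed to be the small
 * initializer from alloc.h; modify_tree() is a hypothetical stand-in
 * for an operation such as ocfs2_remove_extent().
 */
static int deferred_free_pattern(struct ocfs2_super *osb, struct inode *inode)
{
        int ret;
        struct ocfs2_cached_dealloc_ctxt dealloc;

        ocfs2_init_dealloc_ctxt(&dealloc);

        /* Cluster locks held: freed metadata is only recorded in 'dealloc'. */
        ret = modify_tree(inode, &dealloc);

        /* ...drop cluster locks here... */

        /* Now each suballocator inode can be locked in turn, deadlock free. */
        ocfs2_run_deallocs(osb, &dealloc);
        return ret;
}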
5530 | static struct ocfs2_per_slot_free_list * | |
5531 | ocfs2_find_per_slot_free_list(int type, | |
5532 | int slot, | |
5533 | struct ocfs2_cached_dealloc_ctxt *ctxt) | |
5534 | { | |
5535 | struct ocfs2_per_slot_free_list *fl = ctxt->c_first_suballocator; | |
5536 | ||
5537 | while (fl) { | |
5538 | if (fl->f_inode_type == type && fl->f_slot == slot) | |
5539 | return fl; | |
5540 | ||
5541 | fl = fl->f_next_suballocator; | |
5542 | } | |
5543 | ||
5544 | fl = kmalloc(sizeof(*fl), GFP_NOFS); | |
5545 | if (fl) { | |
5546 | fl->f_inode_type = type; | |
5547 | fl->f_slot = slot; | |
5548 | fl->f_first = NULL; | |
5549 | fl->f_next_suballocator = ctxt->c_first_suballocator; | |
5550 | ||
5551 | ctxt->c_first_suballocator = fl; | |
5552 | } | |
5553 | return fl; | |
5554 | } | |
5555 | ||
5556 | static int ocfs2_cache_block_dealloc(struct ocfs2_cached_dealloc_ctxt *ctxt, | |
5557 | int type, int slot, u64 blkno, | |
5558 | unsigned int bit) | |
5559 | { | |
5560 | int ret; | |
5561 | struct ocfs2_per_slot_free_list *fl; | |
5562 | struct ocfs2_cached_block_free *item; | |
5563 | ||
5564 | fl = ocfs2_find_per_slot_free_list(type, slot, ctxt); | |
5565 | if (fl == NULL) { | |
5566 | ret = -ENOMEM; | |
5567 | mlog_errno(ret); | |
5568 | goto out; | |
5569 | } | |
5570 | ||
5571 | item = kmalloc(sizeof(*item), GFP_NOFS); | |
5572 | if (item == NULL) { | |
5573 | ret = -ENOMEM; | |
5574 | mlog_errno(ret); | |
5575 | goto out; | |
5576 | } | |
5577 | ||
5578 | mlog(0, "Insert: (type %d, slot %u, bit %u, blk %llu)\n", | |
5579 | type, slot, bit, (unsigned long long)blkno); | |
5580 | ||
5581 | item->free_blk = blkno; | |
5582 | item->free_bit = bit; | |
5583 | item->free_next = fl->f_first; | |
5584 | ||
5585 | fl->f_first = item; | |
5586 | ||
5587 | ret = 0; | |
5588 | out: | |
5589 | return ret; | |
5590 | } | |
5591 | ||
59a5e416 MF |
5592 | static int ocfs2_cache_extent_block_free(struct ocfs2_cached_dealloc_ctxt *ctxt, |
5593 | struct ocfs2_extent_block *eb) | |
5594 | { | |
5595 | return ocfs2_cache_block_dealloc(ctxt, EXTENT_ALLOC_SYSTEM_INODE, | |
5596 | le16_to_cpu(eb->h_suballoc_slot), | |
5597 | le64_to_cpu(eb->h_blkno), | |
5598 | le16_to_cpu(eb->h_suballoc_bit)); | |
5599 | } | |
5600 | ||
ccd979bd MF |
5601 | /* This function will figure out whether the currently last extent |
5602 | * block will be deleted, and if it will, what the new last extent | |
5603 | * block will be so we can update its h_next_leaf_blk field, as well | |
5604 | * as the dinode's i_last_eb_blk. */ | |
dcd0538f | 5605 | static int ocfs2_find_new_last_ext_blk(struct inode *inode, |
3a0782d0 | 5606 | unsigned int clusters_to_del, |
dcd0538f | 5607 | struct ocfs2_path *path, |
ccd979bd MF |
5608 | struct buffer_head **new_last_eb) |
5609 | { | |
3a0782d0 | 5610 | int next_free, ret = 0; |
dcd0538f | 5611 | u32 cpos; |
3a0782d0 | 5612 | struct ocfs2_extent_rec *rec; |
ccd979bd MF |
5613 | struct ocfs2_extent_block *eb; |
5614 | struct ocfs2_extent_list *el; | |
5615 | struct buffer_head *bh = NULL; | |
5616 | ||
5617 | *new_last_eb = NULL; | |
5618 | ||
ccd979bd | 5619 | /* we have no tree, so of course, no last_eb. */ |
dcd0538f MF |
5620 | if (!path->p_tree_depth) |
5621 | goto out; | |
ccd979bd MF |
5622 | |
5623 | /* trunc to zero special case - this makes tree_depth = 0 | |
5624 | * regardless of what it is. */ | |
3a0782d0 | 5625 | if (OCFS2_I(inode)->ip_clusters == clusters_to_del) |
dcd0538f | 5626 | goto out; |
ccd979bd | 5627 | |
dcd0538f | 5628 | el = path_leaf_el(path); |
ccd979bd MF |
5629 | BUG_ON(!el->l_next_free_rec); |
5630 | ||
3a0782d0 MF |
5631 | /* |
5632 | * Make sure that this extent list will actually be empty | |
5633 | * after we clear away the data. We can shortcut out if | |
5634 | * there's more than one non-empty extent in the | |
5635 | * list. Otherwise, a check of the remaining extent is | |
5636 | * necessary. | |
5637 | */ | |
5638 | next_free = le16_to_cpu(el->l_next_free_rec); | |
5639 | rec = NULL; | |
dcd0538f | 5640 | if (ocfs2_is_empty_extent(&el->l_recs[0])) { |
3a0782d0 | 5641 | if (next_free > 2) |
dcd0538f | 5642 | goto out; |
3a0782d0 MF |
5643 | |
5644 | /* We may have a valid extent in index 1, check it. */ | |
5645 | if (next_free == 2) | |
5646 | rec = &el->l_recs[1]; | |
5647 | ||
5648 | /* | |
5649 | * Fall through - no more nonempty extents, so we want | |
5650 | * to delete this leaf. | |
5651 | */ | |
5652 | } else { | |
5653 | if (next_free > 1) | |
5654 | goto out; | |
5655 | ||
5656 | rec = &el->l_recs[0]; | |
5657 | } | |
5658 | ||
5659 | if (rec) { | |
5660 | /* | |
5661 | * Check that we'll only be trimming off the end of this | |
5662 | * cluster. | |
5663 | */ | |
e48edee2 | 5664 | if (le16_to_cpu(rec->e_leaf_clusters) > clusters_to_del) |
3a0782d0 MF |
5665 | goto out; |
5666 | } | |
ccd979bd | 5667 | |
dcd0538f MF |
5668 | ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb, path, &cpos); |
5669 | if (ret) { | |
5670 | mlog_errno(ret); | |
5671 | goto out; | |
5672 | } | |
ccd979bd | 5673 | |
dcd0538f MF |
5674 | ret = ocfs2_find_leaf(inode, path_root_el(path), cpos, &bh); |
5675 | if (ret) { | |
5676 | mlog_errno(ret); | |
5677 | goto out; | |
5678 | } | |
ccd979bd | 5679 | |
dcd0538f MF |
5680 | eb = (struct ocfs2_extent_block *) bh->b_data; |
5681 | el = &eb->h_list; | |
5682 | if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) { | |
5683 | OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb); | |
5684 | ret = -EROFS; | |
5685 | goto out; | |
5686 | } | |
ccd979bd MF |
5687 | |
5688 | *new_last_eb = bh; | |
5689 | get_bh(*new_last_eb); | |
dcd0538f MF |
5690 | mlog(0, "returning block %llu, (cpos: %u)\n", |
5691 | (unsigned long long)le64_to_cpu(eb->h_blkno), cpos); | |
5692 | out: | |
5693 | brelse(bh); | |
ccd979bd | 5694 | |
dcd0538f | 5695 | return ret; |
ccd979bd MF |
5696 | } |
5697 | ||
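/*
 * Editor's sketch -- not part of alloc.c.  It restates the decision
 * made by ocfs2_find_new_last_ext_blk() above with plain integers: the
 * rightmost leaf only disappears (and a new last extent block must be
 * chosen) when, ignoring a leading empty record, a single record is
 * left and the delete consumes all of its clusters.  The function name
 * is hypothetical.
 */
static int leaf_becomes_empty(unsigned int next_free, int first_rec_empty,
                              unsigned int last_rec_clusters,
                              unsigned int clusters_to_del)
{
        /* More than one live record would survive the delete. */
        if (next_free > (first_rec_empty ? 2u : 1u))
                return 0;

        /* Only the tail of the remaining record is being trimmed. */
        if (last_rec_clusters > clusters_to_del)
                return 0;

        return 1;
}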
3a0782d0 MF |
5698 | /* |
5699 | * Trim some clusters off the rightmost edge of a tree. Only called | |
5700 | * during truncate. | |
5701 | * | |
5702 | * The caller needs to: | |
5703 | * - start journaling of each path component. | |
5704 | * - compute and fully set up any new last ext block | |
5705 | */ | |
5706 | static int ocfs2_trim_tree(struct inode *inode, struct ocfs2_path *path, | |
5707 | handle_t *handle, struct ocfs2_truncate_context *tc, | |
5708 | u32 clusters_to_del, u64 *delete_start) | |
5709 | { | |
5710 | int ret, i, index = path->p_tree_depth; | |
5711 | u32 new_edge = 0; | |
5712 | u64 deleted_eb = 0; | |
5713 | struct buffer_head *bh; | |
5714 | struct ocfs2_extent_list *el; | |
5715 | struct ocfs2_extent_rec *rec; | |
5716 | ||
5717 | *delete_start = 0; | |
5718 | ||
5719 | while (index >= 0) { | |
5720 | bh = path->p_node[index].bh; | |
5721 | el = path->p_node[index].el; | |
5722 | ||
5723 | mlog(0, "traveling tree (index = %d, block = %llu)\n", | |
5724 | index, (unsigned long long)bh->b_blocknr); | |
5725 | ||
5726 | BUG_ON(le16_to_cpu(el->l_next_free_rec) == 0); | |
5727 | ||
5728 | if (index != | |
5729 | (path->p_tree_depth - le16_to_cpu(el->l_tree_depth))) { | |
5730 | ocfs2_error(inode->i_sb, | |
5731 | "Inode %lu has invalid ext. block %llu", | |
5732 | inode->i_ino, | |
5733 | (unsigned long long)bh->b_blocknr); | |
5734 | ret = -EROFS; | |
5735 | goto out; | |
5736 | } | |
5737 | ||
5738 | find_tail_record: | |
5739 | i = le16_to_cpu(el->l_next_free_rec) - 1; | |
5740 | rec = &el->l_recs[i]; | |
5741 | ||
5742 | mlog(0, "Extent list before: record %d: (%u, %u, %llu), " | |
5743 | "next = %u\n", i, le32_to_cpu(rec->e_cpos), | |
e48edee2 | 5744 | ocfs2_rec_clusters(el, rec), |
3a0782d0 MF |
5745 | (unsigned long long)le64_to_cpu(rec->e_blkno), |
5746 | le16_to_cpu(el->l_next_free_rec)); | |
5747 | ||
e48edee2 | 5748 | BUG_ON(ocfs2_rec_clusters(el, rec) < clusters_to_del); |
3a0782d0 MF |
5749 | |
5750 | if (le16_to_cpu(el->l_tree_depth) == 0) { | |
5751 | /* | |
5752 | * If the leaf block contains a single empty | |
5753 | * extent and no records, we can just remove | |
5754 | * the block. | |
5755 | */ | |
5756 | if (i == 0 && ocfs2_is_empty_extent(rec)) { | |
5757 | memset(rec, 0, | |
5758 | sizeof(struct ocfs2_extent_rec)); | |
5759 | el->l_next_free_rec = cpu_to_le16(0); | |
5760 | ||
5761 | goto delete; | |
5762 | } | |
5763 | ||
5764 | /* | |
5765 | * Remove any empty extents by shifting things | |
5766 | * left. That should make life much easier on | |
5767 | * the code below. This condition is rare | |
5768 | * enough that we shouldn't see a performance | |
5769 | * hit. | |
5770 | */ | |
5771 | if (ocfs2_is_empty_extent(&el->l_recs[0])) { | |
5772 | le16_add_cpu(&el->l_next_free_rec, -1); | |
5773 | ||
5774 | for(i = 0; | |
5775 | i < le16_to_cpu(el->l_next_free_rec); i++) | |
5776 | el->l_recs[i] = el->l_recs[i + 1]; | |
5777 | ||
5778 | memset(&el->l_recs[i], 0, | |
5779 | sizeof(struct ocfs2_extent_rec)); | |
5780 | ||
5781 | /* | |
5782 | * We've modified our extent list. The | |
5783 | * simplest way to handle this change | |
5784 | * is to begin the search from the | |
5785 | * start again. | |
5786 | */ | |
5787 | goto find_tail_record; | |
5788 | } | |
5789 | ||
e48edee2 | 5790 | le16_add_cpu(&rec->e_leaf_clusters, -clusters_to_del); |
3a0782d0 MF |
5791 | |
5792 | /* | |
5793 | * We'll use "new_edge" on our way back up the | |
5794 | * tree to know what our rightmost cpos is. | |
5795 | */ | |
e48edee2 | 5796 | new_edge = le16_to_cpu(rec->e_leaf_clusters); |
3a0782d0 MF |
5797 | new_edge += le32_to_cpu(rec->e_cpos); |
5798 | ||
5799 | /* | |
5800 | * The caller will use this to delete data blocks. | |
5801 | */ | |
5802 | *delete_start = le64_to_cpu(rec->e_blkno) | |
5803 | + ocfs2_clusters_to_blocks(inode->i_sb, | |
e48edee2 | 5804 | le16_to_cpu(rec->e_leaf_clusters)); |
3a0782d0 MF |
5805 | |
5806 | /* | |
5807 | * If it's now empty, remove this record. | |
5808 | */ | |
e48edee2 | 5809 | if (le16_to_cpu(rec->e_leaf_clusters) == 0) { |
3a0782d0 MF |
5810 | memset(rec, 0, |
5811 | sizeof(struct ocfs2_extent_rec)); | |
5812 | le16_add_cpu(&el->l_next_free_rec, -1); | |
5813 | } | |
5814 | } else { | |
5815 | if (le64_to_cpu(rec->e_blkno) == deleted_eb) { | |
5816 | memset(rec, 0, | |
5817 | sizeof(struct ocfs2_extent_rec)); | |
5818 | le16_add_cpu(&el->l_next_free_rec, -1); | |
5819 | ||
5820 | goto delete; | |
5821 | } | |
5822 | ||
5823 | /* Can this actually happen? */ | |
5824 | if (le16_to_cpu(el->l_next_free_rec) == 0) | |
5825 | goto delete; | |
5826 | ||
5827 | /* | |
5828 | * We never actually deleted any clusters | |
5829 | * because our leaf was empty. There's no | |
5830 | * reason to adjust the rightmost edge then. | |
5831 | */ | |
5832 | if (new_edge == 0) | |
5833 | goto delete; | |
5834 | ||
e48edee2 MF |
5835 | rec->e_int_clusters = cpu_to_le32(new_edge); |
5836 | le32_add_cpu(&rec->e_int_clusters, | |
3a0782d0 MF |
5837 | -le32_to_cpu(rec->e_cpos)); |
5838 | ||
5839 | /* | |
5840 | * A deleted child record should have been | |
5841 | * caught above. | |
5842 | */ | |
e48edee2 | 5843 | BUG_ON(le32_to_cpu(rec->e_int_clusters) == 0); |
3a0782d0 MF |
5844 | } |
5845 | ||
5846 | delete: | |
5847 | ret = ocfs2_journal_dirty(handle, bh); | |
5848 | if (ret) { | |
5849 | mlog_errno(ret); | |
5850 | goto out; | |
5851 | } | |
5852 | ||
5853 | mlog(0, "extent list container %llu, after: record %d: " | |
5854 | "(%u, %u, %llu), next = %u.\n", | |
5855 | (unsigned long long)bh->b_blocknr, i, | |
e48edee2 | 5856 | le32_to_cpu(rec->e_cpos), ocfs2_rec_clusters(el, rec), |
3a0782d0 MF |
5857 | (unsigned long long)le64_to_cpu(rec->e_blkno), |
5858 | le16_to_cpu(el->l_next_free_rec)); | |
5859 | ||
5860 | /* | |
5861 | * We must be careful to only attempt delete of an | |
5862 | * extent block (and not the root inode block). | |
5863 | */ | |
5864 | if (index > 0 && le16_to_cpu(el->l_next_free_rec) == 0) { | |
5865 | struct ocfs2_extent_block *eb = | |
5866 | (struct ocfs2_extent_block *)bh->b_data; | |
5867 | ||
5868 | /* | |
5869 | * Save this for use when processing the | |
5870 | * parent block. | |
5871 | */ | |
5872 | deleted_eb = le64_to_cpu(eb->h_blkno); | |
5873 | ||
5874 | mlog(0, "deleting this extent block.\n"); | |
5875 | ||
5876 | ocfs2_remove_from_cache(inode, bh); | |
5877 | ||
e48edee2 | 5878 | BUG_ON(ocfs2_rec_clusters(el, &el->l_recs[0])); |
3a0782d0 MF |
5879 | BUG_ON(le32_to_cpu(el->l_recs[0].e_cpos)); |
5880 | BUG_ON(le64_to_cpu(el->l_recs[0].e_blkno)); | |
5881 | ||
59a5e416 MF |
5882 | ret = ocfs2_cache_extent_block_free(&tc->tc_dealloc, eb); |
5883 | /* An error here is not fatal. */ | |
5884 | if (ret < 0) | |
5885 | mlog_errno(ret); | |
3a0782d0 MF |
5886 | } else { |
5887 | deleted_eb = 0; | |
5888 | } | |
5889 | ||
5890 | index--; | |
5891 | } | |
5892 | ||
5893 | ret = 0; | |
5894 | out: | |
5895 | return ret; | |
5896 | } | |
5897 | ||
ccd979bd MF |
5898 | static int ocfs2_do_truncate(struct ocfs2_super *osb, |
5899 | unsigned int clusters_to_del, | |
5900 | struct inode *inode, | |
5901 | struct buffer_head *fe_bh, | |
1fabe148 | 5902 | handle_t *handle, |
dcd0538f MF |
5903 | struct ocfs2_truncate_context *tc, |
5904 | struct ocfs2_path *path) | |
ccd979bd | 5905 | { |
3a0782d0 | 5906 | int status; |
ccd979bd | 5907 | struct ocfs2_dinode *fe; |
ccd979bd MF |
5908 | struct ocfs2_extent_block *last_eb = NULL; |
5909 | struct ocfs2_extent_list *el; | |
ccd979bd | 5910 | struct buffer_head *last_eb_bh = NULL; |
ccd979bd MF |
5911 | u64 delete_blk = 0; |
5912 | ||
5913 | fe = (struct ocfs2_dinode *) fe_bh->b_data; | |
5914 | ||
3a0782d0 | 5915 | status = ocfs2_find_new_last_ext_blk(inode, clusters_to_del, |
dcd0538f | 5916 | path, &last_eb_bh); |
ccd979bd MF |
5917 | if (status < 0) { |
5918 | mlog_errno(status); | |
5919 | goto bail; | |
5920 | } | |
dcd0538f MF |
5921 | |
5922 | /* | |
5923 | * Each component will be touched, so we might as well journal | |
5924 | * here to avoid having to handle errors later. | |
5925 | */ | |
3a0782d0 MF |
5926 | status = ocfs2_journal_access_path(inode, handle, path); |
5927 | if (status < 0) { | |
5928 | mlog_errno(status); | |
5929 | goto bail; | |
dcd0538f MF |
5930 | } |
5931 | ||
5932 | if (last_eb_bh) { | |
5933 | status = ocfs2_journal_access(handle, inode, last_eb_bh, | |
5934 | OCFS2_JOURNAL_ACCESS_WRITE); | |
5935 | if (status < 0) { | |
5936 | mlog_errno(status); | |
5937 | goto bail; | |
5938 | } | |
5939 | ||
ccd979bd | 5940 | last_eb = (struct ocfs2_extent_block *) last_eb_bh->b_data; |
dcd0538f | 5941 | } |
ccd979bd | 5942 | |
dcd0538f MF |
5943 | el = &(fe->id2.i_list); |
5944 | ||
5945 | /* | |
5946 | * Lower levels depend on this never happening, but it's best | |
5947 | * to check it up here before changing the tree. | |
5948 | */ | |
e48edee2 | 5949 | if (el->l_tree_depth && el->l_recs[0].e_int_clusters == 0) { |
dcd0538f MF |
5950 | ocfs2_error(inode->i_sb, |
5951 | "Inode %lu has an empty extent record, depth %u\n", | |
5952 | inode->i_ino, le16_to_cpu(el->l_tree_depth)); | |
3a0782d0 | 5953 | status = -EROFS; |
ccd979bd MF |
5954 | goto bail; |
5955 | } | |
ccd979bd MF |
5956 | |
5957 | spin_lock(&OCFS2_I(inode)->ip_lock); | |
5958 | OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters) - | |
5959 | clusters_to_del; | |
5960 | spin_unlock(&OCFS2_I(inode)->ip_lock); | |
5961 | le32_add_cpu(&fe->i_clusters, -clusters_to_del); | |
e535e2ef | 5962 | inode->i_blocks = ocfs2_inode_sector_count(inode); |
ccd979bd | 5963 | |
3a0782d0 MF |
5964 | status = ocfs2_trim_tree(inode, path, handle, tc, |
5965 | clusters_to_del, &delete_blk); | |
5966 | if (status) { | |
5967 | mlog_errno(status); | |
5968 | goto bail; | |
ccd979bd MF |
5969 | } |
5970 | ||
dcd0538f | 5971 | if (le32_to_cpu(fe->i_clusters) == 0) { |
ccd979bd MF |
5972 | /* trunc to zero is a special case. */ |
5973 | el->l_tree_depth = 0; | |
5974 | fe->i_last_eb_blk = 0; | |
5975 | } else if (last_eb) | |
5976 | fe->i_last_eb_blk = last_eb->h_blkno; | |
5977 | ||
5978 | status = ocfs2_journal_dirty(handle, fe_bh); | |
5979 | if (status < 0) { | |
5980 | mlog_errno(status); | |
5981 | goto bail; | |
5982 | } | |
5983 | ||
5984 | if (last_eb) { | |
5985 | /* If there will be a new last extent block, then by | |
5986 | * definition, there cannot be any leaves to the right of | |
5987 | * it. */ | |
ccd979bd MF |
5988 | last_eb->h_next_leaf_blk = 0; |
5989 | status = ocfs2_journal_dirty(handle, last_eb_bh); | |
5990 | if (status < 0) { | |
5991 | mlog_errno(status); | |
5992 | goto bail; | |
5993 | } | |
5994 | } | |
5995 | ||
3a0782d0 MF |
5996 | if (delete_blk) { |
5997 | status = ocfs2_truncate_log_append(osb, handle, delete_blk, | |
5998 | clusters_to_del); | |
ccd979bd MF |
5999 | if (status < 0) { |
6000 | mlog_errno(status); | |
6001 | goto bail; | |
6002 | } | |
ccd979bd MF |
6003 | } |
6004 | status = 0; | |
6005 | bail: | |
dcd0538f | 6006 | |
ccd979bd MF |
6007 | mlog_exit(status); |
6008 | return status; | |
6009 | } | |
6010 | ||
60b11392 MF |
6011 | static int ocfs2_writeback_zero_func(handle_t *handle, struct buffer_head *bh) |
6012 | { | |
6013 | set_buffer_uptodate(bh); | |
6014 | mark_buffer_dirty(bh); | |
6015 | return 0; | |
6016 | } | |
6017 | ||
6018 | static int ocfs2_ordered_zero_func(handle_t *handle, struct buffer_head *bh) | |
6019 | { | |
6020 | set_buffer_uptodate(bh); | |
6021 | mark_buffer_dirty(bh); | |
6022 | return ocfs2_journal_dirty_data(handle, bh); | |
6023 | } | |
6024 | ||
1d410a6e MF |
6025 | static void ocfs2_map_and_dirty_page(struct inode *inode, handle_t *handle, |
6026 | unsigned int from, unsigned int to, | |
6027 | struct page *page, int zero, u64 *phys) | |
6028 | { | |
6029 | int ret, partial = 0; | |
6030 | ||
6031 | ret = ocfs2_map_page_blocks(page, phys, inode, from, to, 0); | |
6032 | if (ret) | |
6033 | mlog_errno(ret); | |
6034 | ||
6035 | if (zero) | |
eebd2aa3 | 6036 | zero_user_segment(page, from, to); |
1d410a6e MF |
6037 | |
6038 | /* | |
6039 | * Need to set the buffers we zero'd into uptodate | |
6040 | * here if they aren't - ocfs2_map_page_blocks() | |
6041 | * might've skipped some | |
6042 | */ | |
6043 | if (ocfs2_should_order_data(inode)) { | |
6044 | ret = walk_page_buffers(handle, | |
6045 | page_buffers(page), | |
6046 | from, to, &partial, | |
6047 | ocfs2_ordered_zero_func); | |
6048 | if (ret < 0) | |
6049 | mlog_errno(ret); | |
6050 | } else { | |
6051 | ret = walk_page_buffers(handle, page_buffers(page), | |
6052 | from, to, &partial, | |
6053 | ocfs2_writeback_zero_func); | |
6054 | if (ret < 0) | |
6055 | mlog_errno(ret); | |
6056 | } | |
6057 | ||
6058 | if (!partial) | |
6059 | SetPageUptodate(page); | |
6060 | ||
6061 | flush_dcache_page(page); | |
6062 | } | |
6063 | ||
35edec1d MF |
6064 | static void ocfs2_zero_cluster_pages(struct inode *inode, loff_t start, |
6065 | loff_t end, struct page **pages, | |
6066 | int numpages, u64 phys, handle_t *handle) | |
60b11392 | 6067 | { |
1d410a6e | 6068 | int i; |
60b11392 MF |
6069 | struct page *page; |
6070 | unsigned int from, to = PAGE_CACHE_SIZE; | |
6071 | struct super_block *sb = inode->i_sb; | |
6072 | ||
6073 | BUG_ON(!ocfs2_sparse_alloc(OCFS2_SB(sb))); | |
6074 | ||
6075 | if (numpages == 0) | |
6076 | goto out; | |
6077 | ||
35edec1d | 6078 | to = PAGE_CACHE_SIZE; |
60b11392 MF |
6079 | for(i = 0; i < numpages; i++) { |
6080 | page = pages[i]; | |
6081 | ||
35edec1d MF |
6082 | from = start & (PAGE_CACHE_SIZE - 1); |
6083 | if ((end >> PAGE_CACHE_SHIFT) == page->index) | |
6084 | to = end & (PAGE_CACHE_SIZE - 1); | |
6085 | ||
60b11392 MF |
6086 | BUG_ON(from > PAGE_CACHE_SIZE); |
6087 | BUG_ON(to > PAGE_CACHE_SIZE); | |
6088 | ||
1d410a6e MF |
6089 | ocfs2_map_and_dirty_page(inode, handle, from, to, page, 1, |
6090 | &phys); | |
60b11392 | 6091 | |
35edec1d | 6092 | start = (page->index + 1) << PAGE_CACHE_SHIFT; |
60b11392 MF |
6093 | } |
6094 | out: | |
1d410a6e MF |
6095 | if (pages) |
6096 | ocfs2_unlock_and_free_pages(pages, numpages); | |
60b11392 MF |
6097 | } |
6098 | ||
35edec1d | 6099 | static int ocfs2_grab_eof_pages(struct inode *inode, loff_t start, loff_t end, |
1d410a6e | 6100 | struct page **pages, int *num) |
60b11392 | 6101 | { |
1d410a6e | 6102 | int numpages, ret = 0; |
60b11392 MF |
6103 | struct super_block *sb = inode->i_sb; |
6104 | struct address_space *mapping = inode->i_mapping; | |
6105 | unsigned long index; | |
35edec1d | 6106 | loff_t last_page_bytes; |
60b11392 | 6107 | |
35edec1d | 6108 | BUG_ON(start > end); |
60b11392 | 6109 | |
35edec1d MF |
6110 | BUG_ON(start >> OCFS2_SB(sb)->s_clustersize_bits != |
6111 | (end - 1) >> OCFS2_SB(sb)->s_clustersize_bits); | |
6112 | ||
1d410a6e | 6113 | numpages = 0; |
35edec1d MF |
6114 | last_page_bytes = PAGE_ALIGN(end); |
6115 | index = start >> PAGE_CACHE_SHIFT; | |
60b11392 MF |
6116 | do { |
6117 | pages[numpages] = grab_cache_page(mapping, index); | |
6118 | if (!pages[numpages]) { | |
6119 | ret = -ENOMEM; | |
6120 | mlog_errno(ret); | |
6121 | goto out; | |
6122 | } | |
6123 | ||
6124 | numpages++; | |
6125 | index++; | |
35edec1d | 6126 | } while (index < (last_page_bytes >> PAGE_CACHE_SHIFT)); |
60b11392 MF |
6127 | |
6128 | out: | |
6129 | if (ret != 0) { | |
1d410a6e MF |
6130 | if (pages) |
6131 | ocfs2_unlock_and_free_pages(pages, numpages); | |
60b11392 MF |
6132 | numpages = 0; |
6133 | } | |
6134 | ||
6135 | *num = numpages; | |
6136 | ||
6137 | return ret; | |
6138 | } | |
6139 | ||
6140 | /* | |
6141 | * Zero the area past i_size but still within an allocated | |
6142 | * cluster. This avoids exposing nonzero data on subsequent file | |
6143 | * extends. | |
6144 | * | |
6145 | * We need to call this before i_size is updated on the inode because | |
6146 | * otherwise block_write_full_page() will skip writeout of pages past | |
6147 | * i_size. The range to zero is passed in for this reason. | |
6148 | */ | |
35edec1d MF |
6149 | int ocfs2_zero_range_for_truncate(struct inode *inode, handle_t *handle, |
6150 | u64 range_start, u64 range_end) | |
60b11392 | 6151 | { |
1d410a6e | 6152 | int ret = 0, numpages; |
60b11392 MF |
6153 | struct page **pages = NULL; |
6154 | u64 phys; | |
1d410a6e MF |
6155 | unsigned int ext_flags; |
6156 | struct super_block *sb = inode->i_sb; | |
60b11392 MF |
6157 | |
6158 | /* | |
6159 | * File systems which don't support sparse files zero on every | |
6160 | * extend. | |
6161 | */ | |
1d410a6e | 6162 | if (!ocfs2_sparse_alloc(OCFS2_SB(sb))) |
60b11392 MF |
6163 | return 0; |
6164 | ||
1d410a6e | 6165 | pages = kcalloc(ocfs2_pages_per_cluster(sb), |
60b11392 MF |
6166 | sizeof(struct page *), GFP_NOFS); |
6167 | if (pages == NULL) { | |
6168 | ret = -ENOMEM; | |
6169 | mlog_errno(ret); | |
6170 | goto out; | |
6171 | } | |
6172 | ||
1d410a6e MF |
6173 | if (range_start == range_end) |
6174 | goto out; | |
6175 | ||
6176 | ret = ocfs2_extent_map_get_blocks(inode, | |
6177 | range_start >> sb->s_blocksize_bits, | |
6178 | &phys, NULL, &ext_flags); | |
60b11392 MF |
6179 | if (ret) { |
6180 | mlog_errno(ret); | |
6181 | goto out; | |
6182 | } | |
6183 | ||
1d410a6e MF |
6184 | /* |
6185 | * Tail is a hole, or is marked unwritten. In either case, we | |
6186 | * can count on read and write to return/push zeros. | |
6187 | */ | |
6188 | if (phys == 0 || ext_flags & OCFS2_EXT_UNWRITTEN) | |
60b11392 MF |
6189 | goto out; |
6190 | ||
1d410a6e MF |
6191 | ret = ocfs2_grab_eof_pages(inode, range_start, range_end, pages, |
6192 | &numpages); | |
6193 | if (ret) { | |
6194 | mlog_errno(ret); | |
6195 | goto out; | |
6196 | } | |
6197 | ||
35edec1d MF |
6198 | ocfs2_zero_cluster_pages(inode, range_start, range_end, pages, |
6199 | numpages, phys, handle); | |
60b11392 MF |
6200 | |
6201 | /* | |
6202 | * Initiate writeout of the pages we zero'd here. We don't | |
6203 | * wait on them - the truncate_inode_pages() call later will | |
6204 | * do that for us. | |
6205 | */ | |
35edec1d MF |
6206 | ret = do_sync_mapping_range(inode->i_mapping, range_start, |
6207 | range_end - 1, SYNC_FILE_RANGE_WRITE); | |
60b11392 MF |
6208 | if (ret) |
6209 | mlog_errno(ret); | |
6210 | ||
6211 | out: | |
6212 | if (pages) | |
6213 | kfree(pages); | |
6214 | ||
6215 | return ret; | |
6216 | } | |
6217 | ||
1afc32b9 MF |
6218 | static void ocfs2_zero_dinode_id2(struct inode *inode, struct ocfs2_dinode *di) |
6219 | { | |
6220 | unsigned int blocksize = 1 << inode->i_sb->s_blocksize_bits; | |
6221 | ||
6222 | memset(&di->id2, 0, blocksize - offsetof(struct ocfs2_dinode, id2)); | |
6223 | } | |
6224 | ||
5b6a3a2b MF |
6225 | void ocfs2_dinode_new_extent_list(struct inode *inode, |
6226 | struct ocfs2_dinode *di) | |
6227 | { | |
6228 | ocfs2_zero_dinode_id2(inode, di); | |
6229 | di->id2.i_list.l_tree_depth = 0; | |
6230 | di->id2.i_list.l_next_free_rec = 0; | |
6231 | di->id2.i_list.l_count = cpu_to_le16(ocfs2_extent_recs_per_inode(inode->i_sb)); | |
6232 | } | |
6233 | ||
1afc32b9 MF |
6234 | void ocfs2_set_inode_data_inline(struct inode *inode, struct ocfs2_dinode *di) |
6235 | { | |
6236 | struct ocfs2_inode_info *oi = OCFS2_I(inode); | |
6237 | struct ocfs2_inline_data *idata = &di->id2.i_data; | |
6238 | ||
6239 | spin_lock(&oi->ip_lock); | |
6240 | oi->ip_dyn_features |= OCFS2_INLINE_DATA_FL; | |
6241 | di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features); | |
6242 | spin_unlock(&oi->ip_lock); | |
6243 | ||
6244 | /* | |
6245 | * We clear the entire i_data structure here so that all | |
6246 | * fields can be properly initialized. | |
6247 | */ | |
6248 | ocfs2_zero_dinode_id2(inode, di); | |
6249 | ||
6250 | idata->id_count = cpu_to_le16(ocfs2_max_inline_data(inode->i_sb)); | |
6251 | } | |
6252 | ||
6253 | int ocfs2_convert_inline_data_to_extents(struct inode *inode, | |
6254 | struct buffer_head *di_bh) | |
6255 | { | |
6256 | int ret, i, has_data, num_pages = 0; | |
6257 | handle_t *handle; | |
6258 | u64 uninitialized_var(block); | |
6259 | struct ocfs2_inode_info *oi = OCFS2_I(inode); | |
6260 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); | |
6261 | struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; | |
1afc32b9 MF |
6262 | struct ocfs2_alloc_context *data_ac = NULL; |
6263 | struct page **pages = NULL; | |
6264 | loff_t end = osb->s_clustersize; | |
6265 | ||
6266 | has_data = i_size_read(inode) ? 1 : 0; | |
6267 | ||
6268 | if (has_data) { | |
6269 | pages = kcalloc(ocfs2_pages_per_cluster(osb->sb), | |
6270 | sizeof(struct page *), GFP_NOFS); | |
6271 | if (pages == NULL) { | |
6272 | ret = -ENOMEM; | |
6273 | mlog_errno(ret); | |
6274 | goto out; | |
6275 | } | |
6276 | ||
6277 | ret = ocfs2_reserve_clusters(osb, 1, &data_ac); | |
6278 | if (ret) { | |
6279 | mlog_errno(ret); | |
6280 | goto out; | |
6281 | } | |
6282 | } | |
6283 | ||
6284 | handle = ocfs2_start_trans(osb, OCFS2_INLINE_TO_EXTENTS_CREDITS); | |
6285 | if (IS_ERR(handle)) { | |
6286 | ret = PTR_ERR(handle); | |
6287 | mlog_errno(ret); | |
6288 | goto out_unlock; | |
6289 | } | |
6290 | ||
6291 | ret = ocfs2_journal_access(handle, inode, di_bh, | |
6292 | OCFS2_JOURNAL_ACCESS_WRITE); | |
6293 | if (ret) { | |
6294 | mlog_errno(ret); | |
6295 | goto out_commit; | |
6296 | } | |
6297 | ||
6298 | if (has_data) { | |
6299 | u32 bit_off, num; | |
6300 | unsigned int page_end; | |
6301 | u64 phys; | |
6302 | ||
6303 | ret = ocfs2_claim_clusters(osb, handle, data_ac, 1, &bit_off, | |
6304 | &num); | |
6305 | if (ret) { | |
6306 | mlog_errno(ret); | |
6307 | goto out_commit; | |
6308 | } | |
6309 | ||
6310 | /* | |
6311 | * Save two copies, one for insert, and one that can | |
6312 | * be changed by ocfs2_map_and_dirty_page() below. | |
6313 | */ | |
6314 | block = phys = ocfs2_clusters_to_blocks(inode->i_sb, bit_off); | |
6315 | ||
6316 | /* | |
6317 | * Non sparse file systems zero on extend, so no need | |
6318 | * to do that now. | |
6319 | */ | |
6320 | if (!ocfs2_sparse_alloc(osb) && | |
6321 | PAGE_CACHE_SIZE < osb->s_clustersize) | |
6322 | end = PAGE_CACHE_SIZE; | |
6323 | ||
6324 | ret = ocfs2_grab_eof_pages(inode, 0, end, pages, &num_pages); | |
6325 | if (ret) { | |
6326 | mlog_errno(ret); | |
6327 | goto out_commit; | |
6328 | } | |
6329 | ||
6330 | /* | |
6331 | * This should populate the 1st page for us and mark | |
6332 | * it up to date. | |
6333 | */ | |
6334 | ret = ocfs2_read_inline_data(inode, pages[0], di_bh); | |
6335 | if (ret) { | |
6336 | mlog_errno(ret); | |
6337 | goto out_commit; | |
6338 | } | |
6339 | ||
6340 | page_end = PAGE_CACHE_SIZE; | |
6341 | if (PAGE_CACHE_SIZE > osb->s_clustersize) | |
6342 | page_end = osb->s_clustersize; | |
6343 | ||
6344 | for (i = 0; i < num_pages; i++) | |
6345 | ocfs2_map_and_dirty_page(inode, handle, 0, page_end, | |
6346 | pages[i], i > 0, &phys); | |
6347 | } | |
6348 | ||
6349 | spin_lock(&oi->ip_lock); | |
6350 | oi->ip_dyn_features &= ~OCFS2_INLINE_DATA_FL; | |
6351 | di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features); | |
6352 | spin_unlock(&oi->ip_lock); | |
6353 | ||
5b6a3a2b | 6354 | ocfs2_dinode_new_extent_list(inode, di); |
1afc32b9 MF |
6355 | |
6356 | ocfs2_journal_dirty(handle, di_bh); | |
6357 | ||
6358 | if (has_data) { | |
6359 | /* | |
6360 | * An error at this point should be extremely rare. If | |
6361 | * this proves to be false, we could always re-build | |
6362 | * the in-inode data from our pages. | |
6363 | */ | |
6364 | ret = ocfs2_insert_extent(osb, handle, inode, di_bh, | |
6365 | 0, block, 1, 0, NULL); | |
6366 | if (ret) { | |
6367 | mlog_errno(ret); | |
6368 | goto out_commit; | |
6369 | } | |
6370 | ||
6371 | inode->i_blocks = ocfs2_inode_sector_count(inode); | |
6372 | } | |
6373 | ||
6374 | out_commit: | |
6375 | ocfs2_commit_trans(osb, handle); | |
6376 | ||
6377 | out_unlock: | |
6378 | if (data_ac) | |
6379 | ocfs2_free_alloc_context(data_ac); | |
6380 | ||
6381 | out: | |
6382 | if (pages) { | |
6383 | ocfs2_unlock_and_free_pages(pages, num_pages); | |
6384 | kfree(pages); | |
6385 | } | |
6386 | ||
6387 | return ret; | |
6388 | } | |
6389 | ||
ccd979bd MF |
6390 | /* |
6391 | * It is expected, that by the time you call this function, | |
6392 | * inode->i_size and fe->i_size have been adjusted. | |
6393 | * | |
6394 | * WARNING: This will kfree the truncate context | |
6395 | */ | |
6396 | int ocfs2_commit_truncate(struct ocfs2_super *osb, | |
6397 | struct inode *inode, | |
6398 | struct buffer_head *fe_bh, | |
6399 | struct ocfs2_truncate_context *tc) | |
6400 | { | |
6401 | int status, i, credits, tl_sem = 0; | |
dcd0538f | 6402 | u32 clusters_to_del, new_highest_cpos, range; |
ccd979bd | 6403 | struct ocfs2_extent_list *el; |
1fabe148 | 6404 | handle_t *handle = NULL; |
ccd979bd | 6405 | struct inode *tl_inode = osb->osb_tl_inode; |
dcd0538f | 6406 | struct ocfs2_path *path = NULL; |
ccd979bd MF |
6407 | |
6408 | mlog_entry_void(); | |
6409 | ||
dcd0538f | 6410 | new_highest_cpos = ocfs2_clusters_for_bytes(osb->sb, |
ccd979bd MF |
6411 | i_size_read(inode)); |
6412 | ||
dcd0538f MF |
6413 | path = ocfs2_new_inode_path(fe_bh); |
6414 | if (!path) { | |
6415 | status = -ENOMEM; | |
6416 | mlog_errno(status); | |
6417 | goto bail; | |
6418 | } | |
83418978 MF |
6419 | |
6420 | ocfs2_extent_map_trunc(inode, new_highest_cpos); | |
6421 | ||
ccd979bd | 6422 | start: |
3a0782d0 MF |
6423 | /* |
6424 | * Check that we still have allocation to delete. | |
6425 | */ | |
6426 | if (OCFS2_I(inode)->ip_clusters == 0) { | |
6427 | status = 0; | |
6428 | goto bail; | |
6429 | } | |
6430 | ||
dcd0538f MF |
6431 | /* |
6432 | * Truncate always works against the rightmost tree branch. | |
6433 | */ | |
6434 | status = ocfs2_find_path(inode, path, UINT_MAX); | |
6435 | if (status) { | |
6436 | mlog_errno(status); | |
6437 | goto bail; | |
ccd979bd MF |
6438 | } |
6439 | ||
dcd0538f MF |
6440 | mlog(0, "inode->ip_clusters = %u, tree_depth = %u\n", |
6441 | OCFS2_I(inode)->ip_clusters, path->p_tree_depth); | |
6442 | ||
6443 | /* | |
6444 | * By now, el will point to the extent list on the bottom most | |
6445 | * portion of this tree. Only the tail record is considered in | |
6446 | * each pass. | |
6447 | * | |
6448 | * We handle the following cases, in order: | |
6449 | * - empty extent: delete the remaining branch | |
6450 | * - remove the entire record | |
6451 | * - remove a partial record | |
6452 | * - no record needs to be removed (truncate has completed) | |
6453 | */ | |
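	/*
	 * Worked example (illustrative, not from the original source):
	 * say the tail record starts at cpos 100 and covers 50 clusters,
	 * so range = 150.  Then:
	 *   - new_highest_cpos <= 100: the whole record (50 clusters) is
	 *     removed this pass;
	 *   - 100 < new_highest_cpos < 150, e.g. 120: only the overlap,
	 *     range - new_highest_cpos = 30 clusters, is removed;
	 *   - new_highest_cpos >= 150: nothing is left to remove and the
	 *     truncate has completed.
	 */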
6454 | el = path_leaf_el(path); | |
3a0782d0 MF |
6455 | if (le16_to_cpu(el->l_next_free_rec) == 0) { |
6456 | ocfs2_error(inode->i_sb, | |
6457 | "Inode %llu has empty extent block at %llu\n", | |
6458 | (unsigned long long)OCFS2_I(inode)->ip_blkno, | |
6459 | (unsigned long long)path_leaf_bh(path)->b_blocknr); | |
6460 | status = -EROFS; | |
6461 | goto bail; | |
6462 | } | |
6463 | ||
ccd979bd | 6464 | i = le16_to_cpu(el->l_next_free_rec) - 1; |
dcd0538f | 6465 | range = le32_to_cpu(el->l_recs[i].e_cpos) + |
e48edee2 | 6466 | ocfs2_rec_clusters(el, &el->l_recs[i]); |
dcd0538f MF |
6467 | if (i == 0 && ocfs2_is_empty_extent(&el->l_recs[i])) { |
6468 | clusters_to_del = 0; | |
6469 | } else if (le32_to_cpu(el->l_recs[i].e_cpos) >= new_highest_cpos) { | |
e48edee2 | 6470 | clusters_to_del = ocfs2_rec_clusters(el, &el->l_recs[i]); |
dcd0538f | 6471 | } else if (range > new_highest_cpos) { |
e48edee2 | 6472 | clusters_to_del = (ocfs2_rec_clusters(el, &el->l_recs[i]) + |
ccd979bd | 6473 | le32_to_cpu(el->l_recs[i].e_cpos)) - |
dcd0538f MF |
6474 | new_highest_cpos; |
6475 | } else { | |
6476 | status = 0; | |
6477 | goto bail; | |
6478 | } | |
ccd979bd | 6479 | |
dcd0538f MF |
6480 | mlog(0, "clusters_to_del = %u in this pass, tail blk=%llu\n", |
6481 | clusters_to_del, (unsigned long long)path_leaf_bh(path)->b_blocknr); | |
6482 | ||
1b1dcc1b | 6483 | mutex_lock(&tl_inode->i_mutex); |
ccd979bd MF |
6484 | tl_sem = 1; |
6485 | /* ocfs2_truncate_log_needs_flush guarantees us at least one | |
6486 | * record is free for use. If there isn't any, we flush to get | |
6487 | * an empty truncate log. */ | |
6488 | if (ocfs2_truncate_log_needs_flush(osb)) { | |
6489 | status = __ocfs2_flush_truncate_log(osb); | |
6490 | if (status < 0) { | |
6491 | mlog_errno(status); | |
6492 | goto bail; | |
6493 | } | |
6494 | } | |
6495 | ||
6496 | credits = ocfs2_calc_tree_trunc_credits(osb->sb, clusters_to_del, | |
dcd0538f MF |
6497 | (struct ocfs2_dinode *)fe_bh->b_data, |
6498 | el); | |
65eff9cc | 6499 | handle = ocfs2_start_trans(osb, credits); |
ccd979bd MF |
6500 | if (IS_ERR(handle)) { |
6501 | status = PTR_ERR(handle); | |
6502 | handle = NULL; | |
6503 | mlog_errno(status); | |
6504 | goto bail; | |
6505 | } | |
6506 | ||
dcd0538f MF |
6507 | status = ocfs2_do_truncate(osb, clusters_to_del, inode, fe_bh, handle, |
6508 | tc, path); | |
ccd979bd MF |
6509 | if (status < 0) { |
6510 | mlog_errno(status); | |
6511 | goto bail; | |
6512 | } | |
6513 | ||
1b1dcc1b | 6514 | mutex_unlock(&tl_inode->i_mutex); |
ccd979bd MF |
6515 | tl_sem = 0; |
6516 | ||
02dc1af4 | 6517 | ocfs2_commit_trans(osb, handle); |
ccd979bd MF |
6518 | handle = NULL; |
6519 | ||
dcd0538f MF |
6520 | ocfs2_reinit_path(path, 1); |
6521 | ||
6522 | /* | |
3a0782d0 MF |
6523 | * The check above will catch the case where we've truncated |
6524 | * away all allocation. | |
dcd0538f | 6525 | */ |
3a0782d0 MF |
6526 | goto start; |
6527 | ||
ccd979bd | 6528 | bail: |
ccd979bd MF |
6529 | |
6530 | ocfs2_schedule_truncate_log_flush(osb, 1); | |
6531 | ||
6532 | if (tl_sem) | |
1b1dcc1b | 6533 | mutex_unlock(&tl_inode->i_mutex); |
ccd979bd MF |
6534 | |
6535 | if (handle) | |
02dc1af4 | 6536 | ocfs2_commit_trans(osb, handle); |
ccd979bd | 6537 | |
59a5e416 MF |
6538 | ocfs2_run_deallocs(osb, &tc->tc_dealloc); |
6539 | ||
dcd0538f | 6540 | ocfs2_free_path(path); |
ccd979bd MF |
6541 | |
6542 | /* This will drop the ext_alloc cluster lock for us */ | |
6543 | ocfs2_free_truncate_context(tc); | |
6544 | ||
6545 | mlog_exit(status); | |
6546 | return status; | |
6547 | } | |
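Because each pass only trims the rightmost record and then restarts from the start: label under a fresh transaction, the shape of the loop can be modelled outside the kernel. A self-contained, runnable sketch of the per-pass arithmetic (plain C with hypothetical types; it ignores the empty-extent case, the truncate log and all journaling details):

#include <stdio.h>

/* Simplified stand-in for the tail extent record of the rightmost leaf. */
struct rec {
	unsigned int cpos;	/* first cluster covered by the record */
	unsigned int clusters;	/* number of clusters in the record */
};

/* Models the clusters_to_del computation in ocfs2_commit_truncate(). */
static unsigned int clusters_to_del(const struct rec *r,
				    unsigned int new_highest_cpos)
{
	unsigned int range = r->cpos + r->clusters;

	if (r->cpos >= new_highest_cpos)
		return r->clusters;		/* remove the entire record */
	if (range > new_highest_cpos)
		return range - new_highest_cpos; /* remove a partial record */
	return 0;				/* truncate has completed */
}

int main(void)
{
	struct rec recs[] = { { 0, 40 }, { 40, 60 }, { 100, 50 } };
	int tail = 2;			/* index of the rightmost record */
	unsigned int new_highest_cpos = 25;

	while (tail >= 0) {
		unsigned int del = clusters_to_del(&recs[tail],
						   new_highest_cpos);

		if (del == 0)
			break;		/* nothing left above the new size */
		recs[tail].clusters -= del;
		printf("pass: freed %u clusters from record %d\n", del, tail);
		if (recs[tail].clusters == 0)
			tail--;		/* whole record gone, step left */
	}
	return 0;
}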
6548 | ||
ccd979bd | 6549 | /* |
59a5e416 | 6550 | * Expects the inode to already be locked. |
ccd979bd MF |
6551 | */ |
6552 | int ocfs2_prepare_truncate(struct ocfs2_super *osb, | |
6553 | struct inode *inode, | |
6554 | struct buffer_head *fe_bh, | |
6555 | struct ocfs2_truncate_context **tc) | |
6556 | { | |
59a5e416 | 6557 | int status; |
ccd979bd MF |
6558 | unsigned int new_i_clusters; |
6559 | struct ocfs2_dinode *fe; | |
6560 | struct ocfs2_extent_block *eb; | |
ccd979bd | 6561 | struct buffer_head *last_eb_bh = NULL; |
ccd979bd MF |
6562 | |
6563 | mlog_entry_void(); | |
6564 | ||
6565 | *tc = NULL; | |
6566 | ||
6567 | new_i_clusters = ocfs2_clusters_for_bytes(osb->sb, | |
6568 | i_size_read(inode)); | |
6569 | fe = (struct ocfs2_dinode *) fe_bh->b_data; | |
6570 | ||
6571 | mlog(0, "fe->i_clusters = %u, new_i_clusters = %u, fe->i_size = " |
1ca1a111 MF |
6572 | "%llu\n", le32_to_cpu(fe->i_clusters), new_i_clusters, |
6573 | (unsigned long long)le64_to_cpu(fe->i_size)); | |
ccd979bd | 6574 | |
cd861280 | 6575 | *tc = kzalloc(sizeof(struct ocfs2_truncate_context), GFP_KERNEL); |
ccd979bd MF |
6576 | if (!(*tc)) { |
6577 | status = -ENOMEM; | |
6578 | mlog_errno(status); | |
6579 | goto bail; | |
6580 | } | |
59a5e416 | 6581 | ocfs2_init_dealloc_ctxt(&(*tc)->tc_dealloc); |
ccd979bd | 6582 | |
ccd979bd | 6583 | if (fe->id2.i_list.l_tree_depth) { |
ccd979bd MF |
6584 | status = ocfs2_read_block(osb, le64_to_cpu(fe->i_last_eb_blk), |
6585 | &last_eb_bh, OCFS2_BH_CACHED, inode); | |
6586 | if (status < 0) { | |
6587 | mlog_errno(status); | |
6588 | goto bail; | |
6589 | } | |
6590 | eb = (struct ocfs2_extent_block *) last_eb_bh->b_data; | |
6591 | if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) { | |
6592 | OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb); | |
6593 | ||
6594 | brelse(last_eb_bh); | |
6595 | status = -EIO; | |
6596 | goto bail; | |
6597 | } | |
ccd979bd MF |
6598 | } |
6599 | ||
6600 | (*tc)->tc_last_eb_bh = last_eb_bh; | |
6601 | ||
ccd979bd MF |
6602 | status = 0; |
6603 | bail: | |
6604 | if (status < 0) { | |
6605 | if (*tc) | |
6606 | ocfs2_free_truncate_context(*tc); | |
6607 | *tc = NULL; | |
6608 | } | |
6609 | mlog_exit_void(); | |
6610 | return status; | |
6611 | } | |
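The two functions above are meant to be used as a pair. A sketch of a hypothetical caller (not taken from the original file) showing the ownership rule: the inode is already locked, and ocfs2_commit_truncate() frees the context on every path, so the caller never frees tc itself.

/* Hypothetical caller, for illustration only. */
static int example_truncate_to_i_size(struct ocfs2_super *osb,
				      struct inode *inode,
				      struct buffer_head *fe_bh)
{
	struct ocfs2_truncate_context *tc = NULL;
	int status;

	/* The inode is expected to already be locked by the caller. */
	status = ocfs2_prepare_truncate(osb, inode, fe_bh, &tc);
	if (status < 0)
		return status;

	/* ocfs2_commit_truncate() kfrees tc, even on error. */
	return ocfs2_commit_truncate(osb, inode, fe_bh, tc);
}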
6612 | ||
1afc32b9 MF |
6613 | /* |
6614 | * 'start' is inclusive, 'end' is not. | |
6615 | */ | |
6616 | int ocfs2_truncate_inline(struct inode *inode, struct buffer_head *di_bh, | |
6617 | unsigned int start, unsigned int end, int trunc) | |
6618 | { | |
6619 | int ret; | |
6620 | unsigned int numbytes; | |
6621 | handle_t *handle; | |
6622 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); | |
6623 | struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; | |
6624 | struct ocfs2_inline_data *idata = &di->id2.i_data; | |
6625 | ||
6626 | if (end > i_size_read(inode)) | |
6627 | end = i_size_read(inode); | |
6628 | ||
6629 | BUG_ON(start >= end); | |
6630 | ||
6631 | if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) || | |
6632 | !(le16_to_cpu(di->i_dyn_features) & OCFS2_INLINE_DATA_FL) || | |
6633 | !ocfs2_supports_inline_data(osb)) { | |
6634 | ocfs2_error(inode->i_sb, | |
6635 | "Inline data flags for inode %llu don't agree! " | |
6636 | "Disk: 0x%x, Memory: 0x%x, Superblock: 0x%x\n", | |
6637 | (unsigned long long)OCFS2_I(inode)->ip_blkno, | |
6638 | le16_to_cpu(di->i_dyn_features), | |
6639 | OCFS2_I(inode)->ip_dyn_features, | |
6640 | osb->s_feature_incompat); | |
6641 | ret = -EROFS; | |
6642 | goto out; | |
6643 | } | |
6644 | ||
6645 | handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); | |
6646 | if (IS_ERR(handle)) { | |
6647 | ret = PTR_ERR(handle); | |
6648 | mlog_errno(ret); | |
6649 | goto out; | |
6650 | } | |
6651 | ||
6652 | ret = ocfs2_journal_access(handle, inode, di_bh, | |
6653 | OCFS2_JOURNAL_ACCESS_WRITE); | |
6654 | if (ret) { | |
6655 | mlog_errno(ret); | |
6656 | goto out_commit; | |
6657 | } | |
6658 | ||
6659 | numbytes = end - start; | |
6660 | memset(idata->id_data + start, 0, numbytes); | |
6661 | ||
6662 | /* | |
6663 | * No need to worry about the data page here - it's been | |
6664 | * truncated already and inline data doesn't need it for | |
6665 | * pushing zeros to disk, so we'll let readpage pick it up |
6666 | * later. | |
6667 | */ | |
6668 | if (trunc) { | |
6669 | i_size_write(inode, start); | |
6670 | di->i_size = cpu_to_le64(start); | |
6671 | } | |
6672 | ||
6673 | inode->i_blocks = ocfs2_inode_sector_count(inode); | |
6674 | inode->i_ctime = inode->i_mtime = CURRENT_TIME; | |
6675 | ||
6676 | di->i_ctime = di->i_mtime = cpu_to_le64(inode->i_ctime.tv_sec); | |
6677 | di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec); | |
6678 | ||
6679 | ocfs2_journal_dirty(handle, di_bh); | |
6680 | ||
6681 | out_commit: | |
6682 | ocfs2_commit_trans(osb, handle); | |
6683 | ||
6684 | out: | |
6685 | return ret; | |
6686 | } | |
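As a usage sketch (a hypothetical wrapper, not in alloc.c): shrinking an inline-data inode to new_size bytes means zeroing the half-open byte range [new_size, i_size) and passing a non-zero trunc so i_size is updated; new_size must be strictly smaller than the current i_size or the BUG_ON above fires.

/* Hypothetical wrapper, for illustration only. */
static int example_shrink_inline(struct inode *inode,
				 struct buffer_head *di_bh,
				 unsigned int new_size)
{
	/* start is inclusive, end is exclusive; trunc != 0 updates i_size. */
	return ocfs2_truncate_inline(inode, di_bh, new_size,
				     i_size_read(inode), 1);
}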
6687 | ||
ccd979bd MF |
6688 | static void ocfs2_free_truncate_context(struct ocfs2_truncate_context *tc) |
6689 | { | |
59a5e416 MF |
6690 | /* |
6691 | * The caller is responsible for completing deallocation | |
6692 | * before freeing the context. | |
6693 | */ | |
6694 | if (tc->tc_dealloc.c_first_suballocator != NULL) | |
6695 | mlog(ML_NOTICE, | |
6696 | "Truncate completion has non-empty dealloc context\n"); | |
ccd979bd MF |
6697 | |
6698 | if (tc->tc_last_eb_bh) | |
6699 | brelse(tc->tc_last_eb_bh); | |
6700 | ||
6701 | kfree(tc); | |
6702 | } |
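One ordering note, shown as a hypothetical sketch (not from the original file): the dealloc context attached to the truncate context must be drained before the context is freed, which is the order the exit path of ocfs2_commit_truncate() uses; otherwise the ML_NOTICE warning above is logged.

/* Hypothetical teardown helper, for illustration only. */
static void example_truncate_teardown(struct ocfs2_super *osb,
				      struct ocfs2_truncate_context *tc)
{
	ocfs2_run_deallocs(osb, &tc->tc_dealloc);	/* drain first... */
	ocfs2_free_truncate_context(tc);		/* ...then kfree tc */
}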