/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/smp_lock.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/bit_spinlock.h>
#include <linux/version.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "volumes.h"
#include "ordered-data.h"
#include "xattr.h"
#include "compat.h"
#include "tree-log.h"
#include "ref-cache.h"
#include "compression.h"

struct btrfs_iget_args {
	u64 ino;
	struct btrfs_root *root;
};

static struct inode_operations btrfs_dir_inode_operations;
static struct inode_operations btrfs_symlink_inode_operations;
static struct inode_operations btrfs_dir_ro_inode_operations;
static struct inode_operations btrfs_special_inode_operations;
static struct inode_operations btrfs_file_inode_operations;
static struct address_space_operations btrfs_aops;
static struct address_space_operations btrfs_symlink_aops;
static struct file_operations btrfs_dir_file_operations;
static struct extent_io_ops btrfs_extent_io_ops;

static struct kmem_cache *btrfs_inode_cachep;
struct kmem_cache *btrfs_trans_handle_cachep;
struct kmem_cache *btrfs_transaction_cachep;
struct kmem_cache *btrfs_bit_radix_cachep;
struct kmem_cache *btrfs_path_cachep;

#define S_SHIFT 12
static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
	[S_IFREG >> S_SHIFT]	= BTRFS_FT_REG_FILE,
	[S_IFDIR >> S_SHIFT]	= BTRFS_FT_DIR,
	[S_IFCHR >> S_SHIFT]	= BTRFS_FT_CHRDEV,
	[S_IFBLK >> S_SHIFT]	= BTRFS_FT_BLKDEV,
	[S_IFIFO >> S_SHIFT]	= BTRFS_FT_FIFO,
	[S_IFSOCK >> S_SHIFT]	= BTRFS_FT_SOCK,
	[S_IFLNK >> S_SHIFT]	= BTRFS_FT_SYMLINK,
};

static void btrfs_truncate(struct inode *inode);
static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end);
static noinline int cow_file_range(struct inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written, int unlock);

/*
 * a very lame attempt at stopping writes when the FS is 85% full.  There
 * are countless ways this is incorrect, but it is better than nothing.
 */
int btrfs_check_free_space(struct btrfs_root *root, u64 num_required,
			   int for_del)
{
	u64 total;
	u64 used;
	u64 thresh;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&root->fs_info->delalloc_lock, flags);
	total = btrfs_super_total_bytes(&root->fs_info->super_copy);
	used = btrfs_super_bytes_used(&root->fs_info->super_copy);
	if (for_del)
		thresh = total * 90;
	else
		thresh = total * 85;

	do_div(thresh, 100);

	if (used + root->fs_info->delalloc_bytes + num_required > thresh)
		ret = -ENOSPC;
	spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags);
	return ret;
}

/*
 * this does all the hard work for inserting an inline extent into
 * the btree.  The caller should have done a btrfs_drop_extents so that
 * no overlapping inline items exist in the btree
 */
static int noinline insert_inline_extent(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct inode *inode,
				u64 start, size_t size, size_t compressed_size,
				struct page **compressed_pages)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct page *page = NULL;
	char *kaddr;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	int err = 0;
	int ret;
	size_t cur_size = size;
	size_t datasize;
	unsigned long offset;
	int use_compress = 0;

	if (compressed_size && compressed_pages) {
		use_compress = 1;
		cur_size = compressed_size;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	btrfs_set_trans_block_group(trans, inode);

	key.objectid = inode->i_ino;
	key.offset = start;
	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
	datasize = btrfs_file_extent_calc_inline_size(cur_size);

	inode_add_bytes(inode, size);
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      datasize);
	if (ret) {
		err = ret;
		goto fail;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
	ptr = btrfs_file_extent_inline_start(ei);

	if (use_compress) {
		struct page *cpage;
		int i = 0;
		while (compressed_size > 0) {
			cpage = compressed_pages[i];
			cur_size = min(compressed_size,
				       PAGE_CACHE_SIZE);

			kaddr = kmap(cpage);
			write_extent_buffer(leaf, kaddr, ptr, cur_size);
			kunmap(cpage);

			i++;
			ptr += cur_size;
			compressed_size -= cur_size;
		}
		btrfs_set_file_extent_compression(leaf, ei,
						  BTRFS_COMPRESS_ZLIB);
	} else {
		page = find_get_page(inode->i_mapping,
				     start >> PAGE_CACHE_SHIFT);
		btrfs_set_file_extent_compression(leaf, ei, 0);
		kaddr = kmap_atomic(page, KM_USER0);
		offset = start & (PAGE_CACHE_SIZE - 1);
		write_extent_buffer(leaf, kaddr + offset, ptr, size);
		kunmap_atomic(kaddr, KM_USER0);
		page_cache_release(page);
	}
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

	BTRFS_I(inode)->disk_i_size = inode->i_size;
	btrfs_update_inode(trans, root, inode);
	return 0;
fail:
	btrfs_free_path(path);
	return err;
}

/*
 * conditionally insert an inline extent into the file.  This
 * does the checks required to make sure the data is small enough
 * to fit as an inline extent.
 */
static int cow_file_range_inline(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct inode *inode, u64 start, u64 end,
				 size_t compressed_size,
				 struct page **compressed_pages)
{
	u64 isize = i_size_read(inode);
	u64 actual_end = min(end + 1, isize);
	u64 inline_len = actual_end - start;
	u64 aligned_end = (end + root->sectorsize - 1) &
			~((u64)root->sectorsize - 1);
	u64 hint_byte;
	u64 data_len = inline_len;
	int ret;

	if (compressed_size)
		data_len = compressed_size;

	if (start > 0 ||
	    actual_end >= PAGE_CACHE_SIZE ||
	    data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
	    (!compressed_size &&
	     (actual_end & (root->sectorsize - 1)) == 0) ||
	    end + 1 < isize ||
	    data_len > root->fs_info->max_inline) {
		return 1;
	}

	ret = btrfs_drop_extents(trans, root, inode, start,
				 aligned_end, start, &hint_byte);
	BUG_ON(ret);

	if (isize > actual_end)
		inline_len = min_t(u64, isize, actual_end);
	ret = insert_inline_extent(trans, root, inode, start,
				   inline_len, compressed_size,
				   compressed_pages);
	BUG_ON(ret);
	btrfs_drop_extent_cache(inode, start, aligned_end, 0);
	return 0;
}
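/*
 * an async_cow describes a range of delalloc bytes handed to the compression
 * work queue; each async_extent on its list is one compressed (or
 * uncompressed fallback) chunk produced by compress_file_range and later
 * written out by submit_compressed_extents.
 */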
struct async_extent {
	u64 start;
	u64 ram_size;
	u64 compressed_size;
	struct page **pages;
	unsigned long nr_pages;
	struct list_head list;
};

struct async_cow {
	struct inode *inode;
	struct btrfs_root *root;
	struct page *locked_page;
	u64 start;
	u64 end;
	struct list_head extents;
	struct btrfs_work work;
};
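/* allocate a new async_extent and queue it on the async_cow's list */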
static noinline int add_async_extent(struct async_cow *cow,
				     u64 start, u64 ram_size,
				     u64 compressed_size,
				     struct page **pages,
				     unsigned long nr_pages)
{
	struct async_extent *async_extent;

	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
	async_extent->start = start;
	async_extent->ram_size = ram_size;
	async_extent->compressed_size = compressed_size;
	async_extent->pages = pages;
	async_extent->nr_pages = nr_pages;
	list_add_tail(&async_extent->list, &cow->extents);
	return 0;
}
/*
 * we create compressed extents in two phases.  The first
 * phase compresses a range of pages that have already been
 * locked (both pages and state bits are locked).
 *
 * This is done inside an ordered work queue, and the compression
 * is spread across many cpus.  The actual IO submission is step
 * two, and the ordered work queue takes care of making sure that
 * happens in the same order things were put onto the queue by
 * writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an
 * entry onto the work queue to write the uncompressed bytes.  This
 * makes sure that both compressed inodes and uncompressed inodes
 * are written in the same order that pdflush sent them down.
 */
static noinline int compress_file_range(struct inode *inode,
					struct page *locked_page,
					u64 start, u64 end,
					struct async_cow *async_cow,
					int *num_added)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	u64 num_bytes;
	u64 orig_start;
	u64 disk_num_bytes;
	u64 blocksize = root->sectorsize;
	u64 actual_end;
	int ret = 0;
	struct page **pages = NULL;
	unsigned long nr_pages;
	unsigned long nr_pages_ret = 0;
	unsigned long total_compressed = 0;
	unsigned long total_in = 0;
	unsigned long max_compressed = 128 * 1024;
	unsigned long max_uncompressed = 128 * 1024;
	int i;
	int will_compress;

	orig_start = start;

again:
	will_compress = 0;
	nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
	nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);

	actual_end = min_t(u64, i_size_read(inode), end + 1);
	total_compressed = actual_end - start;

	/* we want to make sure that amount of ram required to uncompress
	 * an extent is reasonable, so we limit the total size in ram
	 * of a compressed extent to 128k.  This is a crucial number
	 * because it also controls how easily we can spread reads across
	 * cpus for decompression.
	 *
	 * We also want to make sure the amount of IO required to do
	 * a random read is reasonably small, so we limit the size of
	 * a compressed extent to 128k.
	 */
	total_compressed = min(total_compressed, max_uncompressed);
	num_bytes = (end - start + blocksize) & ~(blocksize - 1);
	num_bytes = max(blocksize, num_bytes);
	disk_num_bytes = num_bytes;
	total_in = 0;
	ret = 0;

	/*
	 * we do compression for mount -o compress and when the
	 * inode has not been flagged as nocompress.  This flag can
	 * change at any time if we discover bad compression ratios.
	 */
	if (!btrfs_test_flag(inode, NOCOMPRESS) &&
	    btrfs_test_opt(root, COMPRESS)) {
		WARN_ON(pages);
		pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);

		ret = btrfs_zlib_compress_pages(inode->i_mapping, start,
						total_compressed, pages,
						nr_pages, &nr_pages_ret,
						&total_in,
						&total_compressed,
						max_compressed);

		if (!ret) {
			unsigned long offset = total_compressed &
				(PAGE_CACHE_SIZE - 1);
			struct page *page = pages[nr_pages_ret - 1];
			char *kaddr;

			/* zero the tail end of the last page, we might be
			 * sending it down to disk
			 */
			if (offset) {
				kaddr = kmap_atomic(page, KM_USER0);
				memset(kaddr + offset, 0,
				       PAGE_CACHE_SIZE - offset);
				kunmap_atomic(kaddr, KM_USER0);
			}
			will_compress = 1;
		}
	}
	if (start == 0) {
		trans = btrfs_join_transaction(root, 1);
		BUG_ON(!trans);
		btrfs_set_trans_block_group(trans, inode);

		/* lets try to make an inline extent */
		if (ret || total_in < (actual_end - start)) {
			/* we didn't compress the entire range, try
			 * to make an uncompressed inline extent.
			 */
			ret = cow_file_range_inline(trans, root, inode,
						    start, end, 0, NULL);
		} else {
			/* try making a compressed inline extent */
			ret = cow_file_range_inline(trans, root, inode,
						    start, end,
						    total_compressed, pages);
		}
		btrfs_end_transaction(trans, root);
		if (ret == 0) {
			/*
			 * inline extent creation worked, we don't need
			 * to create any more async work items.  Unlock
			 * and free up our temp pages.
			 */
			extent_clear_unlock_delalloc(inode,
						     &BTRFS_I(inode)->io_tree,
						     start, end, NULL, 1, 0,
						     0, 1, 1, 1);
			ret = 0;
			goto free_pages_out;
		}
	}

	if (will_compress) {
		/*
		 * we aren't doing an inline extent; round the compressed size
		 * up to a block size boundary so the allocator does sane
		 * things
		 */
		total_compressed = (total_compressed + blocksize - 1) &
			~(blocksize - 1);

		/*
		 * one last check to make sure the compression is really a
		 * win, compare the page count read with the blocks on disk
		 */
		total_in = (total_in + PAGE_CACHE_SIZE - 1) &
			~(PAGE_CACHE_SIZE - 1);
		if (total_compressed >= total_in) {
			will_compress = 0;
		} else {
			disk_num_bytes = total_compressed;
			num_bytes = total_in;
		}
	}
	if (!will_compress && pages) {
		/*
		 * the compression code ran but failed to make things smaller,
		 * free any pages it allocated and our page pointer array
		 */
		for (i = 0; i < nr_pages_ret; i++) {
			WARN_ON(pages[i]->mapping);
			page_cache_release(pages[i]);
		}
		kfree(pages);
		pages = NULL;
		total_compressed = 0;
		nr_pages_ret = 0;

		/* flag the file so we don't compress in the future */
		btrfs_set_flag(inode, NOCOMPRESS);
	}
	if (will_compress) {
		*num_added += 1;

		/* the async work queues will take care of doing actual
		 * allocation on disk for these compressed pages,
		 * and will submit them to the elevator.
		 */
		add_async_extent(async_cow, start, num_bytes,
				 total_compressed, pages, nr_pages_ret);

		if (start + num_bytes < end) {
			start += num_bytes;
			pages = NULL;
			cond_resched();
			goto again;
		}
	} else {
		/*
		 * No compression, but we still need to write the pages in
		 * the file we've been given so far.  redirty the locked
		 * page if it corresponds to our extent and set things up
		 * for the async work queue to run cow_file_range to do
		 * the normal delalloc dance
		 */
		if (page_offset(locked_page) >= start &&
		    page_offset(locked_page) <= end) {
			__set_page_dirty_nobuffers(locked_page);
			/* unlocked later on in the async handlers */
		}
		add_async_extent(async_cow, start, end - start + 1, 0, NULL, 0);
		*num_added += 1;
	}

out:
	return 0;

free_pages_out:
	for (i = 0; i < nr_pages_ret; i++) {
		WARN_ON(pages[i]->mapping);
		page_cache_release(pages[i]);
	}
	if (pages)
		kfree(pages);

	goto out;
}

/*
 * phase two of compressed writeback.  This is the ordered portion
 * of the code, which only gets called in the order the work was
 * queued.  We walk all the async extents created by compress_file_range
 * and send them down to the disk.
 */
static noinline int submit_compressed_extents(struct inode *inode,
					      struct async_cow *async_cow)
{
	struct async_extent *async_extent;
	u64 alloc_hint = 0;
	struct btrfs_trans_handle *trans;
	struct btrfs_key ins;
	struct extent_map *em;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_io_tree *io_tree;
	int ret;

	if (list_empty(&async_cow->extents))
		return 0;

	trans = btrfs_join_transaction(root, 1);

	while (!list_empty(&async_cow->extents)) {
		async_extent = list_entry(async_cow->extents.next,
					  struct async_extent, list);
		list_del(&async_extent->list);

		io_tree = &BTRFS_I(inode)->io_tree;

		/* did the compression code fall back to uncompressed IO? */
		if (!async_extent->pages) {
			int page_started = 0;
			unsigned long nr_written = 0;

			lock_extent(io_tree, async_extent->start,
				    async_extent->start +
				    async_extent->ram_size - 1, GFP_NOFS);

			/* allocate blocks */
			cow_file_range(inode, async_cow->locked_page,
				       async_extent->start,
				       async_extent->start +
				       async_extent->ram_size - 1,
				       &page_started, &nr_written, 0);

			/*
			 * if page_started, cow_file_range inserted an
			 * inline extent and took care of all the unlocking
			 * and IO for us.  Otherwise, we need to submit
			 * all those pages down to the drive.
			 */
			if (!page_started)
				extent_write_locked_range(io_tree,
						  inode, async_extent->start,
						  async_extent->start +
						  async_extent->ram_size - 1,
						  btrfs_get_extent,
						  WB_SYNC_ALL);
			kfree(async_extent);
			cond_resched();
			continue;
		}

		lock_extent(io_tree, async_extent->start,
			    async_extent->start + async_extent->ram_size - 1,
			    GFP_NOFS);
		/*
		 * here we're doing allocation and writeback of the
		 * compressed pages
		 */
		btrfs_drop_extent_cache(inode, async_extent->start,
					async_extent->start +
					async_extent->ram_size - 1, 0);

		ret = btrfs_reserve_extent(trans, root,
					   async_extent->compressed_size,
					   async_extent->compressed_size,
					   0, alloc_hint,
					   (u64)-1, &ins, 1);
		BUG_ON(ret);
		em = alloc_extent_map(GFP_NOFS);
		em->start = async_extent->start;
		em->len = async_extent->ram_size;

		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->bdev = root->fs_info->fs_devices->latest_bdev;
		set_bit(EXTENT_FLAG_PINNED, &em->flags);
		set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);

		while (1) {
			spin_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, em);
			spin_unlock(&em_tree->lock);
			if (ret != -EEXIST) {
				free_extent_map(em);
				break;
			}
			btrfs_drop_extent_cache(inode, async_extent->start,
						async_extent->start +
						async_extent->ram_size - 1, 0);
		}

		ret = btrfs_add_ordered_extent(inode, async_extent->start,
					       ins.objectid,
					       async_extent->ram_size,
					       ins.offset,
					       BTRFS_ORDERED_COMPRESSED);
		BUG_ON(ret);

		btrfs_end_transaction(trans, root);

		/*
		 * clear dirty, set writeback and unlock the pages.
		 */
		extent_clear_unlock_delalloc(inode,
					     &BTRFS_I(inode)->io_tree,
					     async_extent->start,
					     async_extent->start +
					     async_extent->ram_size - 1,
					     NULL, 1, 1, 0, 1, 1, 0);

		ret = btrfs_submit_compressed_write(inode,
					    async_extent->start,
					    async_extent->ram_size,
					    ins.objectid,
					    ins.offset, async_extent->pages,
					    async_extent->nr_pages);

		BUG_ON(ret);
		trans = btrfs_join_transaction(root, 1);
		alloc_hint = ins.objectid + ins.offset;
		kfree(async_extent);
		cond_resched();
	}

	btrfs_end_transaction(trans, root);
	return 0;
}
/*
 * when extent_io.c finds a delayed allocation range in the file,
 * the call backs end up in this code.  The basic idea is to
 * allocate extents on disk for the range, and create ordered data structs
 * in ram to track those extents.
 *
 * locked_page is the page that writepage had locked already.  We use
 * it to make sure we don't do extra locks or unlocks.
 *
 * *page_started is set to one if we unlock locked_page and do everything
 * required to start IO on it.  It may be clean and already done with
 * IO when we return.
 */
static noinline int cow_file_range(struct inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written,
				   int unlock)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	u64 alloc_hint = 0;
	u64 num_bytes;
	unsigned long ram_size;
	u64 disk_num_bytes;
	u64 cur_alloc_size;
	u64 blocksize = root->sectorsize;
	u64 actual_end;
	struct btrfs_key ins;
	struct extent_map *em;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	int ret = 0;

	trans = btrfs_join_transaction(root, 1);
	BUG_ON(!trans);
	btrfs_set_trans_block_group(trans, inode);

	actual_end = min_t(u64, i_size_read(inode), end + 1);

	num_bytes = (end - start + blocksize) & ~(blocksize - 1);
	num_bytes = max(blocksize, num_bytes);
	disk_num_bytes = num_bytes;
	ret = 0;

	if (start == 0) {
		/* lets try to make an inline extent */
		ret = cow_file_range_inline(trans, root, inode,
					    start, end, 0, NULL);
		if (ret == 0) {
			extent_clear_unlock_delalloc(inode,
						     &BTRFS_I(inode)->io_tree,
						     start, end, NULL, 1, 1,
						     1, 1, 1, 1);
			*nr_written = *nr_written +
			     (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
			*page_started = 1;
			ret = 0;
			goto out;
		}
	}

	BUG_ON(disk_num_bytes >
	       btrfs_super_total_bytes(&root->fs_info->super_copy));

	btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);

	while (disk_num_bytes > 0) {
		cur_alloc_size = min(disk_num_bytes, root->fs_info->max_extent);
		ret = btrfs_reserve_extent(trans, root, cur_alloc_size,
					   root->sectorsize, 0, alloc_hint,
					   (u64)-1, &ins, 1);
		BUG_ON(ret);

		em = alloc_extent_map(GFP_NOFS);
		em->start = start;

		ram_size = ins.offset;
		em->len = ins.offset;

		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->bdev = root->fs_info->fs_devices->latest_bdev;
		set_bit(EXTENT_FLAG_PINNED, &em->flags);

		while (1) {
			spin_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, em);
			spin_unlock(&em_tree->lock);
			if (ret != -EEXIST) {
				free_extent_map(em);
				break;
			}
			btrfs_drop_extent_cache(inode, start,
						start + ram_size - 1, 0);
		}

		cur_alloc_size = ins.offset;
		ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
					       ram_size, cur_alloc_size, 0);
		BUG_ON(ret);

		if (disk_num_bytes < cur_alloc_size) {
			printk("num_bytes %Lu cur_alloc %Lu\n", disk_num_bytes,
			       cur_alloc_size);
			break;
		}
		/* we're not doing compressed IO, don't unlock the first
		 * page (which the caller expects to stay locked), don't
		 * clear any dirty bits and don't set any writeback bits
		 */
		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
					     start, start + ram_size - 1,
					     locked_page, unlock, 1,
					     1, 0, 0, 0);
		disk_num_bytes -= cur_alloc_size;
		num_bytes -= cur_alloc_size;
		alloc_hint = ins.objectid + ins.offset;
		start += cur_alloc_size;
	}
out:
	ret = 0;
	btrfs_end_transaction(trans, root);

	return ret;
}
/*
 * work queue call back to start compression on a file and pages
 */
static noinline void async_cow_start(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	int num_added = 0;
	async_cow = container_of(work, struct async_cow, work);

	compress_file_range(async_cow->inode, async_cow->locked_page,
			    async_cow->start, async_cow->end, async_cow,
			    &num_added);
	if (num_added == 0)
		async_cow->inode = NULL;
}

/*
 * work queue call back to submit previously compressed pages
 */
static noinline void async_cow_submit(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	struct btrfs_root *root;
	unsigned long nr_pages;

	async_cow = container_of(work, struct async_cow, work);

	root = async_cow->root;
	nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
		PAGE_CACHE_SHIFT;

	atomic_sub(nr_pages, &root->fs_info->async_delalloc_pages);

	if (atomic_read(&root->fs_info->async_delalloc_pages) <
	    5 * 1024 * 1024 &&
	    waitqueue_active(&root->fs_info->async_submit_wait))
		wake_up(&root->fs_info->async_submit_wait);

	if (async_cow->inode)
		submit_compressed_extents(async_cow->inode, async_cow);
}

static noinline void async_cow_free(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	async_cow = container_of(work, struct async_cow, work);
	kfree(async_cow);
}
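/*
 * split a delalloc range into chunks of at most 512k and queue each one
 * as async_cow work; the workers run compress_file_range on every chunk
 * and the ordered queue submits the results in the original order.
 * Falls back to a direct cow_file_range when compression is not enabled.
 */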
static int cow_file_range_async(struct inode *inode, struct page *locked_page,
				u64 start, u64 end, int *page_started,
				unsigned long *nr_written)
{
	struct async_cow *async_cow;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	unsigned long nr_pages;
	u64 cur_end;
	int limit = 10 * 1024 * 1024;

	if (!btrfs_test_opt(root, COMPRESS)) {
		return cow_file_range(inode, locked_page, start, end,
				      page_started, nr_written, 1);
	}

	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED |
			 EXTENT_DELALLOC, 1, 0, GFP_NOFS);
	while (start < end) {
		async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
		async_cow->inode = inode;
		async_cow->root = root;
		async_cow->locked_page = locked_page;
		async_cow->start = start;

		if (btrfs_test_flag(inode, NOCOMPRESS))
			cur_end = end;
		else
			cur_end = min(end, start + 512 * 1024 - 1);

		async_cow->end = cur_end;
		INIT_LIST_HEAD(&async_cow->extents);

		async_cow->work.func = async_cow_start;
		async_cow->work.ordered_func = async_cow_submit;
		async_cow->work.ordered_free = async_cow_free;
		async_cow->work.flags = 0;

		while (atomic_read(&root->fs_info->async_submit_draining) &&
		       atomic_read(&root->fs_info->async_delalloc_pages)) {
			wait_event(root->fs_info->async_submit_wait,
			   (atomic_read(&root->fs_info->async_delalloc_pages)
			    == 0));
		}

		nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
			PAGE_CACHE_SHIFT;
		atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);

		btrfs_queue_worker(&root->fs_info->delalloc_workers,
				   &async_cow->work);

		if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
			wait_event(root->fs_info->async_submit_wait,
			   (atomic_read(&root->fs_info->async_delalloc_pages) <
			    limit));
		}

		while (atomic_read(&root->fs_info->async_submit_draining) &&
		       atomic_read(&root->fs_info->async_delalloc_pages)) {
			wait_event(root->fs_info->async_submit_wait,
			  (atomic_read(&root->fs_info->async_delalloc_pages) ==
			   0));
		}

		*nr_written += nr_pages;
		start = cur_end + 1;
	}
	*page_started = 1;
	return 0;
}
/*
 * when nocow writeback calls back.  This checks for snapshots or COW copies
 * of the extents that exist in the file, and COWs the file as required.
 *
 * If no cow copies or snapshots exist, we write directly to the existing
 * blocks on disk
 */
static int run_delalloc_nocow(struct inode *inode, struct page *locked_page,
			      u64 start, u64 end, int *page_started, int force,
			      unsigned long *nr_written)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key found_key;
	u64 cow_start;
	u64 cur_offset;
	u64 extent_end;
	u64 disk_bytenr;
	u64 num_bytes;
	int extent_type;
	int ret;
	int type;
	int nocow;
	int check_prev = 1;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	trans = btrfs_join_transaction(root, 1);
	BUG_ON(!trans);

	cow_start = (u64)-1;
	cur_offset = start;
	while (1) {
		ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
					       cur_offset, 0);
		BUG_ON(ret < 0);
		if (ret > 0 && path->slots[0] > 0 && check_prev) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0] - 1);
			if (found_key.objectid == inode->i_ino &&
			    found_key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		check_prev = 0;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				BUG_ON(1);
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		nocow = 0;
		disk_bytenr = 0;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		if (found_key.objectid > inode->i_ino ||
		    found_key.type > BTRFS_EXTENT_DATA_KEY ||
		    found_key.offset > end)
			break;

		if (found_key.offset > cur_offset) {
			extent_end = found_key.offset;
			goto out_check;
		}

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			struct btrfs_block_group_cache *block_group;
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			extent_end = found_key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
			if (extent_end <= start) {
				path->slots[0]++;
				goto next_slot;
			}
			if (btrfs_file_extent_compression(leaf, fi) ||
			    btrfs_file_extent_encryption(leaf, fi) ||
			    btrfs_file_extent_other_encoding(leaf, fi))
				goto out_check;
			if (disk_bytenr == 0)
				goto out_check;
			if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
				goto out_check;
			if (btrfs_cross_ref_exist(trans, root, disk_bytenr))
				goto out_check;
			block_group = btrfs_lookup_block_group(root->fs_info,
							       disk_bytenr);
			if (!block_group || block_group->ro)
				goto out_check;
			disk_bytenr += btrfs_file_extent_offset(leaf, fi);
			nocow = 1;
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = found_key.offset +
				btrfs_file_extent_inline_len(leaf, fi);
			extent_end = ALIGN(extent_end, root->sectorsize);
		} else {
			BUG_ON(1);
		}
out_check:
		if (extent_end <= start) {
			path->slots[0]++;
			goto next_slot;
		}
		if (!nocow) {
			if (cow_start == (u64)-1)
				cow_start = cur_offset;
			cur_offset = extent_end;
			if (cur_offset > end)
				break;
			path->slots[0]++;
			goto next_slot;
		}

		btrfs_release_path(root, path);
		if (cow_start != (u64)-1) {
			ret = cow_file_range(inode, locked_page, cow_start,
					     found_key.offset - 1, page_started,
					     nr_written, 1);
			BUG_ON(ret);
			cow_start = (u64)-1;
		}

		disk_bytenr += cur_offset - found_key.offset;
		num_bytes = min(end + 1, extent_end) - cur_offset;
		if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			struct extent_map *em;
			struct extent_map_tree *em_tree;
			em_tree = &BTRFS_I(inode)->extent_tree;
			em = alloc_extent_map(GFP_NOFS);
			em->start = cur_offset;
			em->len = num_bytes;
			em->block_len = num_bytes;
			em->block_start = disk_bytenr;
			em->bdev = root->fs_info->fs_devices->latest_bdev;
			set_bit(EXTENT_FLAG_PINNED, &em->flags);
			while (1) {
				spin_lock(&em_tree->lock);
				ret = add_extent_mapping(em_tree, em);
				spin_unlock(&em_tree->lock);
				if (ret != -EEXIST) {
					free_extent_map(em);
					break;
				}
				btrfs_drop_extent_cache(inode, em->start,
						em->start + em->len - 1, 0);
			}
			type = BTRFS_ORDERED_PREALLOC;
		} else {
			type = BTRFS_ORDERED_NOCOW;
		}

		ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
					       num_bytes, num_bytes, type);
		BUG_ON(ret);

		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
					cur_offset, cur_offset + num_bytes - 1,
					locked_page, 1, 1, 1, 0, 0, 0);
		cur_offset = extent_end;
		if (cur_offset > end)
			break;
	}
	btrfs_release_path(root, path);

	if (cur_offset <= end && cow_start == (u64)-1)
		cow_start = cur_offset;
	if (cow_start != (u64)-1) {
		ret = cow_file_range(inode, locked_page, cow_start, end,
				     page_started, nr_written, 1);
		BUG_ON(ret);
	}

	ret = btrfs_end_transaction(trans, root);
	BUG_ON(ret);
	btrfs_free_path(path);
	return 0;
}
/*
 * extent_io.c call back to do delayed allocation processing
 */
static int run_delalloc_range(struct inode *inode, struct page *locked_page,
			      u64 start, u64 end, int *page_started,
			      unsigned long *nr_written)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;

	if (btrfs_test_opt(root, NODATACOW) ||
	    btrfs_test_flag(inode, NODATACOW))
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, 0, nr_written);
	else if (btrfs_test_flag(inode, PREALLOC))
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, 1, nr_written);
	else
		ret = cow_file_range_async(inode, locked_page, start, end,
					   page_started, nr_written);

	return ret;
}
/*
 * extent_io.c set_bit_hook, used to track delayed allocation
 * bytes in this file, and to maintain the list of inodes that
 * have pending delalloc work to be done.
 */
int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
		       unsigned long old, unsigned long bits)
{
	unsigned long flags;
	if (!(old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;
		spin_lock_irqsave(&root->fs_info->delalloc_lock, flags);
		BTRFS_I(inode)->delalloc_bytes += end - start + 1;
		root->fs_info->delalloc_bytes += end - start + 1;
		if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
			list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
				      &root->fs_info->delalloc_inodes);
		}
		spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags);
	}
	return 0;
}
/*
 * extent_io.c clear_bit_hook, see set_bit_hook for why
 */
int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end,
			 unsigned long old, unsigned long bits)
{
	if ((old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;
		unsigned long flags;

		spin_lock_irqsave(&root->fs_info->delalloc_lock, flags);
		if (end - start + 1 > root->fs_info->delalloc_bytes) {
			printk("warning: delalloc account %Lu %Lu\n",
			       end - start + 1, root->fs_info->delalloc_bytes);
			root->fs_info->delalloc_bytes = 0;
			BTRFS_I(inode)->delalloc_bytes = 0;
		} else {
			root->fs_info->delalloc_bytes -= end - start + 1;
			BTRFS_I(inode)->delalloc_bytes -= end - start + 1;
		}
		if (BTRFS_I(inode)->delalloc_bytes == 0 &&
		    !list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
			list_del_init(&BTRFS_I(inode)->delalloc_inodes);
		}
		spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags);
	}
	return 0;
}
/*
 * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
 * we don't create bios that span stripes or chunks
 */
int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
			 size_t size, struct bio *bio,
			 unsigned long bio_flags)
{
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	struct btrfs_mapping_tree *map_tree;
	u64 logical = (u64)bio->bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	int ret;

	if (bio_flags & EXTENT_BIO_COMPRESSED)
		return 0;

	length = bio->bi_size;
	map_tree = &root->fs_info->mapping_tree;
	map_length = length;
	ret = btrfs_map_block(map_tree, READ, logical,
			      &map_length, NULL, 0);

	if (map_length < length + size)
		return 1;
	return 0;
}
/*
 * in order to insert checksums into the metadata in large chunks,
 * we wait until bio submission time.   All the pages in the bio are
 * checksummed and sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record
 * are inserted into the btree
 */
int __btrfs_submit_bio_start(struct inode *inode, int rw, struct bio *bio,
			     int mirror_num, unsigned long bio_flags)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;

	ret = btrfs_csum_one_bio(root, inode, bio);
	BUG_ON(ret);
	return 0;
}

/*
 * second half of the async checksumming: the csums were attached by
 * __btrfs_submit_bio_start, so all that is left is to map and submit
 * the bio.
 */
int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
			    int mirror_num, unsigned long bio_flags)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	return btrfs_map_bio(root, rw, bio, mirror_num, 1);
}
1247 | ||
d352ac68 CM |
1248 | /* |
1249 | * extent_io.c submission hook. This does the right thing for csum calculation on write, | |
1250 | * or reading the csums from the tree before a read | |
1251 | */ | |
44b8bd7e | 1252 | int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, |
c8b97818 | 1253 | int mirror_num, unsigned long bio_flags) |
44b8bd7e CM |
1254 | { |
1255 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
1256 | int ret = 0; | |
19b9bdb0 | 1257 | int skip_sum; |
44b8bd7e | 1258 | |
e6dcd2dc CM |
1259 | ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0); |
1260 | BUG_ON(ret); | |
065631f6 | 1261 | |
19b9bdb0 CM |
1262 | skip_sum = btrfs_test_opt(root, NODATASUM) || |
1263 | btrfs_test_flag(inode, NODATASUM); | |
7ea394f1 | 1264 | |
4d1b5fb4 | 1265 | if (!(rw & (1 << BIO_RW))) { |
c8b97818 | 1266 | |
19b9bdb0 | 1267 | if (bio_flags & EXTENT_BIO_COMPRESSED) |
c8b97818 CM |
1268 | return btrfs_submit_compressed_read(inode, bio, |
1269 | mirror_num, bio_flags); | |
771ed689 CM |
1270 | else if (!skip_sum) |
1271 | btrfs_lookup_bio_sums(root, inode, bio); | |
4d1b5fb4 | 1272 | goto mapit; |
19b9bdb0 CM |
1273 | } else if (!skip_sum) { |
1274 | /* we're doing a write, do the async checksumming */ | |
1275 | return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info, | |
44b8bd7e | 1276 | inode, rw, bio, mirror_num, |
4a69a410 CM |
1277 | bio_flags, __btrfs_submit_bio_start, |
1278 | __btrfs_submit_bio_done); | |
19b9bdb0 CM |
1279 | } |
1280 | ||
0b86a832 | 1281 | mapit: |
8b712842 | 1282 | return btrfs_map_bio(root, rw, bio, mirror_num, 0); |
065631f6 | 1283 | } |
6885f308 | 1284 | |
/*
 * given a list of ordered sums record them in the inode.  This happens
 * at IO completion time based on sums calculated at bio submission time.
 */
static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
				      struct inode *inode, u64 file_offset,
				      struct list_head *list)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	btrfs_set_trans_block_group(trans, inode);
	list_for_each(cur, list) {
		sum = list_entry(cur, struct btrfs_ordered_sum, list);
		btrfs_csum_file_blocks(trans, BTRFS_I(inode)->root,
				       inode, sum);
	}
	return 0;
}
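/*
 * mark a range in the io_tree as delalloc.  The WARN_ON fires when the
 * end offset lands exactly on a page boundary, which usually means the
 * caller passed an exclusive end instead of the last byte of the range.
 */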
int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end)
{
	if ((end & (PAGE_CACHE_SIZE - 1)) == 0) {
		WARN_ON(1);
	}
	return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
				   GFP_NOFS);
}
/* see btrfs_writepage_start_hook for details on why this is required */
struct btrfs_writepage_fixup {
	struct page *page;
	struct btrfs_work work;
};

void btrfs_writepage_fixup_worker(struct btrfs_work *work)
{
	struct btrfs_writepage_fixup *fixup;
	struct btrfs_ordered_extent *ordered;
	struct page *page;
	struct inode *inode;
	u64 page_start;
	u64 page_end;

	fixup = container_of(work, struct btrfs_writepage_fixup, work);
	page = fixup->page;
again:
	lock_page(page);
	if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
		ClearPageChecked(page);
		goto out_page;
	}

	inode = page->mapping->host;
	page_start = page_offset(page);
	page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;

	lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);

	/* already ordered? We're done */
	if (test_range_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
			   EXTENT_ORDERED, 0)) {
		goto out;
	}

	ordered = btrfs_lookup_ordered_extent(inode, page_start);
	if (ordered) {
		unlock_extent(&BTRFS_I(inode)->io_tree, page_start,
			      page_end, GFP_NOFS);
		unlock_page(page);
		btrfs_start_ordered_extent(inode, ordered, 1);
		goto again;
	}

	btrfs_set_extent_delalloc(inode, page_start, page_end);
	ClearPageChecked(page);
out:
	unlock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);
out_page:
	unlock_page(page);
	page_cache_release(page);
}
/*
 * There are a few paths in the higher layers of the kernel that directly
 * set the page dirty bit without asking the filesystem if it is a
 * good idea.  This causes problems because we want to make sure COW
 * properly happens and the data=ordered rules are followed.
 *
 * In our case any range that doesn't have the ORDERED bit set
 * hasn't been properly setup for IO.  We kick off an async process
 * to fix it up.  The async helper will wait for ordered extents, set
 * the delalloc bit and make it safe to write the page.
 */
int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
{
	struct inode *inode = page->mapping->host;
	struct btrfs_writepage_fixup *fixup;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;

	ret = test_range_bit(&BTRFS_I(inode)->io_tree, start, end,
			     EXTENT_ORDERED, 0);
	if (ret)
		return 0;

	if (PageChecked(page))
		return -EAGAIN;

	fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
	if (!fixup)
		return -EAGAIN;

	SetPageChecked(page);
	page_cache_get(page);
	fixup->work.func = btrfs_writepage_fixup_worker;
	fixup->page = page;
	btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work);
	return -EAGAIN;
}
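/*
 * insert a file extent item for a range whose blocks were reserved
 * earlier: drop any overlapping extents, record the new item in the
 * file tree and account the allocation in the extent tree.
 */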
static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
				       struct inode *inode, u64 file_pos,
				       u64 disk_bytenr, u64 disk_num_bytes,
				       u64 num_bytes, u64 ram_bytes,
				       u8 compression, u8 encryption,
				       u16 other_encoding, int extent_type)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_file_extent_item *fi;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key ins;
	u64 hint;
	int ret;

	path = btrfs_alloc_path();
	BUG_ON(!path);

	ret = btrfs_drop_extents(trans, root, inode, file_pos,
				 file_pos + num_bytes, file_pos, &hint);
	BUG_ON(ret);

	ins.objectid = inode->i_ino;
	ins.offset = file_pos;
	ins.type = BTRFS_EXTENT_DATA_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
	BUG_ON(ret);
	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, fi, trans->transid);
	btrfs_set_file_extent_type(leaf, fi, extent_type);
	btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
	btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
	btrfs_set_file_extent_offset(leaf, fi, 0);
	btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
	btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
	btrfs_set_file_extent_compression(leaf, fi, compression);
	btrfs_set_file_extent_encryption(leaf, fi, encryption);
	btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);
	btrfs_mark_buffer_dirty(leaf);

	inode_add_bytes(inode, num_bytes);
	btrfs_drop_extent_cache(inode, file_pos, file_pos + num_bytes - 1, 0);

	ins.objectid = disk_bytenr;
	ins.offset = disk_num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;
	ret = btrfs_alloc_reserved_extent(trans, root, leaf->start,
					  root->root_key.objectid,
					  trans->transid, inode->i_ino, &ins);
	BUG_ON(ret);

	btrfs_free_path(path);
	return 0;
}
d352ac68 CM |
1463 | /* as ordered data IO finishes, this gets called so we can finish |
1464 | * an ordered extent if the range of bytes in the file it covers is | |
1465 | * fully written. | |
1466 | */ | |
211f90e6 | 1467 | static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) |
e6dcd2dc | 1468 | { |
e6dcd2dc CM |
1469 | struct btrfs_root *root = BTRFS_I(inode)->root; |
1470 | struct btrfs_trans_handle *trans; | |
1471 | struct btrfs_ordered_extent *ordered_extent; | |
1472 | struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; | |
d899e052 | 1473 | int compressed = 0; |
e6dcd2dc CM |
1474 | int ret; |
1475 | ||
1476 | ret = btrfs_dec_test_ordered_pending(inode, start, end - start + 1); | |
ba1da2f4 | 1477 | if (!ret) |
e6dcd2dc | 1478 | return 0; |
e6dcd2dc | 1479 | |
f9295749 | 1480 | trans = btrfs_join_transaction(root, 1); |
e6dcd2dc CM |
1481 | |
1482 | ordered_extent = btrfs_lookup_ordered_extent(inode, start); | |
1483 | BUG_ON(!ordered_extent); | |
7ea394f1 YZ |
1484 | if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) |
1485 | goto nocow; | |
e6dcd2dc CM |
1486 | |
1487 | lock_extent(io_tree, ordered_extent->file_offset, | |
1488 | ordered_extent->file_offset + ordered_extent->len - 1, | |
1489 | GFP_NOFS); | |
1490 | ||
c8b97818 | 1491 | if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags)) |
d899e052 YZ |
1492 | compressed = 1; |
1493 | if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { | |
1494 | BUG_ON(compressed); | |
1495 | ret = btrfs_mark_extent_written(trans, root, inode, | |
1496 | ordered_extent->file_offset, | |
1497 | ordered_extent->file_offset + | |
1498 | ordered_extent->len); | |
1499 | BUG_ON(ret); | |
1500 | } else { | |
1501 | ret = insert_reserved_file_extent(trans, inode, | |
1502 | ordered_extent->file_offset, | |
1503 | ordered_extent->start, | |
1504 | ordered_extent->disk_len, | |
1505 | ordered_extent->len, | |
1506 | ordered_extent->len, | |
1507 | compressed, 0, 0, | |
1508 | BTRFS_FILE_EXTENT_REG); | |
1509 | BUG_ON(ret); | |
1510 | } | |
e6dcd2dc CM |
1511 | unlock_extent(io_tree, ordered_extent->file_offset, |
1512 | ordered_extent->file_offset + ordered_extent->len - 1, | |
1513 | GFP_NOFS); | |
7ea394f1 | 1514 | nocow: |
e6dcd2dc CM |
1515 | add_pending_csums(trans, inode, ordered_extent->file_offset, |
1516 | &ordered_extent->list); | |
1517 | ||
34353029 | 1518 | mutex_lock(&BTRFS_I(inode)->extent_mutex); |
dbe674a9 | 1519 | btrfs_ordered_update_i_size(inode, ordered_extent); |
e02119d5 | 1520 | btrfs_update_inode(trans, root, inode); |
e6dcd2dc | 1521 | btrfs_remove_ordered_extent(inode, ordered_extent); |
34353029 | 1522 | mutex_unlock(&BTRFS_I(inode)->extent_mutex); |
7f3c74fb | 1523 | |
e6dcd2dc CM |
1524 | /* once for us */ |
1525 | btrfs_put_ordered_extent(ordered_extent); | |
1526 | /* once for the tree */ | |
1527 | btrfs_put_ordered_extent(ordered_extent); | |
1528 | ||
e6dcd2dc CM |
1529 | btrfs_end_transaction(trans, root); |
1530 | return 0; | |
1531 | } | |
1532 | ||
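/*
 * writeback completion hook: simply finish the ordered IO for the byte
 * range covered by this page.
 */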
211f90e6 CM |
1533 | int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end, |
1534 | struct extent_state *state, int uptodate) | |
1535 | { | |
1536 | return btrfs_finish_ordered_io(page->mapping->host, start, end); | |
1537 | } | |
1538 | ||
d352ac68 CM |
1539 | /* |
1540 | * When IO fails, either with EIO or because csum verification fails, we | |
1541 | * try other mirrors that might have a good copy of the data. This | |
1542 | * io_failure_record is used to record state as we go through all the | |
1543 | * mirrors. If another mirror has good data, the page is set up to date | |
1544 | * and things continue. If a good mirror can't be found, the original | |
1545 | * bio end_io callback is called to indicate things have failed. | |
1546 | */ | |
7e38326f CM |
1547 | struct io_failure_record { |
1548 | struct page *page; | |
1549 | u64 start; | |
1550 | u64 len; | |
1551 | u64 logical; | |
1552 | int last_mirror; | |
1553 | }; | |
1554 | ||
1259ab75 CM |
1555 | int btrfs_io_failed_hook(struct bio *failed_bio, |
1556 | struct page *page, u64 start, u64 end, | |
1557 | struct extent_state *state) | |
7e38326f CM |
1558 | { |
1559 | struct io_failure_record *failrec = NULL; | |
1560 | u64 private; | |
1561 | struct extent_map *em; | |
1562 | struct inode *inode = page->mapping->host; | |
1563 | struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree; | |
3b951516 | 1564 | struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; |
7e38326f CM |
1565 | struct bio *bio; |
1566 | int num_copies; | |
1567 | int ret; | |
1259ab75 | 1568 | int rw; |
7e38326f | 1569 | u64 logical; |
c8b97818 | 1570 | unsigned long bio_flags = 0; |
7e38326f CM |
1571 | |
1572 | ret = get_state_private(failure_tree, start, &private); | |
1573 | if (ret) { | |
7e38326f CM |
1574 | failrec = kmalloc(sizeof(*failrec), GFP_NOFS); |
1575 | if (!failrec) | |
1576 | return -ENOMEM; | |
1577 | failrec->start = start; | |
1578 | failrec->len = end - start + 1; | |
1579 | failrec->last_mirror = 0; | |
1580 | ||
3b951516 CM |
1581 | spin_lock(&em_tree->lock); |
1582 | em = lookup_extent_mapping(em_tree, start, failrec->len); | |
1583 | if (em && (em->start > start || em->start + em->len < start)) { | |
1584 | free_extent_map(em); | |
1585 | em = NULL; | |
1586 | } | |
1587 | spin_unlock(&em_tree->lock); | |
7e38326f CM |
1588 | |
1589 | if (!em || IS_ERR(em)) { | |
1590 | kfree(failrec); | |
1591 | return -EIO; | |
1592 | } | |
1593 | logical = start - em->start; | |
1594 | logical = em->block_start + logical; | |
c8b97818 CM |
1595 | if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) |
1596 | bio_flags = EXTENT_BIO_COMPRESSED; | |
7e38326f CM |
1597 | failrec->logical = logical; |
1598 | free_extent_map(em); | |
1599 | set_extent_bits(failure_tree, start, end, EXTENT_LOCKED | | |
1600 | EXTENT_DIRTY, GFP_NOFS); | |
587f7704 CM |
1601 | set_state_private(failure_tree, start, |
1602 | (u64)(unsigned long)failrec); | |
7e38326f | 1603 | } else { |
587f7704 | 1604 | failrec = (struct io_failure_record *)(unsigned long)private; |
7e38326f CM |
1605 | } |
1606 | num_copies = btrfs_num_copies( | |
1607 | &BTRFS_I(inode)->root->fs_info->mapping_tree, | |
1608 | failrec->logical, failrec->len); | |
1609 | failrec->last_mirror++; | |
1610 | if (!state) { | |
1611 | spin_lock_irq(&BTRFS_I(inode)->io_tree.lock); | |
1612 | state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree, | |
1613 | failrec->start, | |
1614 | EXTENT_LOCKED); | |
1615 | if (state && state->start != failrec->start) | |
1616 | state = NULL; | |
1617 | spin_unlock_irq(&BTRFS_I(inode)->io_tree.lock); | |
1618 | } | |
1619 | if (!state || failrec->last_mirror > num_copies) { | |
1620 | set_state_private(failure_tree, failrec->start, 0); | |
1621 | clear_extent_bits(failure_tree, failrec->start, | |
1622 | failrec->start + failrec->len - 1, | |
1623 | EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS); | |
1624 | kfree(failrec); | |
1625 | return -EIO; | |
1626 | } | |
1627 | bio = bio_alloc(GFP_NOFS, 1); | |
1628 | bio->bi_private = state; | |
1629 | bio->bi_end_io = failed_bio->bi_end_io; | |
1630 | bio->bi_sector = failrec->logical >> 9; | |
1631 | bio->bi_bdev = failed_bio->bi_bdev; | |
e1c4b745 | 1632 | bio->bi_size = 0; |
7e38326f | 1633 | bio_add_page(bio, page, failrec->len, start - page_offset(page)); |
1259ab75 CM |
1634 | if (failed_bio->bi_rw & (1 << BIO_RW)) |
1635 | rw = WRITE; | |
1636 | else | |
1637 | rw = READ; | |
1638 | ||
1639 | BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio, | |
c8b97818 CM |
1640 | failrec->last_mirror, |
1641 | bio_flags); | |
1259ab75 CM |
1642 | return 0; |
1643 | } | |
1644 | ||
d352ac68 CM |
1645 | /* |
1646 | * each time an IO finishes, we do a fast check in the IO failure tree | |
1647 | * to see if we need to process or clean up an io_failure_record | |
1648 | */ | |
1259ab75 CM |
1649 | int btrfs_clean_io_failures(struct inode *inode, u64 start) |
1650 | { | |
1651 | u64 private; | |
1652 | u64 private_failure; | |
1653 | struct io_failure_record *failure; | |
1654 | int ret; | |
1655 | ||
1656 | private = 0; | |
1657 | if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private, | |
1658 | (u64)-1, 1, EXTENT_DIRTY)) { | |
1659 | ret = get_state_private(&BTRFS_I(inode)->io_failure_tree, | |
1660 | start, &private_failure); | |
1661 | if (ret == 0) { | |
1662 | failure = (struct io_failure_record *)(unsigned long) | |
1663 | private_failure; | |
1664 | set_state_private(&BTRFS_I(inode)->io_failure_tree, | |
1665 | failure->start, 0); | |
1666 | clear_extent_bits(&BTRFS_I(inode)->io_failure_tree, | |
1667 | failure->start, | |
1668 | failure->start + failure->len - 1, | |
1669 | EXTENT_DIRTY | EXTENT_LOCKED, | |
1670 | GFP_NOFS); | |
1671 | kfree(failure); | |
1672 | } | |
1673 | } | |
7e38326f CM |
1674 | return 0; |
1675 | } | |
1676 | ||
d352ac68 CM |
1677 | /* |
1678 | * when reads are done, we need to check csums to verify the data is correct. | |
1679 | * if there's a match, we allow the bio to finish. If not, we go through | |
1680 | * the io_failure_record routines to find good copies | |
1681 | */ | |
70dec807 CM |
1682 | int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end, |
1683 | struct extent_state *state) | |
07157aac | 1684 | { |
35ebb934 | 1685 | size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT); |
07157aac | 1686 | struct inode *inode = page->mapping->host; |
d1310b2e | 1687 | struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; |
07157aac | 1688 | char *kaddr; |
aadfeb6e | 1689 | u64 private = ~(u32)0; |
07157aac | 1690 | int ret; |
ff79f819 CM |
1691 | struct btrfs_root *root = BTRFS_I(inode)->root; |
1692 | u32 csum = ~(u32)0; | |
bbf0d006 | 1693 | unsigned long flags; |
d1310b2e | 1694 | |
b98b6767 Y |
1695 | if (btrfs_test_opt(root, NODATASUM) || |
1696 | btrfs_test_flag(inode, NODATASUM)) | |
b6cda9bc | 1697 | return 0; |
c2e639f0 | 1698 | if (state && state->start == start) { |
70dec807 CM |
1699 | private = state->private; |
1700 | ret = 0; | |
1701 | } else { | |
1702 | ret = get_state_private(io_tree, start, &private); | |
1703 | } | |
bbf0d006 | 1704 | local_irq_save(flags); |
07157aac CM |
1705 | kaddr = kmap_atomic(page, KM_IRQ0); |
1706 | if (ret) { | |
1707 | goto zeroit; | |
1708 | } | |
ff79f819 CM |
1709 | csum = btrfs_csum_data(root, kaddr + offset, csum, end - start + 1); |
1710 | btrfs_csum_final(csum, (char *)&csum); | |
1711 | if (csum != private) { | |
07157aac CM |
1712 | goto zeroit; |
1713 | } | |
1714 | kunmap_atomic(kaddr, KM_IRQ0); | |
bbf0d006 | 1715 | local_irq_restore(flags); |
7e38326f CM |
1716 | |
1717 | /* if the io failure tree for this inode is non-empty, | |
1718 | * check to see if we've recovered from a failed IO | |
1719 | */ | |
1259ab75 | 1720 | btrfs_clean_io_failures(inode, start); |
07157aac CM |
1721 | return 0; |
1722 | ||
1723 | zeroit: | |
aadfeb6e CM |
1724 | printk("btrfs csum failed ino %lu off %llu csum %u private %Lu\n", |
1725 | page->mapping->host->i_ino, (unsigned long long)start, csum, | |
1726 | private); | |
db94535d CM |
1727 | memset(kaddr + offset, 1, end - start + 1); |
1728 | flush_dcache_page(page); | |
07157aac | 1729 | kunmap_atomic(kaddr, KM_IRQ0); |
bbf0d006 | 1730 | local_irq_restore(flags); |
3b951516 CM |
1731 | if (private == 0) |
1732 | return 0; | |
7e38326f | 1733 | return -EIO; |
07157aac | 1734 | } |
b888db2b | 1735 | |
7b128766 JB |
1736 | /* |
1737 | * This creates an orphan entry for the given inode in case something goes | |
1738 | * wrong in the middle of an unlink/truncate. | |
1739 | */ | |
1740 | int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode) | |
1741 | { | |
1742 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
1743 | int ret = 0; | |
1744 | ||
bcc63abb | 1745 | spin_lock(&root->list_lock); |
7b128766 JB |
1746 | |
1747 | /* already on the orphan list, we're good */ | |
1748 | if (!list_empty(&BTRFS_I(inode)->i_orphan)) { | |
bcc63abb | 1749 | spin_unlock(&root->list_lock); |
7b128766 JB |
1750 | return 0; |
1751 | } | |
1752 | ||
1753 | list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list); | |
1754 | ||
bcc63abb | 1755 | spin_unlock(&root->list_lock); |
7b128766 JB |
1756 | |
1757 | /* | |
1758 | * insert an orphan item to track this unlinked/truncated file | |
1759 | */ | |
1760 | ret = btrfs_insert_orphan_item(trans, root, inode->i_ino); | |
1761 | ||
1762 | return ret; | |
1763 | } | |
1764 | ||
1765 | /* | |
1766 | * We have done the truncate/delete so we can go ahead and remove the orphan | |
1767 | * item for this particular inode. | |
1768 | */ | |
1769 | int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode) | |
1770 | { | |
1771 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
1772 | int ret = 0; | |
1773 | ||
bcc63abb | 1774 | spin_lock(&root->list_lock); |
7b128766 JB |
1775 | |
1776 | if (list_empty(&BTRFS_I(inode)->i_orphan)) { | |
bcc63abb | 1777 | spin_unlock(&root->list_lock); |
7b128766 JB |
1778 | return 0; |
1779 | } | |
1780 | ||
1781 | list_del_init(&BTRFS_I(inode)->i_orphan); | |
1782 | if (!trans) { | |
bcc63abb | 1783 | spin_unlock(&root->list_lock); |
7b128766 JB |
1784 | return 0; |
1785 | } | |
1786 | ||
bcc63abb | 1787 | spin_unlock(&root->list_lock); |
7b128766 JB |
1788 | |
1789 | ret = btrfs_del_orphan_item(trans, root, inode->i_ino); | |
1790 | ||
1791 | return ret; | |
1792 | } | |
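/*
 * illustrative sketch (not from the original file): callers typically bracket
 * a truncate or unlink with the two helpers above, roughly:
 *
 *	trans = btrfs_start_transaction(root, 1);
 *	btrfs_orphan_add(trans, inode);
 *	... remove the file data / directory entries ...
 *	btrfs_orphan_del(trans, inode);
 *	btrfs_end_transaction(trans, root);
 *
 * if we crash in between, btrfs_orphan_cleanup() below finds the orphan item
 * on the next mount and finishes the job.
 */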
1793 | ||
1794 | /* | |
1795 | * this cleans up any orphans that may be left on the list from the last use | |
1796 | * of this root. | |
1797 | */ | |
1798 | void btrfs_orphan_cleanup(struct btrfs_root *root) | |
1799 | { | |
1800 | struct btrfs_path *path; | |
1801 | struct extent_buffer *leaf; | |
1802 | struct btrfs_item *item; | |
1803 | struct btrfs_key key, found_key; | |
1804 | struct btrfs_trans_handle *trans; | |
1805 | struct inode *inode; | |
1806 | int ret = 0, nr_unlink = 0, nr_truncate = 0; | |
1807 | ||
1808 | /* don't do orphan cleanup if the fs is readonly. */ | |
5b21f2ed | 1809 | if (root->fs_info->sb->s_flags & MS_RDONLY) |
7b128766 JB |
1810 | return; |
1811 | ||
1812 | path = btrfs_alloc_path(); | |
1813 | if (!path) | |
1814 | return; | |
1815 | path->reada = -1; | |
1816 | ||
1817 | key.objectid = BTRFS_ORPHAN_OBJECTID; | |
1818 | btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY); | |
1819 | key.offset = (u64)-1; | |
1820 | ||
7b128766 JB |
1821 | |
1822 | while (1) { | |
1823 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | |
1824 | if (ret < 0) { | |
1825 | printk(KERN_ERR "Error searching slot for orphan: %d" | |
1826 | "\n", ret); | |
1827 | break; | |
1828 | } | |
1829 | ||
1830 | /* | |
1831 | * if ret == 0 it means we found what we were searching for, which | |
1832 | * is weird, but possible, so only screw with path if we didn't | |
1833 | * find the key and see if we have stuff that matches | |
1834 | */ | |
1835 | if (ret > 0) { | |
1836 | if (path->slots[0] == 0) | |
1837 | break; | |
1838 | path->slots[0]--; | |
1839 | } | |
1840 | ||
1841 | /* pull out the item */ | |
1842 | leaf = path->nodes[0]; | |
1843 | item = btrfs_item_nr(leaf, path->slots[0]); | |
1844 | btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); | |
1845 | ||
1846 | /* make sure the item matches what we want */ | |
1847 | if (found_key.objectid != BTRFS_ORPHAN_OBJECTID) | |
1848 | break; | |
1849 | if (btrfs_key_type(&found_key) != BTRFS_ORPHAN_ITEM_KEY) | |
1850 | break; | |
1851 | ||
1852 | /* release the path since we're done with it */ | |
1853 | btrfs_release_path(root, path); | |
1854 | ||
1855 | /* | |
1856 | * this is where we basically do a btrfs_lookup, without the | |
1857 | * crossing root thing. we store the inode number in the | |
1858 | * offset of the orphan item. | |
1859 | */ | |
5b21f2ed | 1860 | inode = btrfs_iget_locked(root->fs_info->sb, |
7b128766 JB |
1861 | found_key.offset, root); |
1862 | if (!inode) | |
1863 | break; | |
1864 | ||
1865 | if (inode->i_state & I_NEW) { | |
1866 | BTRFS_I(inode)->root = root; | |
1867 | ||
1868 | /* have to set the location manually */ | |
1869 | BTRFS_I(inode)->location.objectid = inode->i_ino; | |
1870 | BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY; | |
1871 | BTRFS_I(inode)->location.offset = 0; | |
1872 | ||
1873 | btrfs_read_locked_inode(inode); | |
1874 | unlock_new_inode(inode); | |
1875 | } | |
1876 | ||
1877 | /* | |
1878 | * add this inode to the orphan list so btrfs_orphan_del does | |
1879 | * the proper thing when we hit it | |
1880 | */ | |
bcc63abb | 1881 | spin_lock(&root->list_lock); |
7b128766 | 1882 | list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list); |
bcc63abb | 1883 | spin_unlock(&root->list_lock); |
7b128766 JB |
1884 | |
1885 | /* | |
1886 | * if this is a bad inode, it means we actually succeeded in | |
1887 | * removing the inode, but not the orphan record, which means | |
1888 | * we need to manually delete the orphan since iput will just | |
1889 | * do a destroy_inode | |
1890 | */ | |
1891 | if (is_bad_inode(inode)) { | |
5b21f2ed | 1892 | trans = btrfs_start_transaction(root, 1); |
7b128766 | 1893 | btrfs_orphan_del(trans, inode); |
5b21f2ed | 1894 | btrfs_end_transaction(trans, root); |
7b128766 JB |
1895 | iput(inode); |
1896 | continue; | |
1897 | } | |
1898 | ||
1899 | /* if we have links, this was a truncate, let's do that */ | |
1900 | if (inode->i_nlink) { | |
1901 | nr_truncate++; | |
1902 | btrfs_truncate(inode); | |
1903 | } else { | |
1904 | nr_unlink++; | |
1905 | } | |
1906 | ||
1907 | /* this will do delete_inode and everything for us */ | |
1908 | iput(inode); | |
1909 | } | |
1910 | ||
1911 | if (nr_unlink) | |
1912 | printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink); | |
1913 | if (nr_truncate) | |
1914 | printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate); | |
1915 | ||
1916 | btrfs_free_path(path); | |
7b128766 JB |
1917 | } |
1918 | ||
d352ac68 CM |
1919 | /* |
1920 | * read an inode from the btree into the in-memory inode | |
1921 | */ | |
39279cc3 CM |
1922 | void btrfs_read_locked_inode(struct inode *inode) |
1923 | { | |
1924 | struct btrfs_path *path; | |
5f39d397 | 1925 | struct extent_buffer *leaf; |
39279cc3 | 1926 | struct btrfs_inode_item *inode_item; |
0b86a832 | 1927 | struct btrfs_timespec *tspec; |
39279cc3 CM |
1928 | struct btrfs_root *root = BTRFS_I(inode)->root; |
1929 | struct btrfs_key location; | |
1930 | u64 alloc_group_block; | |
618e21d5 | 1931 | u32 rdev; |
39279cc3 CM |
1932 | int ret; |
1933 | ||
1934 | path = btrfs_alloc_path(); | |
1935 | BUG_ON(!path); | |
39279cc3 | 1936 | memcpy(&location, &BTRFS_I(inode)->location, sizeof(location)); |
dc17ff8f | 1937 | |
39279cc3 | 1938 | ret = btrfs_lookup_inode(NULL, root, path, &location, 0); |
5f39d397 | 1939 | if (ret) |
39279cc3 | 1940 | goto make_bad; |
39279cc3 | 1941 | |
5f39d397 CM |
1942 | leaf = path->nodes[0]; |
1943 | inode_item = btrfs_item_ptr(leaf, path->slots[0], | |
1944 | struct btrfs_inode_item); | |
1945 | ||
1946 | inode->i_mode = btrfs_inode_mode(leaf, inode_item); | |
1947 | inode->i_nlink = btrfs_inode_nlink(leaf, inode_item); | |
1948 | inode->i_uid = btrfs_inode_uid(leaf, inode_item); | |
1949 | inode->i_gid = btrfs_inode_gid(leaf, inode_item); | |
dbe674a9 | 1950 | btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item)); |
5f39d397 CM |
1951 | |
1952 | tspec = btrfs_inode_atime(inode_item); | |
1953 | inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec); | |
1954 | inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec); | |
1955 | ||
1956 | tspec = btrfs_inode_mtime(inode_item); | |
1957 | inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec); | |
1958 | inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec); | |
1959 | ||
1960 | tspec = btrfs_inode_ctime(inode_item); | |
1961 | inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec); | |
1962 | inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec); | |
1963 | ||
a76a3cd4 | 1964 | inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item)); |
e02119d5 CM |
1965 | BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item); |
1966 | inode->i_generation = BTRFS_I(inode)->generation; | |
618e21d5 | 1967 | inode->i_rdev = 0; |
5f39d397 CM |
1968 | rdev = btrfs_inode_rdev(leaf, inode_item); |
1969 | ||
aec7477b JB |
1970 | BTRFS_I(inode)->index_cnt = (u64)-1; |
1971 | ||
5f39d397 | 1972 | alloc_group_block = btrfs_inode_block_group(leaf, inode_item); |
39279cc3 CM |
1973 | BTRFS_I(inode)->block_group = btrfs_lookup_block_group(root->fs_info, |
1974 | alloc_group_block); | |
b98b6767 | 1975 | BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item); |
e52ec0eb CM |
1976 | if (!BTRFS_I(inode)->block_group) { |
1977 | BTRFS_I(inode)->block_group = btrfs_find_block_group(root, | |
0b86a832 CM |
1978 | NULL, 0, |
1979 | BTRFS_BLOCK_GROUP_METADATA, 0); | |
e52ec0eb | 1980 | } |
39279cc3 CM |
1981 | btrfs_free_path(path); |
1982 | inode_item = NULL; | |
1983 | ||
39279cc3 | 1984 | switch (inode->i_mode & S_IFMT) { |
39279cc3 CM |
1985 | case S_IFREG: |
1986 | inode->i_mapping->a_ops = &btrfs_aops; | |
04160088 | 1987 | inode->i_mapping->backing_dev_info = &root->fs_info->bdi; |
d1310b2e | 1988 | BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; |
39279cc3 CM |
1989 | inode->i_fop = &btrfs_file_operations; |
1990 | inode->i_op = &btrfs_file_inode_operations; | |
1991 | break; | |
1992 | case S_IFDIR: | |
1993 | inode->i_fop = &btrfs_dir_file_operations; | |
1994 | if (root == root->fs_info->tree_root) | |
1995 | inode->i_op = &btrfs_dir_ro_inode_operations; | |
1996 | else | |
1997 | inode->i_op = &btrfs_dir_inode_operations; | |
1998 | break; | |
1999 | case S_IFLNK: | |
2000 | inode->i_op = &btrfs_symlink_inode_operations; | |
2001 | inode->i_mapping->a_ops = &btrfs_symlink_aops; | |
04160088 | 2002 | inode->i_mapping->backing_dev_info = &root->fs_info->bdi; |
39279cc3 | 2003 | break; |
618e21d5 JB |
2004 | default: |
2005 | init_special_inode(inode, inode->i_mode, rdev); | |
2006 | break; | |
39279cc3 CM |
2007 | } |
2008 | return; | |
2009 | ||
2010 | make_bad: | |
39279cc3 | 2011 | btrfs_free_path(path); |
39279cc3 CM |
2012 | make_bad_inode(inode); |
2013 | } | |
2014 | ||
d352ac68 CM |
2015 | /* |
2016 | * given a leaf and an inode, copy the inode fields into the leaf | |
2017 | */ | |
e02119d5 CM |
2018 | static void fill_inode_item(struct btrfs_trans_handle *trans, |
2019 | struct extent_buffer *leaf, | |
5f39d397 | 2020 | struct btrfs_inode_item *item, |
39279cc3 CM |
2021 | struct inode *inode) |
2022 | { | |
5f39d397 CM |
2023 | btrfs_set_inode_uid(leaf, item, inode->i_uid); |
2024 | btrfs_set_inode_gid(leaf, item, inode->i_gid); | |
dbe674a9 | 2025 | btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size); |
5f39d397 CM |
2026 | btrfs_set_inode_mode(leaf, item, inode->i_mode); |
2027 | btrfs_set_inode_nlink(leaf, item, inode->i_nlink); | |
2028 | ||
2029 | btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item), | |
2030 | inode->i_atime.tv_sec); | |
2031 | btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item), | |
2032 | inode->i_atime.tv_nsec); | |
2033 | ||
2034 | btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item), | |
2035 | inode->i_mtime.tv_sec); | |
2036 | btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item), | |
2037 | inode->i_mtime.tv_nsec); | |
2038 | ||
2039 | btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item), | |
2040 | inode->i_ctime.tv_sec); | |
2041 | btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item), | |
2042 | inode->i_ctime.tv_nsec); | |
2043 | ||
a76a3cd4 | 2044 | btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode)); |
e02119d5 CM |
2045 | btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation); |
2046 | btrfs_set_inode_transid(leaf, item, trans->transid); | |
5f39d397 | 2047 | btrfs_set_inode_rdev(leaf, item, inode->i_rdev); |
b98b6767 | 2048 | btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags); |
5f39d397 | 2049 | btrfs_set_inode_block_group(leaf, item, |
39279cc3 CM |
2050 | BTRFS_I(inode)->block_group->key.objectid); |
2051 | } | |
2052 | ||
d352ac68 CM |
2053 | /* |
2054 | * copy everything in the in-memory inode into the btree. | |
2055 | */ | |
ba1da2f4 | 2056 | int noinline btrfs_update_inode(struct btrfs_trans_handle *trans, |
39279cc3 CM |
2057 | struct btrfs_root *root, |
2058 | struct inode *inode) | |
2059 | { | |
2060 | struct btrfs_inode_item *inode_item; | |
2061 | struct btrfs_path *path; | |
5f39d397 | 2062 | struct extent_buffer *leaf; |
39279cc3 CM |
2063 | int ret; |
2064 | ||
2065 | path = btrfs_alloc_path(); | |
2066 | BUG_ON(!path); | |
39279cc3 CM |
2067 | ret = btrfs_lookup_inode(trans, root, path, |
2068 | &BTRFS_I(inode)->location, 1); | |
2069 | if (ret) { | |
2070 | if (ret > 0) | |
2071 | ret = -ENOENT; | |
2072 | goto failed; | |
2073 | } | |
2074 | ||
5f39d397 CM |
2075 | leaf = path->nodes[0]; |
2076 | inode_item = btrfs_item_ptr(leaf, path->slots[0], | |
39279cc3 CM |
2077 | struct btrfs_inode_item); |
2078 | ||
e02119d5 | 2079 | fill_inode_item(trans, leaf, inode_item, inode); |
5f39d397 | 2080 | btrfs_mark_buffer_dirty(leaf); |
15ee9bc7 | 2081 | btrfs_set_inode_last_trans(trans, inode); |
39279cc3 CM |
2082 | ret = 0; |
2083 | failed: | |
39279cc3 CM |
2084 | btrfs_free_path(path); |
2085 | return ret; | |
2086 | } | |
2087 | ||
2088 | ||
d352ac68 CM |
2089 | /* |
2090 | * unlink helper that gets used here in inode.c and in the tree logging | |
2091 | * recovery code. It removes a link in a directory with a given name, and | |
2092 | * also drops the back refs in the inode to the directory | |
2093 | */ | |
e02119d5 CM |
2094 | int btrfs_unlink_inode(struct btrfs_trans_handle *trans, |
2095 | struct btrfs_root *root, | |
2096 | struct inode *dir, struct inode *inode, | |
2097 | const char *name, int name_len) | |
39279cc3 CM |
2098 | { |
2099 | struct btrfs_path *path; | |
39279cc3 | 2100 | int ret = 0; |
5f39d397 | 2101 | struct extent_buffer *leaf; |
39279cc3 | 2102 | struct btrfs_dir_item *di; |
5f39d397 | 2103 | struct btrfs_key key; |
aec7477b | 2104 | u64 index; |
39279cc3 CM |
2105 | |
2106 | path = btrfs_alloc_path(); | |
54aa1f4d CM |
2107 | if (!path) { |
2108 | ret = -ENOMEM; | |
2109 | goto err; | |
2110 | } | |
2111 | ||
39279cc3 CM |
2112 | di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino, |
2113 | name, name_len, -1); | |
2114 | if (IS_ERR(di)) { | |
2115 | ret = PTR_ERR(di); | |
2116 | goto err; | |
2117 | } | |
2118 | if (!di) { | |
2119 | ret = -ENOENT; | |
2120 | goto err; | |
2121 | } | |
5f39d397 CM |
2122 | leaf = path->nodes[0]; |
2123 | btrfs_dir_item_key_to_cpu(leaf, di, &key); | |
39279cc3 | 2124 | ret = btrfs_delete_one_dir_name(trans, root, path, di); |
54aa1f4d CM |
2125 | if (ret) |
2126 | goto err; | |
39279cc3 CM |
2127 | btrfs_release_path(root, path); |
2128 | ||
aec7477b | 2129 | ret = btrfs_del_inode_ref(trans, root, name, name_len, |
e02119d5 CM |
2130 | inode->i_ino, |
2131 | dir->i_ino, &index); | |
aec7477b JB |
2132 | if (ret) { |
2133 | printk("failed to delete reference to %.*s, " | |
2134 | "inode %lu parent %lu\n", name_len, name, | |
e02119d5 | 2135 | inode->i_ino, dir->i_ino); |
aec7477b JB |
2136 | goto err; |
2137 | } | |
2138 | ||
39279cc3 | 2139 | di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino, |
aec7477b | 2140 | index, name, name_len, -1); |
39279cc3 CM |
2141 | if (IS_ERR(di)) { |
2142 | ret = PTR_ERR(di); | |
2143 | goto err; | |
2144 | } | |
2145 | if (!di) { | |
2146 | ret = -ENOENT; | |
2147 | goto err; | |
2148 | } | |
2149 | ret = btrfs_delete_one_dir_name(trans, root, path, di); | |
925baedd | 2150 | btrfs_release_path(root, path); |
39279cc3 | 2151 | |
e02119d5 CM |
2152 | ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len, |
2153 | inode, dir->i_ino); | |
49eb7e46 CM |
2154 | BUG_ON(ret != 0 && ret != -ENOENT); |
2155 | if (ret != -ENOENT) | |
2156 | BTRFS_I(dir)->log_dirty_trans = trans->transid; | |
e02119d5 CM |
2157 | |
2158 | ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len, | |
2159 | dir, index); | |
2160 | BUG_ON(ret); | |
39279cc3 CM |
2161 | err: |
2162 | btrfs_free_path(path); | |
e02119d5 CM |
2163 | if (ret) |
2164 | goto out; | |
2165 | ||
2166 | btrfs_i_size_write(dir, dir->i_size - name_len * 2); | |
2167 | inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME; | |
2168 | btrfs_update_inode(trans, root, dir); | |
2169 | btrfs_drop_nlink(inode); | |
2170 | ret = btrfs_update_inode(trans, root, inode); | |
2171 | dir->i_sb->s_dirt = 1; | |
2172 | out: | |
39279cc3 CM |
2173 | return ret; |
2174 | } | |
2175 | ||
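/*
 * unlink callback for the VFS.  It drops the directory entry and inode back
 * refs with btrfs_unlink_inode(), and if that was the last link it adds the
 * inode to the orphan list so a crash can't leak the space.
 */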
2176 | static int btrfs_unlink(struct inode *dir, struct dentry *dentry) | |
2177 | { | |
2178 | struct btrfs_root *root; | |
2179 | struct btrfs_trans_handle *trans; | |
7b128766 | 2180 | struct inode *inode = dentry->d_inode; |
39279cc3 | 2181 | int ret; |
1832a6d5 | 2182 | unsigned long nr = 0; |
39279cc3 CM |
2183 | |
2184 | root = BTRFS_I(dir)->root; | |
1832a6d5 CM |
2185 | |
2186 | ret = btrfs_check_free_space(root, 1, 1); | |
2187 | if (ret) | |
2188 | goto fail; | |
2189 | ||
39279cc3 | 2190 | trans = btrfs_start_transaction(root, 1); |
5f39d397 | 2191 | |
39279cc3 | 2192 | btrfs_set_trans_block_group(trans, dir); |
e02119d5 CM |
2193 | ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode, |
2194 | dentry->d_name.name, dentry->d_name.len); | |
7b128766 JB |
2195 | |
2196 | if (inode->i_nlink == 0) | |
2197 | ret = btrfs_orphan_add(trans, inode); | |
2198 | ||
d3c2fdcf | 2199 | nr = trans->blocks_used; |
5f39d397 | 2200 | |
89ce8a63 | 2201 | btrfs_end_transaction_throttle(trans, root); |
1832a6d5 | 2202 | fail: |
d3c2fdcf | 2203 | btrfs_btree_balance_dirty(root, nr); |
39279cc3 CM |
2204 | return ret; |
2205 | } | |
2206 | ||
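/*
 * rmdir callback for the VFS.  Non-empty directories (i_size larger than an
 * empty dir) are rejected with -ENOTEMPTY; otherwise the directory is put on
 * the orphan list, unlinked from its parent and its size set to 0.
 */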
2207 | static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) | |
2208 | { | |
2209 | struct inode *inode = dentry->d_inode; | |
1832a6d5 | 2210 | int err = 0; |
39279cc3 CM |
2211 | int ret; |
2212 | struct btrfs_root *root = BTRFS_I(dir)->root; | |
39279cc3 | 2213 | struct btrfs_trans_handle *trans; |
1832a6d5 | 2214 | unsigned long nr = 0; |
39279cc3 | 2215 | |
925baedd | 2216 | if (inode->i_size > BTRFS_EMPTY_DIR_SIZE) { |
134d4512 | 2217 | return -ENOTEMPTY; |
925baedd | 2218 | } |
134d4512 | 2219 | |
1832a6d5 CM |
2220 | ret = btrfs_check_free_space(root, 1, 1); |
2221 | if (ret) | |
2222 | goto fail; | |
2223 | ||
39279cc3 CM |
2224 | trans = btrfs_start_transaction(root, 1); |
2225 | btrfs_set_trans_block_group(trans, dir); | |
39279cc3 | 2226 | |
7b128766 JB |
2227 | err = btrfs_orphan_add(trans, inode); |
2228 | if (err) | |
2229 | goto fail_trans; | |
2230 | ||
39279cc3 | 2231 | /* now the directory is empty */ |
e02119d5 CM |
2232 | err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode, |
2233 | dentry->d_name.name, dentry->d_name.len); | |
39279cc3 | 2234 | if (!err) { |
dbe674a9 | 2235 | btrfs_i_size_write(inode, 0); |
39279cc3 | 2236 | } |
3954401f | 2237 | |
7b128766 | 2238 | fail_trans: |
d3c2fdcf | 2239 | nr = trans->blocks_used; |
89ce8a63 | 2240 | ret = btrfs_end_transaction_throttle(trans, root); |
1832a6d5 | 2241 | fail: |
d3c2fdcf | 2242 | btrfs_btree_balance_dirty(root, nr); |
3954401f | 2243 | |
39279cc3 CM |
2244 | if (ret && !err) |
2245 | err = ret; | |
2246 | return err; | |
2247 | } | |
2248 | ||
323ac95b CM |
2249 | /* |
2250 | * when truncating bytes in a file, it is possible to avoid reading | |
2251 | * the leaves that contain only checksum items. This can be the | |
2252 | * majority of the IO required to delete a large file, but it must | |
2253 | * be done carefully. | |
2254 | * | |
2255 | * The keys in the level just above the leaves are checked to make sure | |
2256 | * the lowest key in a given leaf is a csum key, and starts at an offset | |
2257 | * after the new size. | |
2258 | * | |
2259 | * Then the key for the next leaf is checked to make sure it also has | |
2260 | * a checksum item for the same file. If it does, we know our target leaf | |
2261 | * contains only checksum items, and it can be safely freed without reading | |
2262 | * it. | |
2263 | * | |
2264 | * This is just an optimization targeted at large files. It may do | |
2265 | * nothing. It will return 0 unless things went badly. | |
2266 | */ | |
2267 | static noinline int drop_csum_leaves(struct btrfs_trans_handle *trans, | |
2268 | struct btrfs_root *root, | |
2269 | struct btrfs_path *path, | |
2270 | struct inode *inode, u64 new_size) | |
2271 | { | |
2272 | struct btrfs_key key; | |
2273 | int ret; | |
2274 | int nritems; | |
2275 | struct btrfs_key found_key; | |
2276 | struct btrfs_key other_key; | |
5b84e8d6 YZ |
2277 | struct btrfs_leaf_ref *ref; |
2278 | u64 leaf_gen; | |
2279 | u64 leaf_start; | |
323ac95b CM |
2280 | |
2281 | path->lowest_level = 1; | |
2282 | key.objectid = inode->i_ino; | |
2283 | key.type = BTRFS_CSUM_ITEM_KEY; | |
2284 | key.offset = new_size; | |
2285 | again: | |
2286 | ret = btrfs_search_slot(trans, root, &key, path, -1, 1); | |
2287 | if (ret < 0) | |
2288 | goto out; | |
2289 | ||
2290 | if (path->nodes[1] == NULL) { | |
2291 | ret = 0; | |
2292 | goto out; | |
2293 | } | |
2294 | ret = 0; | |
2295 | btrfs_node_key_to_cpu(path->nodes[1], &found_key, path->slots[1]); | |
2296 | nritems = btrfs_header_nritems(path->nodes[1]); | |
2297 | ||
2298 | if (!nritems) | |
2299 | goto out; | |
2300 | ||
2301 | if (path->slots[1] >= nritems) | |
2302 | goto next_node; | |
2303 | ||
2304 | /* did we find a key greater than anything we want to delete? */ | |
2305 | if (found_key.objectid > inode->i_ino || | |
2306 | (found_key.objectid == inode->i_ino && found_key.type > key.type)) | |
2307 | goto out; | |
2308 | ||
2309 | /* we check the next key in the node to make sure the leaf contains | |
2310 | * only checksum items. This comparison doesn't work if our | |
2311 | * leaf is the last one in the node | |
2312 | */ | |
2313 | if (path->slots[1] + 1 >= nritems) { | |
2314 | next_node: | |
2315 | /* search forward from the last key in the node, this | |
2316 | * will bring us into the next node in the tree | |
2317 | */ | |
2318 | btrfs_node_key_to_cpu(path->nodes[1], &found_key, nritems - 1); | |
2319 | ||
2320 | /* unlikely, but we inc below, so check to be safe */ | |
2321 | if (found_key.offset == (u64)-1) | |
2322 | goto out; | |
2323 | ||
2324 | /* search_forward needs a path with locks held, do the | |
2325 | * search again for the original key. It is possible | |
2326 | * this will race with a balance and return a path that | |
2327 | * we could modify, but this drop is just an optimization | |
2328 | * and is allowed to miss some leaves. | |
2329 | */ | |
2330 | btrfs_release_path(root, path); | |
2331 | found_key.offset++; | |
2332 | ||
2333 | /* setup a max key for search_forward */ | |
2334 | other_key.offset = (u64)-1; | |
2335 | other_key.type = key.type; | |
2336 | other_key.objectid = key.objectid; | |
2337 | ||
2338 | path->keep_locks = 1; | |
2339 | ret = btrfs_search_forward(root, &found_key, &other_key, | |
2340 | path, 0, 0); | |
2341 | path->keep_locks = 0; | |
2342 | if (ret || found_key.objectid != key.objectid || | |
2343 | found_key.type != key.type) { | |
2344 | ret = 0; | |
2345 | goto out; | |
2346 | } | |
2347 | ||
2348 | key.offset = found_key.offset; | |
2349 | btrfs_release_path(root, path); | |
2350 | cond_resched(); | |
2351 | goto again; | |
2352 | } | |
2353 | ||
2354 | /* we know there's one more slot after us in the tree, | |
2355 | * read that key so we can verify it is also a checksum item | |
2356 | */ | |
2357 | btrfs_node_key_to_cpu(path->nodes[1], &other_key, path->slots[1] + 1); | |
2358 | ||
2359 | if (found_key.objectid < inode->i_ino) | |
2360 | goto next_key; | |
2361 | ||
2362 | if (found_key.type != key.type || found_key.offset < new_size) | |
2363 | goto next_key; | |
2364 | ||
2365 | /* | |
2366 | * if the key for the next leaf isn't a csum key from this objectid, | |
2367 | * we can't be sure there aren't good items inside this leaf. | |
2368 | * Bail out | |
2369 | */ | |
2370 | if (other_key.objectid != inode->i_ino || other_key.type != key.type) | |
2371 | goto out; | |
2372 | ||
5b84e8d6 YZ |
2373 | leaf_start = btrfs_node_blockptr(path->nodes[1], path->slots[1]); |
2374 | leaf_gen = btrfs_node_ptr_generation(path->nodes[1], path->slots[1]); | |
323ac95b CM |
2375 | /* |
2376 | * it is safe to delete this leaf, it contains only | |
2377 | * csum items from this inode at an offset >= new_size | |
2378 | */ | |
5b84e8d6 | 2379 | ret = btrfs_del_leaf(trans, root, path, leaf_start); |
323ac95b CM |
2380 | BUG_ON(ret); |
2381 | ||
5b84e8d6 YZ |
2382 | if (root->ref_cows && leaf_gen < trans->transid) { |
2383 | ref = btrfs_alloc_leaf_ref(root, 0); | |
2384 | if (ref) { | |
2385 | ref->root_gen = root->root_key.offset; | |
2386 | ref->bytenr = leaf_start; | |
2387 | ref->owner = 0; | |
2388 | ref->generation = leaf_gen; | |
2389 | ref->nritems = 0; | |
2390 | ||
2391 | ret = btrfs_add_leaf_ref(root, ref, 0); | |
2392 | WARN_ON(ret); | |
2393 | btrfs_free_leaf_ref(root, ref); | |
2394 | } else { | |
2395 | WARN_ON(1); | |
2396 | } | |
2397 | } | |
323ac95b CM |
2398 | next_key: |
2399 | btrfs_release_path(root, path); | |
2400 | ||
2401 | if (other_key.objectid == inode->i_ino && | |
2402 | other_key.type == key.type && other_key.offset > key.offset) { | |
2403 | key.offset = other_key.offset; | |
2404 | cond_resched(); | |
2405 | goto again; | |
2406 | } | |
2407 | ret = 0; | |
2408 | out: | |
2409 | /* fixup any changes we've made to the path */ | |
2410 | path->lowest_level = 0; | |
2411 | path->keep_locks = 0; | |
2412 | btrfs_release_path(root, path); | |
2413 | return ret; | |
2414 | } | |
2415 | ||
39279cc3 CM |
2416 | /* |
2417 | * this can truncate away extent items, csum items and directory items. | |
2418 | * It starts at a high offset and removes keys until it can't find | |
d352ac68 | 2419 | * any higher than new_size |
39279cc3 CM |
2420 | * |
2421 | * csum items that cross the new i_size are truncated to the new size | |
2422 | * as well. | |
7b128766 JB |
2423 | * |
2424 | * min_type is the minimum key type to truncate down to. If set to 0, this | |
2425 | * will kill all the items on this inode, including the INODE_ITEM_KEY. | |
39279cc3 | 2426 | */ |
e02119d5 CM |
2427 | noinline int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, |
2428 | struct btrfs_root *root, | |
2429 | struct inode *inode, | |
2430 | u64 new_size, u32 min_type) | |
39279cc3 CM |
2431 | { |
2432 | int ret; | |
2433 | struct btrfs_path *path; | |
2434 | struct btrfs_key key; | |
5f39d397 | 2435 | struct btrfs_key found_key; |
39279cc3 | 2436 | u32 found_type; |
5f39d397 | 2437 | struct extent_buffer *leaf; |
39279cc3 CM |
2438 | struct btrfs_file_extent_item *fi; |
2439 | u64 extent_start = 0; | |
db94535d | 2440 | u64 extent_num_bytes = 0; |
39279cc3 | 2441 | u64 item_end = 0; |
7bb86316 | 2442 | u64 root_gen = 0; |
d8d5f3e1 | 2443 | u64 root_owner = 0; |
39279cc3 CM |
2444 | int found_extent; |
2445 | int del_item; | |
85e21bac CM |
2446 | int pending_del_nr = 0; |
2447 | int pending_del_slot = 0; | |
179e29e4 | 2448 | int extent_type = -1; |
771ed689 | 2449 | int encoding; |
3b951516 | 2450 | u64 mask = root->sectorsize - 1; |
39279cc3 | 2451 | |
e02119d5 | 2452 | if (root->ref_cows) |
5b21f2ed | 2453 | btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0); |
39279cc3 | 2454 | path = btrfs_alloc_path(); |
3c69faec | 2455 | path->reada = -1; |
39279cc3 | 2456 | BUG_ON(!path); |
5f39d397 | 2457 | |
39279cc3 CM |
2458 | /* FIXME, add redo link to tree so we don't leak on crash */ |
2459 | key.objectid = inode->i_ino; | |
2460 | key.offset = (u64)-1; | |
5f39d397 CM |
2461 | key.type = (u8)-1; |
2462 | ||
85e21bac | 2463 | btrfs_init_path(path); |
323ac95b CM |
2464 | |
2465 | ret = drop_csum_leaves(trans, root, path, inode, new_size); | |
2466 | BUG_ON(ret); | |
2467 | ||
85e21bac CM |
2468 | search_again: |
2469 | ret = btrfs_search_slot(trans, root, &key, path, -1, 1); | |
2470 | if (ret < 0) { | |
2471 | goto error; | |
2472 | } | |
2473 | if (ret > 0) { | |
e02119d5 CM |
2474 | /* there are no items in the tree for us to truncate, we're |
2475 | * done | |
2476 | */ | |
2477 | if (path->slots[0] == 0) { | |
2478 | ret = 0; | |
2479 | goto error; | |
2480 | } | |
85e21bac CM |
2481 | path->slots[0]--; |
2482 | } | |
2483 | ||
39279cc3 | 2484 | while(1) { |
39279cc3 | 2485 | fi = NULL; |
5f39d397 CM |
2486 | leaf = path->nodes[0]; |
2487 | btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); | |
2488 | found_type = btrfs_key_type(&found_key); | |
771ed689 | 2489 | encoding = 0; |
39279cc3 | 2490 | |
5f39d397 | 2491 | if (found_key.objectid != inode->i_ino) |
39279cc3 | 2492 | break; |
5f39d397 | 2493 | |
85e21bac | 2494 | if (found_type < min_type) |
39279cc3 CM |
2495 | break; |
2496 | ||
5f39d397 | 2497 | item_end = found_key.offset; |
39279cc3 | 2498 | if (found_type == BTRFS_EXTENT_DATA_KEY) { |
5f39d397 | 2499 | fi = btrfs_item_ptr(leaf, path->slots[0], |
39279cc3 | 2500 | struct btrfs_file_extent_item); |
179e29e4 | 2501 | extent_type = btrfs_file_extent_type(leaf, fi); |
771ed689 CM |
2502 | encoding = btrfs_file_extent_compression(leaf, fi); |
2503 | encoding |= btrfs_file_extent_encryption(leaf, fi); | |
2504 | encoding |= btrfs_file_extent_other_encoding(leaf, fi); | |
2505 | ||
179e29e4 | 2506 | if (extent_type != BTRFS_FILE_EXTENT_INLINE) { |
5f39d397 | 2507 | item_end += |
db94535d | 2508 | btrfs_file_extent_num_bytes(leaf, fi); |
179e29e4 | 2509 | } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { |
179e29e4 | 2510 | item_end += btrfs_file_extent_inline_len(leaf, |
c8b97818 | 2511 | fi); |
39279cc3 | 2512 | } |
008630c1 | 2513 | item_end--; |
39279cc3 CM |
2514 | } |
2515 | if (found_type == BTRFS_CSUM_ITEM_KEY) { | |
2516 | ret = btrfs_csum_truncate(trans, root, path, | |
e02119d5 | 2517 | new_size); |
39279cc3 CM |
2518 | BUG_ON(ret); |
2519 | } | |
e02119d5 | 2520 | if (item_end < new_size) { |
b888db2b CM |
2521 | if (found_type == BTRFS_DIR_ITEM_KEY) { |
2522 | found_type = BTRFS_INODE_ITEM_KEY; | |
2523 | } else if (found_type == BTRFS_EXTENT_ITEM_KEY) { | |
2524 | found_type = BTRFS_CSUM_ITEM_KEY; | |
85e21bac CM |
2525 | } else if (found_type == BTRFS_EXTENT_DATA_KEY) { |
2526 | found_type = BTRFS_XATTR_ITEM_KEY; | |
2527 | } else if (found_type == BTRFS_XATTR_ITEM_KEY) { | |
2528 | found_type = BTRFS_INODE_REF_KEY; | |
b888db2b CM |
2529 | } else if (found_type) { |
2530 | found_type--; | |
2531 | } else { | |
2532 | break; | |
39279cc3 | 2533 | } |
a61721d5 | 2534 | btrfs_set_key_type(&key, found_type); |
85e21bac | 2535 | goto next; |
39279cc3 | 2536 | } |
e02119d5 | 2537 | if (found_key.offset >= new_size) |
39279cc3 CM |
2538 | del_item = 1; |
2539 | else | |
2540 | del_item = 0; | |
2541 | found_extent = 0; | |
2542 | ||
2543 | /* FIXME, shrink the extent if the ref count is only 1 */ | |
179e29e4 CM |
2544 | if (found_type != BTRFS_EXTENT_DATA_KEY) |
2545 | goto delete; | |
2546 | ||
2547 | if (extent_type != BTRFS_FILE_EXTENT_INLINE) { | |
39279cc3 | 2548 | u64 num_dec; |
db94535d | 2549 | extent_start = btrfs_file_extent_disk_bytenr(leaf, fi); |
771ed689 | 2550 | if (!del_item && !encoding) { |
db94535d CM |
2551 | u64 orig_num_bytes = |
2552 | btrfs_file_extent_num_bytes(leaf, fi); | |
e02119d5 | 2553 | extent_num_bytes = new_size - |
5f39d397 | 2554 | found_key.offset + root->sectorsize - 1; |
b1632b10 Y |
2555 | extent_num_bytes = extent_num_bytes & |
2556 | ~((u64)root->sectorsize - 1); | |
db94535d CM |
2557 | btrfs_set_file_extent_num_bytes(leaf, fi, |
2558 | extent_num_bytes); | |
2559 | num_dec = (orig_num_bytes - | |
9069218d | 2560 | extent_num_bytes); |
e02119d5 | 2561 | if (root->ref_cows && extent_start != 0) |
a76a3cd4 | 2562 | inode_sub_bytes(inode, num_dec); |
5f39d397 | 2563 | btrfs_mark_buffer_dirty(leaf); |
39279cc3 | 2564 | } else { |
db94535d CM |
2565 | extent_num_bytes = |
2566 | btrfs_file_extent_disk_num_bytes(leaf, | |
2567 | fi); | |
39279cc3 | 2568 | /* FIXME blocksize != 4096 */ |
9069218d | 2569 | num_dec = btrfs_file_extent_num_bytes(leaf, fi); |
39279cc3 CM |
2570 | if (extent_start != 0) { |
2571 | found_extent = 1; | |
e02119d5 | 2572 | if (root->ref_cows) |
a76a3cd4 | 2573 | inode_sub_bytes(inode, num_dec); |
e02119d5 | 2574 | } |
31840ae1 | 2575 | root_gen = btrfs_header_generation(leaf); |
d8d5f3e1 | 2576 | root_owner = btrfs_header_owner(leaf); |
39279cc3 | 2577 | } |
9069218d | 2578 | } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { |
c8b97818 CM |
2579 | /* |
2580 | * we can't truncate inline items that have had | |
2581 | * special encodings | |
2582 | */ | |
2583 | if (!del_item && | |
2584 | btrfs_file_extent_compression(leaf, fi) == 0 && | |
2585 | btrfs_file_extent_encryption(leaf, fi) == 0 && | |
2586 | btrfs_file_extent_other_encoding(leaf, fi) == 0) { | |
e02119d5 CM |
2587 | u32 size = new_size - found_key.offset; |
2588 | ||
2589 | if (root->ref_cows) { | |
a76a3cd4 YZ |
2590 | inode_sub_bytes(inode, item_end + 1 - |
2591 | new_size); | |
e02119d5 CM |
2592 | } |
2593 | size = | |
2594 | btrfs_file_extent_calc_inline_size(size); | |
9069218d | 2595 | ret = btrfs_truncate_item(trans, root, path, |
e02119d5 | 2596 | size, 1); |
9069218d | 2597 | BUG_ON(ret); |
e02119d5 | 2598 | } else if (root->ref_cows) { |
a76a3cd4 YZ |
2599 | inode_sub_bytes(inode, item_end + 1 - |
2600 | found_key.offset); | |
9069218d | 2601 | } |
39279cc3 | 2602 | } |
179e29e4 | 2603 | delete: |
39279cc3 | 2604 | if (del_item) { |
85e21bac CM |
2605 | if (!pending_del_nr) { |
2606 | /* no pending yet, add ourselves */ | |
2607 | pending_del_slot = path->slots[0]; | |
2608 | pending_del_nr = 1; | |
2609 | } else if (pending_del_nr && | |
2610 | path->slots[0] + 1 == pending_del_slot) { | |
2611 | /* hop on the pending chunk */ | |
2612 | pending_del_nr++; | |
2613 | pending_del_slot = path->slots[0]; | |
2614 | } else { | |
2615 | printk("bad pending slot %d pending_del_nr %d pending_del_slot %d\n", path->slots[0], pending_del_nr, pending_del_slot); | |
2616 | } | |
39279cc3 CM |
2617 | } else { |
2618 | break; | |
2619 | } | |
39279cc3 CM |
2620 | if (found_extent) { |
2621 | ret = btrfs_free_extent(trans, root, extent_start, | |
7bb86316 | 2622 | extent_num_bytes, |
31840ae1 | 2623 | leaf->start, root_owner, |
3bb1a1bc | 2624 | root_gen, inode->i_ino, 0); |
39279cc3 CM |
2625 | BUG_ON(ret); |
2626 | } | |
85e21bac CM |
2627 | next: |
2628 | if (path->slots[0] == 0) { | |
2629 | if (pending_del_nr) | |
2630 | goto del_pending; | |
2631 | btrfs_release_path(root, path); | |
2632 | goto search_again; | |
2633 | } | |
2634 | ||
2635 | path->slots[0]--; | |
2636 | if (pending_del_nr && | |
2637 | path->slots[0] + 1 != pending_del_slot) { | |
2638 | struct btrfs_key debug; | |
2639 | del_pending: | |
2640 | btrfs_item_key_to_cpu(path->nodes[0], &debug, | |
2641 | pending_del_slot); | |
2642 | ret = btrfs_del_items(trans, root, path, | |
2643 | pending_del_slot, | |
2644 | pending_del_nr); | |
2645 | BUG_ON(ret); | |
2646 | pending_del_nr = 0; | |
2647 | btrfs_release_path(root, path); | |
2648 | goto search_again; | |
2649 | } | |
39279cc3 CM |
2650 | } |
2651 | ret = 0; | |
2652 | error: | |
85e21bac CM |
2653 | if (pending_del_nr) { |
2654 | ret = btrfs_del_items(trans, root, path, pending_del_slot, | |
2655 | pending_del_nr); | |
2656 | } | |
39279cc3 CM |
2657 | btrfs_free_path(path); |
2658 | inode->i_sb->s_dirt = 1; | |
2659 | return ret; | |
2660 | } | |
2661 | ||
2662 | /* | |
2663 | * taken from block_truncate_page, but does cow as it zeros out | |
2664 | * any bytes left in the last page in the file. | |
2665 | */ | |
2666 | static int btrfs_truncate_page(struct address_space *mapping, loff_t from) | |
2667 | { | |
2668 | struct inode *inode = mapping->host; | |
db94535d | 2669 | struct btrfs_root *root = BTRFS_I(inode)->root; |
e6dcd2dc CM |
2670 | struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; |
2671 | struct btrfs_ordered_extent *ordered; | |
2672 | char *kaddr; | |
db94535d | 2673 | u32 blocksize = root->sectorsize; |
39279cc3 CM |
2674 | pgoff_t index = from >> PAGE_CACHE_SHIFT; |
2675 | unsigned offset = from & (PAGE_CACHE_SIZE-1); | |
2676 | struct page *page; | |
39279cc3 | 2677 | int ret = 0; |
a52d9a80 | 2678 | u64 page_start; |
e6dcd2dc | 2679 | u64 page_end; |
39279cc3 CM |
2680 | |
2681 | if ((offset & (blocksize - 1)) == 0) | |
2682 | goto out; | |
2683 | ||
2684 | ret = -ENOMEM; | |
211c17f5 | 2685 | again: |
39279cc3 CM |
2686 | page = grab_cache_page(mapping, index); |
2687 | if (!page) | |
2688 | goto out; | |
e6dcd2dc CM |
2689 | |
2690 | page_start = page_offset(page); | |
2691 | page_end = page_start + PAGE_CACHE_SIZE - 1; | |
2692 | ||
39279cc3 | 2693 | if (!PageUptodate(page)) { |
9ebefb18 | 2694 | ret = btrfs_readpage(NULL, page); |
39279cc3 | 2695 | lock_page(page); |
211c17f5 CM |
2696 | if (page->mapping != mapping) { |
2697 | unlock_page(page); | |
2698 | page_cache_release(page); | |
2699 | goto again; | |
2700 | } | |
39279cc3 CM |
2701 | if (!PageUptodate(page)) { |
2702 | ret = -EIO; | |
89642229 | 2703 | goto out_unlock; |
39279cc3 CM |
2704 | } |
2705 | } | |
211c17f5 | 2706 | wait_on_page_writeback(page); |
e6dcd2dc CM |
2707 | |
2708 | lock_extent(io_tree, page_start, page_end, GFP_NOFS); | |
2709 | set_page_extent_mapped(page); | |
2710 | ||
2711 | ordered = btrfs_lookup_ordered_extent(inode, page_start); | |
2712 | if (ordered) { | |
2713 | unlock_extent(io_tree, page_start, page_end, GFP_NOFS); | |
2714 | unlock_page(page); | |
2715 | page_cache_release(page); | |
eb84ae03 | 2716 | btrfs_start_ordered_extent(inode, ordered, 1); |
e6dcd2dc CM |
2717 | btrfs_put_ordered_extent(ordered); |
2718 | goto again; | |
2719 | } | |
2720 | ||
ea8c2819 | 2721 | btrfs_set_extent_delalloc(inode, page_start, page_end); |
e6dcd2dc CM |
2722 | ret = 0; |
2723 | if (offset != PAGE_CACHE_SIZE) { | |
2724 | kaddr = kmap(page); | |
2725 | memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset); | |
2726 | flush_dcache_page(page); | |
2727 | kunmap(page); | |
2728 | } | |
247e743c | 2729 | ClearPageChecked(page); |
e6dcd2dc CM |
2730 | set_page_dirty(page); |
2731 | unlock_extent(io_tree, page_start, page_end, GFP_NOFS); | |
39279cc3 | 2732 | |
89642229 | 2733 | out_unlock: |
39279cc3 CM |
2734 | unlock_page(page); |
2735 | page_cache_release(page); | |
2736 | out: | |
2737 | return ret; | |
2738 | } | |
2739 | ||
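/*
 * expand a file out to 'size'.  The tail of the old last page is zeroed with
 * btrfs_truncate_page() and any vacant ranges between the old i_size and the
 * new size get hole file extents (disk_bytenr == 0) inserted so the metadata
 * explicitly covers the hole.
 */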
9036c102 | 2740 | int btrfs_cont_expand(struct inode *inode, loff_t size) |
39279cc3 | 2741 | { |
9036c102 YZ |
2742 | struct btrfs_trans_handle *trans; |
2743 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
2744 | struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; | |
2745 | struct extent_map *em; | |
2746 | u64 mask = root->sectorsize - 1; | |
2747 | u64 hole_start = (inode->i_size + mask) & ~mask; | |
2748 | u64 block_end = (size + mask) & ~mask; | |
2749 | u64 last_byte; | |
2750 | u64 cur_offset; | |
2751 | u64 hole_size; | |
39279cc3 CM |
2752 | int err; |
2753 | ||
9036c102 YZ |
2754 | if (size <= hole_start) |
2755 | return 0; | |
2756 | ||
2757 | err = btrfs_check_free_space(root, 1, 0); | |
39279cc3 CM |
2758 | if (err) |
2759 | return err; | |
2760 | ||
9036c102 | 2761 | btrfs_truncate_page(inode->i_mapping, inode->i_size); |
2bf5a725 | 2762 | |
9036c102 YZ |
2763 | while (1) { |
2764 | struct btrfs_ordered_extent *ordered; | |
2765 | btrfs_wait_ordered_range(inode, hole_start, | |
2766 | block_end - hole_start); | |
2767 | lock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS); | |
2768 | ordered = btrfs_lookup_ordered_extent(inode, hole_start); | |
2769 | if (!ordered) | |
2770 | break; | |
2771 | unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS); | |
2772 | btrfs_put_ordered_extent(ordered); | |
2773 | } | |
39279cc3 | 2774 | |
9036c102 YZ |
2775 | trans = btrfs_start_transaction(root, 1); |
2776 | btrfs_set_trans_block_group(trans, inode); | |
39279cc3 | 2777 | |
9036c102 YZ |
2778 | cur_offset = hole_start; |
2779 | while (1) { | |
2780 | em = btrfs_get_extent(inode, NULL, 0, cur_offset, | |
2781 | block_end - cur_offset, 0); | |
2782 | BUG_ON(IS_ERR(em) || !em); | |
2783 | last_byte = min(extent_map_end(em), block_end); | |
2784 | last_byte = (last_byte + mask) & ~mask; | |
2785 | if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) { | |
771ed689 | 2786 | u64 hint_byte = 0; |
9036c102 | 2787 | hole_size = last_byte - cur_offset; |
771ed689 CM |
2788 | err = btrfs_drop_extents(trans, root, inode, |
2789 | cur_offset, | |
2790 | cur_offset + hole_size, | |
2791 | cur_offset, &hint_byte); | |
2792 | if (err) | |
2793 | break; | |
9036c102 YZ |
2794 | err = btrfs_insert_file_extent(trans, root, |
2795 | inode->i_ino, cur_offset, 0, | |
2796 | 0, hole_size, 0, hole_size, | |
2797 | 0, 0, 0); | |
2798 | btrfs_drop_extent_cache(inode, hole_start, | |
2799 | last_byte - 1, 0); | |
2800 | } | |
2801 | free_extent_map(em); | |
2802 | cur_offset = last_byte; | |
2803 | if (err || cur_offset >= block_end) | |
2804 | break; | |
2805 | } | |
1832a6d5 | 2806 | |
9036c102 YZ |
2807 | btrfs_end_transaction(trans, root); |
2808 | unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS); | |
2809 | return err; | |
2810 | } | |
39279cc3 | 2811 | |
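/*
 * setattr callback for the VFS.  Growing truncates go through
 * btrfs_cont_expand() first so the new hole is backed by extent items, then
 * the generic inode_setattr() does the rest and chmod updates the ACLs.
 */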
9036c102 YZ |
2812 | static int btrfs_setattr(struct dentry *dentry, struct iattr *attr) |
2813 | { | |
2814 | struct inode *inode = dentry->d_inode; | |
2815 | int err; | |
39279cc3 | 2816 | |
9036c102 YZ |
2817 | err = inode_change_ok(inode, attr); |
2818 | if (err) | |
2819 | return err; | |
2bf5a725 | 2820 | |
9036c102 YZ |
2821 | if (S_ISREG(inode->i_mode) && |
2822 | attr->ia_valid & ATTR_SIZE && attr->ia_size > inode->i_size) { | |
2823 | err = btrfs_cont_expand(inode, attr->ia_size); | |
54aa1f4d CM |
2824 | if (err) |
2825 | return err; | |
39279cc3 | 2826 | } |
9036c102 | 2827 | |
39279cc3 | 2828 | err = inode_setattr(inode, attr); |
33268eaf JB |
2829 | |
2830 | if (!err && ((attr->ia_valid & ATTR_MODE))) | |
2831 | err = btrfs_acl_chmod(inode); | |
39279cc3 CM |
2832 | return err; |
2833 | } | |
61295eb8 | 2834 | |
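/*
 * called by the VFS when the last reference to an unlinked inode is dropped.
 * All the items belonging to the inode are truncated away and the orphan
 * item that protected it is removed.
 */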
39279cc3 CM |
2835 | void btrfs_delete_inode(struct inode *inode) |
2836 | { | |
2837 | struct btrfs_trans_handle *trans; | |
2838 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
d3c2fdcf | 2839 | unsigned long nr; |
39279cc3 CM |
2840 | int ret; |
2841 | ||
2842 | truncate_inode_pages(&inode->i_data, 0); | |
2843 | if (is_bad_inode(inode)) { | |
7b128766 | 2844 | btrfs_orphan_del(NULL, inode); |
39279cc3 CM |
2845 | goto no_delete; |
2846 | } | |
4a096752 | 2847 | btrfs_wait_ordered_range(inode, 0, (u64)-1); |
5f39d397 | 2848 | |
dbe674a9 | 2849 | btrfs_i_size_write(inode, 0); |
39279cc3 | 2850 | trans = btrfs_start_transaction(root, 1); |
5f39d397 | 2851 | |
39279cc3 | 2852 | btrfs_set_trans_block_group(trans, inode); |
e02119d5 | 2853 | ret = btrfs_truncate_inode_items(trans, root, inode, inode->i_size, 0); |
7b128766 JB |
2854 | if (ret) { |
2855 | btrfs_orphan_del(NULL, inode); | |
54aa1f4d | 2856 | goto no_delete_lock; |
7b128766 JB |
2857 | } |
2858 | ||
2859 | btrfs_orphan_del(trans, inode); | |
85e21bac | 2860 | |
d3c2fdcf | 2861 | nr = trans->blocks_used; |
85e21bac | 2862 | clear_inode(inode); |
5f39d397 | 2863 | |
39279cc3 | 2864 | btrfs_end_transaction(trans, root); |
d3c2fdcf | 2865 | btrfs_btree_balance_dirty(root, nr); |
39279cc3 | 2866 | return; |
54aa1f4d CM |
2867 | |
2868 | no_delete_lock: | |
d3c2fdcf | 2869 | nr = trans->blocks_used; |
54aa1f4d | 2870 | btrfs_end_transaction(trans, root); |
d3c2fdcf | 2871 | btrfs_btree_balance_dirty(root, nr); |
39279cc3 CM |
2872 | no_delete: |
2873 | clear_inode(inode); | |
2874 | } | |
2875 | ||
2876 | /* | |
2877 | * this returns the key found in the dir entry in the location pointer. | |
2878 | * If no dir entries were found, location->objectid is 0. | |
2879 | */ | |
2880 | static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry, | |
2881 | struct btrfs_key *location) | |
2882 | { | |
2883 | const char *name = dentry->d_name.name; | |
2884 | int namelen = dentry->d_name.len; | |
2885 | struct btrfs_dir_item *di; | |
2886 | struct btrfs_path *path; | |
2887 | struct btrfs_root *root = BTRFS_I(dir)->root; | |
0d9f7f3e | 2888 | int ret = 0; |
39279cc3 CM |
2889 | |
2890 | path = btrfs_alloc_path(); | |
2891 | BUG_ON(!path); | |
3954401f | 2892 | |
39279cc3 CM |
2893 | di = btrfs_lookup_dir_item(NULL, root, path, dir->i_ino, name, |
2894 | namelen, 0); | |
0d9f7f3e Y |
2895 | if (IS_ERR(di)) |
2896 | ret = PTR_ERR(di); | |
39279cc3 | 2897 | if (!di || IS_ERR(di)) { |
3954401f | 2898 | goto out_err; |
39279cc3 | 2899 | } |
5f39d397 | 2900 | btrfs_dir_item_key_to_cpu(path->nodes[0], di, location); |
39279cc3 | 2901 | out: |
39279cc3 CM |
2902 | btrfs_free_path(path); |
2903 | return ret; | |
3954401f CM |
2904 | out_err: |
2905 | location->objectid = 0; | |
2906 | goto out; | |
39279cc3 CM |
2907 | } |
2908 | ||
2909 | /* | |
2910 | * when we hit a tree root in a directory, the btrfs part of the inode | |
2911 | * needs to be changed to reflect the root directory of the tree root. This | |
2912 | * is kind of like crossing a mount point. | |
2913 | */ | |
2914 | static int fixup_tree_root_location(struct btrfs_root *root, | |
2915 | struct btrfs_key *location, | |
58176a96 JB |
2916 | struct btrfs_root **sub_root, |
2917 | struct dentry *dentry) | |
39279cc3 | 2918 | { |
39279cc3 CM |
2919 | struct btrfs_root_item *ri; |
2920 | ||
2921 | if (btrfs_key_type(location) != BTRFS_ROOT_ITEM_KEY) | |
2922 | return 0; | |
2923 | if (location->objectid == BTRFS_ROOT_TREE_OBJECTID) | |
2924 | return 0; | |
2925 | ||
58176a96 JB |
2926 | *sub_root = btrfs_read_fs_root(root->fs_info, location, |
2927 | dentry->d_name.name, | |
2928 | dentry->d_name.len); | |
39279cc3 CM |
2929 | if (IS_ERR(*sub_root)) |
2930 | return PTR_ERR(*sub_root); | |
2931 | ||
2932 | ri = &(*sub_root)->root_item; | |
2933 | location->objectid = btrfs_root_dirid(ri); | |
39279cc3 CM |
2934 | btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY); |
2935 | location->offset = 0; | |
2936 | ||
39279cc3 CM |
2937 | return 0; |
2938 | } | |
2939 | ||
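A minimal userspace sketch of the key rewrite fixup_tree_root_location() performs, assuming simplified stand-in key types (the enum values and the dirid 256 are illustrative, not the on-disk btrfs definitions): when a directory entry resolves to a tree root, the lookup key is redirected to that subvolume's root directory inode and the search continues there.

#include <stdio.h>

/* hypothetical, simplified key: not the real struct btrfs_key */
enum key_type { INODE_ITEM, ROOT_ITEM };
struct key { unsigned long long objectid; enum key_type type; };

/* when the entry points at a tree root, continue the lookup at that
 * subvolume's root directory inode instead */
static void cross_into_subvol(struct key *location,
                              unsigned long long subvol_root_dirid)
{
        if (location->type != ROOT_ITEM)
                return;                      /* ordinary inode, nothing to do */
        location->objectid = subvol_root_dirid;
        location->type = INODE_ITEM;
}

int main(void)
{
        struct key loc = { 5, ROOT_ITEM };   /* entry naming a subvolume */
        cross_into_subvol(&loc, 256);        /* 256: illustrative dirid */
        printf("objectid %llu, type %d\n", loc.objectid, (int)loc.type);
        return 0;
}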
e02119d5 | 2940 | static noinline void init_btrfs_i(struct inode *inode) |
39279cc3 | 2941 | { |
e02119d5 CM |
2942 | struct btrfs_inode *bi = BTRFS_I(inode); |
2943 | ||
2944 | bi->i_acl = NULL; | |
2945 | bi->i_default_acl = NULL; | |
2946 | ||
2947 | bi->generation = 0; | |
2948 | bi->last_trans = 0; | |
2949 | bi->logged_trans = 0; | |
2950 | bi->delalloc_bytes = 0; | |
2951 | bi->disk_i_size = 0; | |
2952 | bi->flags = 0; | |
2953 | bi->index_cnt = (u64)-1; | |
49eb7e46 | 2954 | bi->log_dirty_trans = 0; |
d1310b2e CM |
2955 | extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS); |
2956 | extent_io_tree_init(&BTRFS_I(inode)->io_tree, | |
b888db2b | 2957 | inode->i_mapping, GFP_NOFS); |
7e38326f CM |
2958 | extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree, |
2959 | inode->i_mapping, GFP_NOFS); | |
ea8c2819 | 2960 | INIT_LIST_HEAD(&BTRFS_I(inode)->delalloc_inodes); |
ba1da2f4 | 2961 | btrfs_ordered_inode_tree_init(&BTRFS_I(inode)->ordered_tree); |
1b1e2135 | 2962 | mutex_init(&BTRFS_I(inode)->csum_mutex); |
ee6e6504 | 2963 | mutex_init(&BTRFS_I(inode)->extent_mutex); |
e02119d5 CM |
2964 | mutex_init(&BTRFS_I(inode)->log_mutex); |
2965 | } | |
2966 | ||
2967 | static int btrfs_init_locked_inode(struct inode *inode, void *p) | |
2968 | { | |
2969 | struct btrfs_iget_args *args = p; | |
2970 | inode->i_ino = args->ino; | |
2971 | init_btrfs_i(inode); | |
2972 | BTRFS_I(inode)->root = args->root; | |
39279cc3 CM |
2973 | return 0; |
2974 | } | |
2975 | ||
2976 | static int btrfs_find_actor(struct inode *inode, void *opaque) | |
2977 | { | |
2978 | struct btrfs_iget_args *args = opaque; | |
2979 | return (args->ino == inode->i_ino && | |
2980 | args->root == BTRFS_I(inode)->root); | |
2981 | } | |
2982 | ||
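A small sketch of the identity rule btrfs_find_actor() expresses, with hypothetical stand-in types: inode numbers are only unique within one subvolume, so the inode cache has to match on the (objectid, root) pair rather than on the number alone.

#include <stdio.h>

/* toy stand-ins for struct btrfs_root and the cached inode */
struct toy_root  { int id; };
struct toy_inode { unsigned long long ino; struct toy_root *root; };

/* same comparison as btrfs_find_actor(): both fields must match */
static int matches(const struct toy_inode *inode,
                   unsigned long long ino, const struct toy_root *root)
{
        return inode->ino == ino && inode->root == root;
}

int main(void)
{
        struct toy_root a = { 1 }, b = { 2 };
        struct toy_inode in = { 256, &a };

        /* same number, different subvolume root: only the first matches */
        printf("%d %d\n", matches(&in, 256, &a), matches(&in, 256, &b));
        return 0;
}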
5b21f2ed ZY |
2983 | struct inode *btrfs_ilookup(struct super_block *s, u64 objectid, |
2984 | struct btrfs_root *root, int wait) | |
2985 | { | |
2986 | struct inode *inode; | |
2987 | struct btrfs_iget_args args; | |
2988 | args.ino = objectid; | |
2989 | args.root = root; | |
2990 | ||
2991 | if (wait) { | |
2992 | inode = ilookup5(s, objectid, btrfs_find_actor, | |
2993 | (void *)&args); | |
2994 | } else { | |
2995 | inode = ilookup5_nowait(s, objectid, btrfs_find_actor, | |
2996 | (void *)&args); | |
2997 | } | |
2998 | return inode; | |
2999 | } | |
3000 | ||
39279cc3 CM |
3001 | struct inode *btrfs_iget_locked(struct super_block *s, u64 objectid, |
3002 | struct btrfs_root *root) | |
3003 | { | |
3004 | struct inode *inode; | |
3005 | struct btrfs_iget_args args; | |
3006 | args.ino = objectid; | |
3007 | args.root = root; | |
3008 | ||
3009 | inode = iget5_locked(s, objectid, btrfs_find_actor, | |
3010 | btrfs_init_locked_inode, | |
3011 | (void *)&args); | |
3012 | return inode; | |
3013 | } | |
3014 | ||
1a54ef8c BR |
3015 | /* Get an inode object given its location and corresponding root. |
3016 | * Returns in *is_new if the inode was read from disk | |
3017 | */ | |
3018 | struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, | |
3019 | struct btrfs_root *root, int *is_new) | |
3020 | { | |
3021 | struct inode *inode; | |
3022 | ||
3023 | inode = btrfs_iget_locked(s, location->objectid, root); | |
3024 | if (!inode) | |
3025 | return ERR_PTR(-EACCES); | |
3026 | ||
3027 | if (inode->i_state & I_NEW) { | |
3028 | BTRFS_I(inode)->root = root; | |
3029 | memcpy(&BTRFS_I(inode)->location, location, sizeof(*location)); | |
3030 | btrfs_read_locked_inode(inode); | |
3031 | unlock_new_inode(inode); | |
3032 | if (is_new) | |
3033 | *is_new = 1; | |
3034 | } else { | |
3035 | if (is_new) | |
3036 | *is_new = 0; | |
3037 | } | |
3038 | ||
3039 | return inode; | |
3040 | } | |
3041 | ||
39279cc3 CM |
3042 | static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry, |
3043 | struct nameidata *nd) | |
3044 | { | |
3045 | struct inode * inode; | |
3046 | struct btrfs_inode *bi = BTRFS_I(dir); | |
3047 | struct btrfs_root *root = bi->root; | |
3048 | struct btrfs_root *sub_root = root; | |
3049 | struct btrfs_key location; | |
1a54ef8c | 3050 | int ret, new, do_orphan = 0; |
39279cc3 CM |
3051 | |
3052 | if (dentry->d_name.len > BTRFS_NAME_LEN) | |
3053 | return ERR_PTR(-ENAMETOOLONG); | |
5f39d397 | 3054 | |
39279cc3 | 3055 | ret = btrfs_inode_by_name(dir, dentry, &location); |
5f39d397 | 3056 | |
39279cc3 CM |
3057 | if (ret < 0) |
3058 | return ERR_PTR(ret); | |
5f39d397 | 3059 | |
39279cc3 CM |
3060 | inode = NULL; |
3061 | if (location.objectid) { | |
58176a96 JB |
3062 | ret = fixup_tree_root_location(root, &location, &sub_root, |
3063 | dentry); | |
39279cc3 CM |
3064 | if (ret < 0) |
3065 | return ERR_PTR(ret); | |
3066 | if (ret > 0) | |
3067 | return ERR_PTR(-ENOENT); | |
1a54ef8c BR |
3068 | inode = btrfs_iget(dir->i_sb, &location, sub_root, &new); |
3069 | if (IS_ERR(inode)) | |
3070 | return ERR_CAST(inode); | |
3071 | ||
3072 | /* the inode and parent dir are in two different roots */ | |
3073 | if (new && root != sub_root) { | |
3074 | igrab(inode); | |
3075 | sub_root->inode = inode; | |
3076 | do_orphan = 1; | |
39279cc3 CM |
3077 | } |
3078 | } | |
7b128766 JB |
3079 | |
3080 | if (unlikely(do_orphan)) | |
3081 | btrfs_orphan_cleanup(sub_root); | |
3082 | ||
39279cc3 CM |
3083 | return d_splice_alias(inode, dentry); |
3084 | } | |
3085 | ||
39279cc3 CM |
3086 | static unsigned char btrfs_filetype_table[] = { |
3087 | DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK | |
3088 | }; | |
3089 | ||
cbdf5a24 DW |
3090 | static int btrfs_real_readdir(struct file *filp, void *dirent, |
3091 | filldir_t filldir) | |
39279cc3 | 3092 | { |
6da6abae | 3093 | struct inode *inode = filp->f_dentry->d_inode; |
39279cc3 CM |
3094 | struct btrfs_root *root = BTRFS_I(inode)->root; |
3095 | struct btrfs_item *item; | |
3096 | struct btrfs_dir_item *di; | |
3097 | struct btrfs_key key; | |
5f39d397 | 3098 | struct btrfs_key found_key; |
39279cc3 CM |
3099 | struct btrfs_path *path; |
3100 | int ret; | |
3101 | u32 nritems; | |
5f39d397 | 3102 | struct extent_buffer *leaf; |
39279cc3 CM |
3103 | int slot; |
3104 | int advance; | |
3105 | unsigned char d_type; | |
3106 | int over = 0; | |
3107 | u32 di_cur; | |
3108 | u32 di_total; | |
3109 | u32 di_len; | |
3110 | int key_type = BTRFS_DIR_INDEX_KEY; | |
5f39d397 CM |
3111 | char tmp_name[32]; |
3112 | char *name_ptr; | |
3113 | int name_len; | |
39279cc3 CM |
3114 | |
3115 | /* FIXME, use a real flag for deciding about the key type */ | |
3116 | if (root->fs_info->tree_root == root) | |
3117 | key_type = BTRFS_DIR_ITEM_KEY; | |
5f39d397 | 3118 | |
3954401f CM |
3119 | /* special case for "." */ |
3120 | if (filp->f_pos == 0) { | |
3121 | over = filldir(dirent, ".", 1, | |
3122 | 1, inode->i_ino, | |
3123 | DT_DIR); | |
3124 | if (over) | |
3125 | return 0; | |
3126 | filp->f_pos = 1; | |
3127 | } | |
3954401f CM |
3128 | /* special case for .., just use the back ref */ |
3129 | if (filp->f_pos == 1) { | |
5ecc7e5d | 3130 | u64 pino = parent_ino(filp->f_path.dentry); |
3954401f | 3131 | over = filldir(dirent, "..", 2, |
5ecc7e5d | 3132 | 2, pino, DT_DIR); |
3954401f | 3133 | if (over) |
49593bfa | 3134 | return 0; |
3954401f CM |
3135 | filp->f_pos = 2; |
3136 | } | |
3137 | ||
49593bfa DW |
3138 | path = btrfs_alloc_path(); |
3139 | path->reada = 2; | |
3140 | ||
39279cc3 CM |
3141 | btrfs_set_key_type(&key, key_type); |
3142 | key.offset = filp->f_pos; | |
49593bfa | 3143 | key.objectid = inode->i_ino; |
5f39d397 | 3144 | |
39279cc3 CM |
3145 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); |
3146 | if (ret < 0) | |
3147 | goto err; | |
3148 | advance = 0; | |
49593bfa DW |
3149 | |
3150 | while (1) { | |
5f39d397 CM |
3151 | leaf = path->nodes[0]; |
3152 | nritems = btrfs_header_nritems(leaf); | |
39279cc3 CM |
3153 | slot = path->slots[0]; |
3154 | if (advance || slot >= nritems) { | |
49593bfa | 3155 | if (slot >= nritems - 1) { |
39279cc3 CM |
3156 | ret = btrfs_next_leaf(root, path); |
3157 | if (ret) | |
3158 | break; | |
5f39d397 CM |
3159 | leaf = path->nodes[0]; |
3160 | nritems = btrfs_header_nritems(leaf); | |
39279cc3 CM |
3161 | slot = path->slots[0]; |
3162 | } else { | |
3163 | slot++; | |
3164 | path->slots[0]++; | |
3165 | } | |
3166 | } | |
3167 | advance = 1; | |
5f39d397 CM |
3168 | item = btrfs_item_nr(leaf, slot); |
3169 | btrfs_item_key_to_cpu(leaf, &found_key, slot); | |
3170 | ||
3171 | if (found_key.objectid != key.objectid) | |
39279cc3 | 3172 | break; |
5f39d397 | 3173 | if (btrfs_key_type(&found_key) != key_type) |
39279cc3 | 3174 | break; |
5f39d397 | 3175 | if (found_key.offset < filp->f_pos) |
39279cc3 | 3176 | continue; |
5f39d397 CM |
3177 | |
3178 | filp->f_pos = found_key.offset; | |
49593bfa | 3179 | |
39279cc3 CM |
3180 | di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item); |
3181 | di_cur = 0; | |
5f39d397 | 3182 | di_total = btrfs_item_size(leaf, item); |
49593bfa DW |
3183 | |
3184 | while (di_cur < di_total) { | |
5f39d397 CM |
3185 | struct btrfs_key location; |
3186 | ||
3187 | name_len = btrfs_dir_name_len(leaf, di); | |
49593bfa | 3188 | if (name_len <= sizeof(tmp_name)) { |
5f39d397 CM |
3189 | name_ptr = tmp_name; |
3190 | } else { | |
3191 | name_ptr = kmalloc(name_len, GFP_NOFS); | |
49593bfa DW |
3192 | if (!name_ptr) { |
3193 | ret = -ENOMEM; | |
3194 | goto err; | |
3195 | } | |
5f39d397 CM |
3196 | } |
3197 | read_extent_buffer(leaf, name_ptr, | |
3198 | (unsigned long)(di + 1), name_len); | |
3199 | ||
3200 | d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)]; | |
3201 | btrfs_dir_item_key_to_cpu(leaf, di, &location); | |
5f39d397 | 3202 | over = filldir(dirent, name_ptr, name_len, |
49593bfa | 3203 | found_key.offset, location.objectid, |
39279cc3 | 3204 | d_type); |
5f39d397 CM |
3205 | |
3206 | if (name_ptr != tmp_name) | |
3207 | kfree(name_ptr); | |
3208 | ||
39279cc3 CM |
3209 | if (over) |
3210 | goto nopos; | |
49593bfa | 3211 | |
5103e947 | 3212 | di_len = btrfs_dir_name_len(leaf, di) + |
49593bfa | 3213 | btrfs_dir_data_len(leaf, di) + sizeof(*di); |
39279cc3 CM |
3214 | di_cur += di_len; |
3215 | di = (struct btrfs_dir_item *)((char *)di + di_len); | |
3216 | } | |
3217 | } | |
49593bfa DW |
3218 | |
3219 | /* Reached end of directory/root. Bump pos past the last item. */ | |
5e591a07 YZ |
3220 | if (key_type == BTRFS_DIR_INDEX_KEY) |
3221 | filp->f_pos = INT_LIMIT(typeof(filp->f_pos)); | |
3222 | else | |
3223 | filp->f_pos++; | |
39279cc3 CM |
3224 | nopos: |
3225 | ret = 0; | |
3226 | err: | |
39279cc3 | 3227 | btrfs_free_path(path); |
39279cc3 CM |
3228 | return ret; |
3229 | } | |
3230 | ||
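The inner loop of btrfs_real_readdir() above steps through several dir items packed back to back inside a single leaf item, advancing by name_len + data_len + sizeof(*di) each time. A self-contained sketch of that walk over a hypothetical record layout (much simpler than struct btrfs_dir_item):

#include <stdio.h>
#include <string.h>

/* hypothetical packed record header, followed by name_len bytes of name
 * and data_len bytes of payload */
struct rec { unsigned short name_len, data_len; };

int main(void)
{
        unsigned char buf[64];
        const char *names[] = { "foo", "bar", "baz" };
        unsigned int total = 0, cur = 0;

        /* pack three records back to back */
        for (int i = 0; i < 3; i++) {
                struct rec r = { (unsigned short)strlen(names[i]), 0 };
                memcpy(buf + total, &r, sizeof(r));
                memcpy(buf + total + sizeof(r), names[i], r.name_len);
                total += sizeof(r) + r.name_len + r.data_len;
        }

        /* walk them the same way the readdir loop walks dir items */
        while (cur < total) {
                struct rec r;
                memcpy(&r, buf + cur, sizeof(r));
                printf("%.*s\n", r.name_len, (char *)(buf + cur + sizeof(r)));
                cur += sizeof(r) + r.name_len + r.data_len;
        }
        return 0;
}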
3231 | int btrfs_write_inode(struct inode *inode, int wait) | |
3232 | { | |
3233 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
3234 | struct btrfs_trans_handle *trans; | |
3235 | int ret = 0; | |
3236 | ||
4ca8b41e CM |
3237 | if (root->fs_info->closing > 1) |
3238 | return 0; | |
3239 | ||
39279cc3 | 3240 | if (wait) { |
f9295749 | 3241 | trans = btrfs_join_transaction(root, 1); |
39279cc3 CM |
3242 | btrfs_set_trans_block_group(trans, inode); |
3243 | ret = btrfs_commit_transaction(trans, root); | |
39279cc3 CM |
3244 | } |
3245 | return ret; | |
3246 | } | |
3247 | ||
3248 | /* | |
54aa1f4d | 3249 | * This is somewhat expensive, updating the tree every time the |
39279cc3 CM |
3250 | * inode changes. But it is most likely to find the inode in cache. |
3251 | * FIXME: needs more benchmarking; there are no reasons other than performance | |
3252 | * to keep or drop this code. | |
3253 | */ | |
3254 | void btrfs_dirty_inode(struct inode *inode) | |
3255 | { | |
3256 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
3257 | struct btrfs_trans_handle *trans; | |
3258 | ||
f9295749 | 3259 | trans = btrfs_join_transaction(root, 1); |
39279cc3 CM |
3260 | btrfs_set_trans_block_group(trans, inode); |
3261 | btrfs_update_inode(trans, root, inode); | |
3262 | btrfs_end_transaction(trans, root); | |
39279cc3 CM |
3263 | } |
3264 | ||
d352ac68 CM |
3265 | /* |
3266 | * find the highest existing sequence number in a directory | |
3267 | * and then set the in-memory index_cnt variable to reflect | |
3268 | * free sequence numbers | |
3269 | */ | |
aec7477b JB |
3270 | static int btrfs_set_inode_index_count(struct inode *inode) |
3271 | { | |
3272 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
3273 | struct btrfs_key key, found_key; | |
3274 | struct btrfs_path *path; | |
3275 | struct extent_buffer *leaf; | |
3276 | int ret; | |
3277 | ||
3278 | key.objectid = inode->i_ino; | |
3279 | btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY); | |
3280 | key.offset = (u64)-1; | |
3281 | ||
3282 | path = btrfs_alloc_path(); | |
3283 | if (!path) | |
3284 | return -ENOMEM; | |
3285 | ||
3286 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | |
3287 | if (ret < 0) | |
3288 | goto out; | |
3289 | /* FIXME: we should be able to handle this */ | |
3290 | if (ret == 0) | |
3291 | goto out; | |
3292 | ret = 0; | |
3293 | ||
3294 | /* | |
3295 | * MAGIC NUMBER EXPLANATION: | |
3296 | * since we search a directory based on f_pos, we have to start at 2: | |
3297 | * '.' and '..' have f_pos of 0 and 1 respectively, so everybody | |
3298 | * else has to start at 2 | |
3299 | */ | |
3300 | if (path->slots[0] == 0) { | |
3301 | BTRFS_I(inode)->index_cnt = 2; | |
3302 | goto out; | |
3303 | } | |
3304 | ||
3305 | path->slots[0]--; | |
3306 | ||
3307 | leaf = path->nodes[0]; | |
3308 | btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); | |
3309 | ||
3310 | if (found_key.objectid != inode->i_ino || | |
3311 | btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) { | |
3312 | BTRFS_I(inode)->index_cnt = 2; | |
3313 | goto out; | |
3314 | } | |
3315 | ||
3316 | BTRFS_I(inode)->index_cnt = found_key.offset + 1; | |
3317 | out: | |
3318 | btrfs_free_path(path); | |
3319 | return ret; | |
3320 | } | |
3321 | ||
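A toy illustration of the numbering convention the MAGIC NUMBER comment above describes, using a hypothetical stand-in for the per-inode state: f_pos 0 and 1 belong to '.' and '..', so real directory entries are handed indexes starting at 2.

#include <stdio.h>

/* stand-in for the in-memory directory state, not struct btrfs_inode */
struct dir_state { unsigned long long index_cnt; };

/* hand out the next free sequence number; 0 and 1 are reserved for
 * "." and "..", so counting starts at 2 */
static unsigned long long next_index(struct dir_state *d)
{
        if (d->index_cnt < 2)
                d->index_cnt = 2;
        return d->index_cnt++;
}

int main(void)
{
        struct dir_state d = { 0 };

        for (int i = 0; i < 3; i++)
                printf("entry %d gets index %llu\n", i, next_index(&d));
        return 0;
}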
d352ac68 CM |
3322 | /* |
3323 | * helper to find a free sequence number in a given directory. The current | |
3324 | * code is very simple; later versions will do smarter things in the btree | |
3325 | */ | |
00e4e6b3 CM |
3326 | static int btrfs_set_inode_index(struct inode *dir, struct inode *inode, |
3327 | u64 *index) | |
aec7477b JB |
3328 | { |
3329 | int ret = 0; | |
3330 | ||
3331 | if (BTRFS_I(dir)->index_cnt == (u64)-1) { | |
3332 | ret = btrfs_set_inode_index_count(dir); | |
8d5bf1cb | 3333 | if (ret) { |
aec7477b | 3334 | return ret; |
8d5bf1cb | 3335 | } |
aec7477b JB |
3336 | } |
3337 | ||
00e4e6b3 | 3338 | *index = BTRFS_I(dir)->index_cnt; |
aec7477b JB |
3339 | BTRFS_I(dir)->index_cnt++; |
3340 | ||
3341 | return ret; | |
3342 | } | |
3343 | ||
39279cc3 CM |
3344 | static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, |
3345 | struct btrfs_root *root, | |
aec7477b | 3346 | struct inode *dir, |
9c58309d CM |
3347 | const char *name, int name_len, |
3348 | u64 ref_objectid, | |
39279cc3 CM |
3349 | u64 objectid, |
3350 | struct btrfs_block_group_cache *group, | |
00e4e6b3 | 3351 | int mode, u64 *index) |
39279cc3 CM |
3352 | { |
3353 | struct inode *inode; | |
5f39d397 | 3354 | struct btrfs_inode_item *inode_item; |
6324fbf3 | 3355 | struct btrfs_block_group_cache *new_inode_group; |
39279cc3 | 3356 | struct btrfs_key *location; |
5f39d397 | 3357 | struct btrfs_path *path; |
9c58309d CM |
3358 | struct btrfs_inode_ref *ref; |
3359 | struct btrfs_key key[2]; | |
3360 | u32 sizes[2]; | |
3361 | unsigned long ptr; | |
39279cc3 CM |
3362 | int ret; |
3363 | int owner; | |
3364 | ||
5f39d397 CM |
3365 | path = btrfs_alloc_path(); |
3366 | BUG_ON(!path); | |
3367 | ||
39279cc3 CM |
3368 | inode = new_inode(root->fs_info->sb); |
3369 | if (!inode) | |
3370 | return ERR_PTR(-ENOMEM); | |
3371 | ||
aec7477b | 3372 | if (dir) { |
00e4e6b3 | 3373 | ret = btrfs_set_inode_index(dir, inode, index); |
aec7477b JB |
3374 | if (ret) |
3375 | return ERR_PTR(ret); | |
aec7477b JB |
3376 | } |
3377 | /* | |
3378 | * index_cnt is ignored for everything but a dir, | |
3379 | * btrfs_set_inode_index_count has an explanation for the magic | |
3380 | * number | |
3381 | */ | |
e02119d5 | 3382 | init_btrfs_i(inode); |
aec7477b | 3383 | BTRFS_I(inode)->index_cnt = 2; |
39279cc3 | 3384 | BTRFS_I(inode)->root = root; |
e02119d5 | 3385 | BTRFS_I(inode)->generation = trans->transid; |
b888db2b | 3386 | |
39279cc3 CM |
3387 | if (mode & S_IFDIR) |
3388 | owner = 0; | |
3389 | else | |
3390 | owner = 1; | |
6324fbf3 | 3391 | new_inode_group = btrfs_find_block_group(root, group, 0, |
0b86a832 | 3392 | BTRFS_BLOCK_GROUP_METADATA, owner); |
6324fbf3 CM |
3393 | if (!new_inode_group) { |
3394 | printk("find_block group failed\n"); | |
3395 | new_inode_group = group; | |
3396 | } | |
3397 | BTRFS_I(inode)->block_group = new_inode_group; | |
9c58309d CM |
3398 | |
3399 | key[0].objectid = objectid; | |
3400 | btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY); | |
3401 | key[0].offset = 0; | |
3402 | ||
3403 | key[1].objectid = objectid; | |
3404 | btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY); | |
3405 | key[1].offset = ref_objectid; | |
3406 | ||
3407 | sizes[0] = sizeof(struct btrfs_inode_item); | |
3408 | sizes[1] = name_len + sizeof(*ref); | |
3409 | ||
3410 | ret = btrfs_insert_empty_items(trans, root, path, key, sizes, 2); | |
3411 | if (ret != 0) | |
5f39d397 CM |
3412 | goto fail; |
3413 | ||
9c58309d CM |
3414 | if (objectid > root->highest_inode) |
3415 | root->highest_inode = objectid; | |
3416 | ||
39279cc3 CM |
3417 | inode->i_uid = current->fsuid; |
3418 | inode->i_gid = current->fsgid; | |
3419 | inode->i_mode = mode; | |
3420 | inode->i_ino = objectid; | |
a76a3cd4 | 3421 | inode_set_bytes(inode, 0); |
39279cc3 | 3422 | inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; |
5f39d397 CM |
3423 | inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0], |
3424 | struct btrfs_inode_item); | |
e02119d5 | 3425 | fill_inode_item(trans, path->nodes[0], inode_item, inode); |
9c58309d CM |
3426 | |
3427 | ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1, | |
3428 | struct btrfs_inode_ref); | |
3429 | btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len); | |
00e4e6b3 | 3430 | btrfs_set_inode_ref_index(path->nodes[0], ref, *index); |
9c58309d CM |
3431 | ptr = (unsigned long)(ref + 1); |
3432 | write_extent_buffer(path->nodes[0], name, ptr, name_len); | |
3433 | ||
5f39d397 CM |
3434 | btrfs_mark_buffer_dirty(path->nodes[0]); |
3435 | btrfs_free_path(path); | |
3436 | ||
39279cc3 CM |
3437 | location = &BTRFS_I(inode)->location; |
3438 | location->objectid = objectid; | |
39279cc3 CM |
3439 | location->offset = 0; |
3440 | btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY); | |
3441 | ||
39279cc3 CM |
3442 | insert_inode_hash(inode); |
3443 | return inode; | |
5f39d397 | 3444 | fail: |
aec7477b JB |
3445 | if (dir) |
3446 | BTRFS_I(dir)->index_cnt--; | |
5f39d397 CM |
3447 | btrfs_free_path(path); |
3448 | return ERR_PTR(ret); | |
39279cc3 CM |
3449 | } |
3450 | ||
3451 | static inline u8 btrfs_inode_type(struct inode *inode) | |
3452 | { | |
3453 | return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT]; | |
3454 | } | |
3455 | ||
d352ac68 CM |
3456 | /* |
3457 | * utility function to add 'inode' into 'parent_inode' with | |
3458 | * a given name and a given sequence number. | |
3459 | * if 'add_backref' is true, also insert a backref from the | |
3460 | * inode to the parent directory. | |
3461 | */ | |
e02119d5 CM |
3462 | int btrfs_add_link(struct btrfs_trans_handle *trans, |
3463 | struct inode *parent_inode, struct inode *inode, | |
3464 | const char *name, int name_len, int add_backref, u64 index) | |
39279cc3 CM |
3465 | { |
3466 | int ret; | |
3467 | struct btrfs_key key; | |
e02119d5 | 3468 | struct btrfs_root *root = BTRFS_I(parent_inode)->root; |
5f39d397 | 3469 | |
39279cc3 | 3470 | key.objectid = inode->i_ino; |
39279cc3 CM |
3471 | btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY); |
3472 | key.offset = 0; | |
3473 | ||
e02119d5 CM |
3474 | ret = btrfs_insert_dir_item(trans, root, name, name_len, |
3475 | parent_inode->i_ino, | |
aec7477b | 3476 | &key, btrfs_inode_type(inode), |
00e4e6b3 | 3477 | index); |
39279cc3 | 3478 | if (ret == 0) { |
9c58309d CM |
3479 | if (add_backref) { |
3480 | ret = btrfs_insert_inode_ref(trans, root, | |
e02119d5 CM |
3481 | name, name_len, |
3482 | inode->i_ino, | |
3483 | parent_inode->i_ino, | |
3484 | index); | |
9c58309d | 3485 | } |
dbe674a9 | 3486 | btrfs_i_size_write(parent_inode, parent_inode->i_size + |
e02119d5 | 3487 | name_len * 2); |
79c44584 | 3488 | parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME; |
e02119d5 | 3489 | ret = btrfs_update_inode(trans, root, parent_inode); |
39279cc3 CM |
3490 | } |
3491 | return ret; | |
3492 | } | |
3493 | ||
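The i_size update in btrfs_add_link() above grows the directory by name_len * 2; the factor of two matches the two records kept per name (the dir item keyed by name hash and the dir index keyed by sequence number). A toy model of that accounting, with a stand-in struct:

#include <stdio.h>
#include <string.h>

/* stand-in for the parent directory inode */
struct toy_dir { unsigned long long i_size; };

/* each new link adds its name length twice to the directory size */
static void add_link(struct toy_dir *dir, const char *name)
{
        dir->i_size += 2 * strlen(name);
}

int main(void)
{
        struct toy_dir d = { 0 };

        add_link(&d, "hello");       /* +10 */
        add_link(&d, "world.txt");   /* +18 */
        printf("dir i_size = %llu\n", d.i_size);   /* 28 */
        return 0;
}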
3494 | static int btrfs_add_nondir(struct btrfs_trans_handle *trans, | |
9c58309d | 3495 | struct dentry *dentry, struct inode *inode, |
00e4e6b3 | 3496 | int backref, u64 index) |
39279cc3 | 3497 | { |
e02119d5 CM |
3498 | int err = btrfs_add_link(trans, dentry->d_parent->d_inode, |
3499 | inode, dentry->d_name.name, | |
3500 | dentry->d_name.len, backref, index); | |
39279cc3 CM |
3501 | if (!err) { |
3502 | d_instantiate(dentry, inode); | |
3503 | return 0; | |
3504 | } | |
3505 | if (err > 0) | |
3506 | err = -EEXIST; | |
3507 | return err; | |
3508 | } | |
3509 | ||
618e21d5 JB |
3510 | static int btrfs_mknod(struct inode *dir, struct dentry *dentry, |
3511 | int mode, dev_t rdev) | |
3512 | { | |
3513 | struct btrfs_trans_handle *trans; | |
3514 | struct btrfs_root *root = BTRFS_I(dir)->root; | |
1832a6d5 | 3515 | struct inode *inode = NULL; |
618e21d5 JB |
3516 | int err; |
3517 | int drop_inode = 0; | |
3518 | u64 objectid; | |
1832a6d5 | 3519 | unsigned long nr = 0; |
00e4e6b3 | 3520 | u64 index = 0; |
618e21d5 JB |
3521 | |
3522 | if (!new_valid_dev(rdev)) | |
3523 | return -EINVAL; | |
3524 | ||
1832a6d5 CM |
3525 | err = btrfs_check_free_space(root, 1, 0); |
3526 | if (err) | |
3527 | goto fail; | |
3528 | ||
618e21d5 JB |
3529 | trans = btrfs_start_transaction(root, 1); |
3530 | btrfs_set_trans_block_group(trans, dir); | |
3531 | ||
3532 | err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid); | |
3533 | if (err) { | |
3534 | err = -ENOSPC; | |
3535 | goto out_unlock; | |
3536 | } | |
3537 | ||
aec7477b | 3538 | inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, |
9c58309d CM |
3539 | dentry->d_name.len, |
3540 | dentry->d_parent->d_inode->i_ino, objectid, | |
00e4e6b3 | 3541 | BTRFS_I(dir)->block_group, mode, &index); |
618e21d5 JB |
3542 | err = PTR_ERR(inode); |
3543 | if (IS_ERR(inode)) | |
3544 | goto out_unlock; | |
3545 | ||
33268eaf JB |
3546 | err = btrfs_init_acl(inode, dir); |
3547 | if (err) { | |
3548 | drop_inode = 1; | |
3549 | goto out_unlock; | |
3550 | } | |
3551 | ||
618e21d5 | 3552 | btrfs_set_trans_block_group(trans, inode); |
00e4e6b3 | 3553 | err = btrfs_add_nondir(trans, dentry, inode, 0, index); |
618e21d5 JB |
3554 | if (err) |
3555 | drop_inode = 1; | |
3556 | else { | |
3557 | inode->i_op = &btrfs_special_inode_operations; | |
3558 | init_special_inode(inode, inode->i_mode, rdev); | |
1b4ab1bb | 3559 | btrfs_update_inode(trans, root, inode); |
618e21d5 JB |
3560 | } |
3561 | dir->i_sb->s_dirt = 1; | |
3562 | btrfs_update_inode_block_group(trans, inode); | |
3563 | btrfs_update_inode_block_group(trans, dir); | |
3564 | out_unlock: | |
d3c2fdcf | 3565 | nr = trans->blocks_used; |
89ce8a63 | 3566 | btrfs_end_transaction_throttle(trans, root); |
1832a6d5 | 3567 | fail: |
618e21d5 JB |
3568 | if (drop_inode) { |
3569 | inode_dec_link_count(inode); | |
3570 | iput(inode); | |
3571 | } | |
d3c2fdcf | 3572 | btrfs_btree_balance_dirty(root, nr); |
618e21d5 JB |
3573 | return err; |
3574 | } | |
3575 | ||
39279cc3 CM |
3576 | static int btrfs_create(struct inode *dir, struct dentry *dentry, |
3577 | int mode, struct nameidata *nd) | |
3578 | { | |
3579 | struct btrfs_trans_handle *trans; | |
3580 | struct btrfs_root *root = BTRFS_I(dir)->root; | |
1832a6d5 | 3581 | struct inode *inode = NULL; |
39279cc3 CM |
3582 | int err; |
3583 | int drop_inode = 0; | |
1832a6d5 | 3584 | unsigned long nr = 0; |
39279cc3 | 3585 | u64 objectid; |
00e4e6b3 | 3586 | u64 index = 0; |
39279cc3 | 3587 | |
1832a6d5 CM |
3588 | err = btrfs_check_free_space(root, 1, 0); |
3589 | if (err) | |
3590 | goto fail; | |
39279cc3 CM |
3591 | trans = btrfs_start_transaction(root, 1); |
3592 | btrfs_set_trans_block_group(trans, dir); | |
3593 | ||
3594 | err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid); | |
3595 | if (err) { | |
3596 | err = -ENOSPC; | |
3597 | goto out_unlock; | |
3598 | } | |
3599 | ||
aec7477b | 3600 | inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, |
9c58309d CM |
3601 | dentry->d_name.len, |
3602 | dentry->d_parent->d_inode->i_ino, | |
00e4e6b3 CM |
3603 | objectid, BTRFS_I(dir)->block_group, mode, |
3604 | &index); | |
39279cc3 CM |
3605 | err = PTR_ERR(inode); |
3606 | if (IS_ERR(inode)) | |
3607 | goto out_unlock; | |
3608 | ||
33268eaf JB |
3609 | err = btrfs_init_acl(inode, dir); |
3610 | if (err) { | |
3611 | drop_inode = 1; | |
3612 | goto out_unlock; | |
3613 | } | |
3614 | ||
39279cc3 | 3615 | btrfs_set_trans_block_group(trans, inode); |
00e4e6b3 | 3616 | err = btrfs_add_nondir(trans, dentry, inode, 0, index); |
39279cc3 CM |
3617 | if (err) |
3618 | drop_inode = 1; | |
3619 | else { | |
3620 | inode->i_mapping->a_ops = &btrfs_aops; | |
04160088 | 3621 | inode->i_mapping->backing_dev_info = &root->fs_info->bdi; |
39279cc3 CM |
3622 | inode->i_fop = &btrfs_file_operations; |
3623 | inode->i_op = &btrfs_file_inode_operations; | |
d1310b2e | 3624 | BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; |
39279cc3 CM |
3625 | } |
3626 | dir->i_sb->s_dirt = 1; | |
3627 | btrfs_update_inode_block_group(trans, inode); | |
3628 | btrfs_update_inode_block_group(trans, dir); | |
3629 | out_unlock: | |
d3c2fdcf | 3630 | nr = trans->blocks_used; |
ab78c84d | 3631 | btrfs_end_transaction_throttle(trans, root); |
1832a6d5 | 3632 | fail: |
39279cc3 CM |
3633 | if (drop_inode) { |
3634 | inode_dec_link_count(inode); | |
3635 | iput(inode); | |
3636 | } | |
d3c2fdcf | 3637 | btrfs_btree_balance_dirty(root, nr); |
39279cc3 CM |
3638 | return err; |
3639 | } | |
3640 | ||
3641 | static int btrfs_link(struct dentry *old_dentry, struct inode *dir, | |
3642 | struct dentry *dentry) | |
3643 | { | |
3644 | struct btrfs_trans_handle *trans; | |
3645 | struct btrfs_root *root = BTRFS_I(dir)->root; | |
3646 | struct inode *inode = old_dentry->d_inode; | |
00e4e6b3 | 3647 | u64 index; |
1832a6d5 | 3648 | unsigned long nr = 0; |
39279cc3 CM |
3649 | int err; |
3650 | int drop_inode = 0; | |
3651 | ||
3652 | if (inode->i_nlink == 0) | |
3653 | return -ENOENT; | |
3654 | ||
e02119d5 | 3655 | btrfs_inc_nlink(inode); |
1832a6d5 CM |
3656 | err = btrfs_check_free_space(root, 1, 0); |
3657 | if (err) | |
3658 | goto fail; | |
00e4e6b3 | 3659 | err = btrfs_set_inode_index(dir, inode, &index); |
aec7477b JB |
3660 | if (err) |
3661 | goto fail; | |
3662 | ||
39279cc3 | 3663 | trans = btrfs_start_transaction(root, 1); |
5f39d397 | 3664 | |
39279cc3 CM |
3665 | btrfs_set_trans_block_group(trans, dir); |
3666 | atomic_inc(&inode->i_count); | |
aec7477b | 3667 | |
00e4e6b3 | 3668 | err = btrfs_add_nondir(trans, dentry, inode, 1, index); |
5f39d397 | 3669 | |
39279cc3 CM |
3670 | if (err) |
3671 | drop_inode = 1; | |
5f39d397 | 3672 | |
39279cc3 CM |
3673 | dir->i_sb->s_dirt = 1; |
3674 | btrfs_update_inode_block_group(trans, dir); | |
54aa1f4d | 3675 | err = btrfs_update_inode(trans, root, inode); |
5f39d397 | 3676 | |
54aa1f4d CM |
3677 | if (err) |
3678 | drop_inode = 1; | |
39279cc3 | 3679 | |
d3c2fdcf | 3680 | nr = trans->blocks_used; |
ab78c84d | 3681 | btrfs_end_transaction_throttle(trans, root); |
1832a6d5 | 3682 | fail: |
39279cc3 CM |
3683 | if (drop_inode) { |
3684 | inode_dec_link_count(inode); | |
3685 | iput(inode); | |
3686 | } | |
d3c2fdcf | 3687 | btrfs_btree_balance_dirty(root, nr); |
39279cc3 CM |
3688 | return err; |
3689 | } | |
3690 | ||
39279cc3 CM |
3691 | static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) |
3692 | { | |
b9d86667 | 3693 | struct inode *inode = NULL; |
39279cc3 CM |
3694 | struct btrfs_trans_handle *trans; |
3695 | struct btrfs_root *root = BTRFS_I(dir)->root; | |
3696 | int err = 0; | |
3697 | int drop_on_err = 0; | |
b9d86667 | 3698 | u64 objectid = 0; |
00e4e6b3 | 3699 | u64 index = 0; |
d3c2fdcf | 3700 | unsigned long nr = 1; |
39279cc3 | 3701 | |
1832a6d5 CM |
3702 | err = btrfs_check_free_space(root, 1, 0); |
3703 | if (err) | |
3704 | goto out_unlock; | |
3705 | ||
39279cc3 CM |
3706 | trans = btrfs_start_transaction(root, 1); |
3707 | btrfs_set_trans_block_group(trans, dir); | |
5f39d397 | 3708 | |
39279cc3 CM |
3709 | if (IS_ERR(trans)) { |
3710 | err = PTR_ERR(trans); | |
3711 | goto out_unlock; | |
3712 | } | |
3713 | ||
3714 | err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid); | |
3715 | if (err) { | |
3716 | err = -ENOSPC; | |
3717 | goto out_unlock; | |
3718 | } | |
3719 | ||
aec7477b | 3720 | inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, |
9c58309d CM |
3721 | dentry->d_name.len, |
3722 | dentry->d_parent->d_inode->i_ino, objectid, | |
00e4e6b3 CM |
3723 | BTRFS_I(dir)->block_group, S_IFDIR | mode, |
3724 | &index); | |
39279cc3 CM |
3725 | if (IS_ERR(inode)) { |
3726 | err = PTR_ERR(inode); | |
3727 | goto out_fail; | |
3728 | } | |
5f39d397 | 3729 | |
39279cc3 | 3730 | drop_on_err = 1; |
33268eaf JB |
3731 | |
3732 | err = btrfs_init_acl(inode, dir); | |
3733 | if (err) | |
3734 | goto out_fail; | |
3735 | ||
39279cc3 CM |
3736 | inode->i_op = &btrfs_dir_inode_operations; |
3737 | inode->i_fop = &btrfs_dir_file_operations; | |
3738 | btrfs_set_trans_block_group(trans, inode); | |
3739 | ||
dbe674a9 | 3740 | btrfs_i_size_write(inode, 0); |
39279cc3 CM |
3741 | err = btrfs_update_inode(trans, root, inode); |
3742 | if (err) | |
3743 | goto out_fail; | |
5f39d397 | 3744 | |
e02119d5 CM |
3745 | err = btrfs_add_link(trans, dentry->d_parent->d_inode, |
3746 | inode, dentry->d_name.name, | |
3747 | dentry->d_name.len, 0, index); | |
39279cc3 CM |
3748 | if (err) |
3749 | goto out_fail; | |
5f39d397 | 3750 | |
39279cc3 CM |
3751 | d_instantiate(dentry, inode); |
3752 | drop_on_err = 0; | |
3753 | dir->i_sb->s_dirt = 1; | |
3754 | btrfs_update_inode_block_group(trans, inode); | |
3755 | btrfs_update_inode_block_group(trans, dir); | |
3756 | ||
3757 | out_fail: | |
d3c2fdcf | 3758 | nr = trans->blocks_used; |
ab78c84d | 3759 | btrfs_end_transaction_throttle(trans, root); |
5f39d397 | 3760 | |
39279cc3 | 3761 | out_unlock: |
39279cc3 CM |
3762 | if (drop_on_err) |
3763 | iput(inode); | |
d3c2fdcf | 3764 | btrfs_btree_balance_dirty(root, nr); |
39279cc3 CM |
3765 | return err; |
3766 | } | |
3767 | ||
d352ac68 CM |
3768 | /* helper for btrfs_get_extent. Given an existing extent in the tree, |
3769 | * and an extent that you want to insert, deal with overlap and insert | |
3770 | * the new extent into the tree. | |
3771 | */ | |
3b951516 CM |
3772 | static int merge_extent_mapping(struct extent_map_tree *em_tree, |
3773 | struct extent_map *existing, | |
e6dcd2dc CM |
3774 | struct extent_map *em, |
3775 | u64 map_start, u64 map_len) | |
3b951516 CM |
3776 | { |
3777 | u64 start_diff; | |
3b951516 | 3778 | |
e6dcd2dc CM |
3779 | BUG_ON(map_start < em->start || map_start >= extent_map_end(em)); |
3780 | start_diff = map_start - em->start; | |
3781 | em->start = map_start; | |
3782 | em->len = map_len; | |
c8b97818 CM |
3783 | if (em->block_start < EXTENT_MAP_LAST_BYTE && |
3784 | !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) { | |
e6dcd2dc | 3785 | em->block_start += start_diff; |
c8b97818 CM |
3786 | em->block_len -= start_diff; |
3787 | } | |
e6dcd2dc | 3788 | return add_extent_mapping(em_tree, em); |
3b951516 CM |
3789 | } |
3790 | ||
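A minimal userspace sketch of the arithmetic in merge_extent_mapping() for the uncompressed case, with simplified stand-in types: the candidate mapping is clamped to the window that is actually missing from the tree, and the physical start moves forward by whatever was trimmed off the front (compressed extents above deliberately leave block_start alone).

#include <stdio.h>

/* simplified extent map: logical [start, start+len) backed by a
 * physical range beginning at block_start */
struct map { unsigned long long start, len, block_start; };

/* clamp the mapping to the missing window and shift the physical start
 * by the amount trimmed from the front, as merge_extent_mapping() does */
static void trim_to_window(struct map *m, unsigned long long win_start,
                           unsigned long long win_len)
{
        unsigned long long start_diff = win_start - m->start;

        m->start = win_start;
        m->len = win_len;
        m->block_start += start_diff;
}

int main(void)
{
        struct map m = { .start = 0, .len = 16384, .block_start = 1048576 };

        /* only [4096, 8192) was missing from the tree */
        trim_to_window(&m, 4096, 4096);
        printf("logical %llu len %llu -> physical %llu\n",
               m.start, m.len, m.block_start);   /* physical 1052672 */
        return 0;
}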
c8b97818 CM |
3791 | static noinline int uncompress_inline(struct btrfs_path *path, |
3792 | struct inode *inode, struct page *page, | |
3793 | size_t pg_offset, u64 extent_offset, | |
3794 | struct btrfs_file_extent_item *item) | |
3795 | { | |
3796 | int ret; | |
3797 | struct extent_buffer *leaf = path->nodes[0]; | |
3798 | char *tmp; | |
3799 | size_t max_size; | |
3800 | unsigned long inline_size; | |
3801 | unsigned long ptr; | |
3802 | ||
3803 | WARN_ON(pg_offset != 0); | |
3804 | max_size = btrfs_file_extent_ram_bytes(leaf, item); | |
3805 | inline_size = btrfs_file_extent_inline_item_len(leaf, | |
3806 | btrfs_item_nr(leaf, path->slots[0])); | |
3807 | tmp = kmalloc(inline_size, GFP_NOFS); | |
3808 | ptr = btrfs_file_extent_inline_start(item); | |
3809 | ||
3810 | read_extent_buffer(leaf, tmp, ptr, inline_size); | |
3811 | ||
3812 | max_size = min(PAGE_CACHE_SIZE, max_size); | |
3813 | ret = btrfs_zlib_decompress(tmp, page, extent_offset, | |
3814 | inline_size, max_size); | |
3815 | if (ret) { | |
3816 | char *kaddr = kmap_atomic(page, KM_USER0); | |
3817 | unsigned long copy_size = min_t(u64, | |
3818 | PAGE_CACHE_SIZE - pg_offset, | |
3819 | max_size - extent_offset); | |
3820 | memset(kaddr + pg_offset, 0, copy_size); | |
3821 | kunmap_atomic(kaddr, KM_USER0); | |
3822 | } | |
3823 | kfree(tmp); | |
3824 | return 0; | |
3825 | } | |
3826 | ||
d352ac68 CM |
3827 | /* |
3828 | * a bit scary, this does extent mapping from logical file offset to the disk. | |
3829 | * the ugly parts come from merging extents from the disk with the | |
3830 | * in-ram representation. This gets more complex because of the data=ordered code, | |
3831 | * where the in-ram extents might be locked pending data=ordered completion. | |
3832 | * | |
3833 | * This also copies inline extents directly into the page. | |
3834 | */ | |
a52d9a80 | 3835 | struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, |
70dec807 | 3836 | size_t pg_offset, u64 start, u64 len, |
a52d9a80 CM |
3837 | int create) |
3838 | { | |
3839 | int ret; | |
3840 | int err = 0; | |
db94535d | 3841 | u64 bytenr; |
a52d9a80 CM |
3842 | u64 extent_start = 0; |
3843 | u64 extent_end = 0; | |
3844 | u64 objectid = inode->i_ino; | |
3845 | u32 found_type; | |
f421950f | 3846 | struct btrfs_path *path = NULL; |
a52d9a80 CM |
3847 | struct btrfs_root *root = BTRFS_I(inode)->root; |
3848 | struct btrfs_file_extent_item *item; | |
5f39d397 CM |
3849 | struct extent_buffer *leaf; |
3850 | struct btrfs_key found_key; | |
a52d9a80 CM |
3851 | struct extent_map *em = NULL; |
3852 | struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; | |
d1310b2e | 3853 | struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; |
a52d9a80 | 3854 | struct btrfs_trans_handle *trans = NULL; |
c8b97818 | 3855 | int compressed; |
a52d9a80 | 3856 | |
a52d9a80 | 3857 | again: |
d1310b2e CM |
3858 | spin_lock(&em_tree->lock); |
3859 | em = lookup_extent_mapping(em_tree, start, len); | |
a061fc8d CM |
3860 | if (em) |
3861 | em->bdev = root->fs_info->fs_devices->latest_bdev; | |
d1310b2e CM |
3862 | spin_unlock(&em_tree->lock); |
3863 | ||
a52d9a80 | 3864 | if (em) { |
e1c4b745 CM |
3865 | if (em->start > start || em->start + em->len <= start) |
3866 | free_extent_map(em); | |
3867 | else if (em->block_start == EXTENT_MAP_INLINE && page) | |
70dec807 CM |
3868 | free_extent_map(em); |
3869 | else | |
3870 | goto out; | |
a52d9a80 | 3871 | } |
d1310b2e | 3872 | em = alloc_extent_map(GFP_NOFS); |
a52d9a80 | 3873 | if (!em) { |
d1310b2e CM |
3874 | err = -ENOMEM; |
3875 | goto out; | |
a52d9a80 | 3876 | } |
e6dcd2dc | 3877 | em->bdev = root->fs_info->fs_devices->latest_bdev; |
d1310b2e CM |
3878 | em->start = EXTENT_MAP_HOLE; |
3879 | em->len = (u64)-1; | |
c8b97818 | 3880 | em->block_len = (u64)-1; |
f421950f CM |
3881 | |
3882 | if (!path) { | |
3883 | path = btrfs_alloc_path(); | |
3884 | BUG_ON(!path); | |
3885 | } | |
3886 | ||
179e29e4 CM |
3887 | ret = btrfs_lookup_file_extent(trans, root, path, |
3888 | objectid, start, trans != NULL); | |
a52d9a80 CM |
3889 | if (ret < 0) { |
3890 | err = ret; | |
3891 | goto out; | |
3892 | } | |
3893 | ||
3894 | if (ret != 0) { | |
3895 | if (path->slots[0] == 0) | |
3896 | goto not_found; | |
3897 | path->slots[0]--; | |
3898 | } | |
3899 | ||
5f39d397 CM |
3900 | leaf = path->nodes[0]; |
3901 | item = btrfs_item_ptr(leaf, path->slots[0], | |
a52d9a80 | 3902 | struct btrfs_file_extent_item); |
a52d9a80 | 3903 | /* are we inside the extent that was found? */ |
5f39d397 CM |
3904 | btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); |
3905 | found_type = btrfs_key_type(&found_key); | |
3906 | if (found_key.objectid != objectid || | |
a52d9a80 CM |
3907 | found_type != BTRFS_EXTENT_DATA_KEY) { |
3908 | goto not_found; | |
3909 | } | |
3910 | ||
5f39d397 CM |
3911 | found_type = btrfs_file_extent_type(leaf, item); |
3912 | extent_start = found_key.offset; | |
c8b97818 | 3913 | compressed = btrfs_file_extent_compression(leaf, item); |
d899e052 YZ |
3914 | if (found_type == BTRFS_FILE_EXTENT_REG || |
3915 | found_type == BTRFS_FILE_EXTENT_PREALLOC) { | |
a52d9a80 | 3916 | extent_end = extent_start + |
db94535d | 3917 | btrfs_file_extent_num_bytes(leaf, item); |
9036c102 YZ |
3918 | } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { |
3919 | size_t size; | |
3920 | size = btrfs_file_extent_inline_len(leaf, item); | |
3921 | extent_end = (extent_start + size + root->sectorsize - 1) & | |
3922 | ~((u64)root->sectorsize - 1); | |
3923 | } | |
3924 | ||
3925 | if (start >= extent_end) { | |
3926 | path->slots[0]++; | |
3927 | if (path->slots[0] >= btrfs_header_nritems(leaf)) { | |
3928 | ret = btrfs_next_leaf(root, path); | |
3929 | if (ret < 0) { | |
3930 | err = ret; | |
3931 | goto out; | |
a52d9a80 | 3932 | } |
9036c102 YZ |
3933 | if (ret > 0) |
3934 | goto not_found; | |
3935 | leaf = path->nodes[0]; | |
a52d9a80 | 3936 | } |
9036c102 YZ |
3937 | btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); |
3938 | if (found_key.objectid != objectid || | |
3939 | found_key.type != BTRFS_EXTENT_DATA_KEY) | |
3940 | goto not_found; | |
3941 | if (start + len <= found_key.offset) | |
3942 | goto not_found; | |
3943 | em->start = start; | |
3944 | em->len = found_key.offset - start; | |
3945 | goto not_found_em; | |
3946 | } | |
3947 | ||
d899e052 YZ |
3948 | if (found_type == BTRFS_FILE_EXTENT_REG || |
3949 | found_type == BTRFS_FILE_EXTENT_PREALLOC) { | |
9036c102 YZ |
3950 | em->start = extent_start; |
3951 | em->len = extent_end - extent_start; | |
ff5b7ee3 YZ |
3952 | em->orig_start = extent_start - |
3953 | btrfs_file_extent_offset(leaf, item); | |
db94535d CM |
3954 | bytenr = btrfs_file_extent_disk_bytenr(leaf, item); |
3955 | if (bytenr == 0) { | |
5f39d397 | 3956 | em->block_start = EXTENT_MAP_HOLE; |
a52d9a80 CM |
3957 | goto insert; |
3958 | } | |
c8b97818 CM |
3959 | if (compressed) { |
3960 | set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); | |
3961 | em->block_start = bytenr; | |
3962 | em->block_len = btrfs_file_extent_disk_num_bytes(leaf, | |
3963 | item); | |
3964 | } else { | |
3965 | bytenr += btrfs_file_extent_offset(leaf, item); | |
3966 | em->block_start = bytenr; | |
3967 | em->block_len = em->len; | |
d899e052 YZ |
3968 | if (found_type == BTRFS_FILE_EXTENT_PREALLOC) |
3969 | set_bit(EXTENT_FLAG_PREALLOC, &em->flags); | |
c8b97818 | 3970 | } |
a52d9a80 CM |
3971 | goto insert; |
3972 | } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { | |
5f39d397 | 3973 | unsigned long ptr; |
a52d9a80 | 3974 | char *map; |
3326d1b0 CM |
3975 | size_t size; |
3976 | size_t extent_offset; | |
3977 | size_t copy_size; | |
a52d9a80 | 3978 | |
689f9346 | 3979 | em->block_start = EXTENT_MAP_INLINE; |
c8b97818 | 3980 | if (!page || create) { |
689f9346 | 3981 | em->start = extent_start; |
9036c102 | 3982 | em->len = extent_end - extent_start; |
689f9346 Y |
3983 | goto out; |
3984 | } | |
5f39d397 | 3985 | |
9036c102 YZ |
3986 | size = btrfs_file_extent_inline_len(leaf, item); |
3987 | extent_offset = page_offset(page) + pg_offset - extent_start; | |
70dec807 | 3988 | copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset, |
3326d1b0 | 3989 | size - extent_offset); |
3326d1b0 | 3990 | em->start = extent_start + extent_offset; |
70dec807 CM |
3991 | em->len = (copy_size + root->sectorsize - 1) & |
3992 | ~((u64)root->sectorsize - 1); | |
ff5b7ee3 | 3993 | em->orig_start = EXTENT_MAP_INLINE; |
c8b97818 CM |
3994 | if (compressed) |
3995 | set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); | |
689f9346 | 3996 | ptr = btrfs_file_extent_inline_start(item) + extent_offset; |
179e29e4 | 3997 | if (create == 0 && !PageUptodate(page)) { |
c8b97818 CM |
3998 | if (btrfs_file_extent_compression(leaf, item) == |
3999 | BTRFS_COMPRESS_ZLIB) { | |
4000 | ret = uncompress_inline(path, inode, page, | |
4001 | pg_offset, | |
4002 | extent_offset, item); | |
4003 | BUG_ON(ret); | |
4004 | } else { | |
4005 | map = kmap(page); | |
4006 | read_extent_buffer(leaf, map + pg_offset, ptr, | |
4007 | copy_size); | |
4008 | kunmap(page); | |
4009 | } | |
179e29e4 CM |
4010 | flush_dcache_page(page); |
4011 | } else if (create && PageUptodate(page)) { | |
4012 | if (!trans) { | |
4013 | kunmap(page); | |
4014 | free_extent_map(em); | |
4015 | em = NULL; | |
4016 | btrfs_release_path(root, path); | |
f9295749 | 4017 | trans = btrfs_join_transaction(root, 1); |
179e29e4 CM |
4018 | goto again; |
4019 | } | |
c8b97818 | 4020 | map = kmap(page); |
70dec807 | 4021 | write_extent_buffer(leaf, map + pg_offset, ptr, |
179e29e4 | 4022 | copy_size); |
c8b97818 | 4023 | kunmap(page); |
179e29e4 | 4024 | btrfs_mark_buffer_dirty(leaf); |
a52d9a80 | 4025 | } |
d1310b2e CM |
4026 | set_extent_uptodate(io_tree, em->start, |
4027 | extent_map_end(em) - 1, GFP_NOFS); | |
a52d9a80 CM |
4028 | goto insert; |
4029 | } else { | |
4030 | printk("unkknown found_type %d\n", found_type); | |
4031 | WARN_ON(1); | |
4032 | } | |
4033 | not_found: | |
4034 | em->start = start; | |
d1310b2e | 4035 | em->len = len; |
a52d9a80 | 4036 | not_found_em: |
5f39d397 | 4037 | em->block_start = EXTENT_MAP_HOLE; |
9036c102 | 4038 | set_bit(EXTENT_FLAG_VACANCY, &em->flags); |
a52d9a80 CM |
4039 | insert: |
4040 | btrfs_release_path(root, path); | |
d1310b2e CM |
4041 | if (em->start > start || extent_map_end(em) <= start) { |
4042 | printk("bad extent! em: [%Lu %Lu] passed [%Lu %Lu]\n", em->start, em->len, start, len); | |
a52d9a80 CM |
4043 | err = -EIO; |
4044 | goto out; | |
4045 | } | |
d1310b2e CM |
4046 | |
4047 | err = 0; | |
4048 | spin_lock(&em_tree->lock); | |
a52d9a80 | 4049 | ret = add_extent_mapping(em_tree, em); |
3b951516 CM |
4050 | /* it is possible that someone inserted the extent into the tree |
4051 | * while we had the lock dropped. It is also possible that | |
4052 | * an overlapping map exists in the tree | |
4053 | */ | |
a52d9a80 | 4054 | if (ret == -EEXIST) { |
3b951516 | 4055 | struct extent_map *existing; |
e6dcd2dc CM |
4056 | |
4057 | ret = 0; | |
4058 | ||
3b951516 | 4059 | existing = lookup_extent_mapping(em_tree, start, len); |
e1c4b745 CM |
4060 | if (existing && (existing->start > start || |
4061 | existing->start + existing->len <= start)) { | |
4062 | free_extent_map(existing); | |
4063 | existing = NULL; | |
4064 | } | |
3b951516 CM |
4065 | if (!existing) { |
4066 | existing = lookup_extent_mapping(em_tree, em->start, | |
4067 | em->len); | |
4068 | if (existing) { | |
4069 | err = merge_extent_mapping(em_tree, existing, | |
e6dcd2dc CM |
4070 | em, start, |
4071 | root->sectorsize); | |
3b951516 CM |
4072 | free_extent_map(existing); |
4073 | if (err) { | |
4074 | free_extent_map(em); | |
4075 | em = NULL; | |
4076 | } | |
4077 | } else { | |
4078 | err = -EIO; | |
4079 | printk("failing to insert %Lu %Lu\n", | |
4080 | start, len); | |
4081 | free_extent_map(em); | |
4082 | em = NULL; | |
4083 | } | |
4084 | } else { | |
4085 | free_extent_map(em); | |
4086 | em = existing; | |
e6dcd2dc | 4087 | err = 0; |
a52d9a80 | 4088 | } |
a52d9a80 | 4089 | } |
d1310b2e | 4090 | spin_unlock(&em_tree->lock); |
a52d9a80 | 4091 | out: |
f421950f CM |
4092 | if (path) |
4093 | btrfs_free_path(path); | |
a52d9a80 CM |
4094 | if (trans) { |
4095 | ret = btrfs_end_transaction(trans, root); | |
e6dcd2dc | 4096 | if (!err) { |
a52d9a80 | 4097 | err = ret; |
e6dcd2dc | 4098 | } |
a52d9a80 | 4099 | } |
a52d9a80 CM |
4100 | if (err) { |
4101 | free_extent_map(em); | |
4102 | WARN_ON(1); | |
4103 | return ERR_PTR(err); | |
4104 | } | |
4105 | return em; | |
4106 | } | |
4107 | ||
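Inline extent ends and em->len in btrfs_get_extent() above are rounded up to the sector size with the usual power-of-two mask trick; a tiny standalone version of that expression:

#include <stdio.h>

/* (x + sectorsize - 1) & ~(sectorsize - 1), valid for power-of-two sizes */
static unsigned long long round_up_sector(unsigned long long x,
                                          unsigned long long sectorsize)
{
        return (x + sectorsize - 1) & ~(sectorsize - 1);
}

int main(void)
{
        printf("%llu\n", round_up_sector(5000, 4096));   /* 8192 */
        printf("%llu\n", round_up_sector(8192, 4096));   /* 8192 */
        return 0;
}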
16432985 CM |
4108 | static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb, |
4109 | const struct iovec *iov, loff_t offset, | |
4110 | unsigned long nr_segs) | |
4111 | { | |
e1c4b745 | 4112 | return -EINVAL; |
16432985 CM |
4113 | } |
4114 | ||
d396c6f5 | 4115 | static sector_t btrfs_bmap(struct address_space *mapping, sector_t iblock) |
39279cc3 | 4116 | { |
d396c6f5 | 4117 | return extent_bmap(mapping, iblock, btrfs_get_extent); |
39279cc3 CM |
4118 | } |
4119 | ||
a52d9a80 | 4120 | int btrfs_readpage(struct file *file, struct page *page) |
9ebefb18 | 4121 | { |
d1310b2e CM |
4122 | struct extent_io_tree *tree; |
4123 | tree = &BTRFS_I(page->mapping->host)->io_tree; | |
a52d9a80 | 4124 | return extent_read_full_page(tree, page, btrfs_get_extent); |
9ebefb18 | 4125 | } |
1832a6d5 | 4126 | |
a52d9a80 | 4127 | static int btrfs_writepage(struct page *page, struct writeback_control *wbc) |
39279cc3 | 4128 | { |
d1310b2e | 4129 | struct extent_io_tree *tree; |
b888db2b CM |
4130 | |
4131 | ||
4132 | if (current->flags & PF_MEMALLOC) { | |
4133 | redirty_page_for_writepage(wbc, page); | |
4134 | unlock_page(page); | |
4135 | return 0; | |
4136 | } | |
d1310b2e | 4137 | tree = &BTRFS_I(page->mapping->host)->io_tree; |
a52d9a80 | 4138 | return extent_write_full_page(tree, page, btrfs_get_extent, wbc); |
9ebefb18 CM |
4139 | } |
4140 | ||
f421950f CM |
4141 | int btrfs_writepages(struct address_space *mapping, |
4142 | struct writeback_control *wbc) | |
b293f02e | 4143 | { |
d1310b2e | 4144 | struct extent_io_tree *tree; |
771ed689 | 4145 | |
d1310b2e | 4146 | tree = &BTRFS_I(mapping->host)->io_tree; |
b293f02e CM |
4147 | return extent_writepages(tree, mapping, btrfs_get_extent, wbc); |
4148 | } | |
4149 | ||
3ab2fb5a CM |
4150 | static int |
4151 | btrfs_readpages(struct file *file, struct address_space *mapping, | |
4152 | struct list_head *pages, unsigned nr_pages) | |
4153 | { | |
d1310b2e CM |
4154 | struct extent_io_tree *tree; |
4155 | tree = &BTRFS_I(mapping->host)->io_tree; | |
3ab2fb5a CM |
4156 | return extent_readpages(tree, mapping, pages, nr_pages, |
4157 | btrfs_get_extent); | |
4158 | } | |
e6dcd2dc | 4159 | static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags) |
9ebefb18 | 4160 | { |
d1310b2e CM |
4161 | struct extent_io_tree *tree; |
4162 | struct extent_map_tree *map; | |
a52d9a80 | 4163 | int ret; |
8c2383c3 | 4164 | |
d1310b2e CM |
4165 | tree = &BTRFS_I(page->mapping->host)->io_tree; |
4166 | map = &BTRFS_I(page->mapping->host)->extent_tree; | |
70dec807 | 4167 | ret = try_release_extent_mapping(map, tree, page, gfp_flags); |
a52d9a80 CM |
4168 | if (ret == 1) { |
4169 | ClearPagePrivate(page); | |
4170 | set_page_private(page, 0); | |
4171 | page_cache_release(page); | |
39279cc3 | 4172 | } |
a52d9a80 | 4173 | return ret; |
39279cc3 CM |
4174 | } |
4175 | ||
e6dcd2dc CM |
4176 | static int btrfs_releasepage(struct page *page, gfp_t gfp_flags) |
4177 | { | |
98509cfc CM |
4178 | if (PageWriteback(page) || PageDirty(page)) |
4179 | return 0; | |
e6dcd2dc CM |
4180 | return __btrfs_releasepage(page, gfp_flags); |
4181 | } | |
4182 | ||
a52d9a80 | 4183 | static void btrfs_invalidatepage(struct page *page, unsigned long offset) |
39279cc3 | 4184 | { |
d1310b2e | 4185 | struct extent_io_tree *tree; |
e6dcd2dc CM |
4186 | struct btrfs_ordered_extent *ordered; |
4187 | u64 page_start = page_offset(page); | |
4188 | u64 page_end = page_start + PAGE_CACHE_SIZE - 1; | |
39279cc3 | 4189 | |
e6dcd2dc | 4190 | wait_on_page_writeback(page); |
d1310b2e | 4191 | tree = &BTRFS_I(page->mapping->host)->io_tree; |
e6dcd2dc CM |
4192 | if (offset) { |
4193 | btrfs_releasepage(page, GFP_NOFS); | |
4194 | return; | |
4195 | } | |
4196 | ||
4197 | lock_extent(tree, page_start, page_end, GFP_NOFS); | |
4198 | ordered = btrfs_lookup_ordered_extent(page->mapping->host, | |
4199 | page_offset(page)); | |
4200 | if (ordered) { | |
eb84ae03 CM |
4201 | /* |
4202 | * IO on this page will never be started, so we need | |
4203 | * to account for any ordered extents now | |
4204 | */ | |
e6dcd2dc CM |
4205 | clear_extent_bit(tree, page_start, page_end, |
4206 | EXTENT_DIRTY | EXTENT_DELALLOC | | |
4207 | EXTENT_LOCKED, 1, 0, GFP_NOFS); | |
211f90e6 CM |
4208 | btrfs_finish_ordered_io(page->mapping->host, |
4209 | page_start, page_end); | |
e6dcd2dc CM |
4210 | btrfs_put_ordered_extent(ordered); |
4211 | lock_extent(tree, page_start, page_end, GFP_NOFS); | |
4212 | } | |
4213 | clear_extent_bit(tree, page_start, page_end, | |
4214 | EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC | | |
4215 | EXTENT_ORDERED, | |
4216 | 1, 1, GFP_NOFS); | |
4217 | __btrfs_releasepage(page, GFP_NOFS); | |
4218 | ||
4a096752 | 4219 | ClearPageChecked(page); |
9ad6b7bc | 4220 | if (PagePrivate(page)) { |
9ad6b7bc CM |
4221 | ClearPagePrivate(page); |
4222 | set_page_private(page, 0); | |
4223 | page_cache_release(page); | |
4224 | } | |
39279cc3 CM |
4225 | } |
4226 | ||
9ebefb18 CM |
4227 | /* |
4228 | * btrfs_page_mkwrite() is not allowed to change the file size as it gets | |
4229 | * called from a page fault handler when a page is first dirtied. Hence we must | |
4230 | * be careful to check for EOF conditions here. We set the page up correctly | |
4231 | * for a written page which means we get ENOSPC checking when writing into | |
4232 | * holes and correct delalloc and unwritten extent mapping on filesystems that | |
4233 | * support these features. | |
4234 | * | |
4235 | * We are not allowed to take the i_mutex here so we have to play games to | |
4236 | * protect against truncate races as the page could now be beyond EOF. Because | |
4237 | * vmtruncate() writes the inode size before removing pages, once we have the | |
4238 | * page lock we can determine safely if the page is beyond EOF. If it is not | |
4239 | * beyond EOF, then the page is guaranteed safe against truncation until we | |
4240 | * unlock the page. | |
4241 | */ | |
4242 | int btrfs_page_mkwrite(struct vm_area_struct *vma, struct page *page) | |
4243 | { | |
6da6abae | 4244 | struct inode *inode = fdentry(vma->vm_file)->d_inode; |
1832a6d5 | 4245 | struct btrfs_root *root = BTRFS_I(inode)->root; |
e6dcd2dc CM |
4246 | struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; |
4247 | struct btrfs_ordered_extent *ordered; | |
4248 | char *kaddr; | |
4249 | unsigned long zero_start; | |
9ebefb18 | 4250 | loff_t size; |
1832a6d5 | 4251 | int ret; |
a52d9a80 | 4252 | u64 page_start; |
e6dcd2dc | 4253 | u64 page_end; |
9ebefb18 | 4254 | |
1832a6d5 | 4255 | ret = btrfs_check_free_space(root, PAGE_CACHE_SIZE, 0); |
1832a6d5 CM |
4256 | if (ret) |
4257 | goto out; | |
4258 | ||
4259 | ret = -EINVAL; | |
e6dcd2dc | 4260 | again: |
9ebefb18 | 4261 | lock_page(page); |
9ebefb18 | 4262 | size = i_size_read(inode); |
e6dcd2dc CM |
4263 | page_start = page_offset(page); |
4264 | page_end = page_start + PAGE_CACHE_SIZE - 1; | |
a52d9a80 | 4265 | |
9ebefb18 | 4266 | if ((page->mapping != inode->i_mapping) || |
e6dcd2dc | 4267 | (page_start >= size)) { |
9ebefb18 CM |
4268 | /* page got truncated out from underneath us */ |
4269 | goto out_unlock; | |
4270 | } | |
e6dcd2dc CM |
4271 | wait_on_page_writeback(page); |
4272 | ||
4273 | lock_extent(io_tree, page_start, page_end, GFP_NOFS); | |
4274 | set_page_extent_mapped(page); | |
4275 | ||
eb84ae03 CM |
4276 | /* |
4277 | * we can't set the delalloc bits if there are pending ordered | |
4278 | * extents. Drop our locks and wait for them to finish | |
4279 | */ | |
e6dcd2dc CM |
4280 | ordered = btrfs_lookup_ordered_extent(inode, page_start); |
4281 | if (ordered) { | |
4282 | unlock_extent(io_tree, page_start, page_end, GFP_NOFS); | |
4283 | unlock_page(page); | |
eb84ae03 | 4284 | btrfs_start_ordered_extent(inode, ordered, 1); |
e6dcd2dc CM |
4285 | btrfs_put_ordered_extent(ordered); |
4286 | goto again; | |
4287 | } | |
4288 | ||
ea8c2819 | 4289 | btrfs_set_extent_delalloc(inode, page_start, page_end); |
e6dcd2dc | 4290 | ret = 0; |
9ebefb18 CM |
4291 | |
4292 | /* page is wholly or partially inside EOF */ | |
a52d9a80 | 4293 | if (page_start + PAGE_CACHE_SIZE > size) |
e6dcd2dc | 4294 | zero_start = size & ~PAGE_CACHE_MASK; |
9ebefb18 | 4295 | else |
e6dcd2dc | 4296 | zero_start = PAGE_CACHE_SIZE; |
9ebefb18 | 4297 | |
e6dcd2dc CM |
4298 | if (zero_start != PAGE_CACHE_SIZE) { |
4299 | kaddr = kmap(page); | |
4300 | memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start); | |
4301 | flush_dcache_page(page); | |
4302 | kunmap(page); | |
4303 | } | |
247e743c | 4304 | ClearPageChecked(page); |
e6dcd2dc CM |
4305 | set_page_dirty(page); |
4306 | unlock_extent(io_tree, page_start, page_end, GFP_NOFS); | |
9ebefb18 CM |
4307 | |
4308 | out_unlock: | |
4309 | unlock_page(page); | |
1832a6d5 | 4310 | out: |
9ebefb18 CM |
4311 | return ret; |
4312 | } | |
4313 | ||
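A small sketch of the EOF handling near the end of btrfs_page_mkwrite(), with PAGE_SIZE_LOCAL as a stand-in for PAGE_CACHE_SIZE: when the faulting page straddles i_size, everything from the in-page EOF offset to the end of the page gets zeroed; a page wholly inside EOF needs no zeroing.

#include <stdio.h>

#define PAGE_SIZE_LOCAL 4096ULL   /* stand-in for PAGE_CACHE_SIZE */

/* return the in-page offset where zeroing must begin, or the full page
 * size when nothing needs zeroing (same logic as zero_start above) */
static unsigned long long zero_start_for(unsigned long long page_start,
                                         unsigned long long i_size)
{
        if (page_start + PAGE_SIZE_LOCAL > i_size)
                return i_size & (PAGE_SIZE_LOCAL - 1);
        return PAGE_SIZE_LOCAL;
}

int main(void)
{
        /* 10000-byte file: the page at 8192 straddles EOF, zero from 1808 */
        printf("%llu\n", zero_start_for(8192, 10000));   /* 1808 */
        printf("%llu\n", zero_start_for(4096, 10000));   /* 4096, nothing to do */
        return 0;
}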
39279cc3 CM |
4314 | static void btrfs_truncate(struct inode *inode) |
4315 | { | |
4316 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
4317 | int ret; | |
4318 | struct btrfs_trans_handle *trans; | |
d3c2fdcf | 4319 | unsigned long nr; |
dbe674a9 | 4320 | u64 mask = root->sectorsize - 1; |
39279cc3 CM |
4321 | |
4322 | if (!S_ISREG(inode->i_mode)) | |
4323 | return; | |
4324 | if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) | |
4325 | return; | |
4326 | ||
4327 | btrfs_truncate_page(inode->i_mapping, inode->i_size); | |
4a096752 | 4328 | btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1); |
39279cc3 | 4329 | |
39279cc3 CM |
4330 | trans = btrfs_start_transaction(root, 1); |
4331 | btrfs_set_trans_block_group(trans, inode); | |
dbe674a9 | 4332 | btrfs_i_size_write(inode, inode->i_size); |
39279cc3 | 4333 | |
7b128766 JB |
4334 | ret = btrfs_orphan_add(trans, inode); |
4335 | if (ret) | |
4336 | goto out; | |
39279cc3 | 4337 | /* FIXME, add redo link to tree so we don't leak on crash */ |
e02119d5 | 4338 | ret = btrfs_truncate_inode_items(trans, root, inode, inode->i_size, |
85e21bac | 4339 | BTRFS_EXTENT_DATA_KEY); |
39279cc3 | 4340 | btrfs_update_inode(trans, root, inode); |
5f39d397 | 4341 | |
7b128766 JB |
4342 | ret = btrfs_orphan_del(trans, inode); |
4343 | BUG_ON(ret); | |
4344 | ||
4345 | out: | |
4346 | nr = trans->blocks_used; | |
89ce8a63 | 4347 | ret = btrfs_end_transaction_throttle(trans, root); |
39279cc3 | 4348 | BUG_ON(ret); |
d3c2fdcf | 4349 | btrfs_btree_balance_dirty(root, nr); |
39279cc3 CM |
4350 | } |
4351 | ||
3b96362c SW |
4352 | /* |
4353 | * Invalidate a single dcache entry at the root of the filesystem. | |
4354 | * Needed after creation of snapshot or subvolume. | |
4355 | */ | |
4356 | void btrfs_invalidate_dcache_root(struct btrfs_root *root, char *name, | |
4357 | int namelen) | |
4358 | { | |
4359 | struct dentry *alias, *entry; | |
4360 | struct qstr qstr; | |
4361 | ||
4362 | alias = d_find_alias(root->fs_info->sb->s_root->d_inode); | |
4363 | if (alias) { | |
4364 | qstr.name = name; | |
4365 | qstr.len = namelen; | |
4366 | /* change me if btrfs ever gets a d_hash operation */ | |
4367 | qstr.hash = full_name_hash(qstr.name, qstr.len); | |
4368 | entry = d_lookup(alias, &qstr); | |
4369 | dput(alias); | |
4370 | if (entry) { | |
4371 | d_invalidate(entry); | |
4372 | dput(entry); | |
4373 | } | |
4374 | } | |
4375 | } | |
4376 | ||
d352ac68 CM |
4377 | /* |
4378 | * create a new subvolume directory/inode (helper for the ioctl). | |
4379 | */ | |
cb8e7090 | 4380 | int btrfs_create_subvol_root(struct btrfs_root *new_root, struct dentry *dentry, |
f46b5a66 CH |
4381 | struct btrfs_trans_handle *trans, u64 new_dirid, |
4382 | struct btrfs_block_group_cache *block_group) | |
39279cc3 | 4383 | { |
39279cc3 | 4384 | struct inode *inode; |
cb8e7090 | 4385 | int error; |
00e4e6b3 | 4386 | u64 index = 0; |
39279cc3 | 4387 | |
aec7477b | 4388 | inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, new_dirid, |
00e4e6b3 | 4389 | new_dirid, block_group, S_IFDIR | 0700, &index); |
54aa1f4d | 4390 | if (IS_ERR(inode)) |
f46b5a66 | 4391 | return PTR_ERR(inode); |
39279cc3 CM |
4392 | inode->i_op = &btrfs_dir_inode_operations; |
4393 | inode->i_fop = &btrfs_dir_file_operations; | |
34088780 | 4394 | new_root->inode = inode; |
39279cc3 | 4395 | |
39279cc3 | 4396 | inode->i_nlink = 1; |
dbe674a9 | 4397 | btrfs_i_size_write(inode, 0); |
3b96362c | 4398 | |
cb8e7090 CH |
4399 | error = btrfs_update_inode(trans, new_root, inode); |
4400 | if (error) | |
4401 | return error; | |
4402 | ||
d899e052 | 4403 | atomic_inc(&inode->i_count); |
cb8e7090 CH |
4404 | d_instantiate(dentry, inode); |
4405 | return 0; | |
39279cc3 CM |
4406 | } |
4407 | ||
d352ac68 CM |
4408 | /* helper function for file defrag and space balancing. This |
4409 | * forces readahead on a given range of bytes in an inode | |
4410 | */ | |
edbd8d4e | 4411 | unsigned long btrfs_force_ra(struct address_space *mapping, |
86479a04 CM |
4412 | struct file_ra_state *ra, struct file *file, |
4413 | pgoff_t offset, pgoff_t last_index) | |
4414 | { | |
8e7bf94f | 4415 | pgoff_t req_size = last_index - offset + 1; |
86479a04 | 4416 | |
86479a04 CM |
4417 | page_cache_sync_readahead(mapping, ra, file, offset, req_size); |
4418 | return offset + req_size; | |
86479a04 CM |
4419 | } |
4420 | ||
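/*
 * allocate an in-memory btrfs_inode from the slab cache for the VFS.
 * Only the btrfs specific bits are set up here (ordered extent tree,
 * cached acl pointers, orphan list head); the rest of the inode is
 * filled in when it is read from disk or newly created.
 */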
39279cc3 CM |
4421 | struct inode *btrfs_alloc_inode(struct super_block *sb) |
4422 | { | |
4423 | struct btrfs_inode *ei; | |
4424 | ||
4425 | ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS); | |
4426 | if (!ei) | |
4427 | return NULL; | |
15ee9bc7 | 4428 | ei->last_trans = 0; |
e02119d5 | 4429 | ei->logged_trans = 0; |
e6dcd2dc | 4430 | btrfs_ordered_inode_tree_init(&ei->ordered_tree); |
33268eaf JB |
4431 | ei->i_acl = BTRFS_ACL_NOT_CACHED; |
4432 | ei->i_default_acl = BTRFS_ACL_NOT_CACHED; | |
7b128766 | 4433 | INIT_LIST_HEAD(&ei->i_orphan); |
39279cc3 CM |
4434 | return &ei->vfs_inode; |
4435 | } | |
4436 | ||
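/*
 * final teardown of an in-memory inode: release any cached acls,
 * complain if the inode is still on the orphan list or still has
 * ordered extents hanging around, drop the extent cache and give the
 * btrfs_inode back to the slab.
 */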
4437 | void btrfs_destroy_inode(struct inode *inode) | |
4438 | { | |
e6dcd2dc | 4439 | struct btrfs_ordered_extent *ordered; |
39279cc3 CM |
4440 | WARN_ON(!list_empty(&inode->i_dentry)); |
4441 | WARN_ON(inode->i_data.nrpages); | |
4442 | ||
33268eaf JB |
4443 | if (BTRFS_I(inode)->i_acl && |
4444 | BTRFS_I(inode)->i_acl != BTRFS_ACL_NOT_CACHED) | |
4445 | posix_acl_release(BTRFS_I(inode)->i_acl); | |
4446 | if (BTRFS_I(inode)->i_default_acl && | |
4447 | BTRFS_I(inode)->i_default_acl != BTRFS_ACL_NOT_CACHED) | |
4448 | posix_acl_release(BTRFS_I(inode)->i_default_acl); | |
4449 | ||
bcc63abb | 4450 | spin_lock(&BTRFS_I(inode)->root->list_lock); |
7b128766 JB |
4451 | if (!list_empty(&BTRFS_I(inode)->i_orphan)) { |
4452 | printk(KERN_ERR "BTRFS: inode %lu: inode still on the orphan" | |
4453 | " list\n", inode->i_ino); | |
4454 | dump_stack(); | |
4455 | } | |
bcc63abb | 4456 | spin_unlock(&BTRFS_I(inode)->root->list_lock); |
7b128766 | 4457 | |
e6dcd2dc CM |
4458 | while (1) { |
4459 | ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1); | |
4460 | if (!ordered) | |
4461 | break; | |
4462 | else { | |
4463 | printk(KERN_ERR "BTRFS: found ordered extent %Lu %Lu\n", |
4464 | ordered->file_offset, ordered->len); | |
4465 | btrfs_remove_ordered_extent(inode, ordered); | |
4466 | btrfs_put_ordered_extent(ordered); | |
4467 | btrfs_put_ordered_extent(ordered); | |
4468 | } | |
4469 | } | |
5b21f2ed | 4470 | btrfs_drop_extent_cache(inode, 0, (u64)-1, 0); |
39279cc3 CM |
4471 | kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode)); |
4472 | } | |
4473 | ||
0ee0fda0 | 4474 | static void init_once(void *foo) |
39279cc3 CM |
4475 | { |
4476 | struct btrfs_inode *ei = (struct btrfs_inode *) foo; | |
4477 | ||
4478 | inode_init_once(&ei->vfs_inode); | |
4479 | } | |
4480 | ||
4481 | void btrfs_destroy_cachep(void) | |
4482 | { | |
4483 | if (btrfs_inode_cachep) | |
4484 | kmem_cache_destroy(btrfs_inode_cachep); | |
4485 | if (btrfs_trans_handle_cachep) | |
4486 | kmem_cache_destroy(btrfs_trans_handle_cachep); | |
4487 | if (btrfs_transaction_cachep) | |
4488 | kmem_cache_destroy(btrfs_transaction_cachep); | |
4489 | if (btrfs_bit_radix_cachep) | |
4490 | kmem_cache_destroy(btrfs_bit_radix_cachep); | |
4491 | if (btrfs_path_cachep) | |
4492 | kmem_cache_destroy(btrfs_path_cachep); | |
4493 | } | |
4494 | ||
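/*
 * thin wrapper around kmem_cache_create that adds the flags every
 * btrfs slab cache wants (SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD)
 * on top of whatever extra_flags the caller passes in.
 */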
86479a04 | 4495 | struct kmem_cache *btrfs_cache_create(const char *name, size_t size, |
92fee66d | 4496 | unsigned long extra_flags, |
2b1f55b0 | 4497 | void (*ctor)(void *)) |
92fee66d CM |
4498 | { |
4499 | return kmem_cache_create(name, size, 0, (SLAB_RECLAIM_ACCOUNT | | |
2b1f55b0 | 4500 | SLAB_MEM_SPREAD | extra_flags), ctor); |
92fee66d CM |
4501 | } |
4502 | ||
39279cc3 CM |
4503 | int btrfs_init_cachep(void) |
4504 | { | |
86479a04 | 4505 | btrfs_inode_cachep = btrfs_cache_create("btrfs_inode_cache", |
92fee66d CM |
4506 | sizeof(struct btrfs_inode), |
4507 | 0, init_once); | |
39279cc3 CM |
4508 | if (!btrfs_inode_cachep) |
4509 | goto fail; | |
86479a04 CM |
4510 | btrfs_trans_handle_cachep = |
4511 | btrfs_cache_create("btrfs_trans_handle_cache", | |
4512 | sizeof(struct btrfs_trans_handle), | |
4513 | 0, NULL); | |
39279cc3 CM |
4514 | if (!btrfs_trans_handle_cachep) |
4515 | goto fail; | |
86479a04 | 4516 | btrfs_transaction_cachep = btrfs_cache_create("btrfs_transaction_cache", |
39279cc3 | 4517 | sizeof(struct btrfs_transaction), |
92fee66d | 4518 | 0, NULL); |
39279cc3 CM |
4519 | if (!btrfs_transaction_cachep) |
4520 | goto fail; | |
86479a04 | 4521 | btrfs_path_cachep = btrfs_cache_create("btrfs_path_cache", |
23223584 | 4522 | sizeof(struct btrfs_path), |
92fee66d | 4523 | 0, NULL); |
39279cc3 CM |
4524 | if (!btrfs_path_cachep) |
4525 | goto fail; | |
86479a04 | 4526 | btrfs_bit_radix_cachep = btrfs_cache_create("btrfs_radix", 256, |
92fee66d | 4527 | SLAB_DESTROY_BY_RCU, NULL); |
39279cc3 CM |
4528 | if (!btrfs_bit_radix_cachep) |
4529 | goto fail; | |
4530 | return 0; | |
4531 | fail: | |
4532 | btrfs_destroy_cachep(); | |
4533 | return -ENOMEM; | |
4534 | } | |
4535 | ||
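/*
 * stat(2) support.  Most fields come straight from generic_fillattr,
 * but st_blocks also counts delalloc bytes that haven't hit disk yet
 * so buffered writes show up in the block count right away.
 */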
4536 | static int btrfs_getattr(struct vfsmount *mnt, | |
4537 | struct dentry *dentry, struct kstat *stat) | |
4538 | { | |
4539 | struct inode *inode = dentry->d_inode; | |
4540 | generic_fillattr(inode, stat); | |
d6667462 | 4541 | stat->blksize = PAGE_CACHE_SIZE; |
a76a3cd4 YZ |
4542 | stat->blocks = (inode_get_bytes(inode) + |
4543 | BTRFS_I(inode)->delalloc_bytes) >> 9; | |
39279cc3 CM |
4544 | return 0; |
4545 | } | |
4546 | ||
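/*
 * rename is unlink + link inside a single transaction: drop the old
 * directory entry, drop any existing entry at the target name (adding
 * the target inode to the orphan list if its link count hits zero),
 * then link the inode back in under the new name with a fresh
 * directory index.
 */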
4547 | static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, |
4548 | struct inode *new_dir, struct dentry *new_dentry) |
4549 | { | |
4550 | struct btrfs_trans_handle *trans; | |
4551 | struct btrfs_root *root = BTRFS_I(old_dir)->root; | |
4552 | struct inode *new_inode = new_dentry->d_inode; | |
4553 | struct inode *old_inode = old_dentry->d_inode; | |
4554 | struct timespec ctime = CURRENT_TIME; | |
00e4e6b3 | 4555 | u64 index = 0; |
39279cc3 CM |
4556 | int ret; |
4557 | ||
4558 | if (S_ISDIR(old_inode->i_mode) && new_inode && | |
4559 | new_inode->i_size > BTRFS_EMPTY_DIR_SIZE) { | |
4560 | return -ENOTEMPTY; | |
4561 | } | |
5f39d397 | 4562 | |
1832a6d5 CM |
4563 | ret = btrfs_check_free_space(root, 1, 0); |
4564 | if (ret) | |
4565 | goto out_unlock; | |
4566 | ||
39279cc3 | 4567 | trans = btrfs_start_transaction(root, 1); |
5f39d397 | 4568 | |
39279cc3 | 4569 | btrfs_set_trans_block_group(trans, new_dir); |
39279cc3 | 4570 | |
e02119d5 | 4571 | btrfs_inc_nlink(old_dentry->d_inode); |
39279cc3 CM |
4572 | old_dir->i_ctime = old_dir->i_mtime = ctime; |
4573 | new_dir->i_ctime = new_dir->i_mtime = ctime; | |
4574 | old_inode->i_ctime = ctime; | |
5f39d397 | 4575 | |
e02119d5 CM |
4576 | ret = btrfs_unlink_inode(trans, root, old_dir, old_dentry->d_inode, |
4577 | old_dentry->d_name.name, | |
4578 | old_dentry->d_name.len); | |
39279cc3 CM |
4579 | if (ret) |
4580 | goto out_fail; | |
4581 | ||
4582 | if (new_inode) { | |
4583 | new_inode->i_ctime = CURRENT_TIME; | |
e02119d5 CM |
4584 | ret = btrfs_unlink_inode(trans, root, new_dir, |
4585 | new_dentry->d_inode, | |
4586 | new_dentry->d_name.name, | |
4587 | new_dentry->d_name.len); | |
39279cc3 CM |
4588 | if (ret) |
4589 | goto out_fail; | |
7b128766 | 4590 | if (new_inode->i_nlink == 0) { |
e02119d5 | 4591 | ret = btrfs_orphan_add(trans, new_dentry->d_inode); |
7b128766 JB |
4592 | if (ret) |
4593 | goto out_fail; | |
4594 | } | |
e02119d5 | 4595 | |
39279cc3 | 4596 | } |
00e4e6b3 | 4597 | ret = btrfs_set_inode_index(new_dir, old_inode, &index); |
aec7477b JB |
4598 | if (ret) |
4599 | goto out_fail; | |
4600 | ||
e02119d5 CM |
4601 | ret = btrfs_add_link(trans, new_dentry->d_parent->d_inode, |
4602 | old_inode, new_dentry->d_name.name, | |
4603 | new_dentry->d_name.len, 1, index); | |
39279cc3 CM |
4604 | if (ret) |
4605 | goto out_fail; | |
4606 | ||
4607 | out_fail: | |
ab78c84d | 4608 | btrfs_end_transaction_throttle(trans, root); |
1832a6d5 | 4609 | out_unlock: |
39279cc3 CM |
4610 | return ret; |
4611 | } | |
4612 | ||
d352ac68 CM |
4613 | /* |
4614 | * some fairly slow code that needs optimization. This walks the list | |
4615 | * of all the inodes with pending delalloc and forces them to disk. | |
4616 | */ | |
ea8c2819 CM |
4617 | int btrfs_start_delalloc_inodes(struct btrfs_root *root) |
4618 | { | |
4619 | struct list_head *head = &root->fs_info->delalloc_inodes; | |
4620 | struct btrfs_inode *binode; | |
5b21f2ed | 4621 | struct inode *inode; |
ea8c2819 CM |
4622 | unsigned long flags; |
4623 | ||
4624 | spin_lock_irqsave(&root->fs_info->delalloc_lock, flags); | |
4625 | while (!list_empty(head)) { |
4626 | binode = list_entry(head->next, struct btrfs_inode, | |
4627 | delalloc_inodes); | |
5b21f2ed ZY |
4628 | inode = igrab(&binode->vfs_inode); |
4629 | if (!inode) | |
4630 | list_del_init(&binode->delalloc_inodes); | |
ea8c2819 | 4631 | spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags); |
5b21f2ed | 4632 | if (inode) { |
8c8bee1d | 4633 | filemap_flush(inode->i_mapping); |
5b21f2ed ZY |
4634 | iput(inode); |
4635 | } | |
4636 | cond_resched(); | |
ea8c2819 CM |
4637 | spin_lock_irqsave(&root->fs_info->delalloc_lock, flags); |
4638 | } | |
4639 | spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags); | |
8c8bee1d CM |
4640 | |
4641 | /* the filemap_flush will queue IO into the worker threads, but | |
4642 | * we have to make sure the IO is actually started and that | |
4643 | * ordered extents get created before we return | |
4644 | */ | |
4645 | atomic_inc(&root->fs_info->async_submit_draining); | |
771ed689 CM |
4646 | while (atomic_read(&root->fs_info->nr_async_submits) || |
4647 | atomic_read(&root->fs_info->async_delalloc_pages)) { | |
8c8bee1d | 4648 | wait_event(root->fs_info->async_submit_wait, |
771ed689 CM |
4649 | (atomic_read(&root->fs_info->nr_async_submits) == 0 && |
4650 | atomic_read(&root->fs_info->async_delalloc_pages) == 0)); | |
8c8bee1d CM |
4651 | } |
4652 | atomic_dec(&root->fs_info->async_submit_draining); | |
ea8c2819 CM |
4653 | return 0; |
4654 | } | |
4655 | ||
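/*
 * symlink targets are stored as an inline file extent holding the
 * string, so the target (including its trailing NUL) has to fit in
 * BTRFS_MAX_INLINE_DATA_SIZE.  Otherwise this follows the normal
 * create path: new inode, acl init, directory entry, then the inline
 * extent item carrying the link text.
 */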
39279cc3 CM |
4656 | static int btrfs_symlink(struct inode *dir, struct dentry *dentry, |
4657 | const char *symname) | |
4658 | { | |
4659 | struct btrfs_trans_handle *trans; | |
4660 | struct btrfs_root *root = BTRFS_I(dir)->root; | |
4661 | struct btrfs_path *path; | |
4662 | struct btrfs_key key; | |
1832a6d5 | 4663 | struct inode *inode = NULL; |
39279cc3 CM |
4664 | int err; |
4665 | int drop_inode = 0; | |
4666 | u64 objectid; | |
00e4e6b3 | 4667 | u64 index = 0; |
39279cc3 CM |
4668 | int name_len; |
4669 | int datasize; | |
5f39d397 | 4670 | unsigned long ptr; |
39279cc3 | 4671 | struct btrfs_file_extent_item *ei; |
5f39d397 | 4672 | struct extent_buffer *leaf; |
1832a6d5 | 4673 | unsigned long nr = 0; |
39279cc3 CM |
4674 | |
4675 | name_len = strlen(symname) + 1; | |
4676 | if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root)) | |
4677 | return -ENAMETOOLONG; | |
1832a6d5 | 4678 | |
1832a6d5 CM |
4679 | err = btrfs_check_free_space(root, 1, 0); |
4680 | if (err) | |
4681 | goto out_fail; | |
4682 | ||
39279cc3 CM |
4683 | trans = btrfs_start_transaction(root, 1); |
4684 | btrfs_set_trans_block_group(trans, dir); | |
4685 | ||
4686 | err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid); | |
4687 | if (err) { | |
4688 | err = -ENOSPC; | |
4689 | goto out_unlock; | |
4690 | } | |
4691 | ||
aec7477b | 4692 | inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, |
9c58309d CM |
4693 | dentry->d_name.len, |
4694 | dentry->d_parent->d_inode->i_ino, objectid, | |
00e4e6b3 CM |
4695 | BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO, |
4696 | &index); | |
39279cc3 CM |
4697 | err = PTR_ERR(inode); |
4698 | if (IS_ERR(inode)) | |
4699 | goto out_unlock; | |
4700 | ||
33268eaf JB |
4701 | err = btrfs_init_acl(inode, dir); |
4702 | if (err) { | |
4703 | drop_inode = 1; | |
4704 | goto out_unlock; | |
4705 | } | |
4706 | ||
39279cc3 | 4707 | btrfs_set_trans_block_group(trans, inode); |
00e4e6b3 | 4708 | err = btrfs_add_nondir(trans, dentry, inode, 0, index); |
39279cc3 CM |
4709 | if (err) |
4710 | drop_inode = 1; | |
4711 | else { | |
4712 | inode->i_mapping->a_ops = &btrfs_aops; | |
04160088 | 4713 | inode->i_mapping->backing_dev_info = &root->fs_info->bdi; |
39279cc3 CM |
4714 | inode->i_fop = &btrfs_file_operations; |
4715 | inode->i_op = &btrfs_file_inode_operations; | |
d1310b2e | 4716 | BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; |
39279cc3 CM |
4717 | } |
4718 | dir->i_sb->s_dirt = 1; | |
4719 | btrfs_update_inode_block_group(trans, inode); | |
4720 | btrfs_update_inode_block_group(trans, dir); | |
4721 | if (drop_inode) | |
4722 | goto out_unlock; | |
4723 | ||
4724 | path = btrfs_alloc_path(); | |
4725 | BUG_ON(!path); | |
4726 | key.objectid = inode->i_ino; | |
4727 | key.offset = 0; | |
39279cc3 CM |
4728 | btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY); |
4729 | datasize = btrfs_file_extent_calc_inline_size(name_len); | |
4730 | err = btrfs_insert_empty_item(trans, root, path, &key, | |
4731 | datasize); | |
54aa1f4d CM |
4732 | if (err) { |
4733 | drop_inode = 1; | |
4734 | goto out_unlock; | |
4735 | } | |
5f39d397 CM |
4736 | leaf = path->nodes[0]; |
4737 | ei = btrfs_item_ptr(leaf, path->slots[0], | |
4738 | struct btrfs_file_extent_item); | |
4739 | btrfs_set_file_extent_generation(leaf, ei, trans->transid); | |
4740 | btrfs_set_file_extent_type(leaf, ei, | |
39279cc3 | 4741 | BTRFS_FILE_EXTENT_INLINE); |
c8b97818 CM |
4742 | btrfs_set_file_extent_encryption(leaf, ei, 0); |
4743 | btrfs_set_file_extent_compression(leaf, ei, 0); | |
4744 | btrfs_set_file_extent_other_encoding(leaf, ei, 0); | |
4745 | btrfs_set_file_extent_ram_bytes(leaf, ei, name_len); | |
4746 | ||
39279cc3 | 4747 | ptr = btrfs_file_extent_inline_start(ei); |
5f39d397 CM |
4748 | write_extent_buffer(leaf, symname, ptr, name_len); |
4749 | btrfs_mark_buffer_dirty(leaf); | |
39279cc3 | 4750 | btrfs_free_path(path); |
5f39d397 | 4751 | |
39279cc3 CM |
4752 | inode->i_op = &btrfs_symlink_inode_operations; |
4753 | inode->i_mapping->a_ops = &btrfs_symlink_aops; | |
04160088 | 4754 | inode->i_mapping->backing_dev_info = &root->fs_info->bdi; |
d899e052 | 4755 | inode_set_bytes(inode, name_len); |
dbe674a9 | 4756 | btrfs_i_size_write(inode, name_len - 1); |
54aa1f4d CM |
4757 | err = btrfs_update_inode(trans, root, inode); |
4758 | if (err) | |
4759 | drop_inode = 1; | |
39279cc3 CM |
4760 | |
4761 | out_unlock: | |
d3c2fdcf | 4762 | nr = trans->blocks_used; |
ab78c84d | 4763 | btrfs_end_transaction_throttle(trans, root); |
1832a6d5 | 4764 | out_fail: |
39279cc3 CM |
4765 | if (drop_inode) { |
4766 | inode_dec_link_count(inode); | |
4767 | iput(inode); | |
4768 | } | |
d3c2fdcf | 4769 | btrfs_btree_balance_dirty(root, nr); |
39279cc3 CM |
4770 | return err; |
4771 | } | |
16432985 | 4772 | |
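/*
 * reserve on-disk space for [start, end) and record it as
 * BTRFS_FILE_EXTENT_PREALLOC items, working through the range in
 * chunks of at most fs_info->max_extent.  i_size is pushed out to the
 * end of the preallocated area unless FALLOC_FL_KEEP_SIZE was asked
 * for.
 */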
d899e052 YZ |
4773 | static int prealloc_file_range(struct inode *inode, u64 start, u64 end, |
4774 | u64 alloc_hint, int mode) | |
4775 | { | |
4776 | struct btrfs_trans_handle *trans; | |
4777 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
4778 | struct btrfs_key ins; | |
4779 | u64 alloc_size; | |
4780 | u64 cur_offset = start; | |
4781 | u64 num_bytes = end - start; | |
4782 | int ret = 0; | |
4783 | ||
4784 | trans = btrfs_join_transaction(root, 1); | |
4785 | BUG_ON(!trans); | |
4786 | btrfs_set_trans_block_group(trans, inode); | |
4787 | ||
4788 | while (num_bytes > 0) { | |
4789 | alloc_size = min(num_bytes, root->fs_info->max_extent); | |
4790 | ret = btrfs_reserve_extent(trans, root, alloc_size, | |
4791 | root->sectorsize, 0, alloc_hint, | |
4792 | (u64)-1, &ins, 1); | |
4793 | if (ret) { | |
4794 | WARN_ON(1); | |
4795 | goto out; | |
4796 | } | |
4797 | ret = insert_reserved_file_extent(trans, inode, | |
4798 | cur_offset, ins.objectid, | |
4799 | ins.offset, ins.offset, | |
4800 | ins.offset, 0, 0, 0, | |
4801 | BTRFS_FILE_EXTENT_PREALLOC); | |
4802 | BUG_ON(ret); | |
4803 | num_bytes -= ins.offset; | |
4804 | cur_offset += ins.offset; | |
4805 | alloc_hint = ins.objectid + ins.offset; | |
4806 | } | |
4807 | out: | |
4808 | if (cur_offset > start) { | |
4809 | inode->i_ctime = CURRENT_TIME; | |
4810 | btrfs_set_flag(inode, PREALLOC); | |
4811 | if (!(mode & FALLOC_FL_KEEP_SIZE) && | |
4812 | cur_offset > i_size_read(inode)) | |
4813 | btrfs_i_size_write(inode, cur_offset); | |
4814 | ret = btrfs_update_inode(trans, root, inode); | |
4815 | BUG_ON(ret); | |
4816 | } | |
4817 | ||
4818 | btrfs_end_transaction(trans, root); | |
4819 | return ret; | |
4820 | } | |
4821 | ||
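/*
 * fallocate handler.  The requested range is rounded out to the
 * sectorsize, any ordered extents overlapping it are waited on, and
 * every hole found in the extent maps is filled with preallocated
 * extents via prealloc_file_range().
 *
 * For reference, userspace would normally reach this through the
 * fallocate(2) syscall, e.g. fallocate(fd, 0, offset, len) to also
 * extend i_size, or fallocate(fd, FALLOC_FL_KEEP_SIZE, offset, len)
 * to preallocate without changing the visible file size.
 */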
4822 | static long btrfs_fallocate(struct inode *inode, int mode, | |
4823 | loff_t offset, loff_t len) | |
4824 | { | |
4825 | u64 cur_offset; | |
4826 | u64 last_byte; | |
4827 | u64 alloc_start; | |
4828 | u64 alloc_end; | |
4829 | u64 alloc_hint = 0; | |
4830 | u64 mask = BTRFS_I(inode)->root->sectorsize - 1; | |
4831 | struct extent_map *em; | |
4832 | int ret; | |
4833 | ||
4834 | alloc_start = offset & ~mask; | |
4835 | alloc_end = (offset + len + mask) & ~mask; | |
4836 | ||
4837 | mutex_lock(&inode->i_mutex); | |
4838 | if (alloc_start > inode->i_size) { | |
4839 | ret = btrfs_cont_expand(inode, alloc_start); | |
4840 | if (ret) | |
4841 | goto out; | |
4842 | } | |
4843 | ||
4844 | while (1) { | |
4845 | struct btrfs_ordered_extent *ordered; | |
4846 | lock_extent(&BTRFS_I(inode)->io_tree, alloc_start, | |
4847 | alloc_end - 1, GFP_NOFS); | |
4848 | ordered = btrfs_lookup_first_ordered_extent(inode, | |
4849 | alloc_end - 1); | |
4850 | if (ordered && | |
4851 | ordered->file_offset + ordered->len > alloc_start && | |
4852 | ordered->file_offset < alloc_end) { | |
4853 | btrfs_put_ordered_extent(ordered); | |
4854 | unlock_extent(&BTRFS_I(inode)->io_tree, | |
4855 | alloc_start, alloc_end - 1, GFP_NOFS); | |
4856 | btrfs_wait_ordered_range(inode, alloc_start, | |
4857 | alloc_end - alloc_start); | |
4858 | } else { | |
4859 | if (ordered) | |
4860 | btrfs_put_ordered_extent(ordered); | |
4861 | break; | |
4862 | } | |
4863 | } | |
4864 | ||
4865 | cur_offset = alloc_start; | |
4866 | while (1) { | |
4867 | em = btrfs_get_extent(inode, NULL, 0, cur_offset, | |
4868 | alloc_end - cur_offset, 0); | |
4869 | BUG_ON(IS_ERR(em) || !em); | |
4870 | last_byte = min(extent_map_end(em), alloc_end); | |
4871 | last_byte = (last_byte + mask) & ~mask; | |
4872 | if (em->block_start == EXTENT_MAP_HOLE) { | |
4873 | ret = prealloc_file_range(inode, cur_offset, | |
4874 | last_byte, alloc_hint, mode); | |
4875 | if (ret < 0) { | |
4876 | free_extent_map(em); | |
4877 | break; | |
4878 | } | |
4879 | } | |
4880 | if (em->block_start <= EXTENT_MAP_LAST_BYTE) | |
4881 | alloc_hint = em->block_start; | |
4882 | free_extent_map(em); | |
4883 | ||
4884 | cur_offset = last_byte; | |
4885 | if (cur_offset >= alloc_end) { | |
4886 | ret = 0; | |
4887 | break; | |
4888 | } | |
4889 | } | |
4890 | unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, alloc_end - 1, | |
4891 | GFP_NOFS); | |
4892 | out: | |
4893 | mutex_unlock(&inode->i_mutex); | |
4894 | return ret; | |
4895 | } | |
4896 | ||
e6dcd2dc CM |
4897 | static int btrfs_set_page_dirty(struct page *page) |
4898 | { | |
e6dcd2dc CM |
4899 | return __set_page_dirty_nobuffers(page); |
4900 | } | |
4901 | ||
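/*
 * inodes flagged READONLY refuse MAY_WRITE; everything else falls
 * through to generic_permission, which consults btrfs_check_acl for
 * POSIX acls.
 */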
0ee0fda0 | 4902 | static int btrfs_permission(struct inode *inode, int mask) |
fdebe2bd Y |
4903 | { |
4904 | if (btrfs_test_flag(inode, READONLY) && (mask & MAY_WRITE)) | |
4905 | return -EACCES; | |
33268eaf | 4906 | return generic_permission(inode, mask, btrfs_check_acl); |
fdebe2bd | 4907 | } |
39279cc3 CM |
4908 | |
4909 | static struct inode_operations btrfs_dir_inode_operations = { | |
4910 | .lookup = btrfs_lookup, | |
4911 | .create = btrfs_create, | |
4912 | .unlink = btrfs_unlink, | |
4913 | .link = btrfs_link, | |
4914 | .mkdir = btrfs_mkdir, | |
4915 | .rmdir = btrfs_rmdir, | |
4916 | .rename = btrfs_rename, | |
4917 | .symlink = btrfs_symlink, | |
4918 | .setattr = btrfs_setattr, | |
618e21d5 | 4919 | .mknod = btrfs_mknod, |
95819c05 CH |
4920 | .setxattr = btrfs_setxattr, |
4921 | .getxattr = btrfs_getxattr, | |
5103e947 | 4922 | .listxattr = btrfs_listxattr, |
95819c05 | 4923 | .removexattr = btrfs_removexattr, |
fdebe2bd | 4924 | .permission = btrfs_permission, |
39279cc3 | 4925 | }; |
39279cc3 CM |
4926 | static struct inode_operations btrfs_dir_ro_inode_operations = { |
4927 | .lookup = btrfs_lookup, | |
fdebe2bd | 4928 | .permission = btrfs_permission, |
39279cc3 | 4929 | }; |
39279cc3 CM |
4930 | static struct file_operations btrfs_dir_file_operations = { |
4931 | .llseek = generic_file_llseek, | |
4932 | .read = generic_read_dir, | |
cbdf5a24 | 4933 | .readdir = btrfs_real_readdir, |
34287aa3 | 4934 | .unlocked_ioctl = btrfs_ioctl, |
39279cc3 | 4935 | #ifdef CONFIG_COMPAT |
34287aa3 | 4936 | .compat_ioctl = btrfs_ioctl, |
39279cc3 | 4937 | #endif |
6bf13c0c | 4938 | .release = btrfs_release_file, |
e02119d5 | 4939 | .fsync = btrfs_sync_file, |
39279cc3 CM |
4940 | }; |
4941 | ||
d1310b2e | 4942 | static struct extent_io_ops btrfs_extent_io_ops = { |
07157aac | 4943 | .fill_delalloc = run_delalloc_range, |
065631f6 | 4944 | .submit_bio_hook = btrfs_submit_bio_hook, |
239b14b3 | 4945 | .merge_bio_hook = btrfs_merge_bio_hook, |
07157aac | 4946 | .readpage_end_io_hook = btrfs_readpage_end_io_hook, |
e6dcd2dc | 4947 | .writepage_end_io_hook = btrfs_writepage_end_io_hook, |
247e743c | 4948 | .writepage_start_hook = btrfs_writepage_start_hook, |
1259ab75 | 4949 | .readpage_io_failed_hook = btrfs_io_failed_hook, |
b0c68f8b CM |
4950 | .set_bit_hook = btrfs_set_bit_hook, |
4951 | .clear_bit_hook = btrfs_clear_bit_hook, | |
07157aac CM |
4952 | }; |
4953 | ||
39279cc3 CM |
4954 | static struct address_space_operations btrfs_aops = { |
4955 | .readpage = btrfs_readpage, | |
4956 | .writepage = btrfs_writepage, | |
b293f02e | 4957 | .writepages = btrfs_writepages, |
3ab2fb5a | 4958 | .readpages = btrfs_readpages, |
39279cc3 | 4959 | .sync_page = block_sync_page, |
39279cc3 | 4960 | .bmap = btrfs_bmap, |
16432985 | 4961 | .direct_IO = btrfs_direct_IO, |
a52d9a80 CM |
4962 | .invalidatepage = btrfs_invalidatepage, |
4963 | .releasepage = btrfs_releasepage, | |
e6dcd2dc | 4964 | .set_page_dirty = btrfs_set_page_dirty, |
39279cc3 CM |
4965 | }; |
4966 | ||
4967 | static struct address_space_operations btrfs_symlink_aops = { | |
4968 | .readpage = btrfs_readpage, | |
4969 | .writepage = btrfs_writepage, | |
2bf5a725 CM |
4970 | .invalidatepage = btrfs_invalidatepage, |
4971 | .releasepage = btrfs_releasepage, | |
39279cc3 CM |
4972 | }; |
4973 | ||
4974 | static struct inode_operations btrfs_file_inode_operations = { | |
4975 | .truncate = btrfs_truncate, | |
4976 | .getattr = btrfs_getattr, | |
4977 | .setattr = btrfs_setattr, | |
95819c05 CH |
4978 | .setxattr = btrfs_setxattr, |
4979 | .getxattr = btrfs_getxattr, | |
5103e947 | 4980 | .listxattr = btrfs_listxattr, |
95819c05 | 4981 | .removexattr = btrfs_removexattr, |
fdebe2bd | 4982 | .permission = btrfs_permission, |
d899e052 | 4983 | .fallocate = btrfs_fallocate, |
39279cc3 | 4984 | }; |
618e21d5 JB |
4985 | static struct inode_operations btrfs_special_inode_operations = { |
4986 | .getattr = btrfs_getattr, | |
4987 | .setattr = btrfs_setattr, | |
fdebe2bd | 4988 | .permission = btrfs_permission, |
95819c05 CH |
4989 | .setxattr = btrfs_setxattr, |
4990 | .getxattr = btrfs_getxattr, | |
33268eaf | 4991 | .listxattr = btrfs_listxattr, |
95819c05 | 4992 | .removexattr = btrfs_removexattr, |
618e21d5 | 4993 | }; |
39279cc3 CM |
4994 | static struct inode_operations btrfs_symlink_inode_operations = { |
4995 | .readlink = generic_readlink, | |
4996 | .follow_link = page_follow_link_light, | |
4997 | .put_link = page_put_link, | |
fdebe2bd | 4998 | .permission = btrfs_permission, |
39279cc3 | 4999 | }; |