]>
Commit | Line | Data |
---|---|---|
6cbd5570 CM |
1 | /* |
2 | * Copyright (C) 2007 Oracle. All rights reserved. | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or | |
5 | * modify it under the terms of the GNU General Public | |
6 | * License v2 as published by the Free Software Foundation. | |
7 | * | |
8 | * This program is distributed in the hope that it will be useful, | |
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
11 | * General Public License for more details. | |
12 | * | |
13 | * You should have received a copy of the GNU General Public | |
14 | * License along with this program; if not, write to the | |
15 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, | |
16 | * Boston, MA 021110-1307, USA. | |
17 | */ | |
18 | ||
8f18cf13 | 19 | #include <linux/kernel.h> |
065631f6 | 20 | #include <linux/bio.h> |
39279cc3 | 21 | #include <linux/buffer_head.h> |
f2eb0a24 | 22 | #include <linux/file.h> |
39279cc3 CM |
23 | #include <linux/fs.h> |
24 | #include <linux/pagemap.h> | |
25 | #include <linux/highmem.h> | |
26 | #include <linux/time.h> | |
27 | #include <linux/init.h> | |
28 | #include <linux/string.h> | |
39279cc3 CM |
29 | #include <linux/backing-dev.h> |
30 | #include <linux/mpage.h> | |
31 | #include <linux/swap.h> | |
32 | #include <linux/writeback.h> | |
33 | #include <linux/statfs.h> | |
34 | #include <linux/compat.h> | |
9ebefb18 | 35 | #include <linux/bit_spinlock.h> |
5103e947 | 36 | #include <linux/xattr.h> |
33268eaf | 37 | #include <linux/posix_acl.h> |
d899e052 | 38 | #include <linux/falloc.h> |
5a0e3ad6 | 39 | #include <linux/slab.h> |
4b4e25f2 | 40 | #include "compat.h" |
39279cc3 CM |
41 | #include "ctree.h" |
42 | #include "disk-io.h" | |
43 | #include "transaction.h" | |
44 | #include "btrfs_inode.h" | |
45 | #include "ioctl.h" | |
46 | #include "print-tree.h" | |
0b86a832 | 47 | #include "volumes.h" |
e6dcd2dc | 48 | #include "ordered-data.h" |
95819c05 | 49 | #include "xattr.h" |
e02119d5 | 50 | #include "tree-log.h" |
c8b97818 | 51 | #include "compression.h" |
b4ce94de | 52 | #include "locking.h" |
dc89e982 | 53 | #include "free-space-cache.h" |
39279cc3 CM |
54 | |
55 | struct btrfs_iget_args { | |
56 | u64 ino; | |
57 | struct btrfs_root *root; | |
58 | }; | |
59 | ||
6e1d5dcc AD |
60 | static const struct inode_operations btrfs_dir_inode_operations; |
61 | static const struct inode_operations btrfs_symlink_inode_operations; | |
62 | static const struct inode_operations btrfs_dir_ro_inode_operations; | |
63 | static const struct inode_operations btrfs_special_inode_operations; | |
64 | static const struct inode_operations btrfs_file_inode_operations; | |
7f09410b AD |
65 | static const struct address_space_operations btrfs_aops; |
66 | static const struct address_space_operations btrfs_symlink_aops; | |
828c0950 | 67 | static const struct file_operations btrfs_dir_file_operations; |
d1310b2e | 68 | static struct extent_io_ops btrfs_extent_io_ops; |
39279cc3 CM |
69 | |
70 | static struct kmem_cache *btrfs_inode_cachep; | |
71 | struct kmem_cache *btrfs_trans_handle_cachep; | |
72 | struct kmem_cache *btrfs_transaction_cachep; | |
39279cc3 | 73 | struct kmem_cache *btrfs_path_cachep; |
dc89e982 | 74 | struct kmem_cache *btrfs_free_space_cachep; |
39279cc3 CM |
75 | |
76 | #define S_SHIFT 12 | |
77 | static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = { | |
78 | [S_IFREG >> S_SHIFT] = BTRFS_FT_REG_FILE, | |
79 | [S_IFDIR >> S_SHIFT] = BTRFS_FT_DIR, | |
80 | [S_IFCHR >> S_SHIFT] = BTRFS_FT_CHRDEV, | |
81 | [S_IFBLK >> S_SHIFT] = BTRFS_FT_BLKDEV, | |
82 | [S_IFIFO >> S_SHIFT] = BTRFS_FT_FIFO, | |
83 | [S_IFSOCK >> S_SHIFT] = BTRFS_FT_SOCK, | |
84 | [S_IFLNK >> S_SHIFT] = BTRFS_FT_SYMLINK, | |
85 | }; | |
86 | ||
a41ad394 JB |
87 | static int btrfs_setsize(struct inode *inode, loff_t newsize); |
88 | static int btrfs_truncate(struct inode *inode); | |
c8b97818 | 89 | static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end); |
771ed689 CM |
90 | static noinline int cow_file_range(struct inode *inode, |
91 | struct page *locked_page, | |
92 | u64 start, u64 end, int *page_started, | |
93 | unsigned long *nr_written, int unlock); | |
7b128766 | 94 | |
f34f57a3 | 95 | static int btrfs_init_inode_security(struct btrfs_trans_handle *trans, |
2a7dba39 EP |
96 | struct inode *inode, struct inode *dir, |
97 | const struct qstr *qstr) | |
0279b4cd JO |
98 | { |
99 | int err; | |
100 | ||
f34f57a3 | 101 | err = btrfs_init_acl(trans, inode, dir); |
0279b4cd | 102 | if (!err) |
2a7dba39 | 103 | err = btrfs_xattr_security_init(trans, inode, dir, qstr); |
0279b4cd JO |
104 | return err; |
105 | } | |
106 | ||
c8b97818 CM |
107 | /* |
108 | * this does all the hard work for inserting an inline extent into | |
109 | * the btree. The caller should have done a btrfs_drop_extents so that | |
110 | * no overlapping inline items exist in the btree | |
111 | */ | |
d397712b | 112 | static noinline int insert_inline_extent(struct btrfs_trans_handle *trans, |
c8b97818 CM |
113 | struct btrfs_root *root, struct inode *inode, |
114 | u64 start, size_t size, size_t compressed_size, | |
fe3f566c | 115 | int compress_type, |
c8b97818 CM |
116 | struct page **compressed_pages) |
117 | { | |
118 | struct btrfs_key key; | |
119 | struct btrfs_path *path; | |
120 | struct extent_buffer *leaf; | |
121 | struct page *page = NULL; | |
122 | char *kaddr; | |
123 | unsigned long ptr; | |
124 | struct btrfs_file_extent_item *ei; | |
125 | int err = 0; | |
126 | int ret; | |
127 | size_t cur_size = size; | |
128 | size_t datasize; | |
129 | unsigned long offset; | |
c8b97818 | 130 | |
fe3f566c | 131 | if (compressed_size && compressed_pages) |
c8b97818 | 132 | cur_size = compressed_size; |
c8b97818 | 133 | |
d397712b CM |
134 | path = btrfs_alloc_path(); |
135 | if (!path) | |
c8b97818 CM |
136 | return -ENOMEM; |
137 | ||
b9473439 | 138 | path->leave_spinning = 1; |
c8b97818 CM |
139 | btrfs_set_trans_block_group(trans, inode); |
140 | ||
141 | key.objectid = inode->i_ino; | |
142 | key.offset = start; | |
143 | btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY); | |
c8b97818 CM |
144 | datasize = btrfs_file_extent_calc_inline_size(cur_size); |
145 | ||
146 | inode_add_bytes(inode, size); | |
147 | ret = btrfs_insert_empty_item(trans, root, path, &key, | |
148 | datasize); | |
149 | BUG_ON(ret); | |
150 | if (ret) { | |
151 | err = ret; | |
c8b97818 CM |
152 | goto fail; |
153 | } | |
154 | leaf = path->nodes[0]; | |
155 | ei = btrfs_item_ptr(leaf, path->slots[0], | |
156 | struct btrfs_file_extent_item); | |
157 | btrfs_set_file_extent_generation(leaf, ei, trans->transid); | |
158 | btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE); | |
159 | btrfs_set_file_extent_encryption(leaf, ei, 0); | |
160 | btrfs_set_file_extent_other_encoding(leaf, ei, 0); | |
161 | btrfs_set_file_extent_ram_bytes(leaf, ei, size); | |
162 | ptr = btrfs_file_extent_inline_start(ei); | |
163 | ||
261507a0 | 164 | if (compress_type != BTRFS_COMPRESS_NONE) { |
c8b97818 CM |
165 | struct page *cpage; |
166 | int i = 0; | |
d397712b | 167 | while (compressed_size > 0) { |
c8b97818 | 168 | cpage = compressed_pages[i]; |
5b050f04 | 169 | cur_size = min_t(unsigned long, compressed_size, |
c8b97818 CM |
170 | PAGE_CACHE_SIZE); |
171 | ||
b9473439 | 172 | kaddr = kmap_atomic(cpage, KM_USER0); |
c8b97818 | 173 | write_extent_buffer(leaf, kaddr, ptr, cur_size); |
b9473439 | 174 | kunmap_atomic(kaddr, KM_USER0); |
c8b97818 CM |
175 | |
176 | i++; | |
177 | ptr += cur_size; | |
178 | compressed_size -= cur_size; | |
179 | } | |
180 | btrfs_set_file_extent_compression(leaf, ei, | |
261507a0 | 181 | compress_type); |
c8b97818 CM |
182 | } else { |
183 | page = find_get_page(inode->i_mapping, | |
184 | start >> PAGE_CACHE_SHIFT); | |
185 | btrfs_set_file_extent_compression(leaf, ei, 0); | |
186 | kaddr = kmap_atomic(page, KM_USER0); | |
187 | offset = start & (PAGE_CACHE_SIZE - 1); | |
188 | write_extent_buffer(leaf, kaddr + offset, ptr, size); | |
189 | kunmap_atomic(kaddr, KM_USER0); | |
190 | page_cache_release(page); | |
191 | } | |
192 | btrfs_mark_buffer_dirty(leaf); | |
193 | btrfs_free_path(path); | |
194 | ||
c2167754 YZ |
195 | /* |
196 | * we're an inline extent, so nobody can | |
197 | * extend the file past i_size without locking | |
198 | * a page we already have locked. | |
199 | * | |
200 | * We must do any isize and inode updates | |
201 | * before we unlock the pages. Otherwise we | |
202 | * could end up racing with unlink. | |
203 | */ | |
c8b97818 CM |
204 | BTRFS_I(inode)->disk_i_size = inode->i_size; |
205 | btrfs_update_inode(trans, root, inode); | |
c2167754 | 206 | |
c8b97818 CM |
207 | return 0; |
208 | fail: | |
209 | btrfs_free_path(path); | |
210 | return err; | |
211 | } | |
212 | ||
213 | ||
214 | /* | |
215 | * conditionally insert an inline extent into the file. This | |
216 | * does the checks required to make sure the data is small enough | |
217 | * to fit as an inline extent. | |
218 | */ | |
7f366cfe | 219 | static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans, |
c8b97818 CM |
220 | struct btrfs_root *root, |
221 | struct inode *inode, u64 start, u64 end, | |
fe3f566c | 222 | size_t compressed_size, int compress_type, |
c8b97818 CM |
223 | struct page **compressed_pages) |
224 | { | |
225 | u64 isize = i_size_read(inode); | |
226 | u64 actual_end = min(end + 1, isize); | |
227 | u64 inline_len = actual_end - start; | |
228 | u64 aligned_end = (end + root->sectorsize - 1) & | |
229 | ~((u64)root->sectorsize - 1); | |
230 | u64 hint_byte; | |
231 | u64 data_len = inline_len; | |
232 | int ret; | |
233 | ||
234 | if (compressed_size) | |
235 | data_len = compressed_size; | |
236 | ||
237 | if (start > 0 || | |
70b99e69 | 238 | actual_end >= PAGE_CACHE_SIZE || |
c8b97818 CM |
239 | data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) || |
240 | (!compressed_size && | |
241 | (actual_end & (root->sectorsize - 1)) == 0) || | |
242 | end + 1 < isize || | |
243 | data_len > root->fs_info->max_inline) { | |
244 | return 1; | |
245 | } | |
246 | ||
920bbbfb | 247 | ret = btrfs_drop_extents(trans, inode, start, aligned_end, |
a1ed835e | 248 | &hint_byte, 1); |
c8b97818 CM |
249 | BUG_ON(ret); |
250 | ||
251 | if (isize > actual_end) | |
252 | inline_len = min_t(u64, isize, actual_end); | |
253 | ret = insert_inline_extent(trans, root, inode, start, | |
254 | inline_len, compressed_size, | |
fe3f566c | 255 | compress_type, compressed_pages); |
c8b97818 | 256 | BUG_ON(ret); |
0ca1f7ce | 257 | btrfs_delalloc_release_metadata(inode, end + 1 - start); |
a1ed835e | 258 | btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0); |
c8b97818 CM |
259 | return 0; |
260 | } | |
261 | ||
771ed689 CM |
262 | struct async_extent { |
263 | u64 start; | |
264 | u64 ram_size; | |
265 | u64 compressed_size; | |
266 | struct page **pages; | |
267 | unsigned long nr_pages; | |
261507a0 | 268 | int compress_type; |
771ed689 CM |
269 | struct list_head list; |
270 | }; | |
271 | ||
272 | struct async_cow { | |
273 | struct inode *inode; | |
274 | struct btrfs_root *root; | |
275 | struct page *locked_page; | |
276 | u64 start; | |
277 | u64 end; | |
278 | struct list_head extents; | |
279 | struct btrfs_work work; | |
280 | }; | |
281 | ||
282 | static noinline int add_async_extent(struct async_cow *cow, | |
283 | u64 start, u64 ram_size, | |
284 | u64 compressed_size, | |
285 | struct page **pages, | |
261507a0 LZ |
286 | unsigned long nr_pages, |
287 | int compress_type) | |
771ed689 CM |
288 | { |
289 | struct async_extent *async_extent; | |
290 | ||
291 | async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS); | |
dac97e51 | 292 | BUG_ON(!async_extent); |
771ed689 CM |
293 | async_extent->start = start; |
294 | async_extent->ram_size = ram_size; | |
295 | async_extent->compressed_size = compressed_size; | |
296 | async_extent->pages = pages; | |
297 | async_extent->nr_pages = nr_pages; | |
261507a0 | 298 | async_extent->compress_type = compress_type; |
771ed689 CM |
299 | list_add_tail(&async_extent->list, &cow->extents); |
300 | return 0; | |
301 | } | |
302 | ||
d352ac68 | 303 | /* |
771ed689 CM |
304 | * we create compressed extents in two phases. The first |
305 | * phase compresses a range of pages that have already been | |
306 | * locked (both pages and state bits are locked). | |
c8b97818 | 307 | * |
771ed689 CM |
308 | * This is done inside an ordered work queue, and the compression |
309 | * is spread across many cpus. The actual IO submission is step | |
310 | * two, and the ordered work queue takes care of making sure that | |
311 | * happens in the same order things were put onto the queue by | |
312 | * writepages and friends. | |
c8b97818 | 313 | * |
771ed689 CM |
314 | * If this code finds it can't get good compression, it puts an |
315 | * entry onto the work queue to write the uncompressed bytes. This | |
316 | * makes sure that both compressed inodes and uncompressed inodes | |
317 | * are written in the same order that pdflush sent them down. | |
d352ac68 | 318 | */ |
771ed689 CM |
319 | static noinline int compress_file_range(struct inode *inode, |
320 | struct page *locked_page, | |
321 | u64 start, u64 end, | |
322 | struct async_cow *async_cow, | |
323 | int *num_added) | |
b888db2b CM |
324 | { |
325 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
326 | struct btrfs_trans_handle *trans; | |
db94535d | 327 | u64 num_bytes; |
db94535d | 328 | u64 blocksize = root->sectorsize; |
c8b97818 | 329 | u64 actual_end; |
42dc7bab | 330 | u64 isize = i_size_read(inode); |
e6dcd2dc | 331 | int ret = 0; |
c8b97818 CM |
332 | struct page **pages = NULL; |
333 | unsigned long nr_pages; | |
334 | unsigned long nr_pages_ret = 0; | |
335 | unsigned long total_compressed = 0; | |
336 | unsigned long total_in = 0; | |
337 | unsigned long max_compressed = 128 * 1024; | |
771ed689 | 338 | unsigned long max_uncompressed = 128 * 1024; |
c8b97818 CM |
339 | int i; |
340 | int will_compress; | |
261507a0 | 341 | int compress_type = root->fs_info->compress_type; |
b888db2b | 342 | |
42dc7bab | 343 | actual_end = min_t(u64, isize, end + 1); |
c8b97818 CM |
344 | again: |
345 | will_compress = 0; | |
346 | nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1; | |
347 | nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE); | |
be20aa9d | 348 | |
f03d9301 CM |
349 | /* |
350 | * we don't want to send crud past the end of i_size through | |
351 | * compression, that's just a waste of CPU time. So, if the | |
352 | * end of the file is before the start of our current | |
353 | * requested range of bytes, we bail out to the uncompressed | |
354 | * cleanup code that can deal with all of this. | |
355 | * | |
356 | * It isn't really the fastest way to fix things, but this is a | |
357 | * very uncommon corner. | |
358 | */ | |
359 | if (actual_end <= start) | |
360 | goto cleanup_and_bail_uncompressed; | |
361 | ||
c8b97818 CM |
362 | total_compressed = actual_end - start; |
363 | ||
364 | /* we want to make sure that amount of ram required to uncompress | |
365 | * an extent is reasonable, so we limit the total size in ram | |
771ed689 CM |
366 | * of a compressed extent to 128k. This is a crucial number |
367 | * because it also controls how easily we can spread reads across | |
368 | * cpus for decompression. | |
369 | * | |
370 | * We also want to make sure the amount of IO required to do | |
371 | * a random read is reasonably small, so we limit the size of | |
372 | * a compressed extent to 128k. | |
c8b97818 CM |
373 | */ |
374 | total_compressed = min(total_compressed, max_uncompressed); | |
db94535d | 375 | num_bytes = (end - start + blocksize) & ~(blocksize - 1); |
be20aa9d | 376 | num_bytes = max(blocksize, num_bytes); |
c8b97818 CM |
377 | total_in = 0; |
378 | ret = 0; | |
db94535d | 379 | |
771ed689 CM |
380 | /* |
381 | * we do compression for mount -o compress and when the | |
382 | * inode has not been flagged as nocompress. This flag can | |
383 | * change at any time if we discover bad compression ratios. | |
c8b97818 | 384 | */ |
6cbff00f | 385 | if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) && |
1e701a32 | 386 | (btrfs_test_opt(root, COMPRESS) || |
75e7cb7f LB |
387 | (BTRFS_I(inode)->force_compress) || |
388 | (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS))) { | |
c8b97818 | 389 | WARN_ON(pages); |
cfbc246e | 390 | pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS); |
dac97e51 | 391 | BUG_ON(!pages); |
c8b97818 | 392 | |
261507a0 LZ |
393 | if (BTRFS_I(inode)->force_compress) |
394 | compress_type = BTRFS_I(inode)->force_compress; | |
395 | ||
396 | ret = btrfs_compress_pages(compress_type, | |
397 | inode->i_mapping, start, | |
398 | total_compressed, pages, | |
399 | nr_pages, &nr_pages_ret, | |
400 | &total_in, | |
401 | &total_compressed, | |
402 | max_compressed); | |
c8b97818 CM |
403 | |
404 | if (!ret) { | |
405 | unsigned long offset = total_compressed & | |
406 | (PAGE_CACHE_SIZE - 1); | |
407 | struct page *page = pages[nr_pages_ret - 1]; | |
408 | char *kaddr; | |
409 | ||
410 | /* zero the tail end of the last page, we might be | |
411 | * sending it down to disk | |
412 | */ | |
413 | if (offset) { | |
414 | kaddr = kmap_atomic(page, KM_USER0); | |
415 | memset(kaddr + offset, 0, | |
416 | PAGE_CACHE_SIZE - offset); | |
417 | kunmap_atomic(kaddr, KM_USER0); | |
418 | } | |
419 | will_compress = 1; | |
420 | } | |
421 | } | |
422 | if (start == 0) { | |
771ed689 | 423 | trans = btrfs_join_transaction(root, 1); |
3612b495 | 424 | BUG_ON(IS_ERR(trans)); |
771ed689 | 425 | btrfs_set_trans_block_group(trans, inode); |
0ca1f7ce | 426 | trans->block_rsv = &root->fs_info->delalloc_block_rsv; |
771ed689 | 427 | |
c8b97818 | 428 | /* lets try to make an inline extent */ |
771ed689 | 429 | if (ret || total_in < (actual_end - start)) { |
c8b97818 | 430 | /* we didn't compress the entire range, try |
771ed689 | 431 | * to make an uncompressed inline extent. |
c8b97818 CM |
432 | */ |
433 | ret = cow_file_range_inline(trans, root, inode, | |
fe3f566c | 434 | start, end, 0, 0, NULL); |
c8b97818 | 435 | } else { |
771ed689 | 436 | /* try making a compressed inline extent */ |
c8b97818 CM |
437 | ret = cow_file_range_inline(trans, root, inode, |
438 | start, end, | |
fe3f566c LZ |
439 | total_compressed, |
440 | compress_type, pages); | |
c8b97818 CM |
441 | } |
442 | if (ret == 0) { | |
771ed689 CM |
443 | /* |
444 | * inline extent creation worked, we don't need | |
445 | * to create any more async work items. Unlock | |
446 | * and free up our temp pages. | |
447 | */ | |
c8b97818 | 448 | extent_clear_unlock_delalloc(inode, |
a791e35e CM |
449 | &BTRFS_I(inode)->io_tree, |
450 | start, end, NULL, | |
451 | EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY | | |
a3429ab7 | 452 | EXTENT_CLEAR_DELALLOC | |
a791e35e | 453 | EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK); |
c2167754 YZ |
454 | |
455 | btrfs_end_transaction(trans, root); | |
c8b97818 CM |
456 | goto free_pages_out; |
457 | } | |
c2167754 | 458 | btrfs_end_transaction(trans, root); |
c8b97818 CM |
459 | } |
460 | ||
461 | if (will_compress) { | |
462 | /* | |
463 | * we aren't doing an inline extent round the compressed size | |
464 | * up to a block size boundary so the allocator does sane | |
465 | * things | |
466 | */ | |
467 | total_compressed = (total_compressed + blocksize - 1) & | |
468 | ~(blocksize - 1); | |
469 | ||
470 | /* | |
471 | * one last check to make sure the compression is really a | |
472 | * win, compare the page count read with the blocks on disk | |
473 | */ | |
474 | total_in = (total_in + PAGE_CACHE_SIZE - 1) & | |
475 | ~(PAGE_CACHE_SIZE - 1); | |
476 | if (total_compressed >= total_in) { | |
477 | will_compress = 0; | |
478 | } else { | |
c8b97818 CM |
479 | num_bytes = total_in; |
480 | } | |
481 | } | |
482 | if (!will_compress && pages) { | |
483 | /* | |
484 | * the compression code ran but failed to make things smaller, | |
485 | * free any pages it allocated and our page pointer array | |
486 | */ | |
487 | for (i = 0; i < nr_pages_ret; i++) { | |
70b99e69 | 488 | WARN_ON(pages[i]->mapping); |
c8b97818 CM |
489 | page_cache_release(pages[i]); |
490 | } | |
491 | kfree(pages); | |
492 | pages = NULL; | |
493 | total_compressed = 0; | |
494 | nr_pages_ret = 0; | |
495 | ||
496 | /* flag the file so we don't compress in the future */ | |
1e701a32 CM |
497 | if (!btrfs_test_opt(root, FORCE_COMPRESS) && |
498 | !(BTRFS_I(inode)->force_compress)) { | |
a555f810 | 499 | BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS; |
1e701a32 | 500 | } |
c8b97818 | 501 | } |
771ed689 CM |
502 | if (will_compress) { |
503 | *num_added += 1; | |
c8b97818 | 504 | |
771ed689 CM |
505 | /* the async work queues will take care of doing actual |
506 | * allocation on disk for these compressed pages, | |
507 | * and will submit them to the elevator. | |
508 | */ | |
509 | add_async_extent(async_cow, start, num_bytes, | |
261507a0 LZ |
510 | total_compressed, pages, nr_pages_ret, |
511 | compress_type); | |
179e29e4 | 512 | |
24ae6365 | 513 | if (start + num_bytes < end) { |
771ed689 CM |
514 | start += num_bytes; |
515 | pages = NULL; | |
516 | cond_resched(); | |
517 | goto again; | |
518 | } | |
519 | } else { | |
f03d9301 | 520 | cleanup_and_bail_uncompressed: |
771ed689 CM |
521 | /* |
522 | * No compression, but we still need to write the pages in | |
523 | * the file we've been given so far. redirty the locked | |
524 | * page if it corresponds to our extent and set things up | |
525 | * for the async work queue to run cow_file_range to do | |
526 | * the normal delalloc dance | |
527 | */ | |
528 | if (page_offset(locked_page) >= start && | |
529 | page_offset(locked_page) <= end) { | |
530 | __set_page_dirty_nobuffers(locked_page); | |
531 | /* unlocked later on in the async handlers */ | |
532 | } | |
261507a0 LZ |
533 | add_async_extent(async_cow, start, end - start + 1, |
534 | 0, NULL, 0, BTRFS_COMPRESS_NONE); | |
771ed689 CM |
535 | *num_added += 1; |
536 | } | |
3b951516 | 537 | |
771ed689 CM |
538 | out: |
539 | return 0; | |
540 | ||
541 | free_pages_out: | |
542 | for (i = 0; i < nr_pages_ret; i++) { | |
543 | WARN_ON(pages[i]->mapping); | |
544 | page_cache_release(pages[i]); | |
545 | } | |
d397712b | 546 | kfree(pages); |
771ed689 CM |
547 | |
548 | goto out; | |
549 | } | |
550 | ||
551 | /* | |
552 | * phase two of compressed writeback. This is the ordered portion | |
553 | * of the code, which only gets called in the order the work was | |
554 | * queued. We walk all the async extents created by compress_file_range | |
555 | * and send them down to the disk. | |
556 | */ | |
557 | static noinline int submit_compressed_extents(struct inode *inode, | |
558 | struct async_cow *async_cow) | |
559 | { | |
560 | struct async_extent *async_extent; | |
561 | u64 alloc_hint = 0; | |
562 | struct btrfs_trans_handle *trans; | |
563 | struct btrfs_key ins; | |
564 | struct extent_map *em; | |
565 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
566 | struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; | |
567 | struct extent_io_tree *io_tree; | |
f5a84ee3 | 568 | int ret = 0; |
771ed689 CM |
569 | |
570 | if (list_empty(&async_cow->extents)) | |
571 | return 0; | |
572 | ||
771ed689 | 573 | |
d397712b | 574 | while (!list_empty(&async_cow->extents)) { |
771ed689 CM |
575 | async_extent = list_entry(async_cow->extents.next, |
576 | struct async_extent, list); | |
577 | list_del(&async_extent->list); | |
c8b97818 | 578 | |
771ed689 CM |
579 | io_tree = &BTRFS_I(inode)->io_tree; |
580 | ||
f5a84ee3 | 581 | retry: |
771ed689 CM |
582 | /* did the compression code fall back to uncompressed IO? */ |
583 | if (!async_extent->pages) { | |
584 | int page_started = 0; | |
585 | unsigned long nr_written = 0; | |
586 | ||
587 | lock_extent(io_tree, async_extent->start, | |
2ac55d41 JB |
588 | async_extent->start + |
589 | async_extent->ram_size - 1, GFP_NOFS); | |
771ed689 CM |
590 | |
591 | /* allocate blocks */ | |
f5a84ee3 JB |
592 | ret = cow_file_range(inode, async_cow->locked_page, |
593 | async_extent->start, | |
594 | async_extent->start + | |
595 | async_extent->ram_size - 1, | |
596 | &page_started, &nr_written, 0); | |
771ed689 CM |
597 | |
598 | /* | |
599 | * if page_started, cow_file_range inserted an | |
600 | * inline extent and took care of all the unlocking | |
601 | * and IO for us. Otherwise, we need to submit | |
602 | * all those pages down to the drive. | |
603 | */ | |
f5a84ee3 | 604 | if (!page_started && !ret) |
771ed689 CM |
605 | extent_write_locked_range(io_tree, |
606 | inode, async_extent->start, | |
d397712b | 607 | async_extent->start + |
771ed689 CM |
608 | async_extent->ram_size - 1, |
609 | btrfs_get_extent, | |
610 | WB_SYNC_ALL); | |
611 | kfree(async_extent); | |
612 | cond_resched(); | |
613 | continue; | |
614 | } | |
615 | ||
616 | lock_extent(io_tree, async_extent->start, | |
617 | async_extent->start + async_extent->ram_size - 1, | |
618 | GFP_NOFS); | |
771ed689 | 619 | |
c2167754 | 620 | trans = btrfs_join_transaction(root, 1); |
3612b495 | 621 | BUG_ON(IS_ERR(trans)); |
771ed689 CM |
622 | ret = btrfs_reserve_extent(trans, root, |
623 | async_extent->compressed_size, | |
624 | async_extent->compressed_size, | |
625 | 0, alloc_hint, | |
626 | (u64)-1, &ins, 1); | |
c2167754 YZ |
627 | btrfs_end_transaction(trans, root); |
628 | ||
f5a84ee3 JB |
629 | if (ret) { |
630 | int i; | |
631 | for (i = 0; i < async_extent->nr_pages; i++) { | |
632 | WARN_ON(async_extent->pages[i]->mapping); | |
633 | page_cache_release(async_extent->pages[i]); | |
634 | } | |
635 | kfree(async_extent->pages); | |
636 | async_extent->nr_pages = 0; | |
637 | async_extent->pages = NULL; | |
638 | unlock_extent(io_tree, async_extent->start, | |
639 | async_extent->start + | |
640 | async_extent->ram_size - 1, GFP_NOFS); | |
641 | goto retry; | |
642 | } | |
643 | ||
c2167754 YZ |
644 | /* |
645 | * here we're doing allocation and writeback of the | |
646 | * compressed pages | |
647 | */ | |
648 | btrfs_drop_extent_cache(inode, async_extent->start, | |
649 | async_extent->start + | |
650 | async_extent->ram_size - 1, 0); | |
651 | ||
771ed689 | 652 | em = alloc_extent_map(GFP_NOFS); |
c26a9203 | 653 | BUG_ON(!em); |
771ed689 CM |
654 | em->start = async_extent->start; |
655 | em->len = async_extent->ram_size; | |
445a6944 | 656 | em->orig_start = em->start; |
c8b97818 | 657 | |
771ed689 CM |
658 | em->block_start = ins.objectid; |
659 | em->block_len = ins.offset; | |
660 | em->bdev = root->fs_info->fs_devices->latest_bdev; | |
261507a0 | 661 | em->compress_type = async_extent->compress_type; |
771ed689 CM |
662 | set_bit(EXTENT_FLAG_PINNED, &em->flags); |
663 | set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); | |
664 | ||
d397712b | 665 | while (1) { |
890871be | 666 | write_lock(&em_tree->lock); |
771ed689 | 667 | ret = add_extent_mapping(em_tree, em); |
890871be | 668 | write_unlock(&em_tree->lock); |
771ed689 CM |
669 | if (ret != -EEXIST) { |
670 | free_extent_map(em); | |
671 | break; | |
672 | } | |
673 | btrfs_drop_extent_cache(inode, async_extent->start, | |
674 | async_extent->start + | |
675 | async_extent->ram_size - 1, 0); | |
676 | } | |
677 | ||
261507a0 LZ |
678 | ret = btrfs_add_ordered_extent_compress(inode, |
679 | async_extent->start, | |
680 | ins.objectid, | |
681 | async_extent->ram_size, | |
682 | ins.offset, | |
683 | BTRFS_ORDERED_COMPRESSED, | |
684 | async_extent->compress_type); | |
771ed689 CM |
685 | BUG_ON(ret); |
686 | ||
771ed689 CM |
687 | /* |
688 | * clear dirty, set writeback and unlock the pages. | |
689 | */ | |
690 | extent_clear_unlock_delalloc(inode, | |
a791e35e CM |
691 | &BTRFS_I(inode)->io_tree, |
692 | async_extent->start, | |
693 | async_extent->start + | |
694 | async_extent->ram_size - 1, | |
695 | NULL, EXTENT_CLEAR_UNLOCK_PAGE | | |
696 | EXTENT_CLEAR_UNLOCK | | |
a3429ab7 | 697 | EXTENT_CLEAR_DELALLOC | |
a791e35e | 698 | EXTENT_CLEAR_DIRTY | EXTENT_SET_WRITEBACK); |
771ed689 CM |
699 | |
700 | ret = btrfs_submit_compressed_write(inode, | |
d397712b CM |
701 | async_extent->start, |
702 | async_extent->ram_size, | |
703 | ins.objectid, | |
704 | ins.offset, async_extent->pages, | |
705 | async_extent->nr_pages); | |
771ed689 CM |
706 | |
707 | BUG_ON(ret); | |
771ed689 CM |
708 | alloc_hint = ins.objectid + ins.offset; |
709 | kfree(async_extent); | |
710 | cond_resched(); | |
711 | } | |
712 | ||
771ed689 CM |
713 | return 0; |
714 | } | |
715 | ||
4b46fce2 JB |
716 | static u64 get_extent_allocation_hint(struct inode *inode, u64 start, |
717 | u64 num_bytes) | |
718 | { | |
719 | struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; | |
720 | struct extent_map *em; | |
721 | u64 alloc_hint = 0; | |
722 | ||
723 | read_lock(&em_tree->lock); | |
724 | em = search_extent_mapping(em_tree, start, num_bytes); | |
725 | if (em) { | |
726 | /* | |
727 | * if block start isn't an actual block number then find the | |
728 | * first block in this inode and use that as a hint. If that | |
729 | * block is also bogus then just don't worry about it. | |
730 | */ | |
731 | if (em->block_start >= EXTENT_MAP_LAST_BYTE) { | |
732 | free_extent_map(em); | |
733 | em = search_extent_mapping(em_tree, 0, 0); | |
734 | if (em && em->block_start < EXTENT_MAP_LAST_BYTE) | |
735 | alloc_hint = em->block_start; | |
736 | if (em) | |
737 | free_extent_map(em); | |
738 | } else { | |
739 | alloc_hint = em->block_start; | |
740 | free_extent_map(em); | |
741 | } | |
742 | } | |
743 | read_unlock(&em_tree->lock); | |
744 | ||
745 | return alloc_hint; | |
746 | } | |
747 | ||
771ed689 CM |
748 | /* |
749 | * when extent_io.c finds a delayed allocation range in the file, | |
750 | * the call backs end up in this code. The basic idea is to | |
751 | * allocate extents on disk for the range, and create ordered data structs | |
752 | * in ram to track those extents. | |
753 | * | |
754 | * locked_page is the page that writepage had locked already. We use | |
755 | * it to make sure we don't do extra locks or unlocks. | |
756 | * | |
757 | * *page_started is set to one if we unlock locked_page and do everything | |
758 | * required to start IO on it. It may be clean and already done with | |
759 | * IO when we return. | |
760 | */ | |
761 | static noinline int cow_file_range(struct inode *inode, | |
762 | struct page *locked_page, | |
763 | u64 start, u64 end, int *page_started, | |
764 | unsigned long *nr_written, | |
765 | int unlock) | |
766 | { | |
767 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
768 | struct btrfs_trans_handle *trans; | |
769 | u64 alloc_hint = 0; | |
770 | u64 num_bytes; | |
771 | unsigned long ram_size; | |
772 | u64 disk_num_bytes; | |
773 | u64 cur_alloc_size; | |
774 | u64 blocksize = root->sectorsize; | |
771ed689 CM |
775 | struct btrfs_key ins; |
776 | struct extent_map *em; | |
777 | struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; | |
778 | int ret = 0; | |
779 | ||
0cb59c99 | 780 | BUG_ON(root == root->fs_info->tree_root); |
771ed689 | 781 | trans = btrfs_join_transaction(root, 1); |
3612b495 | 782 | BUG_ON(IS_ERR(trans)); |
771ed689 | 783 | btrfs_set_trans_block_group(trans, inode); |
0ca1f7ce | 784 | trans->block_rsv = &root->fs_info->delalloc_block_rsv; |
771ed689 | 785 | |
771ed689 CM |
786 | num_bytes = (end - start + blocksize) & ~(blocksize - 1); |
787 | num_bytes = max(blocksize, num_bytes); | |
788 | disk_num_bytes = num_bytes; | |
789 | ret = 0; | |
790 | ||
791 | if (start == 0) { | |
792 | /* lets try to make an inline extent */ | |
793 | ret = cow_file_range_inline(trans, root, inode, | |
fe3f566c | 794 | start, end, 0, 0, NULL); |
771ed689 CM |
795 | if (ret == 0) { |
796 | extent_clear_unlock_delalloc(inode, | |
a791e35e CM |
797 | &BTRFS_I(inode)->io_tree, |
798 | start, end, NULL, | |
799 | EXTENT_CLEAR_UNLOCK_PAGE | | |
800 | EXTENT_CLEAR_UNLOCK | | |
801 | EXTENT_CLEAR_DELALLOC | | |
802 | EXTENT_CLEAR_DIRTY | | |
803 | EXTENT_SET_WRITEBACK | | |
804 | EXTENT_END_WRITEBACK); | |
c2167754 | 805 | |
771ed689 CM |
806 | *nr_written = *nr_written + |
807 | (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE; | |
808 | *page_started = 1; | |
809 | ret = 0; | |
810 | goto out; | |
811 | } | |
812 | } | |
813 | ||
814 | BUG_ON(disk_num_bytes > | |
815 | btrfs_super_total_bytes(&root->fs_info->super_copy)); | |
816 | ||
4b46fce2 | 817 | alloc_hint = get_extent_allocation_hint(inode, start, num_bytes); |
771ed689 CM |
818 | btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0); |
819 | ||
d397712b | 820 | while (disk_num_bytes > 0) { |
a791e35e CM |
821 | unsigned long op; |
822 | ||
287a0ab9 | 823 | cur_alloc_size = disk_num_bytes; |
e6dcd2dc | 824 | ret = btrfs_reserve_extent(trans, root, cur_alloc_size, |
771ed689 | 825 | root->sectorsize, 0, alloc_hint, |
e6dcd2dc | 826 | (u64)-1, &ins, 1); |
d397712b CM |
827 | BUG_ON(ret); |
828 | ||
e6dcd2dc | 829 | em = alloc_extent_map(GFP_NOFS); |
c26a9203 | 830 | BUG_ON(!em); |
e6dcd2dc | 831 | em->start = start; |
445a6944 | 832 | em->orig_start = em->start; |
771ed689 CM |
833 | ram_size = ins.offset; |
834 | em->len = ins.offset; | |
c8b97818 | 835 | |
e6dcd2dc | 836 | em->block_start = ins.objectid; |
c8b97818 | 837 | em->block_len = ins.offset; |
e6dcd2dc | 838 | em->bdev = root->fs_info->fs_devices->latest_bdev; |
7f3c74fb | 839 | set_bit(EXTENT_FLAG_PINNED, &em->flags); |
c8b97818 | 840 | |
d397712b | 841 | while (1) { |
890871be | 842 | write_lock(&em_tree->lock); |
e6dcd2dc | 843 | ret = add_extent_mapping(em_tree, em); |
890871be | 844 | write_unlock(&em_tree->lock); |
e6dcd2dc CM |
845 | if (ret != -EEXIST) { |
846 | free_extent_map(em); | |
847 | break; | |
848 | } | |
849 | btrfs_drop_extent_cache(inode, start, | |
c8b97818 | 850 | start + ram_size - 1, 0); |
e6dcd2dc CM |
851 | } |
852 | ||
98d20f67 | 853 | cur_alloc_size = ins.offset; |
e6dcd2dc | 854 | ret = btrfs_add_ordered_extent(inode, start, ins.objectid, |
771ed689 | 855 | ram_size, cur_alloc_size, 0); |
e6dcd2dc | 856 | BUG_ON(ret); |
c8b97818 | 857 | |
17d217fe YZ |
858 | if (root->root_key.objectid == |
859 | BTRFS_DATA_RELOC_TREE_OBJECTID) { | |
860 | ret = btrfs_reloc_clone_csums(inode, start, | |
861 | cur_alloc_size); | |
862 | BUG_ON(ret); | |
863 | } | |
864 | ||
d397712b | 865 | if (disk_num_bytes < cur_alloc_size) |
3b951516 | 866 | break; |
d397712b | 867 | |
c8b97818 CM |
868 | /* we're not doing compressed IO, don't unlock the first |
869 | * page (which the caller expects to stay locked), don't | |
870 | * clear any dirty bits and don't set any writeback bits | |
8b62b72b CM |
871 | * |
872 | * Do set the Private2 bit so we know this page was properly | |
873 | * setup for writepage | |
c8b97818 | 874 | */ |
a791e35e CM |
875 | op = unlock ? EXTENT_CLEAR_UNLOCK_PAGE : 0; |
876 | op |= EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC | | |
877 | EXTENT_SET_PRIVATE2; | |
878 | ||
c8b97818 CM |
879 | extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree, |
880 | start, start + ram_size - 1, | |
a791e35e | 881 | locked_page, op); |
c8b97818 | 882 | disk_num_bytes -= cur_alloc_size; |
c59f8951 CM |
883 | num_bytes -= cur_alloc_size; |
884 | alloc_hint = ins.objectid + ins.offset; | |
885 | start += cur_alloc_size; | |
b888db2b | 886 | } |
b888db2b | 887 | out: |
771ed689 | 888 | ret = 0; |
b888db2b | 889 | btrfs_end_transaction(trans, root); |
c8b97818 | 890 | |
be20aa9d | 891 | return ret; |
771ed689 | 892 | } |
c8b97818 | 893 | |
771ed689 CM |
894 | /* |
895 | * work queue call back to started compression on a file and pages | |
896 | */ | |
897 | static noinline void async_cow_start(struct btrfs_work *work) | |
898 | { | |
899 | struct async_cow *async_cow; | |
900 | int num_added = 0; | |
901 | async_cow = container_of(work, struct async_cow, work); | |
902 | ||
903 | compress_file_range(async_cow->inode, async_cow->locked_page, | |
904 | async_cow->start, async_cow->end, async_cow, | |
905 | &num_added); | |
906 | if (num_added == 0) | |
907 | async_cow->inode = NULL; | |
908 | } | |
909 | ||
910 | /* | |
911 | * work queue call back to submit previously compressed pages | |
912 | */ | |
913 | static noinline void async_cow_submit(struct btrfs_work *work) | |
914 | { | |
915 | struct async_cow *async_cow; | |
916 | struct btrfs_root *root; | |
917 | unsigned long nr_pages; | |
918 | ||
919 | async_cow = container_of(work, struct async_cow, work); | |
920 | ||
921 | root = async_cow->root; | |
922 | nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >> | |
923 | PAGE_CACHE_SHIFT; | |
924 | ||
925 | atomic_sub(nr_pages, &root->fs_info->async_delalloc_pages); | |
926 | ||
927 | if (atomic_read(&root->fs_info->async_delalloc_pages) < | |
928 | 5 * 1042 * 1024 && | |
929 | waitqueue_active(&root->fs_info->async_submit_wait)) | |
930 | wake_up(&root->fs_info->async_submit_wait); | |
931 | ||
d397712b | 932 | if (async_cow->inode) |
771ed689 | 933 | submit_compressed_extents(async_cow->inode, async_cow); |
771ed689 | 934 | } |
c8b97818 | 935 | |
771ed689 CM |
936 | static noinline void async_cow_free(struct btrfs_work *work) |
937 | { | |
938 | struct async_cow *async_cow; | |
939 | async_cow = container_of(work, struct async_cow, work); | |
940 | kfree(async_cow); | |
941 | } | |
942 | ||
943 | static int cow_file_range_async(struct inode *inode, struct page *locked_page, | |
944 | u64 start, u64 end, int *page_started, | |
945 | unsigned long *nr_written) | |
946 | { | |
947 | struct async_cow *async_cow; | |
948 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
949 | unsigned long nr_pages; | |
950 | u64 cur_end; | |
951 | int limit = 10 * 1024 * 1042; | |
952 | ||
a3429ab7 CM |
953 | clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED, |
954 | 1, 0, NULL, GFP_NOFS); | |
d397712b | 955 | while (start < end) { |
771ed689 | 956 | async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS); |
8d413713 | 957 | BUG_ON(!async_cow); |
771ed689 CM |
958 | async_cow->inode = inode; |
959 | async_cow->root = root; | |
960 | async_cow->locked_page = locked_page; | |
961 | async_cow->start = start; | |
962 | ||
6cbff00f | 963 | if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) |
771ed689 CM |
964 | cur_end = end; |
965 | else | |
966 | cur_end = min(end, start + 512 * 1024 - 1); | |
967 | ||
968 | async_cow->end = cur_end; | |
969 | INIT_LIST_HEAD(&async_cow->extents); | |
970 | ||
971 | async_cow->work.func = async_cow_start; | |
972 | async_cow->work.ordered_func = async_cow_submit; | |
973 | async_cow->work.ordered_free = async_cow_free; | |
974 | async_cow->work.flags = 0; | |
975 | ||
771ed689 CM |
976 | nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >> |
977 | PAGE_CACHE_SHIFT; | |
978 | atomic_add(nr_pages, &root->fs_info->async_delalloc_pages); | |
979 | ||
980 | btrfs_queue_worker(&root->fs_info->delalloc_workers, | |
981 | &async_cow->work); | |
982 | ||
983 | if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) { | |
984 | wait_event(root->fs_info->async_submit_wait, | |
985 | (atomic_read(&root->fs_info->async_delalloc_pages) < | |
986 | limit)); | |
987 | } | |
988 | ||
d397712b | 989 | while (atomic_read(&root->fs_info->async_submit_draining) && |
771ed689 CM |
990 | atomic_read(&root->fs_info->async_delalloc_pages)) { |
991 | wait_event(root->fs_info->async_submit_wait, | |
992 | (atomic_read(&root->fs_info->async_delalloc_pages) == | |
993 | 0)); | |
994 | } | |
995 | ||
996 | *nr_written += nr_pages; | |
997 | start = cur_end + 1; | |
998 | } | |
999 | *page_started = 1; | |
1000 | return 0; | |
be20aa9d CM |
1001 | } |
1002 | ||
d397712b | 1003 | static noinline int csum_exist_in_range(struct btrfs_root *root, |
17d217fe YZ |
1004 | u64 bytenr, u64 num_bytes) |
1005 | { | |
1006 | int ret; | |
1007 | struct btrfs_ordered_sum *sums; | |
1008 | LIST_HEAD(list); | |
1009 | ||
07d400a6 YZ |
1010 | ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr, |
1011 | bytenr + num_bytes - 1, &list); | |
17d217fe YZ |
1012 | if (ret == 0 && list_empty(&list)) |
1013 | return 0; | |
1014 | ||
1015 | while (!list_empty(&list)) { | |
1016 | sums = list_entry(list.next, struct btrfs_ordered_sum, list); | |
1017 | list_del(&sums->list); | |
1018 | kfree(sums); | |
1019 | } | |
1020 | return 1; | |
1021 | } | |
1022 | ||
d352ac68 CM |
1023 | /* |
1024 | * when nowcow writeback call back. This checks for snapshots or COW copies | |
1025 | * of the extents that exist in the file, and COWs the file as required. | |
1026 | * | |
1027 | * If no cow copies or snapshots exist, we write directly to the existing | |
1028 | * blocks on disk | |
1029 | */ | |
7f366cfe CM |
1030 | static noinline int run_delalloc_nocow(struct inode *inode, |
1031 | struct page *locked_page, | |
771ed689 CM |
1032 | u64 start, u64 end, int *page_started, int force, |
1033 | unsigned long *nr_written) | |
be20aa9d | 1034 | { |
be20aa9d | 1035 | struct btrfs_root *root = BTRFS_I(inode)->root; |
7ea394f1 | 1036 | struct btrfs_trans_handle *trans; |
be20aa9d | 1037 | struct extent_buffer *leaf; |
be20aa9d | 1038 | struct btrfs_path *path; |
80ff3856 | 1039 | struct btrfs_file_extent_item *fi; |
be20aa9d | 1040 | struct btrfs_key found_key; |
80ff3856 YZ |
1041 | u64 cow_start; |
1042 | u64 cur_offset; | |
1043 | u64 extent_end; | |
5d4f98a2 | 1044 | u64 extent_offset; |
80ff3856 YZ |
1045 | u64 disk_bytenr; |
1046 | u64 num_bytes; | |
1047 | int extent_type; | |
1048 | int ret; | |
d899e052 | 1049 | int type; |
80ff3856 YZ |
1050 | int nocow; |
1051 | int check_prev = 1; | |
0cb59c99 | 1052 | bool nolock = false; |
be20aa9d CM |
1053 | |
1054 | path = btrfs_alloc_path(); | |
1055 | BUG_ON(!path); | |
0cb59c99 JB |
1056 | if (root == root->fs_info->tree_root) { |
1057 | nolock = true; | |
1058 | trans = btrfs_join_transaction_nolock(root, 1); | |
1059 | } else { | |
1060 | trans = btrfs_join_transaction(root, 1); | |
1061 | } | |
3612b495 | 1062 | BUG_ON(IS_ERR(trans)); |
be20aa9d | 1063 | |
80ff3856 YZ |
1064 | cow_start = (u64)-1; |
1065 | cur_offset = start; | |
1066 | while (1) { | |
1067 | ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino, | |
1068 | cur_offset, 0); | |
1069 | BUG_ON(ret < 0); | |
1070 | if (ret > 0 && path->slots[0] > 0 && check_prev) { | |
1071 | leaf = path->nodes[0]; | |
1072 | btrfs_item_key_to_cpu(leaf, &found_key, | |
1073 | path->slots[0] - 1); | |
1074 | if (found_key.objectid == inode->i_ino && | |
1075 | found_key.type == BTRFS_EXTENT_DATA_KEY) | |
1076 | path->slots[0]--; | |
1077 | } | |
1078 | check_prev = 0; | |
1079 | next_slot: | |
1080 | leaf = path->nodes[0]; | |
1081 | if (path->slots[0] >= btrfs_header_nritems(leaf)) { | |
1082 | ret = btrfs_next_leaf(root, path); | |
1083 | if (ret < 0) | |
1084 | BUG_ON(1); | |
1085 | if (ret > 0) | |
1086 | break; | |
1087 | leaf = path->nodes[0]; | |
1088 | } | |
be20aa9d | 1089 | |
80ff3856 YZ |
1090 | nocow = 0; |
1091 | disk_bytenr = 0; | |
17d217fe | 1092 | num_bytes = 0; |
80ff3856 YZ |
1093 | btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); |
1094 | ||
1095 | if (found_key.objectid > inode->i_ino || | |
1096 | found_key.type > BTRFS_EXTENT_DATA_KEY || | |
1097 | found_key.offset > end) | |
1098 | break; | |
1099 | ||
1100 | if (found_key.offset > cur_offset) { | |
1101 | extent_end = found_key.offset; | |
e9061e21 | 1102 | extent_type = 0; |
80ff3856 YZ |
1103 | goto out_check; |
1104 | } | |
1105 | ||
1106 | fi = btrfs_item_ptr(leaf, path->slots[0], | |
1107 | struct btrfs_file_extent_item); | |
1108 | extent_type = btrfs_file_extent_type(leaf, fi); | |
1109 | ||
d899e052 YZ |
1110 | if (extent_type == BTRFS_FILE_EXTENT_REG || |
1111 | extent_type == BTRFS_FILE_EXTENT_PREALLOC) { | |
80ff3856 | 1112 | disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); |
5d4f98a2 | 1113 | extent_offset = btrfs_file_extent_offset(leaf, fi); |
80ff3856 YZ |
1114 | extent_end = found_key.offset + |
1115 | btrfs_file_extent_num_bytes(leaf, fi); | |
1116 | if (extent_end <= start) { | |
1117 | path->slots[0]++; | |
1118 | goto next_slot; | |
1119 | } | |
17d217fe YZ |
1120 | if (disk_bytenr == 0) |
1121 | goto out_check; | |
80ff3856 YZ |
1122 | if (btrfs_file_extent_compression(leaf, fi) || |
1123 | btrfs_file_extent_encryption(leaf, fi) || | |
1124 | btrfs_file_extent_other_encoding(leaf, fi)) | |
1125 | goto out_check; | |
d899e052 YZ |
1126 | if (extent_type == BTRFS_FILE_EXTENT_REG && !force) |
1127 | goto out_check; | |
d2fb3437 | 1128 | if (btrfs_extent_readonly(root, disk_bytenr)) |
80ff3856 | 1129 | goto out_check; |
17d217fe | 1130 | if (btrfs_cross_ref_exist(trans, root, inode->i_ino, |
5d4f98a2 YZ |
1131 | found_key.offset - |
1132 | extent_offset, disk_bytenr)) | |
17d217fe | 1133 | goto out_check; |
5d4f98a2 | 1134 | disk_bytenr += extent_offset; |
17d217fe YZ |
1135 | disk_bytenr += cur_offset - found_key.offset; |
1136 | num_bytes = min(end + 1, extent_end) - cur_offset; | |
1137 | /* | |
1138 | * force cow if csum exists in the range. | |
1139 | * this ensure that csum for a given extent are | |
1140 | * either valid or do not exist. | |
1141 | */ | |
1142 | if (csum_exist_in_range(root, disk_bytenr, num_bytes)) | |
1143 | goto out_check; | |
80ff3856 YZ |
1144 | nocow = 1; |
1145 | } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { | |
1146 | extent_end = found_key.offset + | |
1147 | btrfs_file_extent_inline_len(leaf, fi); | |
1148 | extent_end = ALIGN(extent_end, root->sectorsize); | |
1149 | } else { | |
1150 | BUG_ON(1); | |
1151 | } | |
1152 | out_check: | |
1153 | if (extent_end <= start) { | |
1154 | path->slots[0]++; | |
1155 | goto next_slot; | |
1156 | } | |
1157 | if (!nocow) { | |
1158 | if (cow_start == (u64)-1) | |
1159 | cow_start = cur_offset; | |
1160 | cur_offset = extent_end; | |
1161 | if (cur_offset > end) | |
1162 | break; | |
1163 | path->slots[0]++; | |
1164 | goto next_slot; | |
7ea394f1 YZ |
1165 | } |
1166 | ||
1167 | btrfs_release_path(root, path); | |
80ff3856 YZ |
1168 | if (cow_start != (u64)-1) { |
1169 | ret = cow_file_range(inode, locked_page, cow_start, | |
771ed689 CM |
1170 | found_key.offset - 1, page_started, |
1171 | nr_written, 1); | |
80ff3856 YZ |
1172 | BUG_ON(ret); |
1173 | cow_start = (u64)-1; | |
7ea394f1 | 1174 | } |
80ff3856 | 1175 | |
d899e052 YZ |
1176 | if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) { |
1177 | struct extent_map *em; | |
1178 | struct extent_map_tree *em_tree; | |
1179 | em_tree = &BTRFS_I(inode)->extent_tree; | |
1180 | em = alloc_extent_map(GFP_NOFS); | |
c26a9203 | 1181 | BUG_ON(!em); |
d899e052 | 1182 | em->start = cur_offset; |
445a6944 | 1183 | em->orig_start = em->start; |
d899e052 YZ |
1184 | em->len = num_bytes; |
1185 | em->block_len = num_bytes; | |
1186 | em->block_start = disk_bytenr; | |
1187 | em->bdev = root->fs_info->fs_devices->latest_bdev; | |
1188 | set_bit(EXTENT_FLAG_PINNED, &em->flags); | |
1189 | while (1) { | |
890871be | 1190 | write_lock(&em_tree->lock); |
d899e052 | 1191 | ret = add_extent_mapping(em_tree, em); |
890871be | 1192 | write_unlock(&em_tree->lock); |
d899e052 YZ |
1193 | if (ret != -EEXIST) { |
1194 | free_extent_map(em); | |
1195 | break; | |
1196 | } | |
1197 | btrfs_drop_extent_cache(inode, em->start, | |
1198 | em->start + em->len - 1, 0); | |
1199 | } | |
1200 | type = BTRFS_ORDERED_PREALLOC; | |
1201 | } else { | |
1202 | type = BTRFS_ORDERED_NOCOW; | |
1203 | } | |
80ff3856 YZ |
1204 | |
1205 | ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr, | |
d899e052 YZ |
1206 | num_bytes, num_bytes, type); |
1207 | BUG_ON(ret); | |
771ed689 | 1208 | |
efa56464 YZ |
1209 | if (root->root_key.objectid == |
1210 | BTRFS_DATA_RELOC_TREE_OBJECTID) { | |
1211 | ret = btrfs_reloc_clone_csums(inode, cur_offset, | |
1212 | num_bytes); | |
1213 | BUG_ON(ret); | |
1214 | } | |
1215 | ||
d899e052 | 1216 | extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree, |
a791e35e CM |
1217 | cur_offset, cur_offset + num_bytes - 1, |
1218 | locked_page, EXTENT_CLEAR_UNLOCK_PAGE | | |
1219 | EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC | | |
1220 | EXTENT_SET_PRIVATE2); | |
80ff3856 YZ |
1221 | cur_offset = extent_end; |
1222 | if (cur_offset > end) | |
1223 | break; | |
be20aa9d | 1224 | } |
80ff3856 YZ |
1225 | btrfs_release_path(root, path); |
1226 | ||
1227 | if (cur_offset <= end && cow_start == (u64)-1) | |
1228 | cow_start = cur_offset; | |
1229 | if (cow_start != (u64)-1) { | |
1230 | ret = cow_file_range(inode, locked_page, cow_start, end, | |
771ed689 | 1231 | page_started, nr_written, 1); |
80ff3856 YZ |
1232 | BUG_ON(ret); |
1233 | } | |
1234 | ||
0cb59c99 JB |
1235 | if (nolock) { |
1236 | ret = btrfs_end_transaction_nolock(trans, root); | |
1237 | BUG_ON(ret); | |
1238 | } else { | |
1239 | ret = btrfs_end_transaction(trans, root); | |
1240 | BUG_ON(ret); | |
1241 | } | |
7ea394f1 | 1242 | btrfs_free_path(path); |
80ff3856 | 1243 | return 0; |
be20aa9d CM |
1244 | } |
1245 | ||
d352ac68 CM |
1246 | /* |
1247 | * extent_io.c call back to do delayed allocation processing | |
1248 | */ | |
c8b97818 | 1249 | static int run_delalloc_range(struct inode *inode, struct page *locked_page, |
771ed689 CM |
1250 | u64 start, u64 end, int *page_started, |
1251 | unsigned long *nr_written) | |
be20aa9d | 1252 | { |
be20aa9d | 1253 | int ret; |
7f366cfe | 1254 | struct btrfs_root *root = BTRFS_I(inode)->root; |
a2135011 | 1255 | |
6cbff00f | 1256 | if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) |
c8b97818 | 1257 | ret = run_delalloc_nocow(inode, locked_page, start, end, |
d397712b | 1258 | page_started, 1, nr_written); |
6cbff00f | 1259 | else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC) |
d899e052 | 1260 | ret = run_delalloc_nocow(inode, locked_page, start, end, |
d397712b | 1261 | page_started, 0, nr_written); |
1e701a32 | 1262 | else if (!btrfs_test_opt(root, COMPRESS) && |
75e7cb7f LB |
1263 | !(BTRFS_I(inode)->force_compress) && |
1264 | !(BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS)) | |
7f366cfe CM |
1265 | ret = cow_file_range(inode, locked_page, start, end, |
1266 | page_started, nr_written, 1); | |
be20aa9d | 1267 | else |
771ed689 | 1268 | ret = cow_file_range_async(inode, locked_page, start, end, |
d397712b | 1269 | page_started, nr_written); |
b888db2b CM |
1270 | return ret; |
1271 | } | |
1272 | ||
9ed74f2d | 1273 | static int btrfs_split_extent_hook(struct inode *inode, |
0ca1f7ce | 1274 | struct extent_state *orig, u64 split) |
9ed74f2d | 1275 | { |
0ca1f7ce | 1276 | /* not delalloc, ignore it */ |
9ed74f2d JB |
1277 | if (!(orig->state & EXTENT_DELALLOC)) |
1278 | return 0; | |
1279 | ||
0ca1f7ce | 1280 | atomic_inc(&BTRFS_I(inode)->outstanding_extents); |
9ed74f2d JB |
1281 | return 0; |
1282 | } | |
1283 | ||
1284 | /* | |
1285 | * extent_io.c merge_extent_hook, used to track merged delayed allocation | |
1286 | * extents so we can keep track of new extents that are just merged onto old | |
1287 | * extents, such as when we are doing sequential writes, so we can properly | |
1288 | * account for the metadata space we'll need. | |
1289 | */ | |
1290 | static int btrfs_merge_extent_hook(struct inode *inode, | |
1291 | struct extent_state *new, | |
1292 | struct extent_state *other) | |
1293 | { | |
9ed74f2d JB |
1294 | /* not delalloc, ignore it */ |
1295 | if (!(other->state & EXTENT_DELALLOC)) | |
1296 | return 0; | |
1297 | ||
0ca1f7ce | 1298 | atomic_dec(&BTRFS_I(inode)->outstanding_extents); |
9ed74f2d JB |
1299 | return 0; |
1300 | } | |
1301 | ||
d352ac68 CM |
1302 | /* |
1303 | * extent_io.c set_bit_hook, used to track delayed allocation | |
1304 | * bytes in this file, and to maintain the list of inodes that | |
1305 | * have pending delalloc work to be done. | |
1306 | */ | |
0ca1f7ce YZ |
1307 | static int btrfs_set_bit_hook(struct inode *inode, |
1308 | struct extent_state *state, int *bits) | |
291d673e | 1309 | { |
9ed74f2d | 1310 | |
75eff68e CM |
1311 | /* |
1312 | * set_bit and clear bit hooks normally require _irqsave/restore | |
1313 | * but in this case, we are only testeing for the DELALLOC | |
1314 | * bit, which is only set or cleared with irqs on | |
1315 | */ | |
0ca1f7ce | 1316 | if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) { |
291d673e | 1317 | struct btrfs_root *root = BTRFS_I(inode)->root; |
0ca1f7ce | 1318 | u64 len = state->end + 1 - state->start; |
0cb59c99 JB |
1319 | int do_list = (root->root_key.objectid != |
1320 | BTRFS_ROOT_TREE_OBJECTID); | |
9ed74f2d | 1321 | |
0ca1f7ce YZ |
1322 | if (*bits & EXTENT_FIRST_DELALLOC) |
1323 | *bits &= ~EXTENT_FIRST_DELALLOC; | |
1324 | else | |
1325 | atomic_inc(&BTRFS_I(inode)->outstanding_extents); | |
287a0ab9 | 1326 | |
75eff68e | 1327 | spin_lock(&root->fs_info->delalloc_lock); |
0ca1f7ce YZ |
1328 | BTRFS_I(inode)->delalloc_bytes += len; |
1329 | root->fs_info->delalloc_bytes += len; | |
0cb59c99 | 1330 | if (do_list && list_empty(&BTRFS_I(inode)->delalloc_inodes)) { |
ea8c2819 CM |
1331 | list_add_tail(&BTRFS_I(inode)->delalloc_inodes, |
1332 | &root->fs_info->delalloc_inodes); | |
1333 | } | |
75eff68e | 1334 | spin_unlock(&root->fs_info->delalloc_lock); |
291d673e CM |
1335 | } |
1336 | return 0; | |
1337 | } | |
1338 | ||
d352ac68 CM |
1339 | /* |
1340 | * extent_io.c clear_bit_hook, see set_bit_hook for why | |
1341 | */ | |
9ed74f2d | 1342 | static int btrfs_clear_bit_hook(struct inode *inode, |
0ca1f7ce | 1343 | struct extent_state *state, int *bits) |
291d673e | 1344 | { |
75eff68e CM |
1345 | /* |
1346 | * set_bit and clear bit hooks normally require _irqsave/restore | |
1347 | * but in this case, we are only testeing for the DELALLOC | |
1348 | * bit, which is only set or cleared with irqs on | |
1349 | */ | |
0ca1f7ce | 1350 | if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) { |
291d673e | 1351 | struct btrfs_root *root = BTRFS_I(inode)->root; |
0ca1f7ce | 1352 | u64 len = state->end + 1 - state->start; |
0cb59c99 JB |
1353 | int do_list = (root->root_key.objectid != |
1354 | BTRFS_ROOT_TREE_OBJECTID); | |
bcbfce8a | 1355 | |
0ca1f7ce YZ |
1356 | if (*bits & EXTENT_FIRST_DELALLOC) |
1357 | *bits &= ~EXTENT_FIRST_DELALLOC; | |
1358 | else if (!(*bits & EXTENT_DO_ACCOUNTING)) | |
1359 | atomic_dec(&BTRFS_I(inode)->outstanding_extents); | |
1360 | ||
1361 | if (*bits & EXTENT_DO_ACCOUNTING) | |
1362 | btrfs_delalloc_release_metadata(inode, len); | |
1363 | ||
0cb59c99 JB |
1364 | if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID |
1365 | && do_list) | |
0ca1f7ce | 1366 | btrfs_free_reserved_data_space(inode, len); |
9ed74f2d | 1367 | |
75eff68e | 1368 | spin_lock(&root->fs_info->delalloc_lock); |
0ca1f7ce YZ |
1369 | root->fs_info->delalloc_bytes -= len; |
1370 | BTRFS_I(inode)->delalloc_bytes -= len; | |
1371 | ||
0cb59c99 | 1372 | if (do_list && BTRFS_I(inode)->delalloc_bytes == 0 && |
ea8c2819 CM |
1373 | !list_empty(&BTRFS_I(inode)->delalloc_inodes)) { |
1374 | list_del_init(&BTRFS_I(inode)->delalloc_inodes); | |
1375 | } | |
75eff68e | 1376 | spin_unlock(&root->fs_info->delalloc_lock); |
291d673e CM |
1377 | } |
1378 | return 0; | |
1379 | } | |
1380 | ||
d352ac68 CM |
1381 | /* |
1382 | * extent_io.c merge_bio_hook, this must check the chunk tree to make sure | |
1383 | * we don't create bios that span stripes or chunks | |
1384 | */ | |
239b14b3 | 1385 | int btrfs_merge_bio_hook(struct page *page, unsigned long offset, |
c8b97818 CM |
1386 | size_t size, struct bio *bio, |
1387 | unsigned long bio_flags) | |
239b14b3 CM |
1388 | { |
1389 | struct btrfs_root *root = BTRFS_I(page->mapping->host)->root; | |
1390 | struct btrfs_mapping_tree *map_tree; | |
a62b9401 | 1391 | u64 logical = (u64)bio->bi_sector << 9; |
239b14b3 CM |
1392 | u64 length = 0; |
1393 | u64 map_length; | |
239b14b3 CM |
1394 | int ret; |
1395 | ||
771ed689 CM |
1396 | if (bio_flags & EXTENT_BIO_COMPRESSED) |
1397 | return 0; | |
1398 | ||
f2d8d74d | 1399 | length = bio->bi_size; |
239b14b3 CM |
1400 | map_tree = &root->fs_info->mapping_tree; |
1401 | map_length = length; | |
cea9e445 | 1402 | ret = btrfs_map_block(map_tree, READ, logical, |
f188591e | 1403 | &map_length, NULL, 0); |
cea9e445 | 1404 | |
d397712b | 1405 | if (map_length < length + size) |
239b14b3 | 1406 | return 1; |
411fc6bc | 1407 | return ret; |
239b14b3 CM |
1408 | } |
1409 | ||
d352ac68 CM |
1410 | /* |
1411 | * in order to insert checksums into the metadata in large chunks, | |
1412 | * we wait until bio submission time. All the pages in the bio are | |
1413 | * checksummed and sums are attached onto the ordered extent record. | |
1414 | * | |
1415 | * At IO completion time the cums attached on the ordered extent record | |
1416 | * are inserted into the btree | |
1417 | */ | |
d397712b CM |
1418 | static int __btrfs_submit_bio_start(struct inode *inode, int rw, |
1419 | struct bio *bio, int mirror_num, | |
eaf25d93 CM |
1420 | unsigned long bio_flags, |
1421 | u64 bio_offset) | |
065631f6 | 1422 | { |
065631f6 | 1423 | struct btrfs_root *root = BTRFS_I(inode)->root; |
065631f6 | 1424 | int ret = 0; |
e015640f | 1425 | |
d20f7043 | 1426 | ret = btrfs_csum_one_bio(root, inode, bio, 0, 0); |
44b8bd7e | 1427 | BUG_ON(ret); |
4a69a410 CM |
1428 | return 0; |
1429 | } | |
e015640f | 1430 | |
4a69a410 CM |
1431 | /* |
1432 | * in order to insert checksums into the metadata in large chunks, | |
1433 | * we wait until bio submission time. All the pages in the bio are | |
1434 | * checksummed and sums are attached onto the ordered extent record. | |
1435 | * | |
1436 | * At IO completion time the cums attached on the ordered extent record | |
1437 | * are inserted into the btree | |
1438 | */ | |
b2950863 | 1439 | static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio, |
eaf25d93 CM |
1440 | int mirror_num, unsigned long bio_flags, |
1441 | u64 bio_offset) | |
4a69a410 CM |
1442 | { |
1443 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
8b712842 | 1444 | return btrfs_map_bio(root, rw, bio, mirror_num, 1); |
44b8bd7e CM |
1445 | } |
1446 | ||
d352ac68 | 1447 | /* |
cad321ad CM |
1448 | * extent_io.c submission hook. This does the right thing for csum calculation |
1449 | * on write, or reading the csums from the tree before a read | |
d352ac68 | 1450 | */ |
b2950863 | 1451 | static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, |
eaf25d93 CM |
1452 | int mirror_num, unsigned long bio_flags, |
1453 | u64 bio_offset) | |
44b8bd7e CM |
1454 | { |
1455 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
1456 | int ret = 0; | |
19b9bdb0 | 1457 | int skip_sum; |
44b8bd7e | 1458 | |
6cbff00f | 1459 | skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; |
cad321ad | 1460 | |
0cb59c99 JB |
1461 | if (root == root->fs_info->tree_root) |
1462 | ret = btrfs_bio_wq_end_io(root->fs_info, bio, 2); | |
1463 | else | |
1464 | ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0); | |
e6dcd2dc | 1465 | BUG_ON(ret); |
065631f6 | 1466 | |
7b6d91da | 1467 | if (!(rw & REQ_WRITE)) { |
d20f7043 | 1468 | if (bio_flags & EXTENT_BIO_COMPRESSED) { |
c8b97818 CM |
1469 | return btrfs_submit_compressed_read(inode, bio, |
1470 | mirror_num, bio_flags); | |
c2db1073 TI |
1471 | } else if (!skip_sum) { |
1472 | ret = btrfs_lookup_bio_sums(root, inode, bio, NULL); | |
1473 | if (ret) | |
1474 | return ret; | |
1475 | } | |
4d1b5fb4 | 1476 | goto mapit; |
19b9bdb0 | 1477 | } else if (!skip_sum) { |
17d217fe YZ |
1478 | /* csum items have already been cloned */ |
1479 | if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID) | |
1480 | goto mapit; | |
19b9bdb0 CM |
1481 | /* we're doing a write, do the async checksumming */ |
1482 | return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info, | |
44b8bd7e | 1483 | inode, rw, bio, mirror_num, |
eaf25d93 CM |
1484 | bio_flags, bio_offset, |
1485 | __btrfs_submit_bio_start, | |
4a69a410 | 1486 | __btrfs_submit_bio_done); |
19b9bdb0 CM |
1487 | } |
1488 | ||
0b86a832 | 1489 | mapit: |
8b712842 | 1490 | return btrfs_map_bio(root, rw, bio, mirror_num, 0); |
065631f6 | 1491 | } |
6885f308 | 1492 | |
d352ac68 CM |
1493 | /* |
1494 | * given a list of ordered sums record them in the inode. This happens | |
1495 | * at IO completion time based on sums calculated at bio submission time. | |
1496 | */ | |
ba1da2f4 | 1497 | static noinline int add_pending_csums(struct btrfs_trans_handle *trans, |
e6dcd2dc CM |
1498 | struct inode *inode, u64 file_offset, |
1499 | struct list_head *list) | |
1500 | { | |
e6dcd2dc CM |
1501 | struct btrfs_ordered_sum *sum; |
1502 | ||
1503 | btrfs_set_trans_block_group(trans, inode); | |
c6e30871 QF |
1504 | |
1505 | list_for_each_entry(sum, list, list) { | |
d20f7043 CM |
1506 | btrfs_csum_file_blocks(trans, |
1507 | BTRFS_I(inode)->root->fs_info->csum_root, sum); | |
e6dcd2dc CM |
1508 | } |
1509 | return 0; | |
1510 | } | |
1511 | ||
2ac55d41 JB |
1512 | int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end, |
1513 | struct extent_state **cached_state) | |
ea8c2819 | 1514 | { |
d397712b | 1515 | if ((end & (PAGE_CACHE_SIZE - 1)) == 0) |
771ed689 | 1516 | WARN_ON(1); |
ea8c2819 | 1517 | return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end, |
2ac55d41 | 1518 | cached_state, GFP_NOFS); |
ea8c2819 CM |
1519 | } |
1520 | ||
d352ac68 | 1521 | /* see btrfs_writepage_start_hook for details on why this is required */ |
247e743c CM |
1522 | struct btrfs_writepage_fixup { |
1523 | struct page *page; | |
1524 | struct btrfs_work work; | |
1525 | }; | |
1526 | ||
b2950863 | 1527 | static void btrfs_writepage_fixup_worker(struct btrfs_work *work) |
247e743c CM |
1528 | { |
1529 | struct btrfs_writepage_fixup *fixup; | |
1530 | struct btrfs_ordered_extent *ordered; | |
2ac55d41 | 1531 | struct extent_state *cached_state = NULL; |
247e743c CM |
1532 | struct page *page; |
1533 | struct inode *inode; | |
1534 | u64 page_start; | |
1535 | u64 page_end; | |
1536 | ||
1537 | fixup = container_of(work, struct btrfs_writepage_fixup, work); | |
1538 | page = fixup->page; | |
4a096752 | 1539 | again: |
247e743c CM |
1540 | lock_page(page); |
1541 | if (!page->mapping || !PageDirty(page) || !PageChecked(page)) { | |
1542 | ClearPageChecked(page); | |
1543 | goto out_page; | |
1544 | } | |
1545 | ||
1546 | inode = page->mapping->host; | |
1547 | page_start = page_offset(page); | |
1548 | page_end = page_offset(page) + PAGE_CACHE_SIZE - 1; | |
1549 | ||
2ac55d41 JB |
1550 | lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0, |
1551 | &cached_state, GFP_NOFS); | |
4a096752 CM |
1552 | |
1553 | /* already ordered? We're done */ | |
8b62b72b | 1554 | if (PagePrivate2(page)) |
247e743c | 1555 | goto out; |
4a096752 CM |
1556 | |
1557 | ordered = btrfs_lookup_ordered_extent(inode, page_start); | |
1558 | if (ordered) { | |
2ac55d41 JB |
1559 | unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, |
1560 | page_end, &cached_state, GFP_NOFS); | |
4a096752 CM |
1561 | unlock_page(page); |
1562 | btrfs_start_ordered_extent(inode, ordered, 1); | |
1563 | goto again; | |
1564 | } | |
247e743c | 1565 | |
0ca1f7ce | 1566 | BUG(); |
2ac55d41 | 1567 | btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state); |
247e743c CM |
1568 | ClearPageChecked(page); |
1569 | out: | |
2ac55d41 JB |
1570 | unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end, |
1571 | &cached_state, GFP_NOFS); | |
247e743c CM |
1572 | out_page: |
1573 | unlock_page(page); | |
1574 | page_cache_release(page); | |
b897abec | 1575 | kfree(fixup); |
247e743c CM |
1576 | } |
1577 | ||
1578 | /* | |
1579 | * There are a few paths in the higher layers of the kernel that directly | |
1580 | * set the page dirty bit without asking the filesystem if it is a | |
1581 | * good idea. This causes problems because we want to make sure COW | |
1582 | * properly happens and the data=ordered rules are followed. | |
1583 | * | |
c8b97818 | 1584 | * In our case any range that doesn't have the ORDERED bit set |
247e743c CM |
1585 | * hasn't been properly setup for IO. We kick off an async process |
1586 | * to fix it up. The async helper will wait for ordered extents, set | |
1587 | * the delalloc bit and make it safe to write the page. | |
1588 | */ | |
b2950863 | 1589 | static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end) |
247e743c CM |
1590 | { |
1591 | struct inode *inode = page->mapping->host; | |
1592 | struct btrfs_writepage_fixup *fixup; | |
1593 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
247e743c | 1594 | |
8b62b72b CM |
1595 | /* this page is properly in the ordered list */ |
1596 | if (TestClearPagePrivate2(page)) | |
247e743c CM |
1597 | return 0; |
1598 | ||
1599 | if (PageChecked(page)) | |
1600 | return -EAGAIN; | |
1601 | ||
1602 | fixup = kzalloc(sizeof(*fixup), GFP_NOFS); | |
1603 | if (!fixup) | |
1604 | return -EAGAIN; | |
f421950f | 1605 | |
247e743c CM |
1606 | SetPageChecked(page); |
1607 | page_cache_get(page); | |
1608 | fixup->work.func = btrfs_writepage_fixup_worker; | |
1609 | fixup->page = page; | |
1610 | btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work); | |
1611 | return -EAGAIN; | |
1612 | } | |
1613 | ||
d899e052 YZ |
1614 | static int insert_reserved_file_extent(struct btrfs_trans_handle *trans, |
1615 | struct inode *inode, u64 file_pos, | |
1616 | u64 disk_bytenr, u64 disk_num_bytes, | |
1617 | u64 num_bytes, u64 ram_bytes, | |
1618 | u8 compression, u8 encryption, | |
1619 | u16 other_encoding, int extent_type) | |
1620 | { | |
1621 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
1622 | struct btrfs_file_extent_item *fi; | |
1623 | struct btrfs_path *path; | |
1624 | struct extent_buffer *leaf; | |
1625 | struct btrfs_key ins; | |
1626 | u64 hint; | |
1627 | int ret; | |
1628 | ||
1629 | path = btrfs_alloc_path(); | |
1630 | BUG_ON(!path); | |
1631 | ||
b9473439 | 1632 | path->leave_spinning = 1; |
a1ed835e CM |
1633 | |
1634 | /* | |
1635 | * we may be replacing one extent in the tree with another. | |
1636 | * The new extent is pinned in the extent map, and we don't want | |
1637 | * to drop it from the cache until it is completely in the btree. | |
1638 | * | |
1639 | * So, tell btrfs_drop_extents to leave this extent in the cache. | |
1640 | * the caller is expected to unpin it and allow it to be merged | |
1641 | * with the others. | |
1642 | */ | |
920bbbfb YZ |
1643 | ret = btrfs_drop_extents(trans, inode, file_pos, file_pos + num_bytes, |
1644 | &hint, 0); | |
d899e052 YZ |
1645 | BUG_ON(ret); |
1646 | ||
1647 | ins.objectid = inode->i_ino; | |
1648 | ins.offset = file_pos; | |
1649 | ins.type = BTRFS_EXTENT_DATA_KEY; | |
1650 | ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi)); | |
1651 | BUG_ON(ret); | |
1652 | leaf = path->nodes[0]; | |
1653 | fi = btrfs_item_ptr(leaf, path->slots[0], | |
1654 | struct btrfs_file_extent_item); | |
1655 | btrfs_set_file_extent_generation(leaf, fi, trans->transid); | |
1656 | btrfs_set_file_extent_type(leaf, fi, extent_type); | |
1657 | btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr); | |
1658 | btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes); | |
1659 | btrfs_set_file_extent_offset(leaf, fi, 0); | |
1660 | btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes); | |
1661 | btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes); | |
1662 | btrfs_set_file_extent_compression(leaf, fi, compression); | |
1663 | btrfs_set_file_extent_encryption(leaf, fi, encryption); | |
1664 | btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding); | |
b9473439 CM |
1665 | |
1666 | btrfs_unlock_up_safe(path, 1); | |
1667 | btrfs_set_lock_blocking(leaf); | |
1668 | ||
d899e052 YZ |
1669 | btrfs_mark_buffer_dirty(leaf); |
1670 | ||
1671 | inode_add_bytes(inode, num_bytes); | |
d899e052 YZ |
1672 | |
1673 | ins.objectid = disk_bytenr; | |
1674 | ins.offset = disk_num_bytes; | |
1675 | ins.type = BTRFS_EXTENT_ITEM_KEY; | |
5d4f98a2 YZ |
1676 | ret = btrfs_alloc_reserved_file_extent(trans, root, |
1677 | root->root_key.objectid, | |
1678 | inode->i_ino, file_pos, &ins); | |
d899e052 | 1679 | BUG_ON(ret); |
d899e052 | 1680 | btrfs_free_path(path); |
b9473439 | 1681 | |
d899e052 YZ |
1682 | return 0; |
1683 | } | |
1684 | ||
5d13a98f CM |
1685 | /* |
1686 | * helper function for btrfs_finish_ordered_io, this | |
1687 | * just reads in some of the csum leaves to prime them into ram | |
1688 | * before we start the transaction. It limits the amount of btree | |
1689 | * reads required while inside the transaction. | |
1690 | */ | |
d352ac68 CM |
1691 | /* as ordered data IO finishes, this gets called so we can finish |
1692 | * an ordered extent if the range of bytes in the file it covers are | |
1693 | * fully written. | |
1694 | */ | |
211f90e6 | 1695 | static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) |
e6dcd2dc | 1696 | { |
e6dcd2dc | 1697 | struct btrfs_root *root = BTRFS_I(inode)->root; |
0ca1f7ce | 1698 | struct btrfs_trans_handle *trans = NULL; |
5d13a98f | 1699 | struct btrfs_ordered_extent *ordered_extent = NULL; |
e6dcd2dc | 1700 | struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; |
2ac55d41 | 1701 | struct extent_state *cached_state = NULL; |
261507a0 | 1702 | int compress_type = 0; |
e6dcd2dc | 1703 | int ret; |
0cb59c99 | 1704 | bool nolock = false; |
e6dcd2dc | 1705 | |
5a1a3df1 JB |
1706 | ret = btrfs_dec_test_ordered_pending(inode, &ordered_extent, start, |
1707 | end - start + 1); | |
ba1da2f4 | 1708 | if (!ret) |
e6dcd2dc | 1709 | return 0; |
e6dcd2dc | 1710 | BUG_ON(!ordered_extent); |
efd049fb | 1711 | |
0cb59c99 JB |
1712 | nolock = (root == root->fs_info->tree_root); |
1713 | ||
c2167754 YZ |
1714 | if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) { |
1715 | BUG_ON(!list_empty(&ordered_extent->list)); | |
1716 | ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent); | |
1717 | if (!ret) { | |
0cb59c99 JB |
1718 | if (nolock) |
1719 | trans = btrfs_join_transaction_nolock(root, 1); | |
1720 | else | |
1721 | trans = btrfs_join_transaction(root, 1); | |
3612b495 | 1722 | BUG_ON(IS_ERR(trans)); |
0ca1f7ce YZ |
1723 | btrfs_set_trans_block_group(trans, inode); |
1724 | trans->block_rsv = &root->fs_info->delalloc_block_rsv; | |
c2167754 YZ |
1725 | ret = btrfs_update_inode(trans, root, inode); |
1726 | BUG_ON(ret); | |
c2167754 YZ |
1727 | } |
1728 | goto out; | |
1729 | } | |
e6dcd2dc | 1730 | |
2ac55d41 JB |
1731 | lock_extent_bits(io_tree, ordered_extent->file_offset, |
1732 | ordered_extent->file_offset + ordered_extent->len - 1, | |
1733 | 0, &cached_state, GFP_NOFS); | |
e6dcd2dc | 1734 | |
0cb59c99 JB |
1735 | if (nolock) |
1736 | trans = btrfs_join_transaction_nolock(root, 1); | |
1737 | else | |
1738 | trans = btrfs_join_transaction(root, 1); | |
3612b495 | 1739 | BUG_ON(IS_ERR(trans)); |
0ca1f7ce YZ |
1740 | btrfs_set_trans_block_group(trans, inode); |
1741 | trans->block_rsv = &root->fs_info->delalloc_block_rsv; | |
c2167754 | 1742 | |
c8b97818 | 1743 | if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags)) |
261507a0 | 1744 | compress_type = ordered_extent->compress_type; |
d899e052 | 1745 | if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { |
261507a0 | 1746 | BUG_ON(compress_type); |
920bbbfb | 1747 | ret = btrfs_mark_extent_written(trans, inode, |
d899e052 YZ |
1748 | ordered_extent->file_offset, |
1749 | ordered_extent->file_offset + | |
1750 | ordered_extent->len); | |
1751 | BUG_ON(ret); | |
1752 | } else { | |
0af3d00b | 1753 | BUG_ON(root == root->fs_info->tree_root); |
d899e052 YZ |
1754 | ret = insert_reserved_file_extent(trans, inode, |
1755 | ordered_extent->file_offset, | |
1756 | ordered_extent->start, | |
1757 | ordered_extent->disk_len, | |
1758 | ordered_extent->len, | |
1759 | ordered_extent->len, | |
261507a0 | 1760 | compress_type, 0, 0, |
d899e052 | 1761 | BTRFS_FILE_EXTENT_REG); |
a1ed835e CM |
1762 | unpin_extent_cache(&BTRFS_I(inode)->extent_tree, |
1763 | ordered_extent->file_offset, | |
1764 | ordered_extent->len); | |
d899e052 YZ |
1765 | BUG_ON(ret); |
1766 | } | |
2ac55d41 JB |
1767 | unlock_extent_cached(io_tree, ordered_extent->file_offset, |
1768 | ordered_extent->file_offset + | |
1769 | ordered_extent->len - 1, &cached_state, GFP_NOFS); | |
1770 | ||
e6dcd2dc CM |
1771 | add_pending_csums(trans, inode, ordered_extent->file_offset, |
1772 | &ordered_extent->list); | |
1773 | ||
1ef30be1 JB |
1774 | ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent); |
1775 | if (!ret) { | |
1776 | ret = btrfs_update_inode(trans, root, inode); | |
1777 | BUG_ON(ret); | |
1778 | } | |
1779 | ret = 0; | |
c2167754 | 1780 | out: |
0cb59c99 JB |
1781 | if (nolock) { |
1782 | if (trans) | |
1783 | btrfs_end_transaction_nolock(trans, root); | |
1784 | } else { | |
1785 | btrfs_delalloc_release_metadata(inode, ordered_extent->len); | |
1786 | if (trans) | |
1787 | btrfs_end_transaction(trans, root); | |
1788 | } | |
1789 | ||
e6dcd2dc CM |
1790 | /* once for us */ |
1791 | btrfs_put_ordered_extent(ordered_extent); | |
1792 | /* once for the tree */ | |
1793 | btrfs_put_ordered_extent(ordered_extent); | |
1794 | ||
e6dcd2dc CM |
1795 | return 0; |
1796 | } | |
1797 | ||
b2950863 | 1798 | static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end, |
211f90e6 CM |
1799 | struct extent_state *state, int uptodate) |
1800 | { | |
1abe9b8a | 1801 | trace_btrfs_writepage_end_io_hook(page, start, end, uptodate); |
1802 | ||
8b62b72b | 1803 | ClearPagePrivate2(page); |
211f90e6 CM |
1804 | return btrfs_finish_ordered_io(page->mapping->host, start, end); |
1805 | } | |
1806 | ||
d352ac68 CM |
1807 | /* |
1808 | * When IO fails, either with EIO or csum verification fails, we | |
1809 | * try other mirrors that might have a good copy of the data. This | |
1810 | * io_failure_record is used to record state as we go through all the | |
1811 | * mirrors. If another mirror has good data, the page is set up to date | |
1812 | * and things continue. If a good mirror can't be found, the original | |
1813 | * bio end_io callback is called to indicate things have failed. | |
1814 | */ | |
7e38326f CM |
1815 | struct io_failure_record { |
1816 | struct page *page; | |
1817 | u64 start; | |
1818 | u64 len; | |
1819 | u64 logical; | |
d20f7043 | 1820 | unsigned long bio_flags; |
7e38326f CM |
1821 | int last_mirror; |
1822 | }; | |
1823 | ||
b2950863 | 1824 | static int btrfs_io_failed_hook(struct bio *failed_bio, |
1259ab75 CM |
1825 | struct page *page, u64 start, u64 end, |
1826 | struct extent_state *state) | |
7e38326f CM |
1827 | { |
1828 | struct io_failure_record *failrec = NULL; | |
1829 | u64 private; | |
1830 | struct extent_map *em; | |
1831 | struct inode *inode = page->mapping->host; | |
1832 | struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree; | |
3b951516 | 1833 | struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; |
7e38326f CM |
1834 | struct bio *bio; |
1835 | int num_copies; | |
1836 | int ret; | |
1259ab75 | 1837 | int rw; |
7e38326f CM |
1838 | u64 logical; |
1839 | ||
1840 | ret = get_state_private(failure_tree, start, &private); | |
1841 | if (ret) { | |
7e38326f CM |
1842 | failrec = kmalloc(sizeof(*failrec), GFP_NOFS); |
1843 | if (!failrec) | |
1844 | return -ENOMEM; | |
1845 | failrec->start = start; | |
1846 | failrec->len = end - start + 1; | |
1847 | failrec->last_mirror = 0; | |
d20f7043 | 1848 | failrec->bio_flags = 0; |
7e38326f | 1849 | |
890871be | 1850 | read_lock(&em_tree->lock); |
3b951516 CM |
1851 | em = lookup_extent_mapping(em_tree, start, failrec->len); |
1852 | if (em->start > start || em->start + em->len < start) { | |
1853 | free_extent_map(em); | |
1854 | em = NULL; | |
1855 | } | |
890871be | 1856 | read_unlock(&em_tree->lock); |
7e38326f CM |
1857 | |
1858 | if (!em || IS_ERR(em)) { | |
1859 | kfree(failrec); | |
1860 | return -EIO; | |
1861 | } | |
1862 | logical = start - em->start; | |
1863 | logical = em->block_start + logical; | |
d20f7043 CM |
1864 | if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) { |
1865 | logical = em->block_start; | |
1866 | failrec->bio_flags = EXTENT_BIO_COMPRESSED; | |
261507a0 LZ |
1867 | extent_set_compress_type(&failrec->bio_flags, |
1868 | em->compress_type); | |
d20f7043 | 1869 | } |
7e38326f CM |
1870 | failrec->logical = logical; |
1871 | free_extent_map(em); | |
1872 | set_extent_bits(failure_tree, start, end, EXTENT_LOCKED | | |
1873 | EXTENT_DIRTY, GFP_NOFS); | |
587f7704 CM |
1874 | set_state_private(failure_tree, start, |
1875 | (u64)(unsigned long)failrec); | |
7e38326f | 1876 | } else { |
587f7704 | 1877 | failrec = (struct io_failure_record *)(unsigned long)private; |
7e38326f CM |
1878 | } |
1879 | num_copies = btrfs_num_copies( | |
1880 | &BTRFS_I(inode)->root->fs_info->mapping_tree, | |
1881 | failrec->logical, failrec->len); | |
1882 | failrec->last_mirror++; | |
1883 | if (!state) { | |
cad321ad | 1884 | spin_lock(&BTRFS_I(inode)->io_tree.lock); |
7e38326f CM |
1885 | state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree, |
1886 | failrec->start, | |
1887 | EXTENT_LOCKED); | |
1888 | if (state && state->start != failrec->start) | |
1889 | state = NULL; | |
cad321ad | 1890 | spin_unlock(&BTRFS_I(inode)->io_tree.lock); |
7e38326f CM |
1891 | } |
1892 | if (!state || failrec->last_mirror > num_copies) { | |
1893 | set_state_private(failure_tree, failrec->start, 0); | |
1894 | clear_extent_bits(failure_tree, failrec->start, | |
1895 | failrec->start + failrec->len - 1, | |
1896 | EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS); | |
1897 | kfree(failrec); | |
1898 | return -EIO; | |
1899 | } | |
1900 | bio = bio_alloc(GFP_NOFS, 1); | |
1901 | bio->bi_private = state; | |
1902 | bio->bi_end_io = failed_bio->bi_end_io; | |
1903 | bio->bi_sector = failrec->logical >> 9; | |
1904 | bio->bi_bdev = failed_bio->bi_bdev; | |
e1c4b745 | 1905 | bio->bi_size = 0; |
d20f7043 | 1906 | |
7e38326f | 1907 | bio_add_page(bio, page, failrec->len, start - page_offset(page)); |
7b6d91da | 1908 | if (failed_bio->bi_rw & REQ_WRITE) |
1259ab75 CM |
1909 | rw = WRITE; |
1910 | else | |
1911 | rw = READ; | |
1912 | ||
c2db1073 | 1913 | ret = BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio, |
c8b97818 | 1914 | failrec->last_mirror, |
eaf25d93 | 1915 | failrec->bio_flags, 0); |
c2db1073 | 1916 | return ret; |
1259ab75 CM |
1917 | } |
1918 | ||
d352ac68 CM |
1919 | /* |
1920 | * each time an IO finishes, we do a fast check in the IO failure tree | |
1921 | * to see if we need to process or clean up an io_failure_record | |
1922 | */ | |
b2950863 | 1923 | static int btrfs_clean_io_failures(struct inode *inode, u64 start) |
1259ab75 CM |
1924 | { |
1925 | u64 private; | |
1926 | u64 private_failure; | |
1927 | struct io_failure_record *failure; | |
1928 | int ret; | |
1929 | ||
1930 | private = 0; | |
1931 | if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private, | |
ec29ed5b | 1932 | (u64)-1, 1, EXTENT_DIRTY, 0)) { |
1259ab75 CM |
1933 | ret = get_state_private(&BTRFS_I(inode)->io_failure_tree, |
1934 | start, &private_failure); | |
1935 | if (ret == 0) { | |
1936 | failure = (struct io_failure_record *)(unsigned long) | |
1937 | private_failure; | |
1938 | set_state_private(&BTRFS_I(inode)->io_failure_tree, | |
1939 | failure->start, 0); | |
1940 | clear_extent_bits(&BTRFS_I(inode)->io_failure_tree, | |
1941 | failure->start, | |
1942 | failure->start + failure->len - 1, | |
1943 | EXTENT_DIRTY | EXTENT_LOCKED, | |
1944 | GFP_NOFS); | |
1945 | kfree(failure); | |
1946 | } | |
1947 | } | |
7e38326f CM |
1948 | return 0; |
1949 | } | |
1950 | ||
d352ac68 CM |
1951 | /* |
1952 | * when reads are done, we need to check csums to verify the data is correct | |
1953 | * if there's a match, we allow the bio to finish. If not, we go through | |
1954 | * the io_failure_record routines to find good copies | |
1955 | */ | |
b2950863 | 1956 | static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end, |
70dec807 | 1957 | struct extent_state *state) |
07157aac | 1958 | { |
35ebb934 | 1959 | size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT); |
07157aac | 1960 | struct inode *inode = page->mapping->host; |
d1310b2e | 1961 | struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; |
07157aac | 1962 | char *kaddr; |
aadfeb6e | 1963 | u64 private = ~(u32)0; |
07157aac | 1964 | int ret; |
ff79f819 CM |
1965 | struct btrfs_root *root = BTRFS_I(inode)->root; |
1966 | u32 csum = ~(u32)0; | |
d1310b2e | 1967 | |
d20f7043 CM |
1968 | if (PageChecked(page)) { |
1969 | ClearPageChecked(page); | |
1970 | goto good; | |
1971 | } | |
6cbff00f CH |
1972 | |
1973 | if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) | |
17d217fe YZ |
1974 | return 0; |
1975 | ||
1976 | if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID && | |
9655d298 | 1977 | test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) { |
17d217fe YZ |
1978 | clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM, |
1979 | GFP_NOFS); | |
b6cda9bc | 1980 | return 0; |
17d217fe | 1981 | } |
d20f7043 | 1982 | |
c2e639f0 | 1983 | if (state && state->start == start) { |
70dec807 CM |
1984 | private = state->private; |
1985 | ret = 0; | |
1986 | } else { | |
1987 | ret = get_state_private(io_tree, start, &private); | |
1988 | } | |
9ab86c8e | 1989 | kaddr = kmap_atomic(page, KM_USER0); |
d397712b | 1990 | if (ret) |
07157aac | 1991 | goto zeroit; |
d397712b | 1992 | |
ff79f819 CM |
1993 | csum = btrfs_csum_data(root, kaddr + offset, csum, end - start + 1); |
1994 | btrfs_csum_final(csum, (char *)&csum); | |
d397712b | 1995 | if (csum != private) |
07157aac | 1996 | goto zeroit; |
d397712b | 1997 | |
9ab86c8e | 1998 | kunmap_atomic(kaddr, KM_USER0); |
d20f7043 | 1999 | good: |
7e38326f CM |
2000 | /* if the io failure tree for this inode is non-empty, |
2001 | * check to see if we've recovered from a failed IO | |
2002 | */ | |
1259ab75 | 2003 | btrfs_clean_io_failures(inode, start); |
07157aac CM |
2004 | return 0; |
2005 | ||
2006 | zeroit: | |
193f284d CM |
2007 | if (printk_ratelimit()) { |
2008 | printk(KERN_INFO "btrfs csum failed ino %lu off %llu csum %u " | |
2009 | "private %llu\n", page->mapping->host->i_ino, | |
2010 | (unsigned long long)start, csum, | |
2011 | (unsigned long long)private); | |
2012 | } | |
db94535d CM |
2013 | memset(kaddr + offset, 1, end - start + 1); |
2014 | flush_dcache_page(page); | |
9ab86c8e | 2015 | kunmap_atomic(kaddr, KM_USER0); |
3b951516 CM |
2016 | if (private == 0) |
2017 | return 0; | |
7e38326f | 2018 | return -EIO; |
07157aac | 2019 | } |
b888db2b | 2020 | |
24bbcf04 YZ |
2021 | struct delayed_iput { |
2022 | struct list_head list; | |
2023 | struct inode *inode; | |
2024 | }; | |
2025 | ||
2026 | void btrfs_add_delayed_iput(struct inode *inode) | |
2027 | { | |
2028 | struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; | |
2029 | struct delayed_iput *delayed; | |
2030 | ||
2031 | if (atomic_add_unless(&inode->i_count, -1, 1)) | |
2032 | return; | |
2033 | ||
2034 | delayed = kmalloc(sizeof(*delayed), GFP_NOFS | __GFP_NOFAIL); | |
2035 | delayed->inode = inode; | |
2036 | ||
2037 | spin_lock(&fs_info->delayed_iput_lock); | |
2038 | list_add_tail(&delayed->list, &fs_info->delayed_iputs); | |
2039 | spin_unlock(&fs_info->delayed_iput_lock); | |
2040 | } | |
2041 | ||
2042 | void btrfs_run_delayed_iputs(struct btrfs_root *root) | |
2043 | { | |
2044 | LIST_HEAD(list); | |
2045 | struct btrfs_fs_info *fs_info = root->fs_info; | |
2046 | struct delayed_iput *delayed; | |
2047 | int empty; | |
2048 | ||
2049 | spin_lock(&fs_info->delayed_iput_lock); | |
2050 | empty = list_empty(&fs_info->delayed_iputs); | |
2051 | spin_unlock(&fs_info->delayed_iput_lock); | |
2052 | if (empty) | |
2053 | return; | |
2054 | ||
2055 | down_read(&root->fs_info->cleanup_work_sem); | |
2056 | spin_lock(&fs_info->delayed_iput_lock); | |
2057 | list_splice_init(&fs_info->delayed_iputs, &list); | |
2058 | spin_unlock(&fs_info->delayed_iput_lock); | |
2059 | ||
2060 | while (!list_empty(&list)) { | |
2061 | delayed = list_entry(list.next, struct delayed_iput, list); | |
2062 | list_del(&delayed->list); | |
2063 | iput(delayed->inode); | |
2064 | kfree(delayed); | |
2065 | } | |
2066 | up_read(&root->fs_info->cleanup_work_sem); | |
2067 | } | |
2068 | ||
d68fc57b YZ |
2069 | /* |
2070 | * calculate extra metadata reservation when snapshotting a subvolume | |
2071 | * contains orphan files. | |
2072 | */ | |
2073 | void btrfs_orphan_pre_snapshot(struct btrfs_trans_handle *trans, | |
2074 | struct btrfs_pending_snapshot *pending, | |
2075 | u64 *bytes_to_reserve) | |
2076 | { | |
2077 | struct btrfs_root *root; | |
2078 | struct btrfs_block_rsv *block_rsv; | |
2079 | u64 num_bytes; | |
2080 | int index; | |
2081 | ||
2082 | root = pending->root; | |
2083 | if (!root->orphan_block_rsv || list_empty(&root->orphan_list)) | |
2084 | return; | |
2085 | ||
2086 | block_rsv = root->orphan_block_rsv; | |
2087 | ||
2088 | /* orphan block reservation for the snapshot */ | |
2089 | num_bytes = block_rsv->size; | |
2090 | ||
2091 | /* | |
2092 | * after the snapshot is created, COWing tree blocks may use more | |
2093 | * space than it frees. So we should make sure there is enough | |
2094 | * reserved space. | |
2095 | */ | |
2096 | index = trans->transid & 0x1; | |
2097 | if (block_rsv->reserved + block_rsv->freed[index] < block_rsv->size) { | |
2098 | num_bytes += block_rsv->size - | |
2099 | (block_rsv->reserved + block_rsv->freed[index]); | |
2100 | } | |
2101 | ||
2102 | *bytes_to_reserve += num_bytes; | |
2103 | } | |
2104 | ||
2105 | void btrfs_orphan_post_snapshot(struct btrfs_trans_handle *trans, | |
2106 | struct btrfs_pending_snapshot *pending) | |
2107 | { | |
2108 | struct btrfs_root *root = pending->root; | |
2109 | struct btrfs_root *snap = pending->snap; | |
2110 | struct btrfs_block_rsv *block_rsv; | |
2111 | u64 num_bytes; | |
2112 | int index; | |
2113 | int ret; | |
2114 | ||
2115 | if (!root->orphan_block_rsv || list_empty(&root->orphan_list)) | |
2116 | return; | |
2117 | ||
2118 | /* refill source subvolume's orphan block reservation */ | |
2119 | block_rsv = root->orphan_block_rsv; | |
2120 | index = trans->transid & 0x1; | |
2121 | if (block_rsv->reserved + block_rsv->freed[index] < block_rsv->size) { | |
2122 | num_bytes = block_rsv->size - | |
2123 | (block_rsv->reserved + block_rsv->freed[index]); | |
2124 | ret = btrfs_block_rsv_migrate(&pending->block_rsv, | |
2125 | root->orphan_block_rsv, | |
2126 | num_bytes); | |
2127 | BUG_ON(ret); | |
2128 | } | |
2129 | ||
2130 | /* setup orphan block reservation for the snapshot */ | |
2131 | block_rsv = btrfs_alloc_block_rsv(snap); | |
2132 | BUG_ON(!block_rsv); | |
2133 | ||
2134 | btrfs_add_durable_block_rsv(root->fs_info, block_rsv); | |
2135 | snap->orphan_block_rsv = block_rsv; | |
2136 | ||
2137 | num_bytes = root->orphan_block_rsv->size; | |
2138 | ret = btrfs_block_rsv_migrate(&pending->block_rsv, | |
2139 | block_rsv, num_bytes); | |
2140 | BUG_ON(ret); | |
2141 | ||
2142 | #if 0 | |
2143 | /* insert orphan item for the snapshot */ | |
2144 | WARN_ON(!root->orphan_item_inserted); | |
2145 | ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root, | |
2146 | snap->root_key.objectid); | |
2147 | BUG_ON(ret); | |
2148 | snap->orphan_item_inserted = 1; | |
2149 | #endif | |
2150 | } | |
2151 | ||
2152 | enum btrfs_orphan_cleanup_state { | |
2153 | ORPHAN_CLEANUP_STARTED = 1, | |
2154 | ORPHAN_CLEANUP_DONE = 2, | |
2155 | }; | |
2156 | ||
2157 | /* | |
2158 | * This is called in transaction commmit time. If there are no orphan | |
2159 | * files in the subvolume, it removes orphan item and frees block_rsv | |
2160 | * structure. | |
2161 | */ | |
2162 | void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans, | |
2163 | struct btrfs_root *root) | |
2164 | { | |
2165 | int ret; | |
2166 | ||
2167 | if (!list_empty(&root->orphan_list) || | |
2168 | root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE) | |
2169 | return; | |
2170 | ||
2171 | if (root->orphan_item_inserted && | |
2172 | btrfs_root_refs(&root->root_item) > 0) { | |
2173 | ret = btrfs_del_orphan_item(trans, root->fs_info->tree_root, | |
2174 | root->root_key.objectid); | |
2175 | BUG_ON(ret); | |
2176 | root->orphan_item_inserted = 0; | |
2177 | } | |
2178 | ||
2179 | if (root->orphan_block_rsv) { | |
2180 | WARN_ON(root->orphan_block_rsv->size > 0); | |
2181 | btrfs_free_block_rsv(root, root->orphan_block_rsv); | |
2182 | root->orphan_block_rsv = NULL; | |
2183 | } | |
2184 | } | |
2185 | ||
7b128766 JB |
2186 | /* |
2187 | * This creates an orphan entry for the given inode in case something goes | |
2188 | * wrong in the middle of an unlink/truncate. | |
d68fc57b YZ |
2189 | * |
2190 | * NOTE: caller of this function should reserve 5 units of metadata for | |
2191 | * this function. | |
7b128766 JB |
2192 | */ |
2193 | int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode) | |
2194 | { | |
2195 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
d68fc57b YZ |
2196 | struct btrfs_block_rsv *block_rsv = NULL; |
2197 | int reserve = 0; | |
2198 | int insert = 0; | |
2199 | int ret; | |
7b128766 | 2200 | |
d68fc57b YZ |
2201 | if (!root->orphan_block_rsv) { |
2202 | block_rsv = btrfs_alloc_block_rsv(root); | |
2203 | BUG_ON(!block_rsv); | |
2204 | } | |
7b128766 | 2205 | |
d68fc57b YZ |
2206 | spin_lock(&root->orphan_lock); |
2207 | if (!root->orphan_block_rsv) { | |
2208 | root->orphan_block_rsv = block_rsv; | |
2209 | } else if (block_rsv) { | |
2210 | btrfs_free_block_rsv(root, block_rsv); | |
2211 | block_rsv = NULL; | |
7b128766 | 2212 | } |
7b128766 | 2213 | |
d68fc57b YZ |
2214 | if (list_empty(&BTRFS_I(inode)->i_orphan)) { |
2215 | list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list); | |
2216 | #if 0 | |
2217 | /* | |
2218 | * For proper ENOSPC handling, we should do orphan | |
2219 | * cleanup when mounting. But this introduces backward | |
2220 | * compatibility issue. | |
2221 | */ | |
2222 | if (!xchg(&root->orphan_item_inserted, 1)) | |
2223 | insert = 2; | |
2224 | else | |
2225 | insert = 1; | |
2226 | #endif | |
2227 | insert = 1; | |
7b128766 JB |
2228 | } |
2229 | ||
d68fc57b YZ |
2230 | if (!BTRFS_I(inode)->orphan_meta_reserved) { |
2231 | BTRFS_I(inode)->orphan_meta_reserved = 1; | |
2232 | reserve = 1; | |
2233 | } | |
2234 | spin_unlock(&root->orphan_lock); | |
7b128766 | 2235 | |
d68fc57b YZ |
2236 | if (block_rsv) |
2237 | btrfs_add_durable_block_rsv(root->fs_info, block_rsv); | |
7b128766 | 2238 | |
d68fc57b YZ |
2239 | /* grab metadata reservation from transaction handle */ |
2240 | if (reserve) { | |
2241 | ret = btrfs_orphan_reserve_metadata(trans, inode); | |
2242 | BUG_ON(ret); | |
2243 | } | |
7b128766 | 2244 | |
d68fc57b YZ |
2245 | /* insert an orphan item to track this unlinked/truncated file */ |
2246 | if (insert >= 1) { | |
2247 | ret = btrfs_insert_orphan_item(trans, root, inode->i_ino); | |
2248 | BUG_ON(ret); | |
2249 | } | |
2250 | ||
2251 | /* insert an orphan item to track subvolume contains orphan files */ | |
2252 | if (insert >= 2) { | |
2253 | ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root, | |
2254 | root->root_key.objectid); | |
2255 | BUG_ON(ret); | |
2256 | } | |
2257 | return 0; | |
7b128766 JB |
2258 | } |
2259 | ||
2260 | /* | |
2261 | * We have done the truncate/delete so we can go ahead and remove the orphan | |
2262 | * item for this particular inode. | |
2263 | */ | |
2264 | int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode) | |
2265 | { | |
2266 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
d68fc57b YZ |
2267 | int delete_item = 0; |
2268 | int release_rsv = 0; | |
7b128766 JB |
2269 | int ret = 0; |
2270 | ||
d68fc57b YZ |
2271 | spin_lock(&root->orphan_lock); |
2272 | if (!list_empty(&BTRFS_I(inode)->i_orphan)) { | |
2273 | list_del_init(&BTRFS_I(inode)->i_orphan); | |
2274 | delete_item = 1; | |
7b128766 JB |
2275 | } |
2276 | ||
d68fc57b YZ |
2277 | if (BTRFS_I(inode)->orphan_meta_reserved) { |
2278 | BTRFS_I(inode)->orphan_meta_reserved = 0; | |
2279 | release_rsv = 1; | |
7b128766 | 2280 | } |
d68fc57b | 2281 | spin_unlock(&root->orphan_lock); |
7b128766 | 2282 | |
d68fc57b YZ |
2283 | if (trans && delete_item) { |
2284 | ret = btrfs_del_orphan_item(trans, root, inode->i_ino); | |
2285 | BUG_ON(ret); | |
2286 | } | |
7b128766 | 2287 | |
d68fc57b YZ |
2288 | if (release_rsv) |
2289 | btrfs_orphan_release_metadata(inode); | |
7b128766 | 2290 | |
d68fc57b | 2291 | return 0; |
7b128766 JB |
2292 | } |
2293 | ||
2294 | /* | |
2295 | * this cleans up any orphans that may be left on the list from the last use | |
2296 | * of this root. | |
2297 | */ | |
66b4ffd1 | 2298 | int btrfs_orphan_cleanup(struct btrfs_root *root) |
7b128766 JB |
2299 | { |
2300 | struct btrfs_path *path; | |
2301 | struct extent_buffer *leaf; | |
7b128766 JB |
2302 | struct btrfs_key key, found_key; |
2303 | struct btrfs_trans_handle *trans; | |
2304 | struct inode *inode; | |
2305 | int ret = 0, nr_unlink = 0, nr_truncate = 0; | |
2306 | ||
d68fc57b | 2307 | if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED)) |
66b4ffd1 | 2308 | return 0; |
c71bf099 YZ |
2309 | |
2310 | path = btrfs_alloc_path(); | |
66b4ffd1 JB |
2311 | if (!path) { |
2312 | ret = -ENOMEM; | |
2313 | goto out; | |
2314 | } | |
7b128766 JB |
2315 | path->reada = -1; |
2316 | ||
2317 | key.objectid = BTRFS_ORPHAN_OBJECTID; | |
2318 | btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY); | |
2319 | key.offset = (u64)-1; | |
2320 | ||
7b128766 JB |
2321 | while (1) { |
2322 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | |
66b4ffd1 JB |
2323 | if (ret < 0) |
2324 | goto out; | |
7b128766 JB |
2325 | |
2326 | /* | |
2327 | * if ret == 0 means we found what we were searching for, which | |
25985edc | 2328 | * is weird, but possible, so only screw with path if we didn't |
7b128766 JB |
2329 | * find the key and see if we have stuff that matches |
2330 | */ | |
2331 | if (ret > 0) { | |
66b4ffd1 | 2332 | ret = 0; |
7b128766 JB |
2333 | if (path->slots[0] == 0) |
2334 | break; | |
2335 | path->slots[0]--; | |
2336 | } | |
2337 | ||
2338 | /* pull out the item */ | |
2339 | leaf = path->nodes[0]; | |
7b128766 JB |
2340 | btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); |
2341 | ||
2342 | /* make sure the item matches what we want */ | |
2343 | if (found_key.objectid != BTRFS_ORPHAN_OBJECTID) | |
2344 | break; | |
2345 | if (btrfs_key_type(&found_key) != BTRFS_ORPHAN_ITEM_KEY) | |
2346 | break; | |
2347 | ||
2348 | /* release the path since we're done with it */ | |
2349 | btrfs_release_path(root, path); | |
2350 | ||
2351 | /* | |
2352 | * this is where we are basically btrfs_lookup, without the | |
2353 | * crossing root thing. we store the inode number in the | |
2354 | * offset of the orphan item. | |
2355 | */ | |
5d4f98a2 YZ |
2356 | found_key.objectid = found_key.offset; |
2357 | found_key.type = BTRFS_INODE_ITEM_KEY; | |
2358 | found_key.offset = 0; | |
73f73415 | 2359 | inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL); |
66b4ffd1 JB |
2360 | if (IS_ERR(inode)) { |
2361 | ret = PTR_ERR(inode); | |
2362 | goto out; | |
2363 | } | |
7b128766 | 2364 | |
7b128766 JB |
2365 | /* |
2366 | * add this inode to the orphan list so btrfs_orphan_del does | |
2367 | * the proper thing when we hit it | |
2368 | */ | |
d68fc57b | 2369 | spin_lock(&root->orphan_lock); |
7b128766 | 2370 | list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list); |
d68fc57b | 2371 | spin_unlock(&root->orphan_lock); |
7b128766 JB |
2372 | |
2373 | /* | |
2374 | * if this is a bad inode, means we actually succeeded in | |
2375 | * removing the inode, but not the orphan record, which means | |
2376 | * we need to manually delete the orphan since iput will just | |
2377 | * do a destroy_inode | |
2378 | */ | |
2379 | if (is_bad_inode(inode)) { | |
a22285a6 | 2380 | trans = btrfs_start_transaction(root, 0); |
66b4ffd1 JB |
2381 | if (IS_ERR(trans)) { |
2382 | ret = PTR_ERR(trans); | |
2383 | goto out; | |
2384 | } | |
7b128766 | 2385 | btrfs_orphan_del(trans, inode); |
5b21f2ed | 2386 | btrfs_end_transaction(trans, root); |
7b128766 JB |
2387 | iput(inode); |
2388 | continue; | |
2389 | } | |
2390 | ||
2391 | /* if we have links, this was a truncate, lets do that */ | |
2392 | if (inode->i_nlink) { | |
a41ad394 JB |
2393 | if (!S_ISREG(inode->i_mode)) { |
2394 | WARN_ON(1); | |
2395 | iput(inode); | |
2396 | continue; | |
2397 | } | |
7b128766 | 2398 | nr_truncate++; |
66b4ffd1 | 2399 | ret = btrfs_truncate(inode); |
7b128766 JB |
2400 | } else { |
2401 | nr_unlink++; | |
2402 | } | |
2403 | ||
2404 | /* this will do delete_inode and everything for us */ | |
2405 | iput(inode); | |
66b4ffd1 JB |
2406 | if (ret) |
2407 | goto out; | |
7b128766 | 2408 | } |
d68fc57b YZ |
2409 | root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE; |
2410 | ||
2411 | if (root->orphan_block_rsv) | |
2412 | btrfs_block_rsv_release(root, root->orphan_block_rsv, | |
2413 | (u64)-1); | |
2414 | ||
2415 | if (root->orphan_block_rsv || root->orphan_item_inserted) { | |
2416 | trans = btrfs_join_transaction(root, 1); | |
66b4ffd1 JB |
2417 | if (!IS_ERR(trans)) |
2418 | btrfs_end_transaction(trans, root); | |
d68fc57b | 2419 | } |
7b128766 JB |
2420 | |
2421 | if (nr_unlink) | |
2422 | printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink); | |
2423 | if (nr_truncate) | |
2424 | printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate); | |
66b4ffd1 JB |
2425 | |
2426 | out: | |
2427 | if (ret) | |
2428 | printk(KERN_CRIT "btrfs: could not do orphan cleanup %d\n", ret); | |
2429 | btrfs_free_path(path); | |
2430 | return ret; | |
7b128766 JB |
2431 | } |
2432 | ||
46a53cca CM |
2433 | /* |
2434 | * very simple check to peek ahead in the leaf looking for xattrs. If we | |
2435 | * don't find any xattrs, we know there can't be any acls. | |
2436 | * | |
2437 | * slot is the slot the inode is in, objectid is the objectid of the inode | |
2438 | */ | |
2439 | static noinline int acls_after_inode_item(struct extent_buffer *leaf, | |
2440 | int slot, u64 objectid) | |
2441 | { | |
2442 | u32 nritems = btrfs_header_nritems(leaf); | |
2443 | struct btrfs_key found_key; | |
2444 | int scanned = 0; | |
2445 | ||
2446 | slot++; | |
2447 | while (slot < nritems) { | |
2448 | btrfs_item_key_to_cpu(leaf, &found_key, slot); | |
2449 | ||
2450 | /* we found a different objectid, there must not be acls */ | |
2451 | if (found_key.objectid != objectid) | |
2452 | return 0; | |
2453 | ||
2454 | /* we found an xattr, assume we've got an acl */ | |
2455 | if (found_key.type == BTRFS_XATTR_ITEM_KEY) | |
2456 | return 1; | |
2457 | ||
2458 | /* | |
2459 | * we found a key greater than an xattr key, there can't | |
2460 | * be any acls later on | |
2461 | */ | |
2462 | if (found_key.type > BTRFS_XATTR_ITEM_KEY) | |
2463 | return 0; | |
2464 | ||
2465 | slot++; | |
2466 | scanned++; | |
2467 | ||
2468 | /* | |
2469 | * it goes inode, inode backrefs, xattrs, extents, | |
2470 | * so if there are a ton of hard links to an inode there can | |
2471 | * be a lot of backrefs. Don't waste time searching too hard, | |
2472 | * this is just an optimization | |
2473 | */ | |
2474 | if (scanned >= 8) | |
2475 | break; | |
2476 | } | |
2477 | /* we hit the end of the leaf before we found an xattr or | |
2478 | * something larger than an xattr. We have to assume the inode | |
2479 | * has acls | |
2480 | */ | |
2481 | return 1; | |
2482 | } | |
2483 | ||
d352ac68 CM |
2484 | /* |
2485 | * read an inode from the btree into the in-memory inode | |
2486 | */ | |
5d4f98a2 | 2487 | static void btrfs_read_locked_inode(struct inode *inode) |
39279cc3 CM |
2488 | { |
2489 | struct btrfs_path *path; | |
5f39d397 | 2490 | struct extent_buffer *leaf; |
39279cc3 | 2491 | struct btrfs_inode_item *inode_item; |
0b86a832 | 2492 | struct btrfs_timespec *tspec; |
39279cc3 CM |
2493 | struct btrfs_root *root = BTRFS_I(inode)->root; |
2494 | struct btrfs_key location; | |
46a53cca | 2495 | int maybe_acls; |
39279cc3 | 2496 | u64 alloc_group_block; |
618e21d5 | 2497 | u32 rdev; |
39279cc3 CM |
2498 | int ret; |
2499 | ||
2500 | path = btrfs_alloc_path(); | |
2501 | BUG_ON(!path); | |
39279cc3 | 2502 | memcpy(&location, &BTRFS_I(inode)->location, sizeof(location)); |
dc17ff8f | 2503 | |
39279cc3 | 2504 | ret = btrfs_lookup_inode(NULL, root, path, &location, 0); |
5f39d397 | 2505 | if (ret) |
39279cc3 | 2506 | goto make_bad; |
39279cc3 | 2507 | |
5f39d397 CM |
2508 | leaf = path->nodes[0]; |
2509 | inode_item = btrfs_item_ptr(leaf, path->slots[0], | |
2510 | struct btrfs_inode_item); | |
2511 | ||
2512 | inode->i_mode = btrfs_inode_mode(leaf, inode_item); | |
2513 | inode->i_nlink = btrfs_inode_nlink(leaf, inode_item); | |
2514 | inode->i_uid = btrfs_inode_uid(leaf, inode_item); | |
2515 | inode->i_gid = btrfs_inode_gid(leaf, inode_item); | |
dbe674a9 | 2516 | btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item)); |
5f39d397 CM |
2517 | |
2518 | tspec = btrfs_inode_atime(inode_item); | |
2519 | inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec); | |
2520 | inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec); | |
2521 | ||
2522 | tspec = btrfs_inode_mtime(inode_item); | |
2523 | inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec); | |
2524 | inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec); | |
2525 | ||
2526 | tspec = btrfs_inode_ctime(inode_item); | |
2527 | inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec); | |
2528 | inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec); | |
2529 | ||
a76a3cd4 | 2530 | inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item)); |
e02119d5 | 2531 | BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item); |
c3027eb5 | 2532 | BTRFS_I(inode)->sequence = btrfs_inode_sequence(leaf, inode_item); |
e02119d5 | 2533 | inode->i_generation = BTRFS_I(inode)->generation; |
618e21d5 | 2534 | inode->i_rdev = 0; |
5f39d397 CM |
2535 | rdev = btrfs_inode_rdev(leaf, inode_item); |
2536 | ||
aec7477b | 2537 | BTRFS_I(inode)->index_cnt = (u64)-1; |
d2fb3437 | 2538 | BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item); |
aec7477b | 2539 | |
5f39d397 | 2540 | alloc_group_block = btrfs_inode_block_group(leaf, inode_item); |
b4ce94de | 2541 | |
46a53cca CM |
2542 | /* |
2543 | * try to precache a NULL acl entry for files that don't have | |
2544 | * any xattrs or acls | |
2545 | */ | |
2546 | maybe_acls = acls_after_inode_item(leaf, path->slots[0], inode->i_ino); | |
72c04902 AV |
2547 | if (!maybe_acls) |
2548 | cache_no_acl(inode); | |
46a53cca | 2549 | |
d2fb3437 YZ |
2550 | BTRFS_I(inode)->block_group = btrfs_find_block_group(root, 0, |
2551 | alloc_group_block, 0); | |
39279cc3 CM |
2552 | btrfs_free_path(path); |
2553 | inode_item = NULL; | |
2554 | ||
39279cc3 | 2555 | switch (inode->i_mode & S_IFMT) { |
39279cc3 CM |
2556 | case S_IFREG: |
2557 | inode->i_mapping->a_ops = &btrfs_aops; | |
04160088 | 2558 | inode->i_mapping->backing_dev_info = &root->fs_info->bdi; |
d1310b2e | 2559 | BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; |
39279cc3 CM |
2560 | inode->i_fop = &btrfs_file_operations; |
2561 | inode->i_op = &btrfs_file_inode_operations; | |
2562 | break; | |
2563 | case S_IFDIR: | |
2564 | inode->i_fop = &btrfs_dir_file_operations; | |
2565 | if (root == root->fs_info->tree_root) | |
2566 | inode->i_op = &btrfs_dir_ro_inode_operations; | |
2567 | else | |
2568 | inode->i_op = &btrfs_dir_inode_operations; | |
2569 | break; | |
2570 | case S_IFLNK: | |
2571 | inode->i_op = &btrfs_symlink_inode_operations; | |
2572 | inode->i_mapping->a_ops = &btrfs_symlink_aops; | |
04160088 | 2573 | inode->i_mapping->backing_dev_info = &root->fs_info->bdi; |
39279cc3 | 2574 | break; |
618e21d5 | 2575 | default: |
0279b4cd | 2576 | inode->i_op = &btrfs_special_inode_operations; |
618e21d5 JB |
2577 | init_special_inode(inode, inode->i_mode, rdev); |
2578 | break; | |
39279cc3 | 2579 | } |
6cbff00f CH |
2580 | |
2581 | btrfs_update_iflags(inode); | |
39279cc3 CM |
2582 | return; |
2583 | ||
2584 | make_bad: | |
39279cc3 | 2585 | btrfs_free_path(path); |
39279cc3 CM |
2586 | make_bad_inode(inode); |
2587 | } | |
2588 | ||
d352ac68 CM |
2589 | /* |
2590 | * given a leaf and an inode, copy the inode fields into the leaf | |
2591 | */ | |
e02119d5 CM |
2592 | static void fill_inode_item(struct btrfs_trans_handle *trans, |
2593 | struct extent_buffer *leaf, | |
5f39d397 | 2594 | struct btrfs_inode_item *item, |
39279cc3 CM |
2595 | struct inode *inode) |
2596 | { | |
12ddb96c JB |
2597 | if (!leaf->map_token) |
2598 | map_private_extent_buffer(leaf, (unsigned long)item, | |
2599 | sizeof(struct btrfs_inode_item), | |
2600 | &leaf->map_token, &leaf->kaddr, | |
2601 | &leaf->map_start, &leaf->map_len, | |
2602 | KM_USER1); | |
2603 | ||
5f39d397 CM |
2604 | btrfs_set_inode_uid(leaf, item, inode->i_uid); |
2605 | btrfs_set_inode_gid(leaf, item, inode->i_gid); | |
dbe674a9 | 2606 | btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size); |
5f39d397 CM |
2607 | btrfs_set_inode_mode(leaf, item, inode->i_mode); |
2608 | btrfs_set_inode_nlink(leaf, item, inode->i_nlink); | |
2609 | ||
2610 | btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item), | |
2611 | inode->i_atime.tv_sec); | |
2612 | btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item), | |
2613 | inode->i_atime.tv_nsec); | |
2614 | ||
2615 | btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item), | |
2616 | inode->i_mtime.tv_sec); | |
2617 | btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item), | |
2618 | inode->i_mtime.tv_nsec); | |
2619 | ||
2620 | btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item), | |
2621 | inode->i_ctime.tv_sec); | |
2622 | btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item), | |
2623 | inode->i_ctime.tv_nsec); | |
2624 | ||
a76a3cd4 | 2625 | btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode)); |
e02119d5 | 2626 | btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation); |
c3027eb5 | 2627 | btrfs_set_inode_sequence(leaf, item, BTRFS_I(inode)->sequence); |
e02119d5 | 2628 | btrfs_set_inode_transid(leaf, item, trans->transid); |
5f39d397 | 2629 | btrfs_set_inode_rdev(leaf, item, inode->i_rdev); |
b98b6767 | 2630 | btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags); |
d2fb3437 | 2631 | btrfs_set_inode_block_group(leaf, item, BTRFS_I(inode)->block_group); |
12ddb96c JB |
2632 | |
2633 | if (leaf->map_token) { | |
2634 | unmap_extent_buffer(leaf, leaf->map_token, KM_USER1); | |
2635 | leaf->map_token = NULL; | |
2636 | } | |
39279cc3 CM |
2637 | } |
2638 | ||
d352ac68 CM |
2639 | /* |
2640 | * copy everything in the in-memory inode into the btree. | |
2641 | */ | |
d397712b CM |
2642 | noinline int btrfs_update_inode(struct btrfs_trans_handle *trans, |
2643 | struct btrfs_root *root, struct inode *inode) | |
39279cc3 CM |
2644 | { |
2645 | struct btrfs_inode_item *inode_item; | |
2646 | struct btrfs_path *path; | |
5f39d397 | 2647 | struct extent_buffer *leaf; |
39279cc3 CM |
2648 | int ret; |
2649 | ||
2650 | path = btrfs_alloc_path(); | |
2651 | BUG_ON(!path); | |
b9473439 | 2652 | path->leave_spinning = 1; |
39279cc3 CM |
2653 | ret = btrfs_lookup_inode(trans, root, path, |
2654 | &BTRFS_I(inode)->location, 1); | |
2655 | if (ret) { | |
2656 | if (ret > 0) | |
2657 | ret = -ENOENT; | |
2658 | goto failed; | |
2659 | } | |
2660 | ||
b4ce94de | 2661 | btrfs_unlock_up_safe(path, 1); |
5f39d397 CM |
2662 | leaf = path->nodes[0]; |
2663 | inode_item = btrfs_item_ptr(leaf, path->slots[0], | |
39279cc3 CM |
2664 | struct btrfs_inode_item); |
2665 | ||
e02119d5 | 2666 | fill_inode_item(trans, leaf, inode_item, inode); |
5f39d397 | 2667 | btrfs_mark_buffer_dirty(leaf); |
15ee9bc7 | 2668 | btrfs_set_inode_last_trans(trans, inode); |
39279cc3 CM |
2669 | ret = 0; |
2670 | failed: | |
39279cc3 CM |
2671 | btrfs_free_path(path); |
2672 | return ret; | |
2673 | } | |
2674 | ||
2675 | ||
d352ac68 CM |
2676 | /* |
2677 | * unlink helper that gets used here in inode.c and in the tree logging | |
2678 | * recovery code. It remove a link in a directory with a given name, and | |
2679 | * also drops the back refs in the inode to the directory | |
2680 | */ | |
92986796 AV |
2681 | static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans, |
2682 | struct btrfs_root *root, | |
2683 | struct inode *dir, struct inode *inode, | |
2684 | const char *name, int name_len) | |
39279cc3 CM |
2685 | { |
2686 | struct btrfs_path *path; | |
39279cc3 | 2687 | int ret = 0; |
5f39d397 | 2688 | struct extent_buffer *leaf; |
39279cc3 | 2689 | struct btrfs_dir_item *di; |
5f39d397 | 2690 | struct btrfs_key key; |
aec7477b | 2691 | u64 index; |
39279cc3 CM |
2692 | |
2693 | path = btrfs_alloc_path(); | |
54aa1f4d CM |
2694 | if (!path) { |
2695 | ret = -ENOMEM; | |
554233a6 | 2696 | goto out; |
54aa1f4d CM |
2697 | } |
2698 | ||
b9473439 | 2699 | path->leave_spinning = 1; |
39279cc3 CM |
2700 | di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino, |
2701 | name, name_len, -1); | |
2702 | if (IS_ERR(di)) { | |
2703 | ret = PTR_ERR(di); | |
2704 | goto err; | |
2705 | } | |
2706 | if (!di) { | |
2707 | ret = -ENOENT; | |
2708 | goto err; | |
2709 | } | |
5f39d397 CM |
2710 | leaf = path->nodes[0]; |
2711 | btrfs_dir_item_key_to_cpu(leaf, di, &key); | |
39279cc3 | 2712 | ret = btrfs_delete_one_dir_name(trans, root, path, di); |
54aa1f4d CM |
2713 | if (ret) |
2714 | goto err; | |
39279cc3 CM |
2715 | btrfs_release_path(root, path); |
2716 | ||
aec7477b | 2717 | ret = btrfs_del_inode_ref(trans, root, name, name_len, |
e02119d5 CM |
2718 | inode->i_ino, |
2719 | dir->i_ino, &index); | |
aec7477b | 2720 | if (ret) { |
d397712b | 2721 | printk(KERN_INFO "btrfs failed to delete reference to %.*s, " |
aec7477b | 2722 | "inode %lu parent %lu\n", name_len, name, |
e02119d5 | 2723 | inode->i_ino, dir->i_ino); |
aec7477b JB |
2724 | goto err; |
2725 | } | |
2726 | ||
39279cc3 | 2727 | di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino, |
aec7477b | 2728 | index, name, name_len, -1); |
39279cc3 CM |
2729 | if (IS_ERR(di)) { |
2730 | ret = PTR_ERR(di); | |
2731 | goto err; | |
2732 | } | |
2733 | if (!di) { | |
2734 | ret = -ENOENT; | |
2735 | goto err; | |
2736 | } | |
2737 | ret = btrfs_delete_one_dir_name(trans, root, path, di); | |
925baedd | 2738 | btrfs_release_path(root, path); |
39279cc3 | 2739 | |
e02119d5 CM |
2740 | ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len, |
2741 | inode, dir->i_ino); | |
49eb7e46 | 2742 | BUG_ON(ret != 0 && ret != -ENOENT); |
e02119d5 CM |
2743 | |
2744 | ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len, | |
2745 | dir, index); | |
6418c961 CM |
2746 | if (ret == -ENOENT) |
2747 | ret = 0; | |
39279cc3 CM |
2748 | err: |
2749 | btrfs_free_path(path); | |
e02119d5 CM |
2750 | if (ret) |
2751 | goto out; | |
2752 | ||
2753 | btrfs_i_size_write(dir, dir->i_size - name_len * 2); | |
2754 | inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME; | |
2755 | btrfs_update_inode(trans, root, dir); | |
e02119d5 | 2756 | out: |
39279cc3 CM |
2757 | return ret; |
2758 | } | |
2759 | ||
92986796 AV |
2760 | int btrfs_unlink_inode(struct btrfs_trans_handle *trans, |
2761 | struct btrfs_root *root, | |
2762 | struct inode *dir, struct inode *inode, | |
2763 | const char *name, int name_len) | |
2764 | { | |
2765 | int ret; | |
2766 | ret = __btrfs_unlink_inode(trans, root, dir, inode, name, name_len); | |
2767 | if (!ret) { | |
2768 | btrfs_drop_nlink(inode); | |
2769 | ret = btrfs_update_inode(trans, root, inode); | |
2770 | } | |
2771 | return ret; | |
2772 | } | |
2773 | ||
2774 | ||
a22285a6 YZ |
2775 | /* helper to check if there is any shared block in the path */ |
2776 | static int check_path_shared(struct btrfs_root *root, | |
2777 | struct btrfs_path *path) | |
39279cc3 | 2778 | { |
a22285a6 YZ |
2779 | struct extent_buffer *eb; |
2780 | int level; | |
0e4dcbef | 2781 | u64 refs = 1; |
5df6a9f6 | 2782 | |
a22285a6 | 2783 | for (level = 0; level < BTRFS_MAX_LEVEL; level++) { |
dedefd72 JB |
2784 | int ret; |
2785 | ||
a22285a6 YZ |
2786 | if (!path->nodes[level]) |
2787 | break; | |
2788 | eb = path->nodes[level]; | |
2789 | if (!btrfs_block_can_be_shared(root, eb)) | |
2790 | continue; | |
2791 | ret = btrfs_lookup_extent_info(NULL, root, eb->start, eb->len, | |
2792 | &refs, NULL); | |
2793 | if (refs > 1) | |
2794 | return 1; | |
5df6a9f6 | 2795 | } |
dedefd72 | 2796 | return 0; |
39279cc3 CM |
2797 | } |
2798 | ||
a22285a6 YZ |
2799 | /* |
2800 | * helper to start transaction for unlink and rmdir. | |
2801 | * | |
2802 | * unlink and rmdir are special in btrfs, they do not always free space. | |
2803 | * so in enospc case, we should make sure they will free space before | |
2804 | * allowing them to use the global metadata reservation. | |
2805 | */ | |
2806 | static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir, | |
2807 | struct dentry *dentry) | |
4df27c4d | 2808 | { |
39279cc3 | 2809 | struct btrfs_trans_handle *trans; |
a22285a6 | 2810 | struct btrfs_root *root = BTRFS_I(dir)->root; |
4df27c4d | 2811 | struct btrfs_path *path; |
a22285a6 | 2812 | struct btrfs_inode_ref *ref; |
4df27c4d | 2813 | struct btrfs_dir_item *di; |
7b128766 | 2814 | struct inode *inode = dentry->d_inode; |
4df27c4d | 2815 | u64 index; |
a22285a6 YZ |
2816 | int check_link = 1; |
2817 | int err = -ENOSPC; | |
4df27c4d YZ |
2818 | int ret; |
2819 | ||
a22285a6 YZ |
2820 | trans = btrfs_start_transaction(root, 10); |
2821 | if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC) | |
2822 | return trans; | |
4df27c4d | 2823 | |
a22285a6 YZ |
2824 | if (inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) |
2825 | return ERR_PTR(-ENOSPC); | |
4df27c4d | 2826 | |
a22285a6 YZ |
2827 | /* check if there is someone else holds reference */ |
2828 | if (S_ISDIR(inode->i_mode) && atomic_read(&inode->i_count) > 1) | |
2829 | return ERR_PTR(-ENOSPC); | |
4df27c4d | 2830 | |
a22285a6 YZ |
2831 | if (atomic_read(&inode->i_count) > 2) |
2832 | return ERR_PTR(-ENOSPC); | |
4df27c4d | 2833 | |
a22285a6 YZ |
2834 | if (xchg(&root->fs_info->enospc_unlink, 1)) |
2835 | return ERR_PTR(-ENOSPC); | |
2836 | ||
2837 | path = btrfs_alloc_path(); | |
2838 | if (!path) { | |
2839 | root->fs_info->enospc_unlink = 0; | |
2840 | return ERR_PTR(-ENOMEM); | |
4df27c4d YZ |
2841 | } |
2842 | ||
a22285a6 | 2843 | trans = btrfs_start_transaction(root, 0); |
5df6a9f6 | 2844 | if (IS_ERR(trans)) { |
a22285a6 YZ |
2845 | btrfs_free_path(path); |
2846 | root->fs_info->enospc_unlink = 0; | |
2847 | return trans; | |
2848 | } | |
4df27c4d | 2849 | |
a22285a6 YZ |
2850 | path->skip_locking = 1; |
2851 | path->search_commit_root = 1; | |
4df27c4d | 2852 | |
a22285a6 YZ |
2853 | ret = btrfs_lookup_inode(trans, root, path, |
2854 | &BTRFS_I(dir)->location, 0); | |
2855 | if (ret < 0) { | |
2856 | err = ret; | |
2857 | goto out; | |
2858 | } | |
2859 | if (ret == 0) { | |
2860 | if (check_path_shared(root, path)) | |
2861 | goto out; | |
2862 | } else { | |
2863 | check_link = 0; | |
5df6a9f6 | 2864 | } |
a22285a6 YZ |
2865 | btrfs_release_path(root, path); |
2866 | ||
2867 | ret = btrfs_lookup_inode(trans, root, path, | |
2868 | &BTRFS_I(inode)->location, 0); | |
2869 | if (ret < 0) { | |
2870 | err = ret; | |
2871 | goto out; | |
2872 | } | |
2873 | if (ret == 0) { | |
2874 | if (check_path_shared(root, path)) | |
2875 | goto out; | |
2876 | } else { | |
2877 | check_link = 0; | |
2878 | } | |
2879 | btrfs_release_path(root, path); | |
2880 | ||
2881 | if (ret == 0 && S_ISREG(inode->i_mode)) { | |
2882 | ret = btrfs_lookup_file_extent(trans, root, path, | |
2883 | inode->i_ino, (u64)-1, 0); | |
2884 | if (ret < 0) { | |
2885 | err = ret; | |
2886 | goto out; | |
2887 | } | |
2888 | BUG_ON(ret == 0); | |
2889 | if (check_path_shared(root, path)) | |
2890 | goto out; | |
2891 | btrfs_release_path(root, path); | |
2892 | } | |
2893 | ||
2894 | if (!check_link) { | |
2895 | err = 0; | |
2896 | goto out; | |
2897 | } | |
2898 | ||
2899 | di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino, | |
2900 | dentry->d_name.name, dentry->d_name.len, 0); | |
2901 | if (IS_ERR(di)) { | |
2902 | err = PTR_ERR(di); | |
2903 | goto out; | |
2904 | } | |
2905 | if (di) { | |
2906 | if (check_path_shared(root, path)) | |
2907 | goto out; | |
2908 | } else { | |
2909 | err = 0; | |
2910 | goto out; | |
2911 | } | |
2912 | btrfs_release_path(root, path); | |
2913 | ||
2914 | ref = btrfs_lookup_inode_ref(trans, root, path, | |
2915 | dentry->d_name.name, dentry->d_name.len, | |
2916 | inode->i_ino, dir->i_ino, 0); | |
2917 | if (IS_ERR(ref)) { | |
2918 | err = PTR_ERR(ref); | |
2919 | goto out; | |
2920 | } | |
2921 | BUG_ON(!ref); | |
2922 | if (check_path_shared(root, path)) | |
2923 | goto out; | |
2924 | index = btrfs_inode_ref_index(path->nodes[0], ref); | |
2925 | btrfs_release_path(root, path); | |
2926 | ||
2927 | di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino, index, | |
2928 | dentry->d_name.name, dentry->d_name.len, 0); | |
2929 | if (IS_ERR(di)) { | |
2930 | err = PTR_ERR(di); | |
2931 | goto out; | |
2932 | } | |
2933 | BUG_ON(ret == -ENOENT); | |
2934 | if (check_path_shared(root, path)) | |
2935 | goto out; | |
2936 | ||
2937 | err = 0; | |
2938 | out: | |
2939 | btrfs_free_path(path); | |
2940 | if (err) { | |
2941 | btrfs_end_transaction(trans, root); | |
2942 | root->fs_info->enospc_unlink = 0; | |
2943 | return ERR_PTR(err); | |
2944 | } | |
2945 | ||
2946 | trans->block_rsv = &root->fs_info->global_block_rsv; | |
2947 | return trans; | |
2948 | } | |
2949 | ||
2950 | static void __unlink_end_trans(struct btrfs_trans_handle *trans, | |
2951 | struct btrfs_root *root) | |
2952 | { | |
2953 | if (trans->block_rsv == &root->fs_info->global_block_rsv) { | |
2954 | BUG_ON(!root->fs_info->enospc_unlink); | |
2955 | root->fs_info->enospc_unlink = 0; | |
2956 | } | |
2957 | btrfs_end_transaction_throttle(trans, root); | |
2958 | } | |
2959 | ||
2960 | static int btrfs_unlink(struct inode *dir, struct dentry *dentry) | |
2961 | { | |
2962 | struct btrfs_root *root = BTRFS_I(dir)->root; | |
2963 | struct btrfs_trans_handle *trans; | |
2964 | struct inode *inode = dentry->d_inode; | |
2965 | int ret; | |
2966 | unsigned long nr = 0; | |
2967 | ||
2968 | trans = __unlink_start_trans(dir, dentry); | |
2969 | if (IS_ERR(trans)) | |
2970 | return PTR_ERR(trans); | |
5f39d397 | 2971 | |
39279cc3 | 2972 | btrfs_set_trans_block_group(trans, dir); |
12fcfd22 CM |
2973 | |
2974 | btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0); | |
2975 | ||
e02119d5 CM |
2976 | ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode, |
2977 | dentry->d_name.name, dentry->d_name.len); | |
a22285a6 | 2978 | BUG_ON(ret); |
7b128766 | 2979 | |
a22285a6 | 2980 | if (inode->i_nlink == 0) { |
7b128766 | 2981 | ret = btrfs_orphan_add(trans, inode); |
a22285a6 YZ |
2982 | BUG_ON(ret); |
2983 | } | |
7b128766 | 2984 | |
d3c2fdcf | 2985 | nr = trans->blocks_used; |
a22285a6 | 2986 | __unlink_end_trans(trans, root); |
d3c2fdcf | 2987 | btrfs_btree_balance_dirty(root, nr); |
39279cc3 CM |
2988 | return ret; |
2989 | } | |
2990 | ||
4df27c4d YZ |
2991 | int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, |
2992 | struct btrfs_root *root, | |
2993 | struct inode *dir, u64 objectid, | |
2994 | const char *name, int name_len) | |
2995 | { | |
2996 | struct btrfs_path *path; | |
2997 | struct extent_buffer *leaf; | |
2998 | struct btrfs_dir_item *di; | |
2999 | struct btrfs_key key; | |
3000 | u64 index; | |
3001 | int ret; | |
3002 | ||
3003 | path = btrfs_alloc_path(); | |
3004 | if (!path) | |
3005 | return -ENOMEM; | |
3006 | ||
3007 | di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino, | |
3008 | name, name_len, -1); | |
3009 | BUG_ON(!di || IS_ERR(di)); | |
3010 | ||
3011 | leaf = path->nodes[0]; | |
3012 | btrfs_dir_item_key_to_cpu(leaf, di, &key); | |
3013 | WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid); | |
3014 | ret = btrfs_delete_one_dir_name(trans, root, path, di); | |
3015 | BUG_ON(ret); | |
3016 | btrfs_release_path(root, path); | |
3017 | ||
3018 | ret = btrfs_del_root_ref(trans, root->fs_info->tree_root, | |
3019 | objectid, root->root_key.objectid, | |
3020 | dir->i_ino, &index, name, name_len); | |
3021 | if (ret < 0) { | |
3022 | BUG_ON(ret != -ENOENT); | |
3023 | di = btrfs_search_dir_index_item(root, path, dir->i_ino, | |
3024 | name, name_len); | |
3025 | BUG_ON(!di || IS_ERR(di)); | |
3026 | ||
3027 | leaf = path->nodes[0]; | |
3028 | btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); | |
3029 | btrfs_release_path(root, path); | |
3030 | index = key.offset; | |
3031 | } | |
3032 | ||
3033 | di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino, | |
3034 | index, name, name_len, -1); | |
3035 | BUG_ON(!di || IS_ERR(di)); | |
3036 | ||
3037 | leaf = path->nodes[0]; | |
3038 | btrfs_dir_item_key_to_cpu(leaf, di, &key); | |
3039 | WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid); | |
3040 | ret = btrfs_delete_one_dir_name(trans, root, path, di); | |
3041 | BUG_ON(ret); | |
3042 | btrfs_release_path(root, path); | |
3043 | ||
3044 | btrfs_i_size_write(dir, dir->i_size - name_len * 2); | |
3045 | dir->i_mtime = dir->i_ctime = CURRENT_TIME; | |
3046 | ret = btrfs_update_inode(trans, root, dir); | |
3047 | BUG_ON(ret); | |
4df27c4d YZ |
3048 | |
3049 | btrfs_free_path(path); | |
3050 | return 0; | |
3051 | } | |
3052 | ||
39279cc3 CM |
3053 | static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) |
3054 | { | |
3055 | struct inode *inode = dentry->d_inode; | |
1832a6d5 | 3056 | int err = 0; |
39279cc3 | 3057 | struct btrfs_root *root = BTRFS_I(dir)->root; |
39279cc3 | 3058 | struct btrfs_trans_handle *trans; |
1832a6d5 | 3059 | unsigned long nr = 0; |
39279cc3 | 3060 | |
3394e160 | 3061 | if (inode->i_size > BTRFS_EMPTY_DIR_SIZE || |
4df27c4d | 3062 | inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) |
134d4512 Y |
3063 | return -ENOTEMPTY; |
3064 | ||
a22285a6 YZ |
3065 | trans = __unlink_start_trans(dir, dentry); |
3066 | if (IS_ERR(trans)) | |
5df6a9f6 | 3067 | return PTR_ERR(trans); |
5df6a9f6 | 3068 | |
39279cc3 | 3069 | btrfs_set_trans_block_group(trans, dir); |
39279cc3 | 3070 | |
4df27c4d YZ |
3071 | if (unlikely(inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { |
3072 | err = btrfs_unlink_subvol(trans, root, dir, | |
3073 | BTRFS_I(inode)->location.objectid, | |
3074 | dentry->d_name.name, | |
3075 | dentry->d_name.len); | |
3076 | goto out; | |
3077 | } | |
3078 | ||
7b128766 JB |
3079 | err = btrfs_orphan_add(trans, inode); |
3080 | if (err) | |
4df27c4d | 3081 | goto out; |
7b128766 | 3082 | |
39279cc3 | 3083 | /* now the directory is empty */ |
e02119d5 CM |
3084 | err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode, |
3085 | dentry->d_name.name, dentry->d_name.len); | |
d397712b | 3086 | if (!err) |
dbe674a9 | 3087 | btrfs_i_size_write(inode, 0); |
4df27c4d | 3088 | out: |
d3c2fdcf | 3089 | nr = trans->blocks_used; |
a22285a6 | 3090 | __unlink_end_trans(trans, root); |
d3c2fdcf | 3091 | btrfs_btree_balance_dirty(root, nr); |
3954401f | 3092 | |
39279cc3 CM |
3093 | return err; |
3094 | } | |
3095 | ||
d20f7043 | 3096 | #if 0 |
323ac95b CM |
3097 | /* |
3098 | * when truncating bytes in a file, it is possible to avoid reading | |
3099 | * the leaves that contain only checksum items. This can be the | |
3100 | * majority of the IO required to delete a large file, but it must | |
3101 | * be done carefully. | |
3102 | * | |
3103 | * The keys in the level just above the leaves are checked to make sure | |
3104 | * the lowest key in a given leaf is a csum key, and starts at an offset | |
3105 | * after the new size. | |
3106 | * | |
3107 | * Then the key for the next leaf is checked to make sure it also has | |
3108 | * a checksum item for the same file. If it does, we know our target leaf | |
3109 | * contains only checksum items, and it can be safely freed without reading | |
3110 | * it. | |
3111 | * | |
3112 | * This is just an optimization targeted at large files. It may do | |
3113 | * nothing. It will return 0 unless things went badly. | |
3114 | */ | |
3115 | static noinline int drop_csum_leaves(struct btrfs_trans_handle *trans, | |
3116 | struct btrfs_root *root, | |
3117 | struct btrfs_path *path, | |
3118 | struct inode *inode, u64 new_size) | |
3119 | { | |
3120 | struct btrfs_key key; | |
3121 | int ret; | |
3122 | int nritems; | |
3123 | struct btrfs_key found_key; | |
3124 | struct btrfs_key other_key; | |
5b84e8d6 YZ |
3125 | struct btrfs_leaf_ref *ref; |
3126 | u64 leaf_gen; | |
3127 | u64 leaf_start; | |
323ac95b CM |
3128 | |
3129 | path->lowest_level = 1; | |
3130 | key.objectid = inode->i_ino; | |
3131 | key.type = BTRFS_CSUM_ITEM_KEY; | |
3132 | key.offset = new_size; | |
3133 | again: | |
3134 | ret = btrfs_search_slot(trans, root, &key, path, -1, 1); | |
3135 | if (ret < 0) | |
3136 | goto out; | |
3137 | ||
3138 | if (path->nodes[1] == NULL) { | |
3139 | ret = 0; | |
3140 | goto out; | |
3141 | } | |
3142 | ret = 0; | |
3143 | btrfs_node_key_to_cpu(path->nodes[1], &found_key, path->slots[1]); | |
3144 | nritems = btrfs_header_nritems(path->nodes[1]); | |
3145 | ||
3146 | if (!nritems) | |
3147 | goto out; | |
3148 | ||
3149 | if (path->slots[1] >= nritems) | |
3150 | goto next_node; | |
3151 | ||
3152 | /* did we find a key greater than anything we want to delete? */ | |
3153 | if (found_key.objectid > inode->i_ino || | |
3154 | (found_key.objectid == inode->i_ino && found_key.type > key.type)) | |
3155 | goto out; | |
3156 | ||
3157 | /* we check the next key in the node to make sure the leave contains | |
3158 | * only checksum items. This comparison doesn't work if our | |
3159 | * leaf is the last one in the node | |
3160 | */ | |
3161 | if (path->slots[1] + 1 >= nritems) { | |
3162 | next_node: | |
3163 | /* search forward from the last key in the node, this | |
3164 | * will bring us into the next node in the tree | |
3165 | */ | |
3166 | btrfs_node_key_to_cpu(path->nodes[1], &found_key, nritems - 1); | |
3167 | ||
3168 | /* unlikely, but we inc below, so check to be safe */ | |
3169 | if (found_key.offset == (u64)-1) | |
3170 | goto out; | |
3171 | ||
3172 | /* search_forward needs a path with locks held, do the | |
3173 | * search again for the original key. It is possible | |
3174 | * this will race with a balance and return a path that | |
3175 | * we could modify, but this drop is just an optimization | |
3176 | * and is allowed to miss some leaves. | |
3177 | */ | |
3178 | btrfs_release_path(root, path); | |
3179 | found_key.offset++; | |
3180 | ||
3181 | /* setup a max key for search_forward */ | |
3182 | other_key.offset = (u64)-1; | |
3183 | other_key.type = key.type; | |
3184 | other_key.objectid = key.objectid; | |
3185 | ||
3186 | path->keep_locks = 1; | |
3187 | ret = btrfs_search_forward(root, &found_key, &other_key, | |
3188 | path, 0, 0); | |
3189 | path->keep_locks = 0; | |
3190 | if (ret || found_key.objectid != key.objectid || | |
3191 | found_key.type != key.type) { | |
3192 | ret = 0; | |
3193 | goto out; | |
3194 | } | |
3195 | ||
3196 | key.offset = found_key.offset; | |
3197 | btrfs_release_path(root, path); | |
3198 | cond_resched(); | |
3199 | goto again; | |
3200 | } | |
3201 | ||
3202 | /* we know there's one more slot after us in the tree, | |
3203 | * read that key so we can verify it is also a checksum item | |
3204 | */ | |
3205 | btrfs_node_key_to_cpu(path->nodes[1], &other_key, path->slots[1] + 1); | |
3206 | ||
3207 | if (found_key.objectid < inode->i_ino) | |
3208 | goto next_key; | |
3209 | ||
3210 | if (found_key.type != key.type || found_key.offset < new_size) | |
3211 | goto next_key; | |
3212 | ||
3213 | /* | |
3214 | * if the key for the next leaf isn't a csum key from this objectid, | |
3215 | * we can't be sure there aren't good items inside this leaf. | |
3216 | * Bail out | |
3217 | */ | |
3218 | if (other_key.objectid != inode->i_ino || other_key.type != key.type) | |
3219 | goto out; | |
3220 | ||
5b84e8d6 YZ |
3221 | leaf_start = btrfs_node_blockptr(path->nodes[1], path->slots[1]); |
3222 | leaf_gen = btrfs_node_ptr_generation(path->nodes[1], path->slots[1]); | |
323ac95b CM |
3223 | /* |
3224 | * it is safe to delete this leaf, it contains only | |
3225 | * csum items from this inode at an offset >= new_size | |
3226 | */ | |
5b84e8d6 | 3227 | ret = btrfs_del_leaf(trans, root, path, leaf_start); |
323ac95b CM |
3228 | BUG_ON(ret); |
3229 | ||
5b84e8d6 YZ |
3230 | if (root->ref_cows && leaf_gen < trans->transid) { |
3231 | ref = btrfs_alloc_leaf_ref(root, 0); | |
3232 | if (ref) { | |
3233 | ref->root_gen = root->root_key.offset; | |
3234 | ref->bytenr = leaf_start; | |
3235 | ref->owner = 0; | |
3236 | ref->generation = leaf_gen; | |
3237 | ref->nritems = 0; | |
3238 | ||
bd56b302 CM |
3239 | btrfs_sort_leaf_ref(ref); |
3240 | ||
5b84e8d6 YZ |
3241 | ret = btrfs_add_leaf_ref(root, ref, 0); |
3242 | WARN_ON(ret); | |
3243 | btrfs_free_leaf_ref(root, ref); | |
3244 | } else { | |
3245 | WARN_ON(1); | |
3246 | } | |
3247 | } | |
323ac95b CM |
3248 | next_key: |
3249 | btrfs_release_path(root, path); | |
3250 | ||
3251 | if (other_key.objectid == inode->i_ino && | |
3252 | other_key.type == key.type && other_key.offset > key.offset) { | |
3253 | key.offset = other_key.offset; | |
3254 | cond_resched(); | |
3255 | goto again; | |
3256 | } | |
3257 | ret = 0; | |
3258 | out: | |
3259 | /* fixup any changes we've made to the path */ | |
3260 | path->lowest_level = 0; | |
3261 | path->keep_locks = 0; | |
3262 | btrfs_release_path(root, path); | |
3263 | return ret; | |
3264 | } | |
3265 | ||
d20f7043 CM |
3266 | #endif |
3267 | ||
39279cc3 CM |
3268 | /* |
3269 | * this can truncate away extent items, csum items and directory items. | |
3270 | * It starts at a high offset and removes keys until it can't find | |
d352ac68 | 3271 | * any higher than new_size |
39279cc3 CM |
3272 | * |
3273 | * csum items that cross the new i_size are truncated to the new size | |
3274 | * as well. | |
7b128766 JB |
3275 | * |
3276 | * min_type is the minimum key type to truncate down to. If set to 0, this | |
3277 | * will kill all the items on this inode, including the INODE_ITEM_KEY. | |
39279cc3 | 3278 | */ |
8082510e YZ |
3279 | int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, |
3280 | struct btrfs_root *root, | |
3281 | struct inode *inode, | |
3282 | u64 new_size, u32 min_type) | |
39279cc3 | 3283 | { |
39279cc3 | 3284 | struct btrfs_path *path; |
5f39d397 | 3285 | struct extent_buffer *leaf; |
39279cc3 | 3286 | struct btrfs_file_extent_item *fi; |
8082510e YZ |
3287 | struct btrfs_key key; |
3288 | struct btrfs_key found_key; | |
39279cc3 | 3289 | u64 extent_start = 0; |
db94535d | 3290 | u64 extent_num_bytes = 0; |
5d4f98a2 | 3291 | u64 extent_offset = 0; |
39279cc3 | 3292 | u64 item_end = 0; |
8082510e YZ |
3293 | u64 mask = root->sectorsize - 1; |
3294 | u32 found_type = (u8)-1; | |
39279cc3 CM |
3295 | int found_extent; |
3296 | int del_item; | |
85e21bac CM |
3297 | int pending_del_nr = 0; |
3298 | int pending_del_slot = 0; | |
179e29e4 | 3299 | int extent_type = -1; |
771ed689 | 3300 | int encoding; |
8082510e YZ |
3301 | int ret; |
3302 | int err = 0; | |
3303 | ||
3304 | BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY); | |
39279cc3 | 3305 | |
0af3d00b | 3306 | if (root->ref_cows || root == root->fs_info->tree_root) |
5b21f2ed | 3307 | btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0); |
8082510e | 3308 | |
39279cc3 CM |
3309 | path = btrfs_alloc_path(); |
3310 | BUG_ON(!path); | |
33c17ad5 | 3311 | path->reada = -1; |
5f39d397 | 3312 | |
39279cc3 CM |
3313 | key.objectid = inode->i_ino; |
3314 | key.offset = (u64)-1; | |
5f39d397 CM |
3315 | key.type = (u8)-1; |
3316 | ||
85e21bac | 3317 | search_again: |
b9473439 | 3318 | path->leave_spinning = 1; |
85e21bac | 3319 | ret = btrfs_search_slot(trans, root, &key, path, -1, 1); |
8082510e YZ |
3320 | if (ret < 0) { |
3321 | err = ret; | |
3322 | goto out; | |
3323 | } | |
d397712b | 3324 | |
85e21bac | 3325 | if (ret > 0) { |
e02119d5 CM |
3326 | /* there are no items in the tree for us to truncate, we're |
3327 | * done | |
3328 | */ | |
8082510e YZ |
3329 | if (path->slots[0] == 0) |
3330 | goto out; | |
85e21bac CM |
3331 | path->slots[0]--; |
3332 | } | |
3333 | ||
d397712b | 3334 | while (1) { |
39279cc3 | 3335 | fi = NULL; |
5f39d397 CM |
3336 | leaf = path->nodes[0]; |
3337 | btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); | |
3338 | found_type = btrfs_key_type(&found_key); | |
771ed689 | 3339 | encoding = 0; |
39279cc3 | 3340 | |
5f39d397 | 3341 | if (found_key.objectid != inode->i_ino) |
39279cc3 | 3342 | break; |
5f39d397 | 3343 | |
85e21bac | 3344 | if (found_type < min_type) |
39279cc3 CM |
3345 | break; |
3346 | ||
5f39d397 | 3347 | item_end = found_key.offset; |
39279cc3 | 3348 | if (found_type == BTRFS_EXTENT_DATA_KEY) { |
5f39d397 | 3349 | fi = btrfs_item_ptr(leaf, path->slots[0], |
39279cc3 | 3350 | struct btrfs_file_extent_item); |
179e29e4 | 3351 | extent_type = btrfs_file_extent_type(leaf, fi); |
771ed689 CM |
3352 | encoding = btrfs_file_extent_compression(leaf, fi); |
3353 | encoding |= btrfs_file_extent_encryption(leaf, fi); | |
3354 | encoding |= btrfs_file_extent_other_encoding(leaf, fi); | |
3355 | ||
179e29e4 | 3356 | if (extent_type != BTRFS_FILE_EXTENT_INLINE) { |
5f39d397 | 3357 | item_end += |
db94535d | 3358 | btrfs_file_extent_num_bytes(leaf, fi); |
179e29e4 | 3359 | } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { |
179e29e4 | 3360 | item_end += btrfs_file_extent_inline_len(leaf, |
c8b97818 | 3361 | fi); |
39279cc3 | 3362 | } |
008630c1 | 3363 | item_end--; |
39279cc3 | 3364 | } |
8082510e YZ |
3365 | if (found_type > min_type) { |
3366 | del_item = 1; | |
3367 | } else { | |
3368 | if (item_end < new_size) | |
b888db2b | 3369 | break; |
8082510e YZ |
3370 | if (found_key.offset >= new_size) |
3371 | del_item = 1; | |
3372 | else | |
3373 | del_item = 0; | |
39279cc3 | 3374 | } |
39279cc3 | 3375 | found_extent = 0; |
39279cc3 | 3376 | /* FIXME, shrink the extent if the ref count is only 1 */ |
179e29e4 CM |
3377 | if (found_type != BTRFS_EXTENT_DATA_KEY) |
3378 | goto delete; | |
3379 | ||
3380 | if (extent_type != BTRFS_FILE_EXTENT_INLINE) { | |
39279cc3 | 3381 | u64 num_dec; |
db94535d | 3382 | extent_start = btrfs_file_extent_disk_bytenr(leaf, fi); |
771ed689 | 3383 | if (!del_item && !encoding) { |
db94535d CM |
3384 | u64 orig_num_bytes = |
3385 | btrfs_file_extent_num_bytes(leaf, fi); | |
e02119d5 | 3386 | extent_num_bytes = new_size - |
5f39d397 | 3387 | found_key.offset + root->sectorsize - 1; |
b1632b10 Y |
3388 | extent_num_bytes = extent_num_bytes & |
3389 | ~((u64)root->sectorsize - 1); | |
db94535d CM |
3390 | btrfs_set_file_extent_num_bytes(leaf, fi, |
3391 | extent_num_bytes); | |
3392 | num_dec = (orig_num_bytes - | |
9069218d | 3393 | extent_num_bytes); |
e02119d5 | 3394 | if (root->ref_cows && extent_start != 0) |
a76a3cd4 | 3395 | inode_sub_bytes(inode, num_dec); |
5f39d397 | 3396 | btrfs_mark_buffer_dirty(leaf); |
39279cc3 | 3397 | } else { |
db94535d CM |
3398 | extent_num_bytes = |
3399 | btrfs_file_extent_disk_num_bytes(leaf, | |
3400 | fi); | |
5d4f98a2 YZ |
3401 | extent_offset = found_key.offset - |
3402 | btrfs_file_extent_offset(leaf, fi); | |
3403 | ||
39279cc3 | 3404 | /* FIXME blocksize != 4096 */ |
9069218d | 3405 | num_dec = btrfs_file_extent_num_bytes(leaf, fi); |
39279cc3 CM |
3406 | if (extent_start != 0) { |
3407 | found_extent = 1; | |
e02119d5 | 3408 | if (root->ref_cows) |
a76a3cd4 | 3409 | inode_sub_bytes(inode, num_dec); |
e02119d5 | 3410 | } |
39279cc3 | 3411 | } |
9069218d | 3412 | } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { |
c8b97818 CM |
3413 | /* |
3414 | * we can't truncate inline items that have had | |
3415 | * special encodings | |
3416 | */ | |
3417 | if (!del_item && | |
3418 | btrfs_file_extent_compression(leaf, fi) == 0 && | |
3419 | btrfs_file_extent_encryption(leaf, fi) == 0 && | |
3420 | btrfs_file_extent_other_encoding(leaf, fi) == 0) { | |
e02119d5 CM |
3421 | u32 size = new_size - found_key.offset; |
3422 | ||
3423 | if (root->ref_cows) { | |
a76a3cd4 YZ |
3424 | inode_sub_bytes(inode, item_end + 1 - |
3425 | new_size); | |
e02119d5 CM |
3426 | } |
3427 | size = | |
3428 | btrfs_file_extent_calc_inline_size(size); | |
9069218d | 3429 | ret = btrfs_truncate_item(trans, root, path, |
e02119d5 | 3430 | size, 1); |
9069218d | 3431 | BUG_ON(ret); |
e02119d5 | 3432 | } else if (root->ref_cows) { |
a76a3cd4 YZ |
3433 | inode_sub_bytes(inode, item_end + 1 - |
3434 | found_key.offset); | |
9069218d | 3435 | } |
39279cc3 | 3436 | } |
179e29e4 | 3437 | delete: |
39279cc3 | 3438 | if (del_item) { |
85e21bac CM |
3439 | if (!pending_del_nr) { |
3440 | /* no pending yet, add ourselves */ | |
3441 | pending_del_slot = path->slots[0]; | |
3442 | pending_del_nr = 1; | |
3443 | } else if (pending_del_nr && | |
3444 | path->slots[0] + 1 == pending_del_slot) { | |
3445 | /* hop on the pending chunk */ | |
3446 | pending_del_nr++; | |
3447 | pending_del_slot = path->slots[0]; | |
3448 | } else { | |
d397712b | 3449 | BUG(); |
85e21bac | 3450 | } |
39279cc3 CM |
3451 | } else { |
3452 | break; | |
3453 | } | |
0af3d00b JB |
3454 | if (found_extent && (root->ref_cows || |
3455 | root == root->fs_info->tree_root)) { | |
b9473439 | 3456 | btrfs_set_path_blocking(path); |
39279cc3 | 3457 | ret = btrfs_free_extent(trans, root, extent_start, |
5d4f98a2 YZ |
3458 | extent_num_bytes, 0, |
3459 | btrfs_header_owner(leaf), | |
3460 | inode->i_ino, extent_offset); | |
39279cc3 CM |
3461 | BUG_ON(ret); |
3462 | } | |
85e21bac | 3463 | |
8082510e YZ |
3464 | if (found_type == BTRFS_INODE_ITEM_KEY) |
3465 | break; | |
3466 | ||
3467 | if (path->slots[0] == 0 || | |
3468 | path->slots[0] != pending_del_slot) { | |
3469 | if (root->ref_cows) { | |
3470 | err = -EAGAIN; | |
3471 | goto out; | |
3472 | } | |
3473 | if (pending_del_nr) { | |
3474 | ret = btrfs_del_items(trans, root, path, | |
3475 | pending_del_slot, | |
3476 | pending_del_nr); | |
3477 | BUG_ON(ret); | |
3478 | pending_del_nr = 0; | |
3479 | } | |
85e21bac CM |
3480 | btrfs_release_path(root, path); |
3481 | goto search_again; | |
8082510e YZ |
3482 | } else { |
3483 | path->slots[0]--; | |
85e21bac | 3484 | } |
39279cc3 | 3485 | } |
8082510e | 3486 | out: |
85e21bac CM |
3487 | if (pending_del_nr) { |
3488 | ret = btrfs_del_items(trans, root, path, pending_del_slot, | |
3489 | pending_del_nr); | |
d68fc57b | 3490 | BUG_ON(ret); |
85e21bac | 3491 | } |
39279cc3 | 3492 | btrfs_free_path(path); |
8082510e | 3493 | return err; |
39279cc3 CM |
3494 | } |
3495 | ||
3496 | /* | |
3497 | * taken from block_truncate_page, but does cow as it zeros out | |
3498 | * any bytes left in the last page in the file. | |
3499 | */ | |
3500 | static int btrfs_truncate_page(struct address_space *mapping, loff_t from) | |
3501 | { | |
3502 | struct inode *inode = mapping->host; | |
db94535d | 3503 | struct btrfs_root *root = BTRFS_I(inode)->root; |
e6dcd2dc CM |
3504 | struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; |
3505 | struct btrfs_ordered_extent *ordered; | |
2ac55d41 | 3506 | struct extent_state *cached_state = NULL; |
e6dcd2dc | 3507 | char *kaddr; |
db94535d | 3508 | u32 blocksize = root->sectorsize; |
39279cc3 CM |
3509 | pgoff_t index = from >> PAGE_CACHE_SHIFT; |
3510 | unsigned offset = from & (PAGE_CACHE_SIZE-1); | |
3511 | struct page *page; | |
39279cc3 | 3512 | int ret = 0; |
a52d9a80 | 3513 | u64 page_start; |
e6dcd2dc | 3514 | u64 page_end; |
39279cc3 CM |
3515 | |
3516 | if ((offset & (blocksize - 1)) == 0) | |
3517 | goto out; | |
0ca1f7ce | 3518 | ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE); |
5d5e103a JB |
3519 | if (ret) |
3520 | goto out; | |
39279cc3 CM |
3521 | |
3522 | ret = -ENOMEM; | |
211c17f5 | 3523 | again: |
39279cc3 | 3524 | page = grab_cache_page(mapping, index); |
5d5e103a | 3525 | if (!page) { |
0ca1f7ce | 3526 | btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE); |
39279cc3 | 3527 | goto out; |
5d5e103a | 3528 | } |
e6dcd2dc CM |
3529 | |
3530 | page_start = page_offset(page); | |
3531 | page_end = page_start + PAGE_CACHE_SIZE - 1; | |
3532 | ||
39279cc3 | 3533 | if (!PageUptodate(page)) { |
9ebefb18 | 3534 | ret = btrfs_readpage(NULL, page); |
39279cc3 | 3535 | lock_page(page); |
211c17f5 CM |
3536 | if (page->mapping != mapping) { |
3537 | unlock_page(page); | |
3538 | page_cache_release(page); | |
3539 | goto again; | |
3540 | } | |
39279cc3 CM |
3541 | if (!PageUptodate(page)) { |
3542 | ret = -EIO; | |
89642229 | 3543 | goto out_unlock; |
39279cc3 CM |
3544 | } |
3545 | } | |
211c17f5 | 3546 | wait_on_page_writeback(page); |
e6dcd2dc | 3547 | |
2ac55d41 JB |
3548 | lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state, |
3549 | GFP_NOFS); | |
e6dcd2dc CM |
3550 | set_page_extent_mapped(page); |
3551 | ||
3552 | ordered = btrfs_lookup_ordered_extent(inode, page_start); | |
3553 | if (ordered) { | |
2ac55d41 JB |
3554 | unlock_extent_cached(io_tree, page_start, page_end, |
3555 | &cached_state, GFP_NOFS); | |
e6dcd2dc CM |
3556 | unlock_page(page); |
3557 | page_cache_release(page); | |
eb84ae03 | 3558 | btrfs_start_ordered_extent(inode, ordered, 1); |
e6dcd2dc CM |
3559 | btrfs_put_ordered_extent(ordered); |
3560 | goto again; | |
3561 | } | |
3562 | ||
2ac55d41 | 3563 | clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end, |
5d5e103a | 3564 | EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING, |
2ac55d41 | 3565 | 0, 0, &cached_state, GFP_NOFS); |
5d5e103a | 3566 | |
2ac55d41 JB |
3567 | ret = btrfs_set_extent_delalloc(inode, page_start, page_end, |
3568 | &cached_state); | |
9ed74f2d | 3569 | if (ret) { |
2ac55d41 JB |
3570 | unlock_extent_cached(io_tree, page_start, page_end, |
3571 | &cached_state, GFP_NOFS); | |
9ed74f2d JB |
3572 | goto out_unlock; |
3573 | } | |
3574 | ||
e6dcd2dc CM |
3575 | ret = 0; |
3576 | if (offset != PAGE_CACHE_SIZE) { | |
3577 | kaddr = kmap(page); | |
3578 | memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset); | |
3579 | flush_dcache_page(page); | |
3580 | kunmap(page); | |
3581 | } | |
247e743c | 3582 | ClearPageChecked(page); |
e6dcd2dc | 3583 | set_page_dirty(page); |
2ac55d41 JB |
3584 | unlock_extent_cached(io_tree, page_start, page_end, &cached_state, |
3585 | GFP_NOFS); | |
39279cc3 | 3586 | |
89642229 | 3587 | out_unlock: |
5d5e103a | 3588 | if (ret) |
0ca1f7ce | 3589 | btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE); |
39279cc3 CM |
3590 | unlock_page(page); |
3591 | page_cache_release(page); | |
3592 | out: | |
3593 | return ret; | |
3594 | } | |
3595 | ||
695a0d0d JB |
3596 | /* |
3597 | * This function puts in dummy file extents for the area we're creating a hole | |
3598 | * for. So if we are truncating this file to a larger size we need to insert | |
3599 | * these file extents so that btrfs_get_extent will return a EXTENT_MAP_HOLE for | |
3600 | * the range between oldsize and size | |
3601 | */ | |
a41ad394 | 3602 | int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size) |
39279cc3 | 3603 | { |
9036c102 YZ |
3604 | struct btrfs_trans_handle *trans; |
3605 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
3606 | struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; | |
a22285a6 | 3607 | struct extent_map *em = NULL; |
2ac55d41 | 3608 | struct extent_state *cached_state = NULL; |
9036c102 | 3609 | u64 mask = root->sectorsize - 1; |
a41ad394 | 3610 | u64 hole_start = (oldsize + mask) & ~mask; |
9036c102 YZ |
3611 | u64 block_end = (size + mask) & ~mask; |
3612 | u64 last_byte; | |
3613 | u64 cur_offset; | |
3614 | u64 hole_size; | |
9ed74f2d | 3615 | int err = 0; |
39279cc3 | 3616 | |
9036c102 YZ |
3617 | if (size <= hole_start) |
3618 | return 0; | |
3619 | ||
9036c102 YZ |
3620 | while (1) { |
3621 | struct btrfs_ordered_extent *ordered; | |
3622 | btrfs_wait_ordered_range(inode, hole_start, | |
3623 | block_end - hole_start); | |
2ac55d41 JB |
3624 | lock_extent_bits(io_tree, hole_start, block_end - 1, 0, |
3625 | &cached_state, GFP_NOFS); | |
9036c102 YZ |
3626 | ordered = btrfs_lookup_ordered_extent(inode, hole_start); |
3627 | if (!ordered) | |
3628 | break; | |
2ac55d41 JB |
3629 | unlock_extent_cached(io_tree, hole_start, block_end - 1, |
3630 | &cached_state, GFP_NOFS); | |
9036c102 YZ |
3631 | btrfs_put_ordered_extent(ordered); |
3632 | } | |
39279cc3 | 3633 | |
9036c102 YZ |
3634 | cur_offset = hole_start; |
3635 | while (1) { | |
3636 | em = btrfs_get_extent(inode, NULL, 0, cur_offset, | |
3637 | block_end - cur_offset, 0); | |
3638 | BUG_ON(IS_ERR(em) || !em); | |
3639 | last_byte = min(extent_map_end(em), block_end); | |
3640 | last_byte = (last_byte + mask) & ~mask; | |
8082510e | 3641 | if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) { |
771ed689 | 3642 | u64 hint_byte = 0; |
9036c102 | 3643 | hole_size = last_byte - cur_offset; |
9ed74f2d | 3644 | |
a22285a6 YZ |
3645 | trans = btrfs_start_transaction(root, 2); |
3646 | if (IS_ERR(trans)) { | |
3647 | err = PTR_ERR(trans); | |
9ed74f2d | 3648 | break; |
a22285a6 | 3649 | } |
8082510e YZ |
3650 | btrfs_set_trans_block_group(trans, inode); |
3651 | ||
3652 | err = btrfs_drop_extents(trans, inode, cur_offset, | |
3653 | cur_offset + hole_size, | |
3654 | &hint_byte, 1); | |
3893e33b JB |
3655 | if (err) |
3656 | break; | |
8082510e | 3657 | |
9036c102 YZ |
3658 | err = btrfs_insert_file_extent(trans, root, |
3659 | inode->i_ino, cur_offset, 0, | |
3660 | 0, hole_size, 0, hole_size, | |
3661 | 0, 0, 0); | |
3893e33b JB |
3662 | if (err) |
3663 | break; | |
8082510e | 3664 | |
9036c102 YZ |
3665 | btrfs_drop_extent_cache(inode, hole_start, |
3666 | last_byte - 1, 0); | |
8082510e YZ |
3667 | |
3668 | btrfs_end_transaction(trans, root); | |
9036c102 YZ |
3669 | } |
3670 | free_extent_map(em); | |
a22285a6 | 3671 | em = NULL; |
9036c102 | 3672 | cur_offset = last_byte; |
8082510e | 3673 | if (cur_offset >= block_end) |
9036c102 YZ |
3674 | break; |
3675 | } | |
1832a6d5 | 3676 | |
a22285a6 | 3677 | free_extent_map(em); |
2ac55d41 JB |
3678 | unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state, |
3679 | GFP_NOFS); | |
9036c102 YZ |
3680 | return err; |
3681 | } | |
39279cc3 | 3682 | |
a41ad394 | 3683 | static int btrfs_setsize(struct inode *inode, loff_t newsize) |
8082510e | 3684 | { |
a41ad394 | 3685 | loff_t oldsize = i_size_read(inode); |
8082510e YZ |
3686 | int ret; |
3687 | ||
a41ad394 | 3688 | if (newsize == oldsize) |
8082510e YZ |
3689 | return 0; |
3690 | ||
a41ad394 JB |
3691 | if (newsize > oldsize) { |
3692 | i_size_write(inode, newsize); | |
3693 | btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL); | |
3694 | truncate_pagecache(inode, oldsize, newsize); | |
3695 | ret = btrfs_cont_expand(inode, oldsize, newsize); | |
8082510e | 3696 | if (ret) { |
a41ad394 | 3697 | btrfs_setsize(inode, oldsize); |
8082510e YZ |
3698 | return ret; |
3699 | } | |
3700 | ||
930f028a | 3701 | mark_inode_dirty(inode); |
a41ad394 | 3702 | } else { |
8082510e | 3703 | |
a41ad394 JB |
3704 | /* |
3705 | * We're truncating a file that used to have good data down to | |
3706 | * zero. Make sure it gets into the ordered flush list so that | |
3707 | * any new writes get down to disk quickly. | |
3708 | */ | |
3709 | if (newsize == 0) | |
3710 | BTRFS_I(inode)->ordered_data_close = 1; | |
8082510e | 3711 | |
a41ad394 JB |
3712 | /* we don't support swapfiles, so vmtruncate shouldn't fail */ |
3713 | truncate_setsize(inode, newsize); | |
3714 | ret = btrfs_truncate(inode); | |
8082510e YZ |
3715 | } |
3716 | ||
a41ad394 | 3717 | return ret; |
8082510e YZ |
3718 | } |
3719 | ||
9036c102 YZ |
3720 | static int btrfs_setattr(struct dentry *dentry, struct iattr *attr) |
3721 | { | |
3722 | struct inode *inode = dentry->d_inode; | |
b83cc969 | 3723 | struct btrfs_root *root = BTRFS_I(inode)->root; |
9036c102 | 3724 | int err; |
39279cc3 | 3725 | |
b83cc969 LZ |
3726 | if (btrfs_root_readonly(root)) |
3727 | return -EROFS; | |
3728 | ||
9036c102 YZ |
3729 | err = inode_change_ok(inode, attr); |
3730 | if (err) | |
3731 | return err; | |
2bf5a725 | 3732 | |
5a3f23d5 | 3733 | if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { |
a41ad394 | 3734 | err = btrfs_setsize(inode, attr->ia_size); |
8082510e YZ |
3735 | if (err) |
3736 | return err; | |
39279cc3 | 3737 | } |
9036c102 | 3738 | |
1025774c CH |
3739 | if (attr->ia_valid) { |
3740 | setattr_copy(inode, attr); | |
3741 | mark_inode_dirty(inode); | |
3742 | ||
3743 | if (attr->ia_valid & ATTR_MODE) | |
3744 | err = btrfs_acl_chmod(inode); | |
3745 | } | |
33268eaf | 3746 | |
39279cc3 CM |
3747 | return err; |
3748 | } | |
61295eb8 | 3749 | |
bd555975 | 3750 | void btrfs_evict_inode(struct inode *inode) |
39279cc3 CM |
3751 | { |
3752 | struct btrfs_trans_handle *trans; | |
3753 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
d3c2fdcf | 3754 | unsigned long nr; |
39279cc3 CM |
3755 | int ret; |
3756 | ||
1abe9b8a | 3757 | trace_btrfs_inode_evict(inode); |
3758 | ||
39279cc3 | 3759 | truncate_inode_pages(&inode->i_data, 0); |
0af3d00b JB |
3760 | if (inode->i_nlink && (btrfs_root_refs(&root->root_item) != 0 || |
3761 | root == root->fs_info->tree_root)) | |
bd555975 AV |
3762 | goto no_delete; |
3763 | ||
39279cc3 | 3764 | if (is_bad_inode(inode)) { |
7b128766 | 3765 | btrfs_orphan_del(NULL, inode); |
39279cc3 CM |
3766 | goto no_delete; |
3767 | } | |
bd555975 | 3768 | /* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */ |
4a096752 | 3769 | btrfs_wait_ordered_range(inode, 0, (u64)-1); |
5f39d397 | 3770 | |
c71bf099 YZ |
3771 | if (root->fs_info->log_root_recovering) { |
3772 | BUG_ON(!list_empty(&BTRFS_I(inode)->i_orphan)); | |
3773 | goto no_delete; | |
3774 | } | |
3775 | ||
76dda93c YZ |
3776 | if (inode->i_nlink > 0) { |
3777 | BUG_ON(btrfs_root_refs(&root->root_item) != 0); | |
3778 | goto no_delete; | |
3779 | } | |
3780 | ||
dbe674a9 | 3781 | btrfs_i_size_write(inode, 0); |
5f39d397 | 3782 | |
8082510e | 3783 | while (1) { |
d68fc57b YZ |
3784 | trans = btrfs_start_transaction(root, 0); |
3785 | BUG_ON(IS_ERR(trans)); | |
8082510e | 3786 | btrfs_set_trans_block_group(trans, inode); |
d68fc57b YZ |
3787 | trans->block_rsv = root->orphan_block_rsv; |
3788 | ||
3789 | ret = btrfs_block_rsv_check(trans, root, | |
3790 | root->orphan_block_rsv, 0, 5); | |
3791 | if (ret) { | |
3792 | BUG_ON(ret != -EAGAIN); | |
3793 | ret = btrfs_commit_transaction(trans, root); | |
3794 | BUG_ON(ret); | |
3795 | continue; | |
3796 | } | |
7b128766 | 3797 | |
d68fc57b | 3798 | ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0); |
8082510e YZ |
3799 | if (ret != -EAGAIN) |
3800 | break; | |
85e21bac | 3801 | |
8082510e YZ |
3802 | nr = trans->blocks_used; |
3803 | btrfs_end_transaction(trans, root); | |
3804 | trans = NULL; | |
3805 | btrfs_btree_balance_dirty(root, nr); | |
d68fc57b | 3806 | |
8082510e | 3807 | } |
5f39d397 | 3808 | |
8082510e YZ |
3809 | if (ret == 0) { |
3810 | ret = btrfs_orphan_del(trans, inode); | |
3811 | BUG_ON(ret); | |
3812 | } | |
54aa1f4d | 3813 | |
d3c2fdcf | 3814 | nr = trans->blocks_used; |
54aa1f4d | 3815 | btrfs_end_transaction(trans, root); |
d3c2fdcf | 3816 | btrfs_btree_balance_dirty(root, nr); |
39279cc3 | 3817 | no_delete: |
bd555975 | 3818 | end_writeback(inode); |
8082510e | 3819 | return; |
39279cc3 CM |
3820 | } |
3821 | ||
3822 | /* | |
3823 | * this returns the key found in the dir entry in the location pointer. | |
3824 | * If no dir entries were found, location->objectid is 0. | |
3825 | */ | |
3826 | static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry, | |
3827 | struct btrfs_key *location) | |
3828 | { | |
3829 | const char *name = dentry->d_name.name; | |
3830 | int namelen = dentry->d_name.len; | |
3831 | struct btrfs_dir_item *di; | |
3832 | struct btrfs_path *path; | |
3833 | struct btrfs_root *root = BTRFS_I(dir)->root; | |
0d9f7f3e | 3834 | int ret = 0; |
39279cc3 CM |
3835 | |
3836 | path = btrfs_alloc_path(); | |
3837 | BUG_ON(!path); | |
3954401f | 3838 | |
39279cc3 CM |
3839 | di = btrfs_lookup_dir_item(NULL, root, path, dir->i_ino, name, |
3840 | namelen, 0); | |
0d9f7f3e Y |
3841 | if (IS_ERR(di)) |
3842 | ret = PTR_ERR(di); | |
d397712b CM |
3843 | |
3844 | if (!di || IS_ERR(di)) | |
3954401f | 3845 | goto out_err; |
d397712b | 3846 | |
5f39d397 | 3847 | btrfs_dir_item_key_to_cpu(path->nodes[0], di, location); |
39279cc3 | 3848 | out: |
39279cc3 CM |
3849 | btrfs_free_path(path); |
3850 | return ret; | |
3954401f CM |
3851 | out_err: |
3852 | location->objectid = 0; | |
3853 | goto out; | |
39279cc3 CM |
3854 | } |
3855 | ||
3856 | /* | |
3857 | * when we hit a tree root in a directory, the btrfs part of the inode | |
3858 | * needs to be changed to reflect the root directory of the tree root. This | |
3859 | * is kind of like crossing a mount point. | |
3860 | */ | |
3861 | static int fixup_tree_root_location(struct btrfs_root *root, | |
4df27c4d YZ |
3862 | struct inode *dir, |
3863 | struct dentry *dentry, | |
3864 | struct btrfs_key *location, | |
3865 | struct btrfs_root **sub_root) | |
39279cc3 | 3866 | { |
4df27c4d YZ |
3867 | struct btrfs_path *path; |
3868 | struct btrfs_root *new_root; | |
3869 | struct btrfs_root_ref *ref; | |
3870 | struct extent_buffer *leaf; | |
3871 | int ret; | |
3872 | int err = 0; | |
39279cc3 | 3873 | |
4df27c4d YZ |
3874 | path = btrfs_alloc_path(); |
3875 | if (!path) { | |
3876 | err = -ENOMEM; | |
3877 | goto out; | |
3878 | } | |
39279cc3 | 3879 | |
4df27c4d YZ |
3880 | err = -ENOENT; |
3881 | ret = btrfs_find_root_ref(root->fs_info->tree_root, path, | |
3882 | BTRFS_I(dir)->root->root_key.objectid, | |
3883 | location->objectid); | |
3884 | if (ret) { | |
3885 | if (ret < 0) | |
3886 | err = ret; | |
3887 | goto out; | |
3888 | } | |
39279cc3 | 3889 | |
4df27c4d YZ |
3890 | leaf = path->nodes[0]; |
3891 | ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); | |
3892 | if (btrfs_root_ref_dirid(leaf, ref) != dir->i_ino || | |
3893 | btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len) | |
3894 | goto out; | |
39279cc3 | 3895 | |
4df27c4d YZ |
3896 | ret = memcmp_extent_buffer(leaf, dentry->d_name.name, |
3897 | (unsigned long)(ref + 1), | |
3898 | dentry->d_name.len); | |
3899 | if (ret) | |
3900 | goto out; | |
3901 | ||
3902 | btrfs_release_path(root->fs_info->tree_root, path); | |
3903 | ||
3904 | new_root = btrfs_read_fs_root_no_name(root->fs_info, location); | |
3905 | if (IS_ERR(new_root)) { | |
3906 | err = PTR_ERR(new_root); | |
3907 | goto out; | |
3908 | } | |
3909 | ||
3910 | if (btrfs_root_refs(&new_root->root_item) == 0) { | |
3911 | err = -ENOENT; | |
3912 | goto out; | |
3913 | } | |
3914 | ||
3915 | *sub_root = new_root; | |
3916 | location->objectid = btrfs_root_dirid(&new_root->root_item); | |
3917 | location->type = BTRFS_INODE_ITEM_KEY; | |
3918 | location->offset = 0; | |
3919 | err = 0; | |
3920 | out: | |
3921 | btrfs_free_path(path); | |
3922 | return err; | |
39279cc3 CM |
3923 | } |
3924 | ||
5d4f98a2 YZ |
3925 | static void inode_tree_add(struct inode *inode) |
3926 | { | |
3927 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
3928 | struct btrfs_inode *entry; | |
03e860bd NP |
3929 | struct rb_node **p; |
3930 | struct rb_node *parent; | |
03e860bd NP |
3931 | again: |
3932 | p = &root->inode_tree.rb_node; | |
3933 | parent = NULL; | |
5d4f98a2 | 3934 | |
1d3382cb | 3935 | if (inode_unhashed(inode)) |
76dda93c YZ |
3936 | return; |
3937 | ||
5d4f98a2 YZ |
3938 | spin_lock(&root->inode_lock); |
3939 | while (*p) { | |
3940 | parent = *p; | |
3941 | entry = rb_entry(parent, struct btrfs_inode, rb_node); | |
3942 | ||
3943 | if (inode->i_ino < entry->vfs_inode.i_ino) | |
03e860bd | 3944 | p = &parent->rb_left; |
5d4f98a2 | 3945 | else if (inode->i_ino > entry->vfs_inode.i_ino) |
03e860bd | 3946 | p = &parent->rb_right; |
5d4f98a2 YZ |
3947 | else { |
3948 | WARN_ON(!(entry->vfs_inode.i_state & | |
a4ffdde6 | 3949 | (I_WILL_FREE | I_FREEING))); |
03e860bd NP |
3950 | rb_erase(parent, &root->inode_tree); |
3951 | RB_CLEAR_NODE(parent); | |
3952 | spin_unlock(&root->inode_lock); | |
3953 | goto again; | |
5d4f98a2 YZ |
3954 | } |
3955 | } | |
3956 | rb_link_node(&BTRFS_I(inode)->rb_node, parent, p); | |
3957 | rb_insert_color(&BTRFS_I(inode)->rb_node, &root->inode_tree); | |
3958 | spin_unlock(&root->inode_lock); | |
3959 | } | |
3960 | ||
3961 | static void inode_tree_del(struct inode *inode) | |
3962 | { | |
3963 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
76dda93c | 3964 | int empty = 0; |
5d4f98a2 | 3965 | |
03e860bd | 3966 | spin_lock(&root->inode_lock); |
5d4f98a2 | 3967 | if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) { |
5d4f98a2 | 3968 | rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree); |
5d4f98a2 | 3969 | RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node); |
76dda93c | 3970 | empty = RB_EMPTY_ROOT(&root->inode_tree); |
5d4f98a2 | 3971 | } |
03e860bd | 3972 | spin_unlock(&root->inode_lock); |
76dda93c | 3973 | |
0af3d00b JB |
3974 | /* |
3975 | * Free space cache has inodes in the tree root, but the tree root has a | |
3976 | * root_refs of 0, so this could end up dropping the tree root as a | |
3977 | * snapshot, so we need the extra !root->fs_info->tree_root check to | |
3978 | * make sure we don't drop it. | |
3979 | */ | |
3980 | if (empty && btrfs_root_refs(&root->root_item) == 0 && | |
3981 | root != root->fs_info->tree_root) { | |
76dda93c YZ |
3982 | synchronize_srcu(&root->fs_info->subvol_srcu); |
3983 | spin_lock(&root->inode_lock); | |
3984 | empty = RB_EMPTY_ROOT(&root->inode_tree); | |
3985 | spin_unlock(&root->inode_lock); | |
3986 | if (empty) | |
3987 | btrfs_add_dead_root(root); | |
3988 | } | |
3989 | } | |
3990 | ||
3991 | int btrfs_invalidate_inodes(struct btrfs_root *root) | |
3992 | { | |
3993 | struct rb_node *node; | |
3994 | struct rb_node *prev; | |
3995 | struct btrfs_inode *entry; | |
3996 | struct inode *inode; | |
3997 | u64 objectid = 0; | |
3998 | ||
3999 | WARN_ON(btrfs_root_refs(&root->root_item) != 0); | |
4000 | ||
4001 | spin_lock(&root->inode_lock); | |
4002 | again: | |
4003 | node = root->inode_tree.rb_node; | |
4004 | prev = NULL; | |
4005 | while (node) { | |
4006 | prev = node; | |
4007 | entry = rb_entry(node, struct btrfs_inode, rb_node); | |
4008 | ||
4009 | if (objectid < entry->vfs_inode.i_ino) | |
4010 | node = node->rb_left; | |
4011 | else if (objectid > entry->vfs_inode.i_ino) | |
4012 | node = node->rb_right; | |
4013 | else | |
4014 | break; | |
4015 | } | |
4016 | if (!node) { | |
4017 | while (prev) { | |
4018 | entry = rb_entry(prev, struct btrfs_inode, rb_node); | |
4019 | if (objectid <= entry->vfs_inode.i_ino) { | |
4020 | node = prev; | |
4021 | break; | |
4022 | } | |
4023 | prev = rb_next(prev); | |
4024 | } | |
4025 | } | |
4026 | while (node) { | |
4027 | entry = rb_entry(node, struct btrfs_inode, rb_node); | |
4028 | objectid = entry->vfs_inode.i_ino + 1; | |
4029 | inode = igrab(&entry->vfs_inode); | |
4030 | if (inode) { | |
4031 | spin_unlock(&root->inode_lock); | |
4032 | if (atomic_read(&inode->i_count) > 1) | |
4033 | d_prune_aliases(inode); | |
4034 | /* | |
45321ac5 | 4035 | * btrfs_drop_inode will have it removed from |
76dda93c YZ |
4036 | * the inode cache when its usage count |
4037 | * hits zero. | |
4038 | */ | |
4039 | iput(inode); | |
4040 | cond_resched(); | |
4041 | spin_lock(&root->inode_lock); | |
4042 | goto again; | |
4043 | } | |
4044 | ||
4045 | if (cond_resched_lock(&root->inode_lock)) | |
4046 | goto again; | |
4047 | ||
4048 | node = rb_next(node); | |
4049 | } | |
4050 | spin_unlock(&root->inode_lock); | |
4051 | return 0; | |
5d4f98a2 YZ |
4052 | } |
4053 | ||
e02119d5 CM |
4054 | static int btrfs_init_locked_inode(struct inode *inode, void *p) |
4055 | { | |
4056 | struct btrfs_iget_args *args = p; | |
4057 | inode->i_ino = args->ino; | |
e02119d5 | 4058 | BTRFS_I(inode)->root = args->root; |
6a63209f | 4059 | btrfs_set_inode_space_info(args->root, inode); |
39279cc3 CM |
4060 | return 0; |
4061 | } | |
4062 | ||
4063 | static int btrfs_find_actor(struct inode *inode, void *opaque) | |
4064 | { | |
4065 | struct btrfs_iget_args *args = opaque; | |
d397712b CM |
4066 | return args->ino == inode->i_ino && |
4067 | args->root == BTRFS_I(inode)->root; | |
39279cc3 CM |
4068 | } |
4069 | ||
5d4f98a2 YZ |
4070 | static struct inode *btrfs_iget_locked(struct super_block *s, |
4071 | u64 objectid, | |
4072 | struct btrfs_root *root) | |
39279cc3 CM |
4073 | { |
4074 | struct inode *inode; | |
4075 | struct btrfs_iget_args args; | |
4076 | args.ino = objectid; | |
4077 | args.root = root; | |
4078 | ||
4079 | inode = iget5_locked(s, objectid, btrfs_find_actor, | |
4080 | btrfs_init_locked_inode, | |
4081 | (void *)&args); | |
4082 | return inode; | |
4083 | } | |
4084 | ||
1a54ef8c BR |
4085 | /* Get an inode object given its location and corresponding root. |
4086 | * Returns in *is_new if the inode was read from disk | |
4087 | */ | |
4088 | struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, | |
73f73415 | 4089 | struct btrfs_root *root, int *new) |
1a54ef8c BR |
4090 | { |
4091 | struct inode *inode; | |
4092 | ||
4093 | inode = btrfs_iget_locked(s, location->objectid, root); | |
4094 | if (!inode) | |
5d4f98a2 | 4095 | return ERR_PTR(-ENOMEM); |
1a54ef8c BR |
4096 | |
4097 | if (inode->i_state & I_NEW) { | |
4098 | BTRFS_I(inode)->root = root; | |
4099 | memcpy(&BTRFS_I(inode)->location, location, sizeof(*location)); | |
4100 | btrfs_read_locked_inode(inode); | |
5d4f98a2 | 4101 | inode_tree_add(inode); |
1a54ef8c | 4102 | unlock_new_inode(inode); |
73f73415 JB |
4103 | if (new) |
4104 | *new = 1; | |
1a54ef8c BR |
4105 | } |
4106 | ||
4107 | return inode; | |
4108 | } | |
4109 | ||
4df27c4d YZ |
4110 | static struct inode *new_simple_dir(struct super_block *s, |
4111 | struct btrfs_key *key, | |
4112 | struct btrfs_root *root) | |
4113 | { | |
4114 | struct inode *inode = new_inode(s); | |
4115 | ||
4116 | if (!inode) | |
4117 | return ERR_PTR(-ENOMEM); | |
4118 | ||
4df27c4d YZ |
4119 | BTRFS_I(inode)->root = root; |
4120 | memcpy(&BTRFS_I(inode)->location, key, sizeof(*key)); | |
4121 | BTRFS_I(inode)->dummy_inode = 1; | |
4122 | ||
4123 | inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID; | |
4124 | inode->i_op = &simple_dir_inode_operations; | |
4125 | inode->i_fop = &simple_dir_operations; | |
4126 | inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO; | |
4127 | inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; | |
4128 | ||
4129 | return inode; | |
4130 | } | |
4131 | ||
3de4586c | 4132 | struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) |
39279cc3 | 4133 | { |
d397712b | 4134 | struct inode *inode; |
4df27c4d | 4135 | struct btrfs_root *root = BTRFS_I(dir)->root; |
39279cc3 CM |
4136 | struct btrfs_root *sub_root = root; |
4137 | struct btrfs_key location; | |
76dda93c | 4138 | int index; |
5d4f98a2 | 4139 | int ret; |
39279cc3 CM |
4140 | |
4141 | if (dentry->d_name.len > BTRFS_NAME_LEN) | |
4142 | return ERR_PTR(-ENAMETOOLONG); | |
5f39d397 | 4143 | |
39279cc3 | 4144 | ret = btrfs_inode_by_name(dir, dentry, &location); |
5f39d397 | 4145 | |
39279cc3 CM |
4146 | if (ret < 0) |
4147 | return ERR_PTR(ret); | |
5f39d397 | 4148 | |
4df27c4d YZ |
4149 | if (location.objectid == 0) |
4150 | return NULL; | |
4151 | ||
4152 | if (location.type == BTRFS_INODE_ITEM_KEY) { | |
73f73415 | 4153 | inode = btrfs_iget(dir->i_sb, &location, root, NULL); |
4df27c4d YZ |
4154 | return inode; |
4155 | } | |
4156 | ||
4157 | BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY); | |
4158 | ||
76dda93c | 4159 | index = srcu_read_lock(&root->fs_info->subvol_srcu); |
4df27c4d YZ |
4160 | ret = fixup_tree_root_location(root, dir, dentry, |
4161 | &location, &sub_root); | |
4162 | if (ret < 0) { | |
4163 | if (ret != -ENOENT) | |
4164 | inode = ERR_PTR(ret); | |
4165 | else | |
4166 | inode = new_simple_dir(dir->i_sb, &location, sub_root); | |
4167 | } else { | |
73f73415 | 4168 | inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL); |
39279cc3 | 4169 | } |
76dda93c YZ |
4170 | srcu_read_unlock(&root->fs_info->subvol_srcu, index); |
4171 | ||
34d19bad | 4172 | if (!IS_ERR(inode) && root != sub_root) { |
c71bf099 YZ |
4173 | down_read(&root->fs_info->cleanup_work_sem); |
4174 | if (!(inode->i_sb->s_flags & MS_RDONLY)) | |
66b4ffd1 | 4175 | ret = btrfs_orphan_cleanup(sub_root); |
c71bf099 | 4176 | up_read(&root->fs_info->cleanup_work_sem); |
66b4ffd1 JB |
4177 | if (ret) |
4178 | inode = ERR_PTR(ret); | |
c71bf099 YZ |
4179 | } |
4180 | ||
3de4586c CM |
4181 | return inode; |
4182 | } | |
4183 | ||
fe15ce44 | 4184 | static int btrfs_dentry_delete(const struct dentry *dentry) |
76dda93c YZ |
4185 | { |
4186 | struct btrfs_root *root; | |
4187 | ||
efefb143 YZ |
4188 | if (!dentry->d_inode && !IS_ROOT(dentry)) |
4189 | dentry = dentry->d_parent; | |
76dda93c | 4190 | |
efefb143 YZ |
4191 | if (dentry->d_inode) { |
4192 | root = BTRFS_I(dentry->d_inode)->root; | |
4193 | if (btrfs_root_refs(&root->root_item) == 0) | |
4194 | return 1; | |
4195 | } | |
76dda93c YZ |
4196 | return 0; |
4197 | } | |
4198 | ||
3de4586c CM |
4199 | static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry, |
4200 | struct nameidata *nd) | |
4201 | { | |
4202 | struct inode *inode; | |
4203 | ||
3de4586c CM |
4204 | inode = btrfs_lookup_dentry(dir, dentry); |
4205 | if (IS_ERR(inode)) | |
4206 | return ERR_CAST(inode); | |
7b128766 | 4207 | |
39279cc3 CM |
4208 | return d_splice_alias(inode, dentry); |
4209 | } | |
4210 | ||
39279cc3 CM |
4211 | static unsigned char btrfs_filetype_table[] = { |
4212 | DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK | |
4213 | }; | |
4214 | ||
cbdf5a24 DW |
4215 | static int btrfs_real_readdir(struct file *filp, void *dirent, |
4216 | filldir_t filldir) | |
39279cc3 | 4217 | { |
6da6abae | 4218 | struct inode *inode = filp->f_dentry->d_inode; |
39279cc3 CM |
4219 | struct btrfs_root *root = BTRFS_I(inode)->root; |
4220 | struct btrfs_item *item; | |
4221 | struct btrfs_dir_item *di; | |
4222 | struct btrfs_key key; | |
5f39d397 | 4223 | struct btrfs_key found_key; |
39279cc3 CM |
4224 | struct btrfs_path *path; |
4225 | int ret; | |
5f39d397 | 4226 | struct extent_buffer *leaf; |
39279cc3 | 4227 | int slot; |
39279cc3 CM |
4228 | unsigned char d_type; |
4229 | int over = 0; | |
4230 | u32 di_cur; | |
4231 | u32 di_total; | |
4232 | u32 di_len; | |
4233 | int key_type = BTRFS_DIR_INDEX_KEY; | |
5f39d397 CM |
4234 | char tmp_name[32]; |
4235 | char *name_ptr; | |
4236 | int name_len; | |
39279cc3 CM |
4237 | |
4238 | /* FIXME, use a real flag for deciding about the key type */ | |
4239 | if (root->fs_info->tree_root == root) | |
4240 | key_type = BTRFS_DIR_ITEM_KEY; | |
5f39d397 | 4241 | |
3954401f CM |
4242 | /* special case for "." */ |
4243 | if (filp->f_pos == 0) { | |
4244 | over = filldir(dirent, ".", 1, | |
4245 | 1, inode->i_ino, | |
4246 | DT_DIR); | |
4247 | if (over) | |
4248 | return 0; | |
4249 | filp->f_pos = 1; | |
4250 | } | |
3954401f CM |
4251 | /* special case for .., just use the back ref */ |
4252 | if (filp->f_pos == 1) { | |
5ecc7e5d | 4253 | u64 pino = parent_ino(filp->f_path.dentry); |
3954401f | 4254 | over = filldir(dirent, "..", 2, |
5ecc7e5d | 4255 | 2, pino, DT_DIR); |
3954401f | 4256 | if (over) |
49593bfa | 4257 | return 0; |
3954401f CM |
4258 | filp->f_pos = 2; |
4259 | } | |
49593bfa DW |
4260 | path = btrfs_alloc_path(); |
4261 | path->reada = 2; | |
4262 | ||
39279cc3 CM |
4263 | btrfs_set_key_type(&key, key_type); |
4264 | key.offset = filp->f_pos; | |
49593bfa | 4265 | key.objectid = inode->i_ino; |
5f39d397 | 4266 | |
39279cc3 CM |
4267 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); |
4268 | if (ret < 0) | |
4269 | goto err; | |
49593bfa DW |
4270 | |
4271 | while (1) { | |
5f39d397 | 4272 | leaf = path->nodes[0]; |
39279cc3 | 4273 | slot = path->slots[0]; |
b9e03af0 LZ |
4274 | if (slot >= btrfs_header_nritems(leaf)) { |
4275 | ret = btrfs_next_leaf(root, path); | |
4276 | if (ret < 0) | |
4277 | goto err; | |
4278 | else if (ret > 0) | |
4279 | break; | |
4280 | continue; | |
39279cc3 | 4281 | } |
3de4586c | 4282 | |
5f39d397 CM |
4283 | item = btrfs_item_nr(leaf, slot); |
4284 | btrfs_item_key_to_cpu(leaf, &found_key, slot); | |
4285 | ||
4286 | if (found_key.objectid != key.objectid) | |
39279cc3 | 4287 | break; |
5f39d397 | 4288 | if (btrfs_key_type(&found_key) != key_type) |
39279cc3 | 4289 | break; |
5f39d397 | 4290 | if (found_key.offset < filp->f_pos) |
b9e03af0 | 4291 | goto next; |
5f39d397 CM |
4292 | |
4293 | filp->f_pos = found_key.offset; | |
49593bfa | 4294 | |
39279cc3 CM |
4295 | di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item); |
4296 | di_cur = 0; | |
5f39d397 | 4297 | di_total = btrfs_item_size(leaf, item); |
49593bfa DW |
4298 | |
4299 | while (di_cur < di_total) { | |
5f39d397 CM |
4300 | struct btrfs_key location; |
4301 | ||
22a94d44 JB |
4302 | if (verify_dir_item(root, leaf, di)) |
4303 | break; | |
4304 | ||
5f39d397 | 4305 | name_len = btrfs_dir_name_len(leaf, di); |
49593bfa | 4306 | if (name_len <= sizeof(tmp_name)) { |
5f39d397 CM |
4307 | name_ptr = tmp_name; |
4308 | } else { | |
4309 | name_ptr = kmalloc(name_len, GFP_NOFS); | |
49593bfa DW |
4310 | if (!name_ptr) { |
4311 | ret = -ENOMEM; | |
4312 | goto err; | |
4313 | } | |
5f39d397 CM |
4314 | } |
4315 | read_extent_buffer(leaf, name_ptr, | |
4316 | (unsigned long)(di + 1), name_len); | |
4317 | ||
4318 | d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)]; | |
4319 | btrfs_dir_item_key_to_cpu(leaf, di, &location); | |
3de4586c CM |
4320 | |
4321 | /* is this a reference to our own snapshot? If so | |
4322 | * skip it | |
4323 | */ | |
4324 | if (location.type == BTRFS_ROOT_ITEM_KEY && | |
4325 | location.objectid == root->root_key.objectid) { | |
4326 | over = 0; | |
4327 | goto skip; | |
4328 | } | |
5f39d397 | 4329 | over = filldir(dirent, name_ptr, name_len, |
49593bfa | 4330 | found_key.offset, location.objectid, |
39279cc3 | 4331 | d_type); |
5f39d397 | 4332 | |
3de4586c | 4333 | skip: |
5f39d397 CM |
4334 | if (name_ptr != tmp_name) |
4335 | kfree(name_ptr); | |
4336 | ||
39279cc3 CM |
4337 | if (over) |
4338 | goto nopos; | |
5103e947 | 4339 | di_len = btrfs_dir_name_len(leaf, di) + |
49593bfa | 4340 | btrfs_dir_data_len(leaf, di) + sizeof(*di); |
39279cc3 CM |
4341 | di_cur += di_len; |
4342 | di = (struct btrfs_dir_item *)((char *)di + di_len); | |
4343 | } | |
b9e03af0 LZ |
4344 | next: |
4345 | path->slots[0]++; | |
39279cc3 | 4346 | } |
49593bfa DW |
4347 | |
4348 | /* Reached end of directory/root. Bump pos past the last item. */ | |
5e591a07 | 4349 | if (key_type == BTRFS_DIR_INDEX_KEY) |
406266ab JE |
4350 | /* |
4351 | * 32-bit glibc will use getdents64, but then strtol - | |
4352 | * so the last number we can serve is this. | |
4353 | */ | |
4354 | filp->f_pos = 0x7fffffff; | |
5e591a07 YZ |
4355 | else |
4356 | filp->f_pos++; | |
39279cc3 CM |
4357 | nopos: |
4358 | ret = 0; | |
4359 | err: | |
39279cc3 | 4360 | btrfs_free_path(path); |
39279cc3 CM |
4361 | return ret; |
4362 | } | |
4363 | ||
a9185b41 | 4364 | int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc) |
39279cc3 CM |
4365 | { |
4366 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
4367 | struct btrfs_trans_handle *trans; | |
4368 | int ret = 0; | |
0af3d00b | 4369 | bool nolock = false; |
39279cc3 | 4370 | |
8929ecfa | 4371 | if (BTRFS_I(inode)->dummy_inode) |
4ca8b41e CM |
4372 | return 0; |
4373 | ||
0af3d00b JB |
4374 | smp_mb(); |
4375 | nolock = (root->fs_info->closing && root == root->fs_info->tree_root); | |
4376 | ||
a9185b41 | 4377 | if (wbc->sync_mode == WB_SYNC_ALL) { |
0af3d00b JB |
4378 | if (nolock) |
4379 | trans = btrfs_join_transaction_nolock(root, 1); | |
4380 | else | |
4381 | trans = btrfs_join_transaction(root, 1); | |
3612b495 TI |
4382 | if (IS_ERR(trans)) |
4383 | return PTR_ERR(trans); | |
39279cc3 | 4384 | btrfs_set_trans_block_group(trans, inode); |
0af3d00b JB |
4385 | if (nolock) |
4386 | ret = btrfs_end_transaction_nolock(trans, root); | |
4387 | else | |
4388 | ret = btrfs_commit_transaction(trans, root); | |
39279cc3 CM |
4389 | } |
4390 | return ret; | |
4391 | } | |
4392 | ||
4393 | /* | |
54aa1f4d | 4394 | * This is somewhat expensive, updating the tree every time the |
39279cc3 CM |
4395 | * inode changes. But, it is most likely to find the inode in cache. |
4396 | * FIXME, needs more benchmarking...there are no reasons other than performance | |
4397 | * to keep or drop this code. | |
4398 | */ | |
4399 | void btrfs_dirty_inode(struct inode *inode) | |
4400 | { | |
4401 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
4402 | struct btrfs_trans_handle *trans; | |
8929ecfa YZ |
4403 | int ret; |
4404 | ||
4405 | if (BTRFS_I(inode)->dummy_inode) | |
4406 | return; | |
39279cc3 | 4407 | |
f9295749 | 4408 | trans = btrfs_join_transaction(root, 1); |
3612b495 | 4409 | BUG_ON(IS_ERR(trans)); |
39279cc3 | 4410 | btrfs_set_trans_block_group(trans, inode); |
8929ecfa YZ |
4411 | |
4412 | ret = btrfs_update_inode(trans, root, inode); | |
94b60442 CM |
4413 | if (ret && ret == -ENOSPC) { |
4414 | /* whoops, lets try again with the full transaction */ | |
4415 | btrfs_end_transaction(trans, root); | |
4416 | trans = btrfs_start_transaction(root, 1); | |
9aeead73 CM |
4417 | if (IS_ERR(trans)) { |
4418 | if (printk_ratelimit()) { | |
4419 | printk(KERN_ERR "btrfs: fail to " | |
4420 | "dirty inode %lu error %ld\n", | |
4421 | inode->i_ino, PTR_ERR(trans)); | |
4422 | } | |
4423 | return; | |
4424 | } | |
94b60442 | 4425 | btrfs_set_trans_block_group(trans, inode); |
8929ecfa | 4426 | |
94b60442 CM |
4427 | ret = btrfs_update_inode(trans, root, inode); |
4428 | if (ret) { | |
9aeead73 CM |
4429 | if (printk_ratelimit()) { |
4430 | printk(KERN_ERR "btrfs: fail to " | |
4431 | "dirty inode %lu error %d\n", | |
4432 | inode->i_ino, ret); | |
4433 | } | |
94b60442 CM |
4434 | } |
4435 | } | |
39279cc3 | 4436 | btrfs_end_transaction(trans, root); |
39279cc3 CM |
4437 | } |
4438 | ||
d352ac68 CM |
4439 | /* |
4440 | * find the highest existing sequence number in a directory | |
4441 | * and then set the in-memory index_cnt variable to reflect | |
4442 | * free sequence numbers | |
4443 | */ | |
aec7477b JB |
4444 | static int btrfs_set_inode_index_count(struct inode *inode) |
4445 | { | |
4446 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
4447 | struct btrfs_key key, found_key; | |
4448 | struct btrfs_path *path; | |
4449 | struct extent_buffer *leaf; | |
4450 | int ret; | |
4451 | ||
4452 | key.objectid = inode->i_ino; | |
4453 | btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY); | |
4454 | key.offset = (u64)-1; | |
4455 | ||
4456 | path = btrfs_alloc_path(); | |
4457 | if (!path) | |
4458 | return -ENOMEM; | |
4459 | ||
4460 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | |
4461 | if (ret < 0) | |
4462 | goto out; | |
4463 | /* FIXME: we should be able to handle this */ | |
4464 | if (ret == 0) | |
4465 | goto out; | |
4466 | ret = 0; | |
4467 | ||
4468 | /* | |
4469 | * MAGIC NUMBER EXPLANATION: | |
4470 | * since we search a directory based on f_pos we have to start at 2 | |
4471 | * since '.' and '..' have f_pos of 0 and 1 respectively, so everybody | |
4472 | * else has to start at 2 | |
4473 | */ | |
4474 | if (path->slots[0] == 0) { | |
4475 | BTRFS_I(inode)->index_cnt = 2; | |
4476 | goto out; | |
4477 | } | |
4478 | ||
4479 | path->slots[0]--; | |
4480 | ||
4481 | leaf = path->nodes[0]; | |
4482 | btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); | |
4483 | ||
4484 | if (found_key.objectid != inode->i_ino || | |
4485 | btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) { | |
4486 | BTRFS_I(inode)->index_cnt = 2; | |
4487 | goto out; | |
4488 | } | |
4489 | ||
4490 | BTRFS_I(inode)->index_cnt = found_key.offset + 1; | |
4491 | out: | |
4492 | btrfs_free_path(path); | |
4493 | return ret; | |
4494 | } | |
4495 | ||
d352ac68 CM |
4496 | /* |
4497 | * helper to find a free sequence number in a given directory. This current | |
4498 | * code is very simple, later versions will do smarter things in the btree | |
4499 | */ | |
3de4586c | 4500 | int btrfs_set_inode_index(struct inode *dir, u64 *index) |
aec7477b JB |
4501 | { |
4502 | int ret = 0; | |
4503 | ||
4504 | if (BTRFS_I(dir)->index_cnt == (u64)-1) { | |
4505 | ret = btrfs_set_inode_index_count(dir); | |
d397712b | 4506 | if (ret) |
aec7477b JB |
4507 | return ret; |
4508 | } | |
4509 | ||
00e4e6b3 | 4510 | *index = BTRFS_I(dir)->index_cnt; |
aec7477b JB |
4511 | BTRFS_I(dir)->index_cnt++; |
4512 | ||
4513 | return ret; | |
4514 | } | |
4515 | ||
39279cc3 CM |
4516 | static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, |
4517 | struct btrfs_root *root, | |
aec7477b | 4518 | struct inode *dir, |
9c58309d | 4519 | const char *name, int name_len, |
d2fb3437 YZ |
4520 | u64 ref_objectid, u64 objectid, |
4521 | u64 alloc_hint, int mode, u64 *index) | |
39279cc3 CM |
4522 | { |
4523 | struct inode *inode; | |
5f39d397 | 4524 | struct btrfs_inode_item *inode_item; |
39279cc3 | 4525 | struct btrfs_key *location; |
5f39d397 | 4526 | struct btrfs_path *path; |
9c58309d CM |
4527 | struct btrfs_inode_ref *ref; |
4528 | struct btrfs_key key[2]; | |
4529 | u32 sizes[2]; | |
4530 | unsigned long ptr; | |
39279cc3 CM |
4531 | int ret; |
4532 | int owner; | |
4533 | ||
5f39d397 CM |
4534 | path = btrfs_alloc_path(); |
4535 | BUG_ON(!path); | |
4536 | ||
39279cc3 | 4537 | inode = new_inode(root->fs_info->sb); |
8fb27640 YS |
4538 | if (!inode) { |
4539 | btrfs_free_path(path); | |
39279cc3 | 4540 | return ERR_PTR(-ENOMEM); |
8fb27640 | 4541 | } |
39279cc3 | 4542 | |
aec7477b | 4543 | if (dir) { |
1abe9b8a | 4544 | trace_btrfs_inode_request(dir); |
4545 | ||
3de4586c | 4546 | ret = btrfs_set_inode_index(dir, index); |
09771430 | 4547 | if (ret) { |
8fb27640 | 4548 | btrfs_free_path(path); |
09771430 | 4549 | iput(inode); |
aec7477b | 4550 | return ERR_PTR(ret); |
09771430 | 4551 | } |
aec7477b JB |
4552 | } |
4553 | /* | |
4554 | * index_cnt is ignored for everything but a dir, | |
4555 | * btrfs_get_inode_index_count has an explanation for the magic | |
4556 | * number | |
4557 | */ | |
4558 | BTRFS_I(inode)->index_cnt = 2; | |
39279cc3 | 4559 | BTRFS_I(inode)->root = root; |
e02119d5 | 4560 | BTRFS_I(inode)->generation = trans->transid; |
76195853 | 4561 | inode->i_generation = BTRFS_I(inode)->generation; |
6a63209f | 4562 | btrfs_set_inode_space_info(root, inode); |
b888db2b | 4563 | |
39279cc3 CM |
4564 | if (mode & S_IFDIR) |
4565 | owner = 0; | |
4566 | else | |
4567 | owner = 1; | |
d2fb3437 YZ |
4568 | BTRFS_I(inode)->block_group = |
4569 | btrfs_find_block_group(root, 0, alloc_hint, owner); | |
9c58309d CM |
4570 | |
4571 | key[0].objectid = objectid; | |
4572 | btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY); | |
4573 | key[0].offset = 0; | |
4574 | ||
4575 | key[1].objectid = objectid; | |
4576 | btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY); | |
4577 | key[1].offset = ref_objectid; | |
4578 | ||
4579 | sizes[0] = sizeof(struct btrfs_inode_item); | |
4580 | sizes[1] = name_len + sizeof(*ref); | |
4581 | ||
b9473439 | 4582 | path->leave_spinning = 1; |
9c58309d CM |
4583 | ret = btrfs_insert_empty_items(trans, root, path, key, sizes, 2); |
4584 | if (ret != 0) | |
5f39d397 CM |
4585 | goto fail; |
4586 | ||
ecc11fab | 4587 | inode_init_owner(inode, dir, mode); |
39279cc3 | 4588 | inode->i_ino = objectid; |
a76a3cd4 | 4589 | inode_set_bytes(inode, 0); |
39279cc3 | 4590 | inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; |
5f39d397 CM |
4591 | inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0], |
4592 | struct btrfs_inode_item); | |
e02119d5 | 4593 | fill_inode_item(trans, path->nodes[0], inode_item, inode); |
9c58309d CM |
4594 | |
4595 | ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1, | |
4596 | struct btrfs_inode_ref); | |
4597 | btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len); | |
00e4e6b3 | 4598 | btrfs_set_inode_ref_index(path->nodes[0], ref, *index); |
9c58309d CM |
4599 | ptr = (unsigned long)(ref + 1); |
4600 | write_extent_buffer(path->nodes[0], name, ptr, name_len); | |
4601 | ||
5f39d397 CM |
4602 | btrfs_mark_buffer_dirty(path->nodes[0]); |
4603 | btrfs_free_path(path); | |
4604 | ||
39279cc3 CM |
4605 | location = &BTRFS_I(inode)->location; |
4606 | location->objectid = objectid; | |
39279cc3 CM |
4607 | location->offset = 0; |
4608 | btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY); | |
4609 | ||
6cbff00f CH |
4610 | btrfs_inherit_iflags(inode, dir); |
4611 | ||
94272164 CM |
4612 | if ((mode & S_IFREG)) { |
4613 | if (btrfs_test_opt(root, NODATASUM)) | |
4614 | BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM; | |
75e7cb7f LB |
4615 | if (btrfs_test_opt(root, NODATACOW) || |
4616 | (BTRFS_I(dir)->flags & BTRFS_INODE_NODATACOW)) | |
94272164 CM |
4617 | BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW; |
4618 | } | |
4619 | ||
39279cc3 | 4620 | insert_inode_hash(inode); |
5d4f98a2 | 4621 | inode_tree_add(inode); |
1abe9b8a | 4622 | |
4623 | trace_btrfs_inode_new(inode); | |
4624 | ||
39279cc3 | 4625 | return inode; |
5f39d397 | 4626 | fail: |
aec7477b JB |
4627 | if (dir) |
4628 | BTRFS_I(dir)->index_cnt--; | |
5f39d397 | 4629 | btrfs_free_path(path); |
09771430 | 4630 | iput(inode); |
5f39d397 | 4631 | return ERR_PTR(ret); |
39279cc3 CM |
4632 | } |
4633 | ||
4634 | static inline u8 btrfs_inode_type(struct inode *inode) | |
4635 | { | |
4636 | return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT]; | |
4637 | } | |
4638 | ||
d352ac68 CM |
4639 | /* |
4640 | * utility function to add 'inode' into 'parent_inode' with | |
4641 | * a give name and a given sequence number. | |
4642 | * if 'add_backref' is true, also insert a backref from the | |
4643 | * inode to the parent directory. | |
4644 | */ | |
e02119d5 CM |
4645 | int btrfs_add_link(struct btrfs_trans_handle *trans, |
4646 | struct inode *parent_inode, struct inode *inode, | |
4647 | const char *name, int name_len, int add_backref, u64 index) | |
39279cc3 | 4648 | { |
4df27c4d | 4649 | int ret = 0; |
39279cc3 | 4650 | struct btrfs_key key; |
e02119d5 | 4651 | struct btrfs_root *root = BTRFS_I(parent_inode)->root; |
5f39d397 | 4652 | |
4df27c4d YZ |
4653 | if (unlikely(inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) { |
4654 | memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key)); | |
4655 | } else { | |
4656 | key.objectid = inode->i_ino; | |
4657 | btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY); | |
4658 | key.offset = 0; | |
4659 | } | |
4660 | ||
4661 | if (unlikely(inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) { | |
4662 | ret = btrfs_add_root_ref(trans, root->fs_info->tree_root, | |
4663 | key.objectid, root->root_key.objectid, | |
4664 | parent_inode->i_ino, | |
4665 | index, name, name_len); | |
4666 | } else if (add_backref) { | |
4667 | ret = btrfs_insert_inode_ref(trans, root, | |
4668 | name, name_len, inode->i_ino, | |
4669 | parent_inode->i_ino, index); | |
4670 | } | |
39279cc3 | 4671 | |
39279cc3 | 4672 | if (ret == 0) { |
4df27c4d YZ |
4673 | ret = btrfs_insert_dir_item(trans, root, name, name_len, |
4674 | parent_inode->i_ino, &key, | |
4675 | btrfs_inode_type(inode), index); | |
4676 | BUG_ON(ret); | |
4677 | ||
dbe674a9 | 4678 | btrfs_i_size_write(parent_inode, parent_inode->i_size + |
e02119d5 | 4679 | name_len * 2); |
79c44584 | 4680 | parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME; |
e02119d5 | 4681 | ret = btrfs_update_inode(trans, root, parent_inode); |
39279cc3 CM |
4682 | } |
4683 | return ret; | |
4684 | } | |
4685 | ||
4686 | static int btrfs_add_nondir(struct btrfs_trans_handle *trans, | |
a1b075d2 JB |
4687 | struct inode *dir, struct dentry *dentry, |
4688 | struct inode *inode, int backref, u64 index) | |
39279cc3 | 4689 | { |
a1b075d2 JB |
4690 | int err = btrfs_add_link(trans, dir, inode, |
4691 | dentry->d_name.name, dentry->d_name.len, | |
4692 | backref, index); | |
39279cc3 CM |
4693 | if (!err) { |
4694 | d_instantiate(dentry, inode); | |
4695 | return 0; | |
4696 | } | |
4697 | if (err > 0) | |
4698 | err = -EEXIST; | |
4699 | return err; | |
4700 | } | |
4701 | ||
618e21d5 JB |
4702 | static int btrfs_mknod(struct inode *dir, struct dentry *dentry, |
4703 | int mode, dev_t rdev) | |
4704 | { | |
4705 | struct btrfs_trans_handle *trans; | |
4706 | struct btrfs_root *root = BTRFS_I(dir)->root; | |
1832a6d5 | 4707 | struct inode *inode = NULL; |
618e21d5 JB |
4708 | int err; |
4709 | int drop_inode = 0; | |
4710 | u64 objectid; | |
1832a6d5 | 4711 | unsigned long nr = 0; |
00e4e6b3 | 4712 | u64 index = 0; |
618e21d5 JB |
4713 | |
4714 | if (!new_valid_dev(rdev)) | |
4715 | return -EINVAL; | |
4716 | ||
a22285a6 YZ |
4717 | err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid); |
4718 | if (err) | |
4719 | return err; | |
4720 | ||
9ed74f2d JB |
4721 | /* |
4722 | * 2 for inode item and ref | |
4723 | * 2 for dir items | |
4724 | * 1 for xattr if selinux is on | |
4725 | */ | |
a22285a6 YZ |
4726 | trans = btrfs_start_transaction(root, 5); |
4727 | if (IS_ERR(trans)) | |
4728 | return PTR_ERR(trans); | |
1832a6d5 | 4729 | |
618e21d5 JB |
4730 | btrfs_set_trans_block_group(trans, dir); |
4731 | ||
aec7477b | 4732 | inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, |
a1b075d2 | 4733 | dentry->d_name.len, dir->i_ino, objectid, |
00e4e6b3 | 4734 | BTRFS_I(dir)->block_group, mode, &index); |
7cf96da3 TI |
4735 | if (IS_ERR(inode)) { |
4736 | err = PTR_ERR(inode); | |
618e21d5 | 4737 | goto out_unlock; |
7cf96da3 | 4738 | } |
618e21d5 | 4739 | |
2a7dba39 | 4740 | err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); |
33268eaf JB |
4741 | if (err) { |
4742 | drop_inode = 1; | |
4743 | goto out_unlock; | |
4744 | } | |
4745 | ||
618e21d5 | 4746 | btrfs_set_trans_block_group(trans, inode); |
a1b075d2 | 4747 | err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); |
618e21d5 JB |
4748 | if (err) |
4749 | drop_inode = 1; | |
4750 | else { | |
4751 | inode->i_op = &btrfs_special_inode_operations; | |
4752 | init_special_inode(inode, inode->i_mode, rdev); | |
1b4ab1bb | 4753 | btrfs_update_inode(trans, root, inode); |
618e21d5 | 4754 | } |
618e21d5 JB |
4755 | btrfs_update_inode_block_group(trans, inode); |
4756 | btrfs_update_inode_block_group(trans, dir); | |
4757 | out_unlock: | |
d3c2fdcf | 4758 | nr = trans->blocks_used; |
89ce8a63 | 4759 | btrfs_end_transaction_throttle(trans, root); |
a22285a6 | 4760 | btrfs_btree_balance_dirty(root, nr); |
618e21d5 JB |
4761 | if (drop_inode) { |
4762 | inode_dec_link_count(inode); | |
4763 | iput(inode); | |
4764 | } | |
618e21d5 JB |
4765 | return err; |
4766 | } | |
4767 | ||
39279cc3 CM |
4768 | static int btrfs_create(struct inode *dir, struct dentry *dentry, |
4769 | int mode, struct nameidata *nd) | |
4770 | { | |
4771 | struct btrfs_trans_handle *trans; | |
4772 | struct btrfs_root *root = BTRFS_I(dir)->root; | |
1832a6d5 | 4773 | struct inode *inode = NULL; |
39279cc3 | 4774 | int drop_inode = 0; |
a22285a6 | 4775 | int err; |
1832a6d5 | 4776 | unsigned long nr = 0; |
39279cc3 | 4777 | u64 objectid; |
00e4e6b3 | 4778 | u64 index = 0; |
39279cc3 | 4779 | |
a22285a6 YZ |
4780 | err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid); |
4781 | if (err) | |
4782 | return err; | |
9ed74f2d JB |
4783 | /* |
4784 | * 2 for inode item and ref | |
4785 | * 2 for dir items | |
4786 | * 1 for xattr if selinux is on | |
4787 | */ | |
a22285a6 YZ |
4788 | trans = btrfs_start_transaction(root, 5); |
4789 | if (IS_ERR(trans)) | |
4790 | return PTR_ERR(trans); | |
9ed74f2d | 4791 | |
39279cc3 CM |
4792 | btrfs_set_trans_block_group(trans, dir); |
4793 | ||
aec7477b | 4794 | inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, |
a1b075d2 JB |
4795 | dentry->d_name.len, dir->i_ino, objectid, |
4796 | BTRFS_I(dir)->block_group, mode, &index); | |
7cf96da3 TI |
4797 | if (IS_ERR(inode)) { |
4798 | err = PTR_ERR(inode); | |
39279cc3 | 4799 | goto out_unlock; |
7cf96da3 | 4800 | } |
39279cc3 | 4801 | |
2a7dba39 | 4802 | err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); |
33268eaf JB |
4803 | if (err) { |
4804 | drop_inode = 1; | |
4805 | goto out_unlock; | |
4806 | } | |
4807 | ||
39279cc3 | 4808 | btrfs_set_trans_block_group(trans, inode); |
a1b075d2 | 4809 | err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); |
39279cc3 CM |
4810 | if (err) |
4811 | drop_inode = 1; | |
4812 | else { | |
4813 | inode->i_mapping->a_ops = &btrfs_aops; | |
04160088 | 4814 | inode->i_mapping->backing_dev_info = &root->fs_info->bdi; |
39279cc3 CM |
4815 | inode->i_fop = &btrfs_file_operations; |
4816 | inode->i_op = &btrfs_file_inode_operations; | |
d1310b2e | 4817 | BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; |
39279cc3 | 4818 | } |
39279cc3 CM |
4819 | btrfs_update_inode_block_group(trans, inode); |
4820 | btrfs_update_inode_block_group(trans, dir); | |
4821 | out_unlock: | |
d3c2fdcf | 4822 | nr = trans->blocks_used; |
ab78c84d | 4823 | btrfs_end_transaction_throttle(trans, root); |
39279cc3 CM |
4824 | if (drop_inode) { |
4825 | inode_dec_link_count(inode); | |
4826 | iput(inode); | |
4827 | } | |
d3c2fdcf | 4828 | btrfs_btree_balance_dirty(root, nr); |
39279cc3 CM |
4829 | return err; |
4830 | } | |
4831 | ||
4832 | static int btrfs_link(struct dentry *old_dentry, struct inode *dir, | |
4833 | struct dentry *dentry) | |
4834 | { | |
4835 | struct btrfs_trans_handle *trans; | |
4836 | struct btrfs_root *root = BTRFS_I(dir)->root; | |
4837 | struct inode *inode = old_dentry->d_inode; | |
00e4e6b3 | 4838 | u64 index; |
1832a6d5 | 4839 | unsigned long nr = 0; |
39279cc3 CM |
4840 | int err; |
4841 | int drop_inode = 0; | |
4842 | ||
4a8be425 TH |
4843 | /* do not allow sys_link's with other subvols of the same device */ |
4844 | if (root->objectid != BTRFS_I(inode)->root->objectid) | |
3ab3564f | 4845 | return -EXDEV; |
4a8be425 | 4846 | |
c055e99e AV |
4847 | if (inode->i_nlink == ~0U) |
4848 | return -EMLINK; | |
4a8be425 | 4849 | |
3de4586c | 4850 | err = btrfs_set_inode_index(dir, &index); |
aec7477b JB |
4851 | if (err) |
4852 | goto fail; | |
4853 | ||
a22285a6 | 4854 | /* |
7e6b6465 | 4855 | * 2 items for inode and inode ref |
a22285a6 | 4856 | * 2 items for dir items |
7e6b6465 | 4857 | * 1 item for parent inode |
a22285a6 | 4858 | */ |
7e6b6465 | 4859 | trans = btrfs_start_transaction(root, 5); |
a22285a6 YZ |
4860 | if (IS_ERR(trans)) { |
4861 | err = PTR_ERR(trans); | |
4862 | goto fail; | |
4863 | } | |
5f39d397 | 4864 | |
3153495d MX |
4865 | btrfs_inc_nlink(inode); |
4866 | inode->i_ctime = CURRENT_TIME; | |
4867 | ||
39279cc3 | 4868 | btrfs_set_trans_block_group(trans, dir); |
7de9c6ee | 4869 | ihold(inode); |
aec7477b | 4870 | |
a1b075d2 | 4871 | err = btrfs_add_nondir(trans, dir, dentry, inode, 1, index); |
5f39d397 | 4872 | |
a5719521 | 4873 | if (err) { |
54aa1f4d | 4874 | drop_inode = 1; |
a5719521 | 4875 | } else { |
6a912213 | 4876 | struct dentry *parent = dget_parent(dentry); |
a5719521 YZ |
4877 | btrfs_update_inode_block_group(trans, dir); |
4878 | err = btrfs_update_inode(trans, root, inode); | |
4879 | BUG_ON(err); | |
6a912213 JB |
4880 | btrfs_log_new_name(trans, inode, NULL, parent); |
4881 | dput(parent); | |
a5719521 | 4882 | } |
39279cc3 | 4883 | |
d3c2fdcf | 4884 | nr = trans->blocks_used; |
ab78c84d | 4885 | btrfs_end_transaction_throttle(trans, root); |
1832a6d5 | 4886 | fail: |
39279cc3 CM |
4887 | if (drop_inode) { |
4888 | inode_dec_link_count(inode); | |
4889 | iput(inode); | |
4890 | } | |
d3c2fdcf | 4891 | btrfs_btree_balance_dirty(root, nr); |
39279cc3 CM |
4892 | return err; |
4893 | } | |
4894 | ||
39279cc3 CM |
4895 | static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) |
4896 | { | |
b9d86667 | 4897 | struct inode *inode = NULL; |
39279cc3 CM |
4898 | struct btrfs_trans_handle *trans; |
4899 | struct btrfs_root *root = BTRFS_I(dir)->root; | |
4900 | int err = 0; | |
4901 | int drop_on_err = 0; | |
b9d86667 | 4902 | u64 objectid = 0; |
00e4e6b3 | 4903 | u64 index = 0; |
d3c2fdcf | 4904 | unsigned long nr = 1; |
39279cc3 | 4905 | |
a22285a6 YZ |
4906 | err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid); |
4907 | if (err) | |
4908 | return err; | |
4909 | ||
9ed74f2d JB |
4910 | /* |
4911 | * 2 items for inode and ref | |
4912 | * 2 items for dir items | |
4913 | * 1 for xattr if selinux is on | |
4914 | */ | |
a22285a6 YZ |
4915 | trans = btrfs_start_transaction(root, 5); |
4916 | if (IS_ERR(trans)) | |
4917 | return PTR_ERR(trans); | |
9ed74f2d | 4918 | btrfs_set_trans_block_group(trans, dir); |
39279cc3 | 4919 | |
aec7477b | 4920 | inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, |
a1b075d2 | 4921 | dentry->d_name.len, dir->i_ino, objectid, |
00e4e6b3 CM |
4922 | BTRFS_I(dir)->block_group, S_IFDIR | mode, |
4923 | &index); | |
39279cc3 CM |
4924 | if (IS_ERR(inode)) { |
4925 | err = PTR_ERR(inode); | |
4926 | goto out_fail; | |
4927 | } | |
5f39d397 | 4928 | |
39279cc3 | 4929 | drop_on_err = 1; |
33268eaf | 4930 | |
2a7dba39 | 4931 | err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); |
33268eaf JB |
4932 | if (err) |
4933 | goto out_fail; | |
4934 | ||
39279cc3 CM |
4935 | inode->i_op = &btrfs_dir_inode_operations; |
4936 | inode->i_fop = &btrfs_dir_file_operations; | |
4937 | btrfs_set_trans_block_group(trans, inode); | |
4938 | ||
dbe674a9 | 4939 | btrfs_i_size_write(inode, 0); |
39279cc3 CM |
4940 | err = btrfs_update_inode(trans, root, inode); |
4941 | if (err) | |
4942 | goto out_fail; | |
5f39d397 | 4943 | |
a1b075d2 JB |
4944 | err = btrfs_add_link(trans, dir, inode, dentry->d_name.name, |
4945 | dentry->d_name.len, 0, index); | |
39279cc3 CM |
4946 | if (err) |
4947 | goto out_fail; | |
5f39d397 | 4948 | |
39279cc3 CM |
4949 | d_instantiate(dentry, inode); |
4950 | drop_on_err = 0; | |
39279cc3 CM |
4951 | btrfs_update_inode_block_group(trans, inode); |
4952 | btrfs_update_inode_block_group(trans, dir); | |
4953 | ||
4954 | out_fail: | |
d3c2fdcf | 4955 | nr = trans->blocks_used; |
ab78c84d | 4956 | btrfs_end_transaction_throttle(trans, root); |
39279cc3 CM |
4957 | if (drop_on_err) |
4958 | iput(inode); | |
d3c2fdcf | 4959 | btrfs_btree_balance_dirty(root, nr); |
39279cc3 CM |
4960 | return err; |
4961 | } | |
4962 | ||
d352ac68 CM |
4963 | /* helper for btfs_get_extent. Given an existing extent in the tree, |
4964 | * and an extent that you want to insert, deal with overlap and insert | |
4965 | * the new extent into the tree. | |
4966 | */ | |
3b951516 CM |
4967 | static int merge_extent_mapping(struct extent_map_tree *em_tree, |
4968 | struct extent_map *existing, | |
e6dcd2dc CM |
4969 | struct extent_map *em, |
4970 | u64 map_start, u64 map_len) | |
3b951516 CM |
4971 | { |
4972 | u64 start_diff; | |
3b951516 | 4973 | |
e6dcd2dc CM |
4974 | BUG_ON(map_start < em->start || map_start >= extent_map_end(em)); |
4975 | start_diff = map_start - em->start; | |
4976 | em->start = map_start; | |
4977 | em->len = map_len; | |
c8b97818 CM |
4978 | if (em->block_start < EXTENT_MAP_LAST_BYTE && |
4979 | !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) { | |
e6dcd2dc | 4980 | em->block_start += start_diff; |
c8b97818 CM |
4981 | em->block_len -= start_diff; |
4982 | } | |
e6dcd2dc | 4983 | return add_extent_mapping(em_tree, em); |
3b951516 CM |
4984 | } |
4985 | ||
c8b97818 CM |
4986 | static noinline int uncompress_inline(struct btrfs_path *path, |
4987 | struct inode *inode, struct page *page, | |
4988 | size_t pg_offset, u64 extent_offset, | |
4989 | struct btrfs_file_extent_item *item) | |
4990 | { | |
4991 | int ret; | |
4992 | struct extent_buffer *leaf = path->nodes[0]; | |
4993 | char *tmp; | |
4994 | size_t max_size; | |
4995 | unsigned long inline_size; | |
4996 | unsigned long ptr; | |
261507a0 | 4997 | int compress_type; |
c8b97818 CM |
4998 | |
4999 | WARN_ON(pg_offset != 0); | |
261507a0 | 5000 | compress_type = btrfs_file_extent_compression(leaf, item); |
c8b97818 CM |
5001 | max_size = btrfs_file_extent_ram_bytes(leaf, item); |
5002 | inline_size = btrfs_file_extent_inline_item_len(leaf, | |
5003 | btrfs_item_nr(leaf, path->slots[0])); | |
5004 | tmp = kmalloc(inline_size, GFP_NOFS); | |
8d413713 TI |
5005 | if (!tmp) |
5006 | return -ENOMEM; | |
c8b97818 CM |
5007 | ptr = btrfs_file_extent_inline_start(item); |
5008 | ||
5009 | read_extent_buffer(leaf, tmp, ptr, inline_size); | |
5010 | ||
5b050f04 | 5011 | max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size); |
261507a0 LZ |
5012 | ret = btrfs_decompress(compress_type, tmp, page, |
5013 | extent_offset, inline_size, max_size); | |
c8b97818 CM |
5014 | if (ret) { |
5015 | char *kaddr = kmap_atomic(page, KM_USER0); | |
5016 | unsigned long copy_size = min_t(u64, | |
5017 | PAGE_CACHE_SIZE - pg_offset, | |
5018 | max_size - extent_offset); | |
5019 | memset(kaddr + pg_offset, 0, copy_size); | |
5020 | kunmap_atomic(kaddr, KM_USER0); | |
5021 | } | |
5022 | kfree(tmp); | |
5023 | return 0; | |
5024 | } | |
5025 | ||
d352ac68 CM |
5026 | /* |
5027 | * a bit scary, this does extent mapping from logical file offset to the disk. | |
d397712b CM |
5028 | * the ugly parts come from merging extents from the disk with the in-ram |
5029 | * representation. This gets more complex because of the data=ordered code, | |
d352ac68 CM |
5030 | * where the in-ram extents might be locked pending data=ordered completion. |
5031 | * | |
5032 | * This also copies inline extents directly into the page. | |
5033 | */ | |
d397712b | 5034 | |
a52d9a80 | 5035 | struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, |
70dec807 | 5036 | size_t pg_offset, u64 start, u64 len, |
a52d9a80 CM |
5037 | int create) |
5038 | { | |
5039 | int ret; | |
5040 | int err = 0; | |
db94535d | 5041 | u64 bytenr; |
a52d9a80 CM |
5042 | u64 extent_start = 0; |
5043 | u64 extent_end = 0; | |
5044 | u64 objectid = inode->i_ino; | |
5045 | u32 found_type; | |
f421950f | 5046 | struct btrfs_path *path = NULL; |
a52d9a80 CM |
5047 | struct btrfs_root *root = BTRFS_I(inode)->root; |
5048 | struct btrfs_file_extent_item *item; | |
5f39d397 CM |
5049 | struct extent_buffer *leaf; |
5050 | struct btrfs_key found_key; | |
a52d9a80 CM |
5051 | struct extent_map *em = NULL; |
5052 | struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; | |
d1310b2e | 5053 | struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; |
a52d9a80 | 5054 | struct btrfs_trans_handle *trans = NULL; |
261507a0 | 5055 | int compress_type; |
a52d9a80 | 5056 | |
a52d9a80 | 5057 | again: |
890871be | 5058 | read_lock(&em_tree->lock); |
d1310b2e | 5059 | em = lookup_extent_mapping(em_tree, start, len); |
a061fc8d CM |
5060 | if (em) |
5061 | em->bdev = root->fs_info->fs_devices->latest_bdev; | |
890871be | 5062 | read_unlock(&em_tree->lock); |
d1310b2e | 5063 | |
a52d9a80 | 5064 | if (em) { |
e1c4b745 CM |
5065 | if (em->start > start || em->start + em->len <= start) |
5066 | free_extent_map(em); | |
5067 | else if (em->block_start == EXTENT_MAP_INLINE && page) | |
70dec807 CM |
5068 | free_extent_map(em); |
5069 | else | |
5070 | goto out; | |
a52d9a80 | 5071 | } |
d1310b2e | 5072 | em = alloc_extent_map(GFP_NOFS); |
a52d9a80 | 5073 | if (!em) { |
d1310b2e CM |
5074 | err = -ENOMEM; |
5075 | goto out; | |
a52d9a80 | 5076 | } |
e6dcd2dc | 5077 | em->bdev = root->fs_info->fs_devices->latest_bdev; |
d1310b2e | 5078 | em->start = EXTENT_MAP_HOLE; |
445a6944 | 5079 | em->orig_start = EXTENT_MAP_HOLE; |
d1310b2e | 5080 | em->len = (u64)-1; |
c8b97818 | 5081 | em->block_len = (u64)-1; |
f421950f CM |
5082 | |
5083 | if (!path) { | |
5084 | path = btrfs_alloc_path(); | |
5085 | BUG_ON(!path); | |
5086 | } | |
5087 | ||
179e29e4 CM |
5088 | ret = btrfs_lookup_file_extent(trans, root, path, |
5089 | objectid, start, trans != NULL); | |
a52d9a80 CM |
5090 | if (ret < 0) { |
5091 | err = ret; | |
5092 | goto out; | |
5093 | } | |
5094 | ||
5095 | if (ret != 0) { | |
5096 | if (path->slots[0] == 0) | |
5097 | goto not_found; | |
5098 | path->slots[0]--; | |
5099 | } | |
5100 | ||
5f39d397 CM |
5101 | leaf = path->nodes[0]; |
5102 | item = btrfs_item_ptr(leaf, path->slots[0], | |
a52d9a80 | 5103 | struct btrfs_file_extent_item); |
a52d9a80 | 5104 | /* are we inside the extent that was found? */ |
5f39d397 CM |
5105 | btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); |
5106 | found_type = btrfs_key_type(&found_key); | |
5107 | if (found_key.objectid != objectid || | |
a52d9a80 CM |
5108 | found_type != BTRFS_EXTENT_DATA_KEY) { |
5109 | goto not_found; | |
5110 | } | |
5111 | ||
5f39d397 CM |
5112 | found_type = btrfs_file_extent_type(leaf, item); |
5113 | extent_start = found_key.offset; | |
261507a0 | 5114 | compress_type = btrfs_file_extent_compression(leaf, item); |
d899e052 YZ |
5115 | if (found_type == BTRFS_FILE_EXTENT_REG || |
5116 | found_type == BTRFS_FILE_EXTENT_PREALLOC) { | |
a52d9a80 | 5117 | extent_end = extent_start + |
db94535d | 5118 | btrfs_file_extent_num_bytes(leaf, item); |
9036c102 YZ |
5119 | } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { |
5120 | size_t size; | |
5121 | size = btrfs_file_extent_inline_len(leaf, item); | |
5122 | extent_end = (extent_start + size + root->sectorsize - 1) & | |
5123 | ~((u64)root->sectorsize - 1); | |
5124 | } | |
5125 | ||
5126 | if (start >= extent_end) { | |
5127 | path->slots[0]++; | |
5128 | if (path->slots[0] >= btrfs_header_nritems(leaf)) { | |
5129 | ret = btrfs_next_leaf(root, path); | |
5130 | if (ret < 0) { | |
5131 | err = ret; | |
5132 | goto out; | |
a52d9a80 | 5133 | } |
9036c102 YZ |
5134 | if (ret > 0) |
5135 | goto not_found; | |
5136 | leaf = path->nodes[0]; | |
a52d9a80 | 5137 | } |
9036c102 YZ |
5138 | btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); |
5139 | if (found_key.objectid != objectid || | |
5140 | found_key.type != BTRFS_EXTENT_DATA_KEY) | |
5141 | goto not_found; | |
5142 | if (start + len <= found_key.offset) | |
5143 | goto not_found; | |
5144 | em->start = start; | |
5145 | em->len = found_key.offset - start; | |
5146 | goto not_found_em; | |
5147 | } | |
5148 | ||
d899e052 YZ |
5149 | if (found_type == BTRFS_FILE_EXTENT_REG || |
5150 | found_type == BTRFS_FILE_EXTENT_PREALLOC) { | |
9036c102 YZ |
5151 | em->start = extent_start; |
5152 | em->len = extent_end - extent_start; | |
ff5b7ee3 YZ |
5153 | em->orig_start = extent_start - |
5154 | btrfs_file_extent_offset(leaf, item); | |
db94535d CM |
5155 | bytenr = btrfs_file_extent_disk_bytenr(leaf, item); |
5156 | if (bytenr == 0) { | |
5f39d397 | 5157 | em->block_start = EXTENT_MAP_HOLE; |
a52d9a80 CM |
5158 | goto insert; |
5159 | } | |
261507a0 | 5160 | if (compress_type != BTRFS_COMPRESS_NONE) { |
c8b97818 | 5161 | set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); |
261507a0 | 5162 | em->compress_type = compress_type; |
c8b97818 CM |
5163 | em->block_start = bytenr; |
5164 | em->block_len = btrfs_file_extent_disk_num_bytes(leaf, | |
5165 | item); | |
5166 | } else { | |
5167 | bytenr += btrfs_file_extent_offset(leaf, item); | |
5168 | em->block_start = bytenr; | |
5169 | em->block_len = em->len; | |
d899e052 YZ |
5170 | if (found_type == BTRFS_FILE_EXTENT_PREALLOC) |
5171 | set_bit(EXTENT_FLAG_PREALLOC, &em->flags); | |
c8b97818 | 5172 | } |
a52d9a80 CM |
5173 | goto insert; |
5174 | } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { | |
5f39d397 | 5175 | unsigned long ptr; |
a52d9a80 | 5176 | char *map; |
3326d1b0 CM |
5177 | size_t size; |
5178 | size_t extent_offset; | |
5179 | size_t copy_size; | |
a52d9a80 | 5180 | |
689f9346 | 5181 | em->block_start = EXTENT_MAP_INLINE; |
c8b97818 | 5182 | if (!page || create) { |
689f9346 | 5183 | em->start = extent_start; |
9036c102 | 5184 | em->len = extent_end - extent_start; |
689f9346 Y |
5185 | goto out; |
5186 | } | |
5f39d397 | 5187 | |
9036c102 YZ |
5188 | size = btrfs_file_extent_inline_len(leaf, item); |
5189 | extent_offset = page_offset(page) + pg_offset - extent_start; | |
70dec807 | 5190 | copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset, |
3326d1b0 | 5191 | size - extent_offset); |
3326d1b0 | 5192 | em->start = extent_start + extent_offset; |
70dec807 CM |
5193 | em->len = (copy_size + root->sectorsize - 1) & |
5194 | ~((u64)root->sectorsize - 1); | |
ff5b7ee3 | 5195 | em->orig_start = EXTENT_MAP_INLINE; |
261507a0 | 5196 | if (compress_type) { |
c8b97818 | 5197 | set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); |
261507a0 LZ |
5198 | em->compress_type = compress_type; |
5199 | } | |
689f9346 | 5200 | ptr = btrfs_file_extent_inline_start(item) + extent_offset; |
179e29e4 | 5201 | if (create == 0 && !PageUptodate(page)) { |
261507a0 LZ |
5202 | if (btrfs_file_extent_compression(leaf, item) != |
5203 | BTRFS_COMPRESS_NONE) { | |
c8b97818 CM |
5204 | ret = uncompress_inline(path, inode, page, |
5205 | pg_offset, | |
5206 | extent_offset, item); | |
5207 | BUG_ON(ret); | |
5208 | } else { | |
5209 | map = kmap(page); | |
5210 | read_extent_buffer(leaf, map + pg_offset, ptr, | |
5211 | copy_size); | |
93c82d57 CM |
5212 | if (pg_offset + copy_size < PAGE_CACHE_SIZE) { |
5213 | memset(map + pg_offset + copy_size, 0, | |
5214 | PAGE_CACHE_SIZE - pg_offset - | |
5215 | copy_size); | |
5216 | } | |
c8b97818 CM |
5217 | kunmap(page); |
5218 | } | |
179e29e4 CM |
5219 | flush_dcache_page(page); |
5220 | } else if (create && PageUptodate(page)) { | |
0ca1f7ce | 5221 | WARN_ON(1); |
179e29e4 CM |
5222 | if (!trans) { |
5223 | kunmap(page); | |
5224 | free_extent_map(em); | |
5225 | em = NULL; | |
5226 | btrfs_release_path(root, path); | |
f9295749 | 5227 | trans = btrfs_join_transaction(root, 1); |
3612b495 TI |
5228 | if (IS_ERR(trans)) |
5229 | return ERR_CAST(trans); | |
179e29e4 CM |
5230 | goto again; |
5231 | } | |
c8b97818 | 5232 | map = kmap(page); |
70dec807 | 5233 | write_extent_buffer(leaf, map + pg_offset, ptr, |
179e29e4 | 5234 | copy_size); |
c8b97818 | 5235 | kunmap(page); |
179e29e4 | 5236 | btrfs_mark_buffer_dirty(leaf); |
a52d9a80 | 5237 | } |
d1310b2e | 5238 | set_extent_uptodate(io_tree, em->start, |
507903b8 | 5239 | extent_map_end(em) - 1, NULL, GFP_NOFS); |
a52d9a80 CM |
5240 | goto insert; |
5241 | } else { | |
d397712b | 5242 | printk(KERN_ERR "btrfs unknown found_type %d\n", found_type); |
a52d9a80 CM |
5243 | WARN_ON(1); |
5244 | } | |
5245 | not_found: | |
5246 | em->start = start; | |
d1310b2e | 5247 | em->len = len; |
a52d9a80 | 5248 | not_found_em: |
5f39d397 | 5249 | em->block_start = EXTENT_MAP_HOLE; |
9036c102 | 5250 | set_bit(EXTENT_FLAG_VACANCY, &em->flags); |
a52d9a80 CM |
5251 | insert: |
5252 | btrfs_release_path(root, path); | |
d1310b2e | 5253 | if (em->start > start || extent_map_end(em) <= start) { |
d397712b CM |
5254 | printk(KERN_ERR "Btrfs: bad extent! em: [%llu %llu] passed " |
5255 | "[%llu %llu]\n", (unsigned long long)em->start, | |
5256 | (unsigned long long)em->len, | |
5257 | (unsigned long long)start, | |
5258 | (unsigned long long)len); | |
a52d9a80 CM |
5259 | err = -EIO; |
5260 | goto out; | |
5261 | } | |
d1310b2e CM |
5262 | |
5263 | err = 0; | |
890871be | 5264 | write_lock(&em_tree->lock); |
a52d9a80 | 5265 | ret = add_extent_mapping(em_tree, em); |
3b951516 CM |
5266 | /* it is possible that someone inserted the extent into the tree |
5267 | * while we had the lock dropped. It is also possible that | |
5268 | * an overlapping map exists in the tree | |
5269 | */ | |
a52d9a80 | 5270 | if (ret == -EEXIST) { |
3b951516 | 5271 | struct extent_map *existing; |
e6dcd2dc CM |
5272 | |
5273 | ret = 0; | |
5274 | ||
3b951516 | 5275 | existing = lookup_extent_mapping(em_tree, start, len); |
e1c4b745 CM |
5276 | if (existing && (existing->start > start || |
5277 | existing->start + existing->len <= start)) { | |
5278 | free_extent_map(existing); | |
5279 | existing = NULL; | |
5280 | } | |
3b951516 CM |
5281 | if (!existing) { |
5282 | existing = lookup_extent_mapping(em_tree, em->start, | |
5283 | em->len); | |
5284 | if (existing) { | |
5285 | err = merge_extent_mapping(em_tree, existing, | |
e6dcd2dc CM |
5286 | em, start, |
5287 | root->sectorsize); | |
3b951516 CM |
5288 | free_extent_map(existing); |
5289 | if (err) { | |
5290 | free_extent_map(em); | |
5291 | em = NULL; | |
5292 | } | |
5293 | } else { | |
5294 | err = -EIO; | |
3b951516 CM |
5295 | free_extent_map(em); |
5296 | em = NULL; | |
5297 | } | |
5298 | } else { | |
5299 | free_extent_map(em); | |
5300 | em = existing; | |
e6dcd2dc | 5301 | err = 0; |
a52d9a80 | 5302 | } |
a52d9a80 | 5303 | } |
890871be | 5304 | write_unlock(&em_tree->lock); |
a52d9a80 | 5305 | out: |
1abe9b8a | 5306 | |
5307 | trace_btrfs_get_extent(root, em); | |
5308 | ||
f421950f CM |
5309 | if (path) |
5310 | btrfs_free_path(path); | |
a52d9a80 CM |
5311 | if (trans) { |
5312 | ret = btrfs_end_transaction(trans, root); | |
d397712b | 5313 | if (!err) |
a52d9a80 CM |
5314 | err = ret; |
5315 | } | |
a52d9a80 CM |
5316 | if (err) { |
5317 | free_extent_map(em); | |
a52d9a80 CM |
5318 | return ERR_PTR(err); |
5319 | } | |
5320 | return em; | |
5321 | } | |
5322 | ||
ec29ed5b CM |
5323 | struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page, |
5324 | size_t pg_offset, u64 start, u64 len, | |
5325 | int create) | |
5326 | { | |
5327 | struct extent_map *em; | |
5328 | struct extent_map *hole_em = NULL; | |
5329 | u64 range_start = start; | |
5330 | u64 end; | |
5331 | u64 found; | |
5332 | u64 found_end; | |
5333 | int err = 0; | |
5334 | ||
5335 | em = btrfs_get_extent(inode, page, pg_offset, start, len, create); | |
5336 | if (IS_ERR(em)) | |
5337 | return em; | |
5338 | if (em) { | |
5339 | /* | |
5340 | * if our em maps to a hole, there might | |
5341 | * actually be delalloc bytes behind it | |
5342 | */ | |
5343 | if (em->block_start != EXTENT_MAP_HOLE) | |
5344 | return em; | |
5345 | else | |
5346 | hole_em = em; | |
5347 | } | |
5348 | ||
5349 | /* check to see if we've wrapped (len == -1 or similar) */ | |
5350 | end = start + len; | |
5351 | if (end < start) | |
5352 | end = (u64)-1; | |
5353 | else | |
5354 | end -= 1; | |
5355 | ||
5356 | em = NULL; | |
5357 | ||
5358 | /* ok, we didn't find anything, lets look for delalloc */ | |
5359 | found = count_range_bits(&BTRFS_I(inode)->io_tree, &range_start, | |
5360 | end, len, EXTENT_DELALLOC, 1); | |
5361 | found_end = range_start + found; | |
5362 | if (found_end < range_start) | |
5363 | found_end = (u64)-1; | |
5364 | ||
5365 | /* | |
5366 | * we didn't find anything useful, return | |
5367 | * the original results from get_extent() | |
5368 | */ | |
5369 | if (range_start > end || found_end <= start) { | |
5370 | em = hole_em; | |
5371 | hole_em = NULL; | |
5372 | goto out; | |
5373 | } | |
5374 | ||
5375 | /* adjust the range_start to make sure it doesn't | |
5376 | * go backwards from the start they passed in | |
5377 | */ | |
5378 | range_start = max(start,range_start); | |
5379 | found = found_end - range_start; | |
5380 | ||
5381 | if (found > 0) { | |
5382 | u64 hole_start = start; | |
5383 | u64 hole_len = len; | |
5384 | ||
5385 | em = alloc_extent_map(GFP_NOFS); | |
5386 | if (!em) { | |
5387 | err = -ENOMEM; | |
5388 | goto out; | |
5389 | } | |
5390 | /* | |
5391 | * when btrfs_get_extent can't find anything it | |
5392 | * returns one huge hole | |
5393 | * | |
5394 | * make sure what it found really fits our range, and | |
5395 | * adjust to make sure it is based on the start from | |
5396 | * the caller | |
5397 | */ | |
5398 | if (hole_em) { | |
5399 | u64 calc_end = extent_map_end(hole_em); | |
5400 | ||
5401 | if (calc_end <= start || (hole_em->start > end)) { | |
5402 | free_extent_map(hole_em); | |
5403 | hole_em = NULL; | |
5404 | } else { | |
5405 | hole_start = max(hole_em->start, start); | |
5406 | hole_len = calc_end - hole_start; | |
5407 | } | |
5408 | } | |
5409 | em->bdev = NULL; | |
5410 | if (hole_em && range_start > hole_start) { | |
5411 | /* our hole starts before our delalloc, so we | |
5412 | * have to return just the parts of the hole | |
5413 | * that go until the delalloc starts | |
5414 | */ | |
5415 | em->len = min(hole_len, | |
5416 | range_start - hole_start); | |
5417 | em->start = hole_start; | |
5418 | em->orig_start = hole_start; | |
5419 | /* | |
5420 | * don't adjust block start at all, | |
5421 | * it is fixed at EXTENT_MAP_HOLE | |
5422 | */ | |
5423 | em->block_start = hole_em->block_start; | |
5424 | em->block_len = hole_len; | |
5425 | } else { | |
5426 | em->start = range_start; | |
5427 | em->len = found; | |
5428 | em->orig_start = range_start; | |
5429 | em->block_start = EXTENT_MAP_DELALLOC; | |
5430 | em->block_len = found; | |
5431 | } | |
5432 | } else if (hole_em) { | |
5433 | return hole_em; | |
5434 | } | |
5435 | out: | |
5436 | ||
5437 | free_extent_map(hole_em); | |
5438 | if (err) { | |
5439 | free_extent_map(em); | |
5440 | return ERR_PTR(err); | |
5441 | } | |
5442 | return em; | |
5443 | } | |
5444 | ||
4b46fce2 | 5445 | static struct extent_map *btrfs_new_extent_direct(struct inode *inode, |
16d299ac | 5446 | struct extent_map *em, |
4b46fce2 JB |
5447 | u64 start, u64 len) |
5448 | { | |
5449 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
5450 | struct btrfs_trans_handle *trans; | |
4b46fce2 JB |
5451 | struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; |
5452 | struct btrfs_key ins; | |
5453 | u64 alloc_hint; | |
5454 | int ret; | |
16d299ac | 5455 | bool insert = false; |
4b46fce2 | 5456 | |
16d299ac JB |
5457 | /* |
5458 | * Ok if the extent map we looked up is a hole and is for the exact | |
5459 | * range we want, there is no reason to allocate a new one, however if | |
5460 | * it is not right then we need to free this one and drop the cache for | |
5461 | * our range. | |
5462 | */ | |
5463 | if (em->block_start != EXTENT_MAP_HOLE || em->start != start || | |
5464 | em->len != len) { | |
5465 | free_extent_map(em); | |
5466 | em = NULL; | |
5467 | insert = true; | |
5468 | btrfs_drop_extent_cache(inode, start, start + len - 1, 0); | |
5469 | } | |
4b46fce2 JB |
5470 | |
5471 | trans = btrfs_join_transaction(root, 0); | |
3612b495 TI |
5472 | if (IS_ERR(trans)) |
5473 | return ERR_CAST(trans); | |
4b46fce2 JB |
5474 | |
5475 | trans->block_rsv = &root->fs_info->delalloc_block_rsv; | |
5476 | ||
5477 | alloc_hint = get_extent_allocation_hint(inode, start, len); | |
5478 | ret = btrfs_reserve_extent(trans, root, len, root->sectorsize, 0, | |
5479 | alloc_hint, (u64)-1, &ins, 1); | |
5480 | if (ret) { | |
5481 | em = ERR_PTR(ret); | |
5482 | goto out; | |
5483 | } | |
5484 | ||
4b46fce2 | 5485 | if (!em) { |
16d299ac JB |
5486 | em = alloc_extent_map(GFP_NOFS); |
5487 | if (!em) { | |
5488 | em = ERR_PTR(-ENOMEM); | |
5489 | goto out; | |
5490 | } | |
4b46fce2 JB |
5491 | } |
5492 | ||
5493 | em->start = start; | |
5494 | em->orig_start = em->start; | |
5495 | em->len = ins.offset; | |
5496 | ||
5497 | em->block_start = ins.objectid; | |
5498 | em->block_len = ins.offset; | |
5499 | em->bdev = root->fs_info->fs_devices->latest_bdev; | |
16d299ac JB |
5500 | |
5501 | /* | |
5502 | * We need to do this because if we're using the original em we searched | |
5503 | * for, we could have EXTENT_FLAG_VACANCY set, and we don't want that. | |
5504 | */ | |
5505 | em->flags = 0; | |
4b46fce2 JB |
5506 | set_bit(EXTENT_FLAG_PINNED, &em->flags); |
5507 | ||
16d299ac | 5508 | while (insert) { |
4b46fce2 JB |
5509 | write_lock(&em_tree->lock); |
5510 | ret = add_extent_mapping(em_tree, em); | |
5511 | write_unlock(&em_tree->lock); | |
5512 | if (ret != -EEXIST) | |
5513 | break; | |
5514 | btrfs_drop_extent_cache(inode, start, start + em->len - 1, 0); | |
5515 | } | |
5516 | ||
5517 | ret = btrfs_add_ordered_extent_dio(inode, start, ins.objectid, | |
5518 | ins.offset, ins.offset, 0); | |
5519 | if (ret) { | |
5520 | btrfs_free_reserved_extent(root, ins.objectid, ins.offset); | |
5521 | em = ERR_PTR(ret); | |
5522 | } | |
5523 | out: | |
5524 | btrfs_end_transaction(trans, root); | |
5525 | return em; | |
5526 | } | |
5527 | ||
46bfbb5c CM |
5528 | /* |
5529 | * returns 1 when the nocow is safe, < 1 on error, 0 if the | |
5530 | * block must be cow'd | |
5531 | */ | |
5532 | static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans, | |
5533 | struct inode *inode, u64 offset, u64 len) | |
5534 | { | |
5535 | struct btrfs_path *path; | |
5536 | int ret; | |
5537 | struct extent_buffer *leaf; | |
5538 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
5539 | struct btrfs_file_extent_item *fi; | |
5540 | struct btrfs_key key; | |
5541 | u64 disk_bytenr; | |
5542 | u64 backref_offset; | |
5543 | u64 extent_end; | |
5544 | u64 num_bytes; | |
5545 | int slot; | |
5546 | int found_type; | |
5547 | ||
5548 | path = btrfs_alloc_path(); | |
5549 | if (!path) | |
5550 | return -ENOMEM; | |
5551 | ||
5552 | ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino, | |
5553 | offset, 0); | |
5554 | if (ret < 0) | |
5555 | goto out; | |
5556 | ||
5557 | slot = path->slots[0]; | |
5558 | if (ret == 1) { | |
5559 | if (slot == 0) { | |
5560 | /* can't find the item, must cow */ | |
5561 | ret = 0; | |
5562 | goto out; | |
5563 | } | |
5564 | slot--; | |
5565 | } | |
5566 | ret = 0; | |
5567 | leaf = path->nodes[0]; | |
5568 | btrfs_item_key_to_cpu(leaf, &key, slot); | |
5569 | if (key.objectid != inode->i_ino || | |
5570 | key.type != BTRFS_EXTENT_DATA_KEY) { | |
5571 | /* not our file or wrong item type, must cow */ | |
5572 | goto out; | |
5573 | } | |
5574 | ||
5575 | if (key.offset > offset) { | |
5576 | /* Wrong offset, must cow */ | |
5577 | goto out; | |
5578 | } | |
5579 | ||
5580 | fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); | |
5581 | found_type = btrfs_file_extent_type(leaf, fi); | |
5582 | if (found_type != BTRFS_FILE_EXTENT_REG && | |
5583 | found_type != BTRFS_FILE_EXTENT_PREALLOC) { | |
5584 | /* not a regular extent, must cow */ | |
5585 | goto out; | |
5586 | } | |
5587 | disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); | |
5588 | backref_offset = btrfs_file_extent_offset(leaf, fi); | |
5589 | ||
5590 | extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi); | |
5591 | if (extent_end < offset + len) { | |
5592 | /* extent doesn't include our full range, must cow */ | |
5593 | goto out; | |
5594 | } | |
5595 | ||
5596 | if (btrfs_extent_readonly(root, disk_bytenr)) | |
5597 | goto out; | |
5598 | ||
5599 | /* | |
5600 | * look for other files referencing this extent, if we | |
5601 | * find any we must cow | |
5602 | */ | |
5603 | if (btrfs_cross_ref_exist(trans, root, inode->i_ino, | |
5604 | key.offset - backref_offset, disk_bytenr)) | |
5605 | goto out; | |
5606 | ||
5607 | /* | |
5608 | * adjust disk_bytenr and num_bytes to cover just the bytes | |
5609 | * in this extent we are about to write. If there | |
5610 | * are any csums in that range we have to cow in order | |
5611 | * to keep the csums correct | |
5612 | */ | |
5613 | disk_bytenr += backref_offset; | |
5614 | disk_bytenr += offset - key.offset; | |
5615 | num_bytes = min(offset + len, extent_end) - offset; | |
5616 | if (csum_exist_in_range(root, disk_bytenr, num_bytes)) | |
5617 | goto out; | |
5618 | /* | |
5619 | * all of the above have passed, it is safe to overwrite this extent | |
5620 | * without cow | |
5621 | */ | |
5622 | ret = 1; | |
5623 | out: | |
5624 | btrfs_free_path(path); | |
5625 | return ret; | |
5626 | } | |
5627 | ||
4b46fce2 JB |
5628 | static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, |
5629 | struct buffer_head *bh_result, int create) | |
5630 | { | |
5631 | struct extent_map *em; | |
5632 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
5633 | u64 start = iblock << inode->i_blkbits; | |
5634 | u64 len = bh_result->b_size; | |
46bfbb5c | 5635 | struct btrfs_trans_handle *trans; |
4b46fce2 JB |
5636 | |
5637 | em = btrfs_get_extent(inode, NULL, 0, start, len, 0); | |
5638 | if (IS_ERR(em)) | |
5639 | return PTR_ERR(em); | |
5640 | ||
5641 | /* | |
5642 | * Ok for INLINE and COMPRESSED extents we need to fallback on buffered | |
5643 | * io. INLINE is special, and we could probably kludge it in here, but | |
5644 | * it's still buffered so for safety lets just fall back to the generic | |
5645 | * buffered path. | |
5646 | * | |
5647 | * For COMPRESSED we _have_ to read the entire extent in so we can | |
5648 | * decompress it, so there will be buffering required no matter what we | |
5649 | * do, so go ahead and fallback to buffered. | |
5650 | * | |
5651 | * We return -ENOTBLK because thats what makes DIO go ahead and go back | |
5652 | * to buffered IO. Don't blame me, this is the price we pay for using | |
5653 | * the generic code. | |
5654 | */ | |
5655 | if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) || | |
5656 | em->block_start == EXTENT_MAP_INLINE) { | |
5657 | free_extent_map(em); | |
5658 | return -ENOTBLK; | |
5659 | } | |
5660 | ||
5661 | /* Just a good old fashioned hole, return */ | |
5662 | if (!create && (em->block_start == EXTENT_MAP_HOLE || | |
5663 | test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) { | |
5664 | free_extent_map(em); | |
5665 | /* DIO will do one hole at a time, so just unlock a sector */ | |
5666 | unlock_extent(&BTRFS_I(inode)->io_tree, start, | |
5667 | start + root->sectorsize - 1, GFP_NOFS); | |
5668 | return 0; | |
5669 | } | |
5670 | ||
5671 | /* | |
5672 | * We don't allocate a new extent in the following cases | |
5673 | * | |
5674 | * 1) The inode is marked as NODATACOW. In this case we'll just use the | |
5675 | * existing extent. | |
5676 | * 2) The extent is marked as PREALLOC. We're good to go here and can | |
5677 | * just use the extent. | |
5678 | * | |
5679 | */ | |
46bfbb5c CM |
5680 | if (!create) { |
5681 | len = em->len - (start - em->start); | |
4b46fce2 | 5682 | goto map; |
46bfbb5c | 5683 | } |
4b46fce2 JB |
5684 | |
5685 | if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) || | |
5686 | ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) && | |
5687 | em->block_start != EXTENT_MAP_HOLE)) { | |
4b46fce2 JB |
5688 | int type; |
5689 | int ret; | |
46bfbb5c | 5690 | u64 block_start; |
4b46fce2 JB |
5691 | |
5692 | if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) | |
5693 | type = BTRFS_ORDERED_PREALLOC; | |
5694 | else | |
5695 | type = BTRFS_ORDERED_NOCOW; | |
46bfbb5c | 5696 | len = min(len, em->len - (start - em->start)); |
4b46fce2 | 5697 | block_start = em->block_start + (start - em->start); |
46bfbb5c CM |
5698 | |
5699 | /* | |
5700 | * we're not going to log anything, but we do need | |
5701 | * to make sure the current transaction stays open | |
5702 | * while we look for nocow cross refs | |
5703 | */ | |
5704 | trans = btrfs_join_transaction(root, 0); | |
3612b495 | 5705 | if (IS_ERR(trans)) |
46bfbb5c CM |
5706 | goto must_cow; |
5707 | ||
5708 | if (can_nocow_odirect(trans, inode, start, len) == 1) { | |
5709 | ret = btrfs_add_ordered_extent_dio(inode, start, | |
5710 | block_start, len, len, type); | |
5711 | btrfs_end_transaction(trans, root); | |
5712 | if (ret) { | |
5713 | free_extent_map(em); | |
5714 | return ret; | |
5715 | } | |
5716 | goto unlock; | |
4b46fce2 | 5717 | } |
46bfbb5c | 5718 | btrfs_end_transaction(trans, root); |
4b46fce2 | 5719 | } |
46bfbb5c CM |
5720 | must_cow: |
5721 | /* | |
5722 | * this will cow the extent, reset the len in case we changed | |
5723 | * it above | |
5724 | */ | |
5725 | len = bh_result->b_size; | |
16d299ac | 5726 | em = btrfs_new_extent_direct(inode, em, start, len); |
46bfbb5c CM |
5727 | if (IS_ERR(em)) |
5728 | return PTR_ERR(em); | |
5729 | len = min(len, em->len - (start - em->start)); | |
5730 | unlock: | |
4845e44f CM |
5731 | clear_extent_bit(&BTRFS_I(inode)->io_tree, start, start + len - 1, |
5732 | EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DIRTY, 1, | |
5733 | 0, NULL, GFP_NOFS); | |
4b46fce2 JB |
5734 | map: |
5735 | bh_result->b_blocknr = (em->block_start + (start - em->start)) >> | |
5736 | inode->i_blkbits; | |
46bfbb5c | 5737 | bh_result->b_size = len; |
4b46fce2 JB |
5738 | bh_result->b_bdev = em->bdev; |
5739 | set_buffer_mapped(bh_result); | |
5740 | if (create && !test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) | |
5741 | set_buffer_new(bh_result); | |
5742 | ||
5743 | free_extent_map(em); | |
5744 | ||
5745 | return 0; | |
5746 | } | |
5747 | ||
5748 | struct btrfs_dio_private { | |
5749 | struct inode *inode; | |
5750 | u64 logical_offset; | |
5751 | u64 disk_bytenr; | |
5752 | u64 bytes; | |
5753 | u32 *csums; | |
5754 | void *private; | |
e65e1535 MX |
5755 | |
5756 | /* number of bios pending for this dio */ | |
5757 | atomic_t pending_bios; | |
5758 | ||
5759 | /* IO errors */ | |
5760 | int errors; | |
5761 | ||
5762 | struct bio *orig_bio; | |
4b46fce2 JB |
5763 | }; |
5764 | ||
5765 | static void btrfs_endio_direct_read(struct bio *bio, int err) | |
5766 | { | |
e65e1535 | 5767 | struct btrfs_dio_private *dip = bio->bi_private; |
4b46fce2 JB |
5768 | struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1; |
5769 | struct bio_vec *bvec = bio->bi_io_vec; | |
4b46fce2 JB |
5770 | struct inode *inode = dip->inode; |
5771 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
5772 | u64 start; | |
5773 | u32 *private = dip->csums; | |
5774 | ||
5775 | start = dip->logical_offset; | |
5776 | do { | |
5777 | if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) { | |
5778 | struct page *page = bvec->bv_page; | |
5779 | char *kaddr; | |
5780 | u32 csum = ~(u32)0; | |
5781 | unsigned long flags; | |
5782 | ||
5783 | local_irq_save(flags); | |
5784 | kaddr = kmap_atomic(page, KM_IRQ0); | |
5785 | csum = btrfs_csum_data(root, kaddr + bvec->bv_offset, | |
5786 | csum, bvec->bv_len); | |
5787 | btrfs_csum_final(csum, (char *)&csum); | |
5788 | kunmap_atomic(kaddr, KM_IRQ0); | |
5789 | local_irq_restore(flags); | |
5790 | ||
5791 | flush_dcache_page(bvec->bv_page); | |
5792 | if (csum != *private) { | |
5793 | printk(KERN_ERR "btrfs csum failed ino %lu off" | |
5794 | " %llu csum %u private %u\n", | |
5795 | inode->i_ino, (unsigned long long)start, | |
5796 | csum, *private); | |
5797 | err = -EIO; | |
5798 | } | |
5799 | } | |
5800 | ||
5801 | start += bvec->bv_len; | |
5802 | private++; | |
5803 | bvec++; | |
5804 | } while (bvec <= bvec_end); | |
5805 | ||
5806 | unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset, | |
5807 | dip->logical_offset + dip->bytes - 1, GFP_NOFS); | |
5808 | bio->bi_private = dip->private; | |
5809 | ||
5810 | kfree(dip->csums); | |
5811 | kfree(dip); | |
c0da7aa1 JB |
5812 | |
5813 | /* If we had a csum failure make sure to clear the uptodate flag */ | |
5814 | if (err) | |
5815 | clear_bit(BIO_UPTODATE, &bio->bi_flags); | |
4b46fce2 JB |
5816 | dio_end_io(bio, err); |
5817 | } | |
5818 | ||
5819 | static void btrfs_endio_direct_write(struct bio *bio, int err) | |
5820 | { | |
5821 | struct btrfs_dio_private *dip = bio->bi_private; | |
5822 | struct inode *inode = dip->inode; | |
5823 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
5824 | struct btrfs_trans_handle *trans; | |
5825 | struct btrfs_ordered_extent *ordered = NULL; | |
5826 | struct extent_state *cached_state = NULL; | |
163cf09c CM |
5827 | u64 ordered_offset = dip->logical_offset; |
5828 | u64 ordered_bytes = dip->bytes; | |
4b46fce2 JB |
5829 | int ret; |
5830 | ||
5831 | if (err) | |
5832 | goto out_done; | |
163cf09c CM |
5833 | again: |
5834 | ret = btrfs_dec_test_first_ordered_pending(inode, &ordered, | |
5835 | &ordered_offset, | |
5836 | ordered_bytes); | |
4b46fce2 | 5837 | if (!ret) |
163cf09c | 5838 | goto out_test; |
4b46fce2 JB |
5839 | |
5840 | BUG_ON(!ordered); | |
5841 | ||
5842 | trans = btrfs_join_transaction(root, 1); | |
3612b495 | 5843 | if (IS_ERR(trans)) { |
4b46fce2 JB |
5844 | err = -ENOMEM; |
5845 | goto out; | |
5846 | } | |
5847 | trans->block_rsv = &root->fs_info->delalloc_block_rsv; | |
5848 | ||
5849 | if (test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) { | |
5850 | ret = btrfs_ordered_update_i_size(inode, 0, ordered); | |
5851 | if (!ret) | |
5852 | ret = btrfs_update_inode(trans, root, inode); | |
5853 | err = ret; | |
5854 | goto out; | |
5855 | } | |
5856 | ||
5857 | lock_extent_bits(&BTRFS_I(inode)->io_tree, ordered->file_offset, | |
5858 | ordered->file_offset + ordered->len - 1, 0, | |
5859 | &cached_state, GFP_NOFS); | |
5860 | ||
5861 | if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) { | |
5862 | ret = btrfs_mark_extent_written(trans, inode, | |
5863 | ordered->file_offset, | |
5864 | ordered->file_offset + | |
5865 | ordered->len); | |
5866 | if (ret) { | |
5867 | err = ret; | |
5868 | goto out_unlock; | |
5869 | } | |
5870 | } else { | |
5871 | ret = insert_reserved_file_extent(trans, inode, | |
5872 | ordered->file_offset, | |
5873 | ordered->start, | |
5874 | ordered->disk_len, | |
5875 | ordered->len, | |
5876 | ordered->len, | |
5877 | 0, 0, 0, | |
5878 | BTRFS_FILE_EXTENT_REG); | |
5879 | unpin_extent_cache(&BTRFS_I(inode)->extent_tree, | |
5880 | ordered->file_offset, ordered->len); | |
5881 | if (ret) { | |
5882 | err = ret; | |
5883 | WARN_ON(1); | |
5884 | goto out_unlock; | |
5885 | } | |
5886 | } | |
5887 | ||
5888 | add_pending_csums(trans, inode, ordered->file_offset, &ordered->list); | |
1ef30be1 JB |
5889 | ret = btrfs_ordered_update_i_size(inode, 0, ordered); |
5890 | if (!ret) | |
5891 | btrfs_update_inode(trans, root, inode); | |
5892 | ret = 0; | |
4b46fce2 JB |
5893 | out_unlock: |
5894 | unlock_extent_cached(&BTRFS_I(inode)->io_tree, ordered->file_offset, | |
5895 | ordered->file_offset + ordered->len - 1, | |
5896 | &cached_state, GFP_NOFS); | |
5897 | out: | |
5898 | btrfs_delalloc_release_metadata(inode, ordered->len); | |
5899 | btrfs_end_transaction(trans, root); | |
163cf09c | 5900 | ordered_offset = ordered->file_offset + ordered->len; |
4b46fce2 JB |
5901 | btrfs_put_ordered_extent(ordered); |
5902 | btrfs_put_ordered_extent(ordered); | |
163cf09c CM |
5903 | |
5904 | out_test: | |
5905 | /* | |
5906 | * our bio might span multiple ordered extents. If we haven't | |
5907 | * completed the accounting for the whole dio, go back and try again | |
5908 | */ | |
5909 | if (ordered_offset < dip->logical_offset + dip->bytes) { | |
5910 | ordered_bytes = dip->logical_offset + dip->bytes - | |
5911 | ordered_offset; | |
5912 | goto again; | |
5913 | } | |
4b46fce2 JB |
5914 | out_done: |
5915 | bio->bi_private = dip->private; | |
5916 | ||
5917 | kfree(dip->csums); | |
5918 | kfree(dip); | |
c0da7aa1 JB |
5919 | |
5920 | /* If we had an error make sure to clear the uptodate flag */ | |
5921 | if (err) | |
5922 | clear_bit(BIO_UPTODATE, &bio->bi_flags); | |
4b46fce2 JB |
5923 | dio_end_io(bio, err); |
5924 | } | |
5925 | ||
eaf25d93 CM |
5926 | static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw, |
5927 | struct bio *bio, int mirror_num, | |
5928 | unsigned long bio_flags, u64 offset) | |
5929 | { | |
5930 | int ret; | |
5931 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
5932 | ret = btrfs_csum_one_bio(root, inode, bio, offset, 1); | |
5933 | BUG_ON(ret); | |
5934 | return 0; | |
5935 | } | |
5936 | ||
e65e1535 MX |
5937 | static void btrfs_end_dio_bio(struct bio *bio, int err) |
5938 | { | |
5939 | struct btrfs_dio_private *dip = bio->bi_private; | |
5940 | ||
5941 | if (err) { | |
5942 | printk(KERN_ERR "btrfs direct IO failed ino %lu rw %lu " | |
3dd1462e JB |
5943 | "sector %#Lx len %u err no %d\n", |
5944 | dip->inode->i_ino, bio->bi_rw, | |
5945 | (unsigned long long)bio->bi_sector, bio->bi_size, err); | |
e65e1535 MX |
5946 | dip->errors = 1; |
5947 | ||
5948 | /* | |
5949 | * before atomic variable goto zero, we must make sure | |
5950 | * dip->errors is perceived to be set. | |
5951 | */ | |
5952 | smp_mb__before_atomic_dec(); | |
5953 | } | |
5954 | ||
5955 | /* if there are more bios still pending for this dio, just exit */ | |
5956 | if (!atomic_dec_and_test(&dip->pending_bios)) | |
5957 | goto out; | |
5958 | ||
5959 | if (dip->errors) | |
5960 | bio_io_error(dip->orig_bio); | |
5961 | else { | |
5962 | set_bit(BIO_UPTODATE, &dip->orig_bio->bi_flags); | |
5963 | bio_endio(dip->orig_bio, 0); | |
5964 | } | |
5965 | out: | |
5966 | bio_put(bio); | |
5967 | } | |
5968 | ||
5969 | static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev, | |
5970 | u64 first_sector, gfp_t gfp_flags) | |
5971 | { | |
5972 | int nr_vecs = bio_get_nr_vecs(bdev); | |
5973 | return btrfs_bio_alloc(bdev, first_sector, nr_vecs, gfp_flags); | |
5974 | } | |
5975 | ||
5976 | static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode, | |
5977 | int rw, u64 file_offset, int skip_sum, | |
1ae39938 | 5978 | u32 *csums, int async_submit) |
e65e1535 MX |
5979 | { |
5980 | int write = rw & REQ_WRITE; | |
5981 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
5982 | int ret; | |
5983 | ||
5984 | bio_get(bio); | |
5985 | ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0); | |
5986 | if (ret) | |
5987 | goto err; | |
5988 | ||
1ae39938 JB |
5989 | if (skip_sum) |
5990 | goto map; | |
5991 | ||
5992 | if (write && async_submit) { | |
e65e1535 MX |
5993 | ret = btrfs_wq_submit_bio(root->fs_info, |
5994 | inode, rw, bio, 0, 0, | |
5995 | file_offset, | |
5996 | __btrfs_submit_bio_start_direct_io, | |
5997 | __btrfs_submit_bio_done); | |
5998 | goto err; | |
1ae39938 JB |
5999 | } else if (write) { |
6000 | /* | |
6001 | * If we aren't doing async submit, calculate the csum of the | |
6002 | * bio now. | |
6003 | */ | |
6004 | ret = btrfs_csum_one_bio(root, inode, bio, file_offset, 1); | |
6005 | if (ret) | |
6006 | goto err; | |
c2db1073 TI |
6007 | } else if (!skip_sum) { |
6008 | ret = btrfs_lookup_bio_sums_dio(root, inode, bio, | |
e65e1535 | 6009 | file_offset, csums); |
c2db1073 TI |
6010 | if (ret) |
6011 | goto err; | |
6012 | } | |
e65e1535 | 6013 | |
1ae39938 JB |
6014 | map: |
6015 | ret = btrfs_map_bio(root, rw, bio, 0, async_submit); | |
e65e1535 MX |
6016 | err: |
6017 | bio_put(bio); | |
6018 | return ret; | |
6019 | } | |
6020 | ||
6021 | static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip, | |
6022 | int skip_sum) | |
6023 | { | |
6024 | struct inode *inode = dip->inode; | |
6025 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
6026 | struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree; | |
6027 | struct bio *bio; | |
6028 | struct bio *orig_bio = dip->orig_bio; | |
6029 | struct bio_vec *bvec = orig_bio->bi_io_vec; | |
6030 | u64 start_sector = orig_bio->bi_sector; | |
6031 | u64 file_offset = dip->logical_offset; | |
6032 | u64 submit_len = 0; | |
6033 | u64 map_length; | |
6034 | int nr_pages = 0; | |
6035 | u32 *csums = dip->csums; | |
6036 | int ret = 0; | |
1ae39938 | 6037 | int async_submit = 0; |
98bc3149 | 6038 | int write = rw & REQ_WRITE; |
e65e1535 | 6039 | |
e65e1535 MX |
6040 | map_length = orig_bio->bi_size; |
6041 | ret = btrfs_map_block(map_tree, READ, start_sector << 9, | |
6042 | &map_length, NULL, 0); | |
6043 | if (ret) { | |
64728bbb | 6044 | bio_put(orig_bio); |
e65e1535 MX |
6045 | return -EIO; |
6046 | } | |
6047 | ||
02f57c7a JB |
6048 | if (map_length >= orig_bio->bi_size) { |
6049 | bio = orig_bio; | |
6050 | goto submit; | |
6051 | } | |
6052 | ||
1ae39938 | 6053 | async_submit = 1; |
02f57c7a JB |
6054 | bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS); |
6055 | if (!bio) | |
6056 | return -ENOMEM; | |
6057 | bio->bi_private = dip; | |
6058 | bio->bi_end_io = btrfs_end_dio_bio; | |
6059 | atomic_inc(&dip->pending_bios); | |
6060 | ||
e65e1535 MX |
6061 | while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) { |
6062 | if (unlikely(map_length < submit_len + bvec->bv_len || | |
6063 | bio_add_page(bio, bvec->bv_page, bvec->bv_len, | |
6064 | bvec->bv_offset) < bvec->bv_len)) { | |
6065 | /* | |
6066 | * inc the count before we submit the bio so | |
6067 | * we know the end IO handler won't happen before | |
6068 | * we inc the count. Otherwise, the dip might get freed | |
6069 | * before we're done setting it up | |
6070 | */ | |
6071 | atomic_inc(&dip->pending_bios); | |
6072 | ret = __btrfs_submit_dio_bio(bio, inode, rw, | |
6073 | file_offset, skip_sum, | |
1ae39938 | 6074 | csums, async_submit); |
e65e1535 MX |
6075 | if (ret) { |
6076 | bio_put(bio); | |
6077 | atomic_dec(&dip->pending_bios); | |
6078 | goto out_err; | |
6079 | } | |
6080 | ||
98bc3149 JB |
6081 | /* Write's use the ordered csums */ |
6082 | if (!write && !skip_sum) | |
e65e1535 MX |
6083 | csums = csums + nr_pages; |
6084 | start_sector += submit_len >> 9; | |
6085 | file_offset += submit_len; | |
6086 | ||
6087 | submit_len = 0; | |
6088 | nr_pages = 0; | |
6089 | ||
6090 | bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, | |
6091 | start_sector, GFP_NOFS); | |
6092 | if (!bio) | |
6093 | goto out_err; | |
6094 | bio->bi_private = dip; | |
6095 | bio->bi_end_io = btrfs_end_dio_bio; | |
6096 | ||
6097 | map_length = orig_bio->bi_size; | |
6098 | ret = btrfs_map_block(map_tree, READ, start_sector << 9, | |
6099 | &map_length, NULL, 0); | |
6100 | if (ret) { | |
6101 | bio_put(bio); | |
6102 | goto out_err; | |
6103 | } | |
6104 | } else { | |
6105 | submit_len += bvec->bv_len; | |
6106 | nr_pages ++; | |
6107 | bvec++; | |
6108 | } | |
6109 | } | |
6110 | ||
02f57c7a | 6111 | submit: |
e65e1535 | 6112 | ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum, |
1ae39938 | 6113 | csums, async_submit); |
e65e1535 MX |
6114 | if (!ret) |
6115 | return 0; | |
6116 | ||
6117 | bio_put(bio); | |
6118 | out_err: | |
6119 | dip->errors = 1; | |
6120 | /* | |
6121 | * before atomic variable goto zero, we must | |
6122 | * make sure dip->errors is perceived to be set. | |
6123 | */ | |
6124 | smp_mb__before_atomic_dec(); | |
6125 | if (atomic_dec_and_test(&dip->pending_bios)) | |
6126 | bio_io_error(dip->orig_bio); | |
6127 | ||
6128 | /* bio_end_io() will handle error, so we needn't return it */ | |
6129 | return 0; | |
6130 | } | |
6131 | ||
4b46fce2 JB |
6132 | static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode, |
6133 | loff_t file_offset) | |
6134 | { | |
6135 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
6136 | struct btrfs_dio_private *dip; | |
6137 | struct bio_vec *bvec = bio->bi_io_vec; | |
4b46fce2 | 6138 | int skip_sum; |
7b6d91da | 6139 | int write = rw & REQ_WRITE; |
4b46fce2 JB |
6140 | int ret = 0; |
6141 | ||
6142 | skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; | |
6143 | ||
6144 | dip = kmalloc(sizeof(*dip), GFP_NOFS); | |
6145 | if (!dip) { | |
6146 | ret = -ENOMEM; | |
6147 | goto free_ordered; | |
6148 | } | |
6149 | dip->csums = NULL; | |
6150 | ||
98bc3149 JB |
6151 | /* Write's use the ordered csum stuff, so we don't need dip->csums */ |
6152 | if (!write && !skip_sum) { | |
4b46fce2 JB |
6153 | dip->csums = kmalloc(sizeof(u32) * bio->bi_vcnt, GFP_NOFS); |
6154 | if (!dip->csums) { | |
b4966b77 | 6155 | kfree(dip); |
4b46fce2 JB |
6156 | ret = -ENOMEM; |
6157 | goto free_ordered; | |
6158 | } | |
6159 | } | |
6160 | ||
6161 | dip->private = bio->bi_private; | |
6162 | dip->inode = inode; | |
6163 | dip->logical_offset = file_offset; | |
6164 | ||
4b46fce2 JB |
6165 | dip->bytes = 0; |
6166 | do { | |
6167 | dip->bytes += bvec->bv_len; | |
6168 | bvec++; | |
6169 | } while (bvec <= (bio->bi_io_vec + bio->bi_vcnt - 1)); | |
6170 | ||
46bfbb5c | 6171 | dip->disk_bytenr = (u64)bio->bi_sector << 9; |
4b46fce2 | 6172 | bio->bi_private = dip; |
e65e1535 MX |
6173 | dip->errors = 0; |
6174 | dip->orig_bio = bio; | |
6175 | atomic_set(&dip->pending_bios, 0); | |
4b46fce2 JB |
6176 | |
6177 | if (write) | |
6178 | bio->bi_end_io = btrfs_endio_direct_write; | |
6179 | else | |
6180 | bio->bi_end_io = btrfs_endio_direct_read; | |
6181 | ||
e65e1535 MX |
6182 | ret = btrfs_submit_direct_hook(rw, dip, skip_sum); |
6183 | if (!ret) | |
eaf25d93 | 6184 | return; |
4b46fce2 JB |
6185 | free_ordered: |
6186 | /* | |
6187 | * If this is a write, we need to clean up the reserved space and kill | |
6188 | * the ordered extent. | |
6189 | */ | |
6190 | if (write) { | |
6191 | struct btrfs_ordered_extent *ordered; | |
955256f2 | 6192 | ordered = btrfs_lookup_ordered_extent(inode, file_offset); |
4b46fce2 JB |
6193 | if (!test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags) && |
6194 | !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) | |
6195 | btrfs_free_reserved_extent(root, ordered->start, | |
6196 | ordered->disk_len); | |
6197 | btrfs_put_ordered_extent(ordered); | |
6198 | btrfs_put_ordered_extent(ordered); | |
6199 | } | |
6200 | bio_endio(bio, ret); | |
6201 | } | |
6202 | ||
5a5f79b5 CM |
6203 | static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *iocb, |
6204 | const struct iovec *iov, loff_t offset, | |
6205 | unsigned long nr_segs) | |
6206 | { | |
6207 | int seg; | |
a1b75f7d | 6208 | int i; |
5a5f79b5 CM |
6209 | size_t size; |
6210 | unsigned long addr; | |
6211 | unsigned blocksize_mask = root->sectorsize - 1; | |
6212 | ssize_t retval = -EINVAL; | |
6213 | loff_t end = offset; | |
6214 | ||
6215 | if (offset & blocksize_mask) | |
6216 | goto out; | |
6217 | ||
6218 | /* Check the memory alignment. Blocks cannot straddle pages */ | |
6219 | for (seg = 0; seg < nr_segs; seg++) { | |
6220 | addr = (unsigned long)iov[seg].iov_base; | |
6221 | size = iov[seg].iov_len; | |
6222 | end += size; | |
a1b75f7d | 6223 | if ((addr & blocksize_mask) || (size & blocksize_mask)) |
5a5f79b5 | 6224 | goto out; |
a1b75f7d JB |
6225 | |
6226 | /* If this is a write we don't need to check anymore */ | |
6227 | if (rw & WRITE) | |
6228 | continue; | |
6229 | ||
6230 | /* | |
6231 | * Check to make sure we don't have duplicate iov_base's in this | |
6232 | * iovec, if so return EINVAL, otherwise we'll get csum errors | |
6233 | * when reading back. | |
6234 | */ | |
6235 | for (i = seg + 1; i < nr_segs; i++) { | |
6236 | if (iov[seg].iov_base == iov[i].iov_base) | |
6237 | goto out; | |
6238 | } | |
5a5f79b5 CM |
6239 | } |
6240 | retval = 0; | |
6241 | out: | |
6242 | return retval; | |
6243 | } | |
16432985 CM |
6244 | static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb, |
6245 | const struct iovec *iov, loff_t offset, | |
6246 | unsigned long nr_segs) | |
6247 | { | |
4b46fce2 JB |
6248 | struct file *file = iocb->ki_filp; |
6249 | struct inode *inode = file->f_mapping->host; | |
6250 | struct btrfs_ordered_extent *ordered; | |
4845e44f | 6251 | struct extent_state *cached_state = NULL; |
4b46fce2 JB |
6252 | u64 lockstart, lockend; |
6253 | ssize_t ret; | |
4845e44f CM |
6254 | int writing = rw & WRITE; |
6255 | int write_bits = 0; | |
3f7c579c | 6256 | size_t count = iov_length(iov, nr_segs); |
4b46fce2 | 6257 | |
5a5f79b5 CM |
6258 | if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iov, |
6259 | offset, nr_segs)) { | |
6260 | return 0; | |
6261 | } | |
6262 | ||
4b46fce2 | 6263 | lockstart = offset; |
3f7c579c CM |
6264 | lockend = offset + count - 1; |
6265 | ||
6266 | if (writing) { | |
6267 | ret = btrfs_delalloc_reserve_space(inode, count); | |
6268 | if (ret) | |
6269 | goto out; | |
6270 | } | |
4845e44f | 6271 | |
4b46fce2 | 6272 | while (1) { |
4845e44f CM |
6273 | lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, |
6274 | 0, &cached_state, GFP_NOFS); | |
4b46fce2 JB |
6275 | /* |
6276 | * We're concerned with the entire range that we're going to be | |
6277 | * doing DIO to, so we need to make sure theres no ordered | |
6278 | * extents in this range. | |
6279 | */ | |
6280 | ordered = btrfs_lookup_ordered_range(inode, lockstart, | |
6281 | lockend - lockstart + 1); | |
6282 | if (!ordered) | |
6283 | break; | |
4845e44f CM |
6284 | unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend, |
6285 | &cached_state, GFP_NOFS); | |
4b46fce2 JB |
6286 | btrfs_start_ordered_extent(inode, ordered, 1); |
6287 | btrfs_put_ordered_extent(ordered); | |
6288 | cond_resched(); | |
6289 | } | |
6290 | ||
4845e44f CM |
6291 | /* |
6292 | * we don't use btrfs_set_extent_delalloc because we don't want | |
6293 | * the dirty or uptodate bits | |
6294 | */ | |
6295 | if (writing) { | |
6296 | write_bits = EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING; | |
6297 | ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend, | |
6298 | EXTENT_DELALLOC, 0, NULL, &cached_state, | |
6299 | GFP_NOFS); | |
6300 | if (ret) { | |
6301 | clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, | |
6302 | lockend, EXTENT_LOCKED | write_bits, | |
6303 | 1, 0, &cached_state, GFP_NOFS); | |
6304 | goto out; | |
6305 | } | |
6306 | } | |
6307 | ||
6308 | free_extent_state(cached_state); | |
6309 | cached_state = NULL; | |
6310 | ||
5a5f79b5 CM |
6311 | ret = __blockdev_direct_IO(rw, iocb, inode, |
6312 | BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev, | |
6313 | iov, offset, nr_segs, btrfs_get_blocks_direct, NULL, | |
6314 | btrfs_submit_direct, 0); | |
4b46fce2 JB |
6315 | |
6316 | if (ret < 0 && ret != -EIOCBQUEUED) { | |
4845e44f CM |
6317 | clear_extent_bit(&BTRFS_I(inode)->io_tree, offset, |
6318 | offset + iov_length(iov, nr_segs) - 1, | |
6319 | EXTENT_LOCKED | write_bits, 1, 0, | |
6320 | &cached_state, GFP_NOFS); | |
4b46fce2 JB |
6321 | } else if (ret >= 0 && ret < iov_length(iov, nr_segs)) { |
6322 | /* | |
6323 | * We're falling back to buffered, unlock the section we didn't | |
6324 | * do IO on. | |
6325 | */ | |
4845e44f CM |
6326 | clear_extent_bit(&BTRFS_I(inode)->io_tree, offset + ret, |
6327 | offset + iov_length(iov, nr_segs) - 1, | |
6328 | EXTENT_LOCKED | write_bits, 1, 0, | |
6329 | &cached_state, GFP_NOFS); | |
4b46fce2 | 6330 | } |
4845e44f CM |
6331 | out: |
6332 | free_extent_state(cached_state); | |
4b46fce2 | 6333 | return ret; |
16432985 CM |
6334 | } |
6335 | ||
1506fcc8 YS |
6336 | static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, |
6337 | __u64 start, __u64 len) | |
6338 | { | |
ec29ed5b | 6339 | return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent_fiemap); |
1506fcc8 YS |
6340 | } |
6341 | ||
a52d9a80 | 6342 | int btrfs_readpage(struct file *file, struct page *page) |
9ebefb18 | 6343 | { |
d1310b2e CM |
6344 | struct extent_io_tree *tree; |
6345 | tree = &BTRFS_I(page->mapping->host)->io_tree; | |
a52d9a80 | 6346 | return extent_read_full_page(tree, page, btrfs_get_extent); |
9ebefb18 | 6347 | } |
1832a6d5 | 6348 | |
a52d9a80 | 6349 | static int btrfs_writepage(struct page *page, struct writeback_control *wbc) |
39279cc3 | 6350 | { |
d1310b2e | 6351 | struct extent_io_tree *tree; |
b888db2b CM |
6352 | |
6353 | ||
6354 | if (current->flags & PF_MEMALLOC) { | |
6355 | redirty_page_for_writepage(wbc, page); | |
6356 | unlock_page(page); | |
6357 | return 0; | |
6358 | } | |
d1310b2e | 6359 | tree = &BTRFS_I(page->mapping->host)->io_tree; |
a52d9a80 | 6360 | return extent_write_full_page(tree, page, btrfs_get_extent, wbc); |
9ebefb18 CM |
6361 | } |
6362 | ||
f421950f CM |
6363 | int btrfs_writepages(struct address_space *mapping, |
6364 | struct writeback_control *wbc) | |
b293f02e | 6365 | { |
d1310b2e | 6366 | struct extent_io_tree *tree; |
771ed689 | 6367 | |
d1310b2e | 6368 | tree = &BTRFS_I(mapping->host)->io_tree; |
b293f02e CM |
6369 | return extent_writepages(tree, mapping, btrfs_get_extent, wbc); |
6370 | } | |
6371 | ||
3ab2fb5a CM |
6372 | static int |
6373 | btrfs_readpages(struct file *file, struct address_space *mapping, | |
6374 | struct list_head *pages, unsigned nr_pages) | |
6375 | { | |
d1310b2e CM |
6376 | struct extent_io_tree *tree; |
6377 | tree = &BTRFS_I(mapping->host)->io_tree; | |
3ab2fb5a CM |
6378 | return extent_readpages(tree, mapping, pages, nr_pages, |
6379 | btrfs_get_extent); | |
6380 | } | |
e6dcd2dc | 6381 | static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags) |
9ebefb18 | 6382 | { |
d1310b2e CM |
6383 | struct extent_io_tree *tree; |
6384 | struct extent_map_tree *map; | |
a52d9a80 | 6385 | int ret; |
8c2383c3 | 6386 | |
d1310b2e CM |
6387 | tree = &BTRFS_I(page->mapping->host)->io_tree; |
6388 | map = &BTRFS_I(page->mapping->host)->extent_tree; | |
70dec807 | 6389 | ret = try_release_extent_mapping(map, tree, page, gfp_flags); |
a52d9a80 CM |
6390 | if (ret == 1) { |
6391 | ClearPagePrivate(page); | |
6392 | set_page_private(page, 0); | |
6393 | page_cache_release(page); | |
39279cc3 | 6394 | } |
a52d9a80 | 6395 | return ret; |
39279cc3 CM |
6396 | } |
6397 | ||
e6dcd2dc CM |
6398 | static int btrfs_releasepage(struct page *page, gfp_t gfp_flags) |
6399 | { | |
98509cfc CM |
6400 | if (PageWriteback(page) || PageDirty(page)) |
6401 | return 0; | |
b335b003 | 6402 | return __btrfs_releasepage(page, gfp_flags & GFP_NOFS); |
e6dcd2dc CM |
6403 | } |
6404 | ||
a52d9a80 | 6405 | static void btrfs_invalidatepage(struct page *page, unsigned long offset) |
39279cc3 | 6406 | { |
d1310b2e | 6407 | struct extent_io_tree *tree; |
e6dcd2dc | 6408 | struct btrfs_ordered_extent *ordered; |
2ac55d41 | 6409 | struct extent_state *cached_state = NULL; |
e6dcd2dc CM |
6410 | u64 page_start = page_offset(page); |
6411 | u64 page_end = page_start + PAGE_CACHE_SIZE - 1; | |
39279cc3 | 6412 | |
8b62b72b CM |
6413 | |
6414 | /* | |
6415 | * we have the page locked, so new writeback can't start, | |
6416 | * and the dirty bit won't be cleared while we are here. | |
6417 | * | |
6418 | * Wait for IO on this page so that we can safely clear | |
6419 | * the PagePrivate2 bit and do ordered accounting | |
6420 | */ | |
e6dcd2dc | 6421 | wait_on_page_writeback(page); |
8b62b72b | 6422 | |
d1310b2e | 6423 | tree = &BTRFS_I(page->mapping->host)->io_tree; |
e6dcd2dc CM |
6424 | if (offset) { |
6425 | btrfs_releasepage(page, GFP_NOFS); | |
6426 | return; | |
6427 | } | |
2ac55d41 JB |
6428 | lock_extent_bits(tree, page_start, page_end, 0, &cached_state, |
6429 | GFP_NOFS); | |
e6dcd2dc CM |
6430 | ordered = btrfs_lookup_ordered_extent(page->mapping->host, |
6431 | page_offset(page)); | |
6432 | if (ordered) { | |
eb84ae03 CM |
6433 | /* |
6434 | * IO on this page will never be started, so we need | |
6435 | * to account for any ordered extents now | |
6436 | */ | |
e6dcd2dc CM |
6437 | clear_extent_bit(tree, page_start, page_end, |
6438 | EXTENT_DIRTY | EXTENT_DELALLOC | | |
32c00aff | 6439 | EXTENT_LOCKED | EXTENT_DO_ACCOUNTING, 1, 0, |
2ac55d41 | 6440 | &cached_state, GFP_NOFS); |
8b62b72b CM |
6441 | /* |
6442 | * whoever cleared the private bit is responsible | |
6443 | * for the finish_ordered_io | |
6444 | */ | |
6445 | if (TestClearPagePrivate2(page)) { | |
6446 | btrfs_finish_ordered_io(page->mapping->host, | |
6447 | page_start, page_end); | |
6448 | } | |
e6dcd2dc | 6449 | btrfs_put_ordered_extent(ordered); |
2ac55d41 JB |
6450 | cached_state = NULL; |
6451 | lock_extent_bits(tree, page_start, page_end, 0, &cached_state, | |
6452 | GFP_NOFS); | |
e6dcd2dc CM |
6453 | } |
6454 | clear_extent_bit(tree, page_start, page_end, | |
32c00aff | 6455 | EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC | |
2ac55d41 | 6456 | EXTENT_DO_ACCOUNTING, 1, 1, &cached_state, GFP_NOFS); |
e6dcd2dc CM |
6457 | __btrfs_releasepage(page, GFP_NOFS); |
6458 | ||
4a096752 | 6459 | ClearPageChecked(page); |
9ad6b7bc | 6460 | if (PagePrivate(page)) { |
9ad6b7bc CM |
6461 | ClearPagePrivate(page); |
6462 | set_page_private(page, 0); | |
6463 | page_cache_release(page); | |
6464 | } | |
39279cc3 CM |
6465 | } |
6466 | ||
9ebefb18 CM |
6467 | /* |
6468 | * btrfs_page_mkwrite() is not allowed to change the file size as it gets | |
6469 | * called from a page fault handler when a page is first dirtied. Hence we must | |
6470 | * be careful to check for EOF conditions here. We set the page up correctly | |
6471 | * for a written page which means we get ENOSPC checking when writing into | |
6472 | * holes and correct delalloc and unwritten extent mapping on filesystems that | |
6473 | * support these features. | |
6474 | * | |
6475 | * We are not allowed to take the i_mutex here so we have to play games to | |
6476 | * protect against truncate races as the page could now be beyond EOF. Because | |
6477 | * vmtruncate() writes the inode size before removing pages, once we have the | |
6478 | * page lock we can determine safely if the page is beyond EOF. If it is not | |
6479 | * beyond EOF, then the page is guaranteed safe against truncation until we | |
6480 | * unlock the page. | |
6481 | */ | |
c2ec175c | 6482 | int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) |
9ebefb18 | 6483 | { |
c2ec175c | 6484 | struct page *page = vmf->page; |
6da6abae | 6485 | struct inode *inode = fdentry(vma->vm_file)->d_inode; |
1832a6d5 | 6486 | struct btrfs_root *root = BTRFS_I(inode)->root; |
e6dcd2dc CM |
6487 | struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; |
6488 | struct btrfs_ordered_extent *ordered; | |
2ac55d41 | 6489 | struct extent_state *cached_state = NULL; |
e6dcd2dc CM |
6490 | char *kaddr; |
6491 | unsigned long zero_start; | |
9ebefb18 | 6492 | loff_t size; |
1832a6d5 | 6493 | int ret; |
a52d9a80 | 6494 | u64 page_start; |
e6dcd2dc | 6495 | u64 page_end; |
9ebefb18 | 6496 | |
0ca1f7ce | 6497 | ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE); |
56a76f82 NP |
6498 | if (ret) { |
6499 | if (ret == -ENOMEM) | |
6500 | ret = VM_FAULT_OOM; | |
6501 | else /* -ENOSPC, -EIO, etc */ | |
6502 | ret = VM_FAULT_SIGBUS; | |
1832a6d5 | 6503 | goto out; |
56a76f82 | 6504 | } |
1832a6d5 | 6505 | |
56a76f82 | 6506 | ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */ |
e6dcd2dc | 6507 | again: |
9ebefb18 | 6508 | lock_page(page); |
9ebefb18 | 6509 | size = i_size_read(inode); |
e6dcd2dc CM |
6510 | page_start = page_offset(page); |
6511 | page_end = page_start + PAGE_CACHE_SIZE - 1; | |
a52d9a80 | 6512 | |
9ebefb18 | 6513 | if ((page->mapping != inode->i_mapping) || |
e6dcd2dc | 6514 | (page_start >= size)) { |
9ebefb18 CM |
6515 | /* page got truncated out from underneath us */ |
6516 | goto out_unlock; | |
6517 | } | |
e6dcd2dc CM |
6518 | wait_on_page_writeback(page); |
6519 | ||
2ac55d41 JB |
6520 | lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state, |
6521 | GFP_NOFS); | |
e6dcd2dc CM |
6522 | set_page_extent_mapped(page); |
6523 | ||
eb84ae03 CM |
6524 | /* |
6525 | * we can't set the delalloc bits if there are pending ordered | |
6526 | * extents. Drop our locks and wait for them to finish | |
6527 | */ | |
e6dcd2dc CM |
6528 | ordered = btrfs_lookup_ordered_extent(inode, page_start); |
6529 | if (ordered) { | |
2ac55d41 JB |
6530 | unlock_extent_cached(io_tree, page_start, page_end, |
6531 | &cached_state, GFP_NOFS); | |
e6dcd2dc | 6532 | unlock_page(page); |
eb84ae03 | 6533 | btrfs_start_ordered_extent(inode, ordered, 1); |
e6dcd2dc CM |
6534 | btrfs_put_ordered_extent(ordered); |
6535 | goto again; | |
6536 | } | |
6537 | ||
fbf19087 JB |
6538 | /* |
6539 | * XXX - page_mkwrite gets called every time the page is dirtied, even | |
6540 | * if it was already dirty, so for space accounting reasons we need to | |
6541 | * clear any delalloc bits for the range we are fixing to save. There | |
6542 | * is probably a better way to do this, but for now keep consistent with | |
6543 | * prepare_pages in the normal write path. | |
6544 | */ | |
2ac55d41 | 6545 | clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end, |
32c00aff | 6546 | EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING, |
2ac55d41 | 6547 | 0, 0, &cached_state, GFP_NOFS); |
fbf19087 | 6548 | |
2ac55d41 JB |
6549 | ret = btrfs_set_extent_delalloc(inode, page_start, page_end, |
6550 | &cached_state); | |
9ed74f2d | 6551 | if (ret) { |
2ac55d41 JB |
6552 | unlock_extent_cached(io_tree, page_start, page_end, |
6553 | &cached_state, GFP_NOFS); | |
9ed74f2d JB |
6554 | ret = VM_FAULT_SIGBUS; |
6555 | goto out_unlock; | |
6556 | } | |
e6dcd2dc | 6557 | ret = 0; |
9ebefb18 CM |
6558 | |
6559 | /* page is wholly or partially inside EOF */ | |
a52d9a80 | 6560 | if (page_start + PAGE_CACHE_SIZE > size) |
e6dcd2dc | 6561 | zero_start = size & ~PAGE_CACHE_MASK; |
9ebefb18 | 6562 | else |
e6dcd2dc | 6563 | zero_start = PAGE_CACHE_SIZE; |
9ebefb18 | 6564 | |
e6dcd2dc CM |
6565 | if (zero_start != PAGE_CACHE_SIZE) { |
6566 | kaddr = kmap(page); | |
6567 | memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start); | |
6568 | flush_dcache_page(page); | |
6569 | kunmap(page); | |
6570 | } | |
247e743c | 6571 | ClearPageChecked(page); |
e6dcd2dc | 6572 | set_page_dirty(page); |
50a9b214 | 6573 | SetPageUptodate(page); |
5a3f23d5 | 6574 | |
257c62e1 CM |
6575 | BTRFS_I(inode)->last_trans = root->fs_info->generation; |
6576 | BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid; | |
6577 | ||
2ac55d41 | 6578 | unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS); |
9ebefb18 CM |
6579 | |
6580 | out_unlock: | |
50a9b214 CM |
6581 | if (!ret) |
6582 | return VM_FAULT_LOCKED; | |
9ebefb18 | 6583 | unlock_page(page); |
0ca1f7ce | 6584 | btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE); |
1832a6d5 | 6585 | out: |
9ebefb18 CM |
6586 | return ret; |
6587 | } | |
6588 | ||
a41ad394 | 6589 | static int btrfs_truncate(struct inode *inode) |
39279cc3 CM |
6590 | { |
6591 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
6592 | int ret; | |
3893e33b | 6593 | int err = 0; |
39279cc3 | 6594 | struct btrfs_trans_handle *trans; |
d3c2fdcf | 6595 | unsigned long nr; |
dbe674a9 | 6596 | u64 mask = root->sectorsize - 1; |
39279cc3 | 6597 | |
5d5e103a JB |
6598 | ret = btrfs_truncate_page(inode->i_mapping, inode->i_size); |
6599 | if (ret) | |
a41ad394 | 6600 | return ret; |
8082510e | 6601 | |
4a096752 | 6602 | btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1); |
8082510e | 6603 | btrfs_ordered_update_i_size(inode, inode->i_size, NULL); |
39279cc3 | 6604 | |
f0cd846e JB |
6605 | trans = btrfs_start_transaction(root, 5); |
6606 | if (IS_ERR(trans)) | |
6607 | return PTR_ERR(trans); | |
6608 | ||
6609 | btrfs_set_trans_block_group(trans, inode); | |
6610 | ||
6611 | ret = btrfs_orphan_add(trans, inode); | |
6612 | if (ret) { | |
6613 | btrfs_end_transaction(trans, root); | |
6614 | return ret; | |
6615 | } | |
6616 | ||
6617 | nr = trans->blocks_used; | |
6618 | btrfs_end_transaction(trans, root); | |
6619 | btrfs_btree_balance_dirty(root, nr); | |
6620 | ||
6621 | /* Now start a transaction for the truncate */ | |
d68fc57b | 6622 | trans = btrfs_start_transaction(root, 0); |
3893e33b JB |
6623 | if (IS_ERR(trans)) |
6624 | return PTR_ERR(trans); | |
8082510e | 6625 | btrfs_set_trans_block_group(trans, inode); |
d68fc57b | 6626 | trans->block_rsv = root->orphan_block_rsv; |
5a3f23d5 CM |
6627 | |
6628 | /* | |
6629 | * setattr is responsible for setting the ordered_data_close flag, | |
6630 | * but that is only tested during the last file release. That | |
6631 | * could happen well after the next commit, leaving a great big | |
6632 | * window where new writes may get lost if someone chooses to write | |
6633 | * to this file after truncating to zero | |
6634 | * | |
6635 | * The inode doesn't have any dirty data here, and so if we commit | |
6636 | * this is a noop. If someone immediately starts writing to the inode | |
6637 | * it is very likely we'll catch some of their writes in this | |
6638 | * transaction, and the commit will find this file on the ordered | |
6639 | * data list with good things to send down. | |
6640 | * | |
6641 | * This is a best effort solution, there is still a window where | |
6642 | * using truncate to replace the contents of the file will | |
6643 | * end up with a zero length file after a crash. | |
6644 | */ | |
6645 | if (inode->i_size == 0 && BTRFS_I(inode)->ordered_data_close) | |
6646 | btrfs_add_ordered_operation(trans, root, inode); | |
6647 | ||
8082510e | 6648 | while (1) { |
d68fc57b YZ |
6649 | if (!trans) { |
6650 | trans = btrfs_start_transaction(root, 0); | |
3893e33b JB |
6651 | if (IS_ERR(trans)) |
6652 | return PTR_ERR(trans); | |
d68fc57b YZ |
6653 | btrfs_set_trans_block_group(trans, inode); |
6654 | trans->block_rsv = root->orphan_block_rsv; | |
6655 | } | |
6656 | ||
6657 | ret = btrfs_block_rsv_check(trans, root, | |
6658 | root->orphan_block_rsv, 0, 5); | |
3893e33b | 6659 | if (ret == -EAGAIN) { |
d68fc57b | 6660 | ret = btrfs_commit_transaction(trans, root); |
3893e33b JB |
6661 | if (ret) |
6662 | return ret; | |
d68fc57b YZ |
6663 | trans = NULL; |
6664 | continue; | |
3893e33b JB |
6665 | } else if (ret) { |
6666 | err = ret; | |
6667 | break; | |
d68fc57b YZ |
6668 | } |
6669 | ||
8082510e YZ |
6670 | ret = btrfs_truncate_inode_items(trans, root, inode, |
6671 | inode->i_size, | |
6672 | BTRFS_EXTENT_DATA_KEY); | |
3893e33b JB |
6673 | if (ret != -EAGAIN) { |
6674 | err = ret; | |
8082510e | 6675 | break; |
3893e33b | 6676 | } |
39279cc3 | 6677 | |
8082510e | 6678 | ret = btrfs_update_inode(trans, root, inode); |
3893e33b JB |
6679 | if (ret) { |
6680 | err = ret; | |
6681 | break; | |
6682 | } | |
5f39d397 | 6683 | |
8082510e YZ |
6684 | nr = trans->blocks_used; |
6685 | btrfs_end_transaction(trans, root); | |
d68fc57b | 6686 | trans = NULL; |
8082510e | 6687 | btrfs_btree_balance_dirty(root, nr); |
8082510e YZ |
6688 | } |
6689 | ||
6690 | if (ret == 0 && inode->i_nlink > 0) { | |
6691 | ret = btrfs_orphan_del(trans, inode); | |
3893e33b JB |
6692 | if (ret) |
6693 | err = ret; | |
ded5db9d JB |
6694 | } else if (ret && inode->i_nlink > 0) { |
6695 | /* | |
6696 | * Failed to do the truncate, remove us from the in memory | |
6697 | * orphan list. | |
6698 | */ | |
6699 | ret = btrfs_orphan_del(NULL, inode); | |
8082510e YZ |
6700 | } |
6701 | ||
6702 | ret = btrfs_update_inode(trans, root, inode); | |
3893e33b JB |
6703 | if (ret && !err) |
6704 | err = ret; | |
7b128766 | 6705 | |
7b128766 | 6706 | nr = trans->blocks_used; |
89ce8a63 | 6707 | ret = btrfs_end_transaction_throttle(trans, root); |
3893e33b JB |
6708 | if (ret && !err) |
6709 | err = ret; | |
d3c2fdcf | 6710 | btrfs_btree_balance_dirty(root, nr); |
a41ad394 | 6711 | |
3893e33b | 6712 | return err; |
39279cc3 CM |
6713 | } |
6714 | ||
d352ac68 CM |
6715 | /* |
6716 | * create a new subvolume directory/inode (helper for the ioctl). | |
6717 | */ | |
d2fb3437 | 6718 | int btrfs_create_subvol_root(struct btrfs_trans_handle *trans, |
76dda93c | 6719 | struct btrfs_root *new_root, |
d2fb3437 | 6720 | u64 new_dirid, u64 alloc_hint) |
39279cc3 | 6721 | { |
39279cc3 | 6722 | struct inode *inode; |
76dda93c | 6723 | int err; |
00e4e6b3 | 6724 | u64 index = 0; |
39279cc3 | 6725 | |
aec7477b | 6726 | inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, new_dirid, |
d2fb3437 | 6727 | new_dirid, alloc_hint, S_IFDIR | 0700, &index); |
54aa1f4d | 6728 | if (IS_ERR(inode)) |
f46b5a66 | 6729 | return PTR_ERR(inode); |
39279cc3 CM |
6730 | inode->i_op = &btrfs_dir_inode_operations; |
6731 | inode->i_fop = &btrfs_dir_file_operations; | |
6732 | ||
39279cc3 | 6733 | inode->i_nlink = 1; |
dbe674a9 | 6734 | btrfs_i_size_write(inode, 0); |
3b96362c | 6735 | |
76dda93c YZ |
6736 | err = btrfs_update_inode(trans, new_root, inode); |
6737 | BUG_ON(err); | |
cb8e7090 | 6738 | |
76dda93c | 6739 | iput(inode); |
cb8e7090 | 6740 | return 0; |
39279cc3 CM |
6741 | } |
6742 | ||
d352ac68 CM |
6743 | /* helper function for file defrag and space balancing. This |
6744 | * forces readahead on a given range of bytes in an inode | |
6745 | */ | |
edbd8d4e | 6746 | unsigned long btrfs_force_ra(struct address_space *mapping, |
86479a04 CM |
6747 | struct file_ra_state *ra, struct file *file, |
6748 | pgoff_t offset, pgoff_t last_index) | |
6749 | { | |
8e7bf94f | 6750 | pgoff_t req_size = last_index - offset + 1; |
86479a04 | 6751 | |
86479a04 CM |
6752 | page_cache_sync_readahead(mapping, ra, file, offset, req_size); |
6753 | return offset + req_size; | |
86479a04 CM |
6754 | } |
6755 | ||
39279cc3 CM |
6756 | struct inode *btrfs_alloc_inode(struct super_block *sb) |
6757 | { | |
6758 | struct btrfs_inode *ei; | |
2ead6ae7 | 6759 | struct inode *inode; |
39279cc3 CM |
6760 | |
6761 | ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS); | |
6762 | if (!ei) | |
6763 | return NULL; | |
2ead6ae7 YZ |
6764 | |
6765 | ei->root = NULL; | |
6766 | ei->space_info = NULL; | |
6767 | ei->generation = 0; | |
6768 | ei->sequence = 0; | |
15ee9bc7 | 6769 | ei->last_trans = 0; |
257c62e1 | 6770 | ei->last_sub_trans = 0; |
e02119d5 | 6771 | ei->logged_trans = 0; |
2ead6ae7 YZ |
6772 | ei->delalloc_bytes = 0; |
6773 | ei->reserved_bytes = 0; | |
6774 | ei->disk_i_size = 0; | |
6775 | ei->flags = 0; | |
6776 | ei->index_cnt = (u64)-1; | |
6777 | ei->last_unlink_trans = 0; | |
6778 | ||
0ca1f7ce | 6779 | atomic_set(&ei->outstanding_extents, 0); |
57a45ced | 6780 | atomic_set(&ei->reserved_extents, 0); |
2ead6ae7 YZ |
6781 | |
6782 | ei->ordered_data_close = 0; | |
d68fc57b | 6783 | ei->orphan_meta_reserved = 0; |
2ead6ae7 | 6784 | ei->dummy_inode = 0; |
261507a0 | 6785 | ei->force_compress = BTRFS_COMPRESS_NONE; |
2ead6ae7 YZ |
6786 | |
6787 | inode = &ei->vfs_inode; | |
6788 | extent_map_tree_init(&ei->extent_tree, GFP_NOFS); | |
6789 | extent_io_tree_init(&ei->io_tree, &inode->i_data, GFP_NOFS); | |
6790 | extent_io_tree_init(&ei->io_failure_tree, &inode->i_data, GFP_NOFS); | |
6791 | mutex_init(&ei->log_mutex); | |
e6dcd2dc | 6792 | btrfs_ordered_inode_tree_init(&ei->ordered_tree); |
7b128766 | 6793 | INIT_LIST_HEAD(&ei->i_orphan); |
2ead6ae7 | 6794 | INIT_LIST_HEAD(&ei->delalloc_inodes); |
5a3f23d5 | 6795 | INIT_LIST_HEAD(&ei->ordered_operations); |
2ead6ae7 YZ |
6796 | RB_CLEAR_NODE(&ei->rb_node); |
6797 | ||
6798 | return inode; | |
39279cc3 CM |
6799 | } |
6800 | ||
fa0d7e3d NP |
6801 | static void btrfs_i_callback(struct rcu_head *head) |
6802 | { | |
6803 | struct inode *inode = container_of(head, struct inode, i_rcu); | |
6804 | INIT_LIST_HEAD(&inode->i_dentry); | |
6805 | kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode)); | |
6806 | } | |
6807 | ||
39279cc3 CM |
6808 | void btrfs_destroy_inode(struct inode *inode) |
6809 | { | |
e6dcd2dc | 6810 | struct btrfs_ordered_extent *ordered; |
5a3f23d5 CM |
6811 | struct btrfs_root *root = BTRFS_I(inode)->root; |
6812 | ||
39279cc3 CM |
6813 | WARN_ON(!list_empty(&inode->i_dentry)); |
6814 | WARN_ON(inode->i_data.nrpages); | |
0ca1f7ce | 6815 | WARN_ON(atomic_read(&BTRFS_I(inode)->outstanding_extents)); |
57a45ced | 6816 | WARN_ON(atomic_read(&BTRFS_I(inode)->reserved_extents)); |
39279cc3 | 6817 | |
a6dbd429 JB |
6818 | /* |
6819 | * This can happen where we create an inode, but somebody else also | |
6820 | * created the same inode and we need to destroy the one we already | |
6821 | * created. | |
6822 | */ | |
6823 | if (!root) | |
6824 | goto free; | |
6825 | ||
5a3f23d5 CM |
6826 | /* |
6827 | * Make sure we're properly removed from the ordered operation | |
6828 | * lists. | |
6829 | */ | |
6830 | smp_mb(); | |
6831 | if (!list_empty(&BTRFS_I(inode)->ordered_operations)) { | |
6832 | spin_lock(&root->fs_info->ordered_extent_lock); | |
6833 | list_del_init(&BTRFS_I(inode)->ordered_operations); | |
6834 | spin_unlock(&root->fs_info->ordered_extent_lock); | |
6835 | } | |
6836 | ||
0af3d00b JB |
6837 | if (root == root->fs_info->tree_root) { |
6838 | struct btrfs_block_group_cache *block_group; | |
6839 | ||
6840 | block_group = btrfs_lookup_block_group(root->fs_info, | |
6841 | BTRFS_I(inode)->block_group); | |
6842 | if (block_group && block_group->inode == inode) { | |
6843 | spin_lock(&block_group->lock); | |
6844 | block_group->inode = NULL; | |
6845 | spin_unlock(&block_group->lock); | |
6846 | btrfs_put_block_group(block_group); | |
6847 | } else if (block_group) { | |
6848 | btrfs_put_block_group(block_group); | |
6849 | } | |
6850 | } | |
6851 | ||
d68fc57b | 6852 | spin_lock(&root->orphan_lock); |
7b128766 | 6853 | if (!list_empty(&BTRFS_I(inode)->i_orphan)) { |
8082510e YZ |
6854 | printk(KERN_INFO "BTRFS: inode %lu still on the orphan list\n", |
6855 | inode->i_ino); | |
6856 | list_del_init(&BTRFS_I(inode)->i_orphan); | |
7b128766 | 6857 | } |
d68fc57b | 6858 | spin_unlock(&root->orphan_lock); |
7b128766 | 6859 | |
d397712b | 6860 | while (1) { |
e6dcd2dc CM |
6861 | ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1); |
6862 | if (!ordered) | |
6863 | break; | |
6864 | else { | |
d397712b CM |
6865 | printk(KERN_ERR "btrfs found ordered " |
6866 | "extent %llu %llu on inode cleanup\n", | |
6867 | (unsigned long long)ordered->file_offset, | |
6868 | (unsigned long long)ordered->len); | |
e6dcd2dc CM |
6869 | btrfs_remove_ordered_extent(inode, ordered); |
6870 | btrfs_put_ordered_extent(ordered); | |
6871 | btrfs_put_ordered_extent(ordered); | |
6872 | } | |
6873 | } | |
5d4f98a2 | 6874 | inode_tree_del(inode); |
5b21f2ed | 6875 | btrfs_drop_extent_cache(inode, 0, (u64)-1, 0); |
a6dbd429 | 6876 | free: |
fa0d7e3d | 6877 | call_rcu(&inode->i_rcu, btrfs_i_callback); |
39279cc3 CM |
6878 | } |
6879 | ||
45321ac5 | 6880 | int btrfs_drop_inode(struct inode *inode) |
76dda93c YZ |
6881 | { |
6882 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
45321ac5 | 6883 | |
0af3d00b JB |
6884 | if (btrfs_root_refs(&root->root_item) == 0 && |
6885 | root != root->fs_info->tree_root) | |
45321ac5 | 6886 | return 1; |
76dda93c | 6887 | else |
45321ac5 | 6888 | return generic_drop_inode(inode); |
76dda93c YZ |
6889 | } |
6890 | ||
0ee0fda0 | 6891 | static void init_once(void *foo) |
39279cc3 CM |
6892 | { |
6893 | struct btrfs_inode *ei = (struct btrfs_inode *) foo; | |
6894 | ||
6895 | inode_init_once(&ei->vfs_inode); | |
6896 | } | |
6897 | ||
6898 | void btrfs_destroy_cachep(void) | |
6899 | { | |
6900 | if (btrfs_inode_cachep) | |
6901 | kmem_cache_destroy(btrfs_inode_cachep); | |
6902 | if (btrfs_trans_handle_cachep) | |
6903 | kmem_cache_destroy(btrfs_trans_handle_cachep); | |
6904 | if (btrfs_transaction_cachep) | |
6905 | kmem_cache_destroy(btrfs_transaction_cachep); | |
39279cc3 CM |
6906 | if (btrfs_path_cachep) |
6907 | kmem_cache_destroy(btrfs_path_cachep); | |
dc89e982 JB |
6908 | if (btrfs_free_space_cachep) |
6909 | kmem_cache_destroy(btrfs_free_space_cachep); | |
39279cc3 CM |
6910 | } |
6911 | ||
6912 | int btrfs_init_cachep(void) | |
6913 | { | |
9601e3f6 CH |
6914 | btrfs_inode_cachep = kmem_cache_create("btrfs_inode_cache", |
6915 | sizeof(struct btrfs_inode), 0, | |
6916 | SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, init_once); | |
39279cc3 CM |
6917 | if (!btrfs_inode_cachep) |
6918 | goto fail; | |
9601e3f6 CH |
6919 | |
6920 | btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle_cache", | |
6921 | sizeof(struct btrfs_trans_handle), 0, | |
6922 | SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); | |
39279cc3 CM |
6923 | if (!btrfs_trans_handle_cachep) |
6924 | goto fail; | |
9601e3f6 CH |
6925 | |
6926 | btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction_cache", | |
6927 | sizeof(struct btrfs_transaction), 0, | |
6928 | SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); | |
39279cc3 CM |
6929 | if (!btrfs_transaction_cachep) |
6930 | goto fail; | |
9601e3f6 CH |
6931 | |
6932 | btrfs_path_cachep = kmem_cache_create("btrfs_path_cache", | |
6933 | sizeof(struct btrfs_path), 0, | |
6934 | SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); | |
39279cc3 CM |
6935 | if (!btrfs_path_cachep) |
6936 | goto fail; | |
9601e3f6 | 6937 | |
dc89e982 JB |
6938 | btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space_cache", |
6939 | sizeof(struct btrfs_free_space), 0, | |
6940 | SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); | |
6941 | if (!btrfs_free_space_cachep) | |
6942 | goto fail; | |
6943 | ||
39279cc3 CM |
6944 | return 0; |
6945 | fail: | |
6946 | btrfs_destroy_cachep(); | |
6947 | return -ENOMEM; | |
6948 | } | |
6949 | ||
6950 | static int btrfs_getattr(struct vfsmount *mnt, | |
6951 | struct dentry *dentry, struct kstat *stat) | |
6952 | { | |
6953 | struct inode *inode = dentry->d_inode; | |
6954 | generic_fillattr(inode, stat); | |
3394e160 | 6955 | stat->dev = BTRFS_I(inode)->root->anon_super.s_dev; |
d6667462 | 6956 | stat->blksize = PAGE_CACHE_SIZE; |
a76a3cd4 YZ |
6957 | stat->blocks = (inode_get_bytes(inode) + |
6958 | BTRFS_I(inode)->delalloc_bytes) >> 9; | |
39279cc3 CM |
6959 | return 0; |
6960 | } | |
6961 | ||
75e7cb7f LB |
6962 | /* |
6963 | * If a file is moved, it will inherit the cow and compression flags of the new | |
6964 | * directory. | |
6965 | */ | |
6966 | static void fixup_inode_flags(struct inode *dir, struct inode *inode) | |
6967 | { | |
6968 | struct btrfs_inode *b_dir = BTRFS_I(dir); | |
6969 | struct btrfs_inode *b_inode = BTRFS_I(inode); | |
6970 | ||
6971 | if (b_dir->flags & BTRFS_INODE_NODATACOW) | |
6972 | b_inode->flags |= BTRFS_INODE_NODATACOW; | |
6973 | else | |
6974 | b_inode->flags &= ~BTRFS_INODE_NODATACOW; | |
6975 | ||
6976 | if (b_dir->flags & BTRFS_INODE_COMPRESS) | |
6977 | b_inode->flags |= BTRFS_INODE_COMPRESS; | |
6978 | else | |
6979 | b_inode->flags &= ~BTRFS_INODE_COMPRESS; | |
6980 | } | |
6981 | ||
d397712b CM |
6982 | static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, |
6983 | struct inode *new_dir, struct dentry *new_dentry) | |
39279cc3 CM |
6984 | { |
6985 | struct btrfs_trans_handle *trans; | |
6986 | struct btrfs_root *root = BTRFS_I(old_dir)->root; | |
4df27c4d | 6987 | struct btrfs_root *dest = BTRFS_I(new_dir)->root; |
39279cc3 CM |
6988 | struct inode *new_inode = new_dentry->d_inode; |
6989 | struct inode *old_inode = old_dentry->d_inode; | |
6990 | struct timespec ctime = CURRENT_TIME; | |
00e4e6b3 | 6991 | u64 index = 0; |
4df27c4d | 6992 | u64 root_objectid; |
39279cc3 CM |
6993 | int ret; |
6994 | ||
f679a840 YZ |
6995 | if (new_dir->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) |
6996 | return -EPERM; | |
6997 | ||
4df27c4d YZ |
6998 | /* we only allow rename subvolume link between subvolumes */ |
6999 | if (old_inode->i_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest) | |
3394e160 CM |
7000 | return -EXDEV; |
7001 | ||
4df27c4d YZ |
7002 | if (old_inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID || |
7003 | (new_inode && new_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) | |
39279cc3 | 7004 | return -ENOTEMPTY; |
5f39d397 | 7005 | |
4df27c4d YZ |
7006 | if (S_ISDIR(old_inode->i_mode) && new_inode && |
7007 | new_inode->i_size > BTRFS_EMPTY_DIR_SIZE) | |
7008 | return -ENOTEMPTY; | |
5a3f23d5 CM |
7009 | /* |
7010 | * we're using rename to replace one file with another. | |
7011 | * and the replacement file is large. Start IO on it now so | |
7012 | * we don't add too much work to the end of the transaction | |
7013 | */ | |
4baf8c92 | 7014 | if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size && |
5a3f23d5 CM |
7015 | old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT) |
7016 | filemap_flush(old_inode->i_mapping); | |
7017 | ||
76dda93c YZ |
7018 | /* close the racy window with snapshot create/destroy ioctl */ |
7019 | if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) | |
7020 | down_read(&root->fs_info->subvol_sem); | |
a22285a6 YZ |
7021 | /* |
7022 | * We want to reserve the absolute worst case amount of items. So if | |
7023 | * both inodes are subvols and we need to unlink them then that would | |
7024 | * require 4 item modifications, but if they are both normal inodes it | |
7025 | * would require 5 item modifications, so we'll assume their normal | |
7026 | * inodes. So 5 * 2 is 10, plus 1 for the new link, so 11 total items | |
7027 | * should cover the worst case number of items we'll modify. | |
7028 | */ | |
7029 | trans = btrfs_start_transaction(root, 20); | |
b44c59a8 JL |
7030 | if (IS_ERR(trans)) { |
7031 | ret = PTR_ERR(trans); | |
7032 | goto out_notrans; | |
7033 | } | |
76dda93c | 7034 | |
a5719521 | 7035 | btrfs_set_trans_block_group(trans, new_dir); |
5f39d397 | 7036 | |
4df27c4d YZ |
7037 | if (dest != root) |
7038 | btrfs_record_root_in_trans(trans, dest); | |
5f39d397 | 7039 | |
a5719521 YZ |
7040 | ret = btrfs_set_inode_index(new_dir, &index); |
7041 | if (ret) | |
7042 | goto out_fail; | |
5a3f23d5 | 7043 | |
a5719521 | 7044 | if (unlikely(old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) { |
4df27c4d YZ |
7045 | /* force full log commit if subvolume involved. */ |
7046 | root->fs_info->last_trans_log_full_commit = trans->transid; | |
7047 | } else { | |
a5719521 YZ |
7048 | ret = btrfs_insert_inode_ref(trans, dest, |
7049 | new_dentry->d_name.name, | |
7050 | new_dentry->d_name.len, | |
7051 | old_inode->i_ino, | |
7052 | new_dir->i_ino, index); | |
7053 | if (ret) | |
7054 | goto out_fail; | |
4df27c4d YZ |
7055 | /* |
7056 | * this is an ugly little race, but the rename is required | |
7057 | * to make sure that if we crash, the inode is either at the | |
7058 | * old name or the new one. pinning the log transaction lets | |
7059 | * us make sure we don't allow a log commit to come in after | |
7060 | * we unlink the name but before we add the new name back in. | |
7061 | */ | |
7062 | btrfs_pin_log_trans(root); | |
7063 | } | |
5a3f23d5 CM |
7064 | /* |
7065 | * make sure the inode gets flushed if it is replacing | |
7066 | * something. | |
7067 | */ | |
7068 | if (new_inode && new_inode->i_size && | |
7069 | old_inode && S_ISREG(old_inode->i_mode)) { | |
7070 | btrfs_add_ordered_operation(trans, root, old_inode); | |
7071 | } | |
7072 | ||
39279cc3 CM |
7073 | old_dir->i_ctime = old_dir->i_mtime = ctime; |
7074 | new_dir->i_ctime = new_dir->i_mtime = ctime; | |
7075 | old_inode->i_ctime = ctime; | |
5f39d397 | 7076 | |
12fcfd22 CM |
7077 | if (old_dentry->d_parent != new_dentry->d_parent) |
7078 | btrfs_record_unlink_dir(trans, old_dir, old_inode, 1); | |
7079 | ||
4df27c4d YZ |
7080 | if (unlikely(old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) { |
7081 | root_objectid = BTRFS_I(old_inode)->root->root_key.objectid; | |
7082 | ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid, | |
7083 | old_dentry->d_name.name, | |
7084 | old_dentry->d_name.len); | |
7085 | } else { | |
92986796 AV |
7086 | ret = __btrfs_unlink_inode(trans, root, old_dir, |
7087 | old_dentry->d_inode, | |
7088 | old_dentry->d_name.name, | |
7089 | old_dentry->d_name.len); | |
7090 | if (!ret) | |
7091 | ret = btrfs_update_inode(trans, root, old_inode); | |
4df27c4d YZ |
7092 | } |
7093 | BUG_ON(ret); | |
39279cc3 CM |
7094 | |
7095 | if (new_inode) { | |
7096 | new_inode->i_ctime = CURRENT_TIME; | |
4df27c4d YZ |
7097 | if (unlikely(new_inode->i_ino == |
7098 | BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { | |
7099 | root_objectid = BTRFS_I(new_inode)->location.objectid; | |
7100 | ret = btrfs_unlink_subvol(trans, dest, new_dir, | |
7101 | root_objectid, | |
7102 | new_dentry->d_name.name, | |
7103 | new_dentry->d_name.len); | |
7104 | BUG_ON(new_inode->i_nlink == 0); | |
7105 | } else { | |
7106 | ret = btrfs_unlink_inode(trans, dest, new_dir, | |
7107 | new_dentry->d_inode, | |
7108 | new_dentry->d_name.name, | |
7109 | new_dentry->d_name.len); | |
7110 | } | |
7111 | BUG_ON(ret); | |
7b128766 | 7112 | if (new_inode->i_nlink == 0) { |
e02119d5 | 7113 | ret = btrfs_orphan_add(trans, new_dentry->d_inode); |
4df27c4d | 7114 | BUG_ON(ret); |
7b128766 | 7115 | } |
39279cc3 | 7116 | } |
aec7477b | 7117 | |
75e7cb7f LB |
7118 | fixup_inode_flags(new_dir, old_inode); |
7119 | ||
4df27c4d YZ |
7120 | ret = btrfs_add_link(trans, new_dir, old_inode, |
7121 | new_dentry->d_name.name, | |
a5719521 | 7122 | new_dentry->d_name.len, 0, index); |
4df27c4d | 7123 | BUG_ON(ret); |
39279cc3 | 7124 | |
4df27c4d | 7125 | if (old_inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) { |
6a912213 JB |
7126 | struct dentry *parent = dget_parent(new_dentry); |
7127 | btrfs_log_new_name(trans, old_inode, old_dir, parent); | |
7128 | dput(parent); | |
4df27c4d YZ |
7129 | btrfs_end_log_trans(root); |
7130 | } | |
39279cc3 | 7131 | out_fail: |
ab78c84d | 7132 | btrfs_end_transaction_throttle(trans, root); |
b44c59a8 | 7133 | out_notrans: |
76dda93c YZ |
7134 | if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) |
7135 | up_read(&root->fs_info->subvol_sem); | |
9ed74f2d | 7136 | |
39279cc3 CM |
7137 | return ret; |
7138 | } | |
7139 | ||
d352ac68 CM |
7140 | /* |
7141 | * some fairly slow code that needs optimization. This walks the list | |
7142 | * of all the inodes with pending delalloc and forces them to disk. | |
7143 | */ | |
24bbcf04 | 7144 | int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput) |
ea8c2819 CM |
7145 | { |
7146 | struct list_head *head = &root->fs_info->delalloc_inodes; | |
7147 | struct btrfs_inode *binode; | |
5b21f2ed | 7148 | struct inode *inode; |
ea8c2819 | 7149 | |
c146afad YZ |
7150 | if (root->fs_info->sb->s_flags & MS_RDONLY) |
7151 | return -EROFS; | |
7152 | ||
75eff68e | 7153 | spin_lock(&root->fs_info->delalloc_lock); |
d397712b | 7154 | while (!list_empty(head)) { |
ea8c2819 CM |
7155 | binode = list_entry(head->next, struct btrfs_inode, |
7156 | delalloc_inodes); | |
5b21f2ed ZY |
7157 | inode = igrab(&binode->vfs_inode); |
7158 | if (!inode) | |
7159 | list_del_init(&binode->delalloc_inodes); | |
75eff68e | 7160 | spin_unlock(&root->fs_info->delalloc_lock); |
5b21f2ed | 7161 | if (inode) { |
8c8bee1d | 7162 | filemap_flush(inode->i_mapping); |
24bbcf04 YZ |
7163 | if (delay_iput) |
7164 | btrfs_add_delayed_iput(inode); | |
7165 | else | |
7166 | iput(inode); | |
5b21f2ed ZY |
7167 | } |
7168 | cond_resched(); | |
75eff68e | 7169 | spin_lock(&root->fs_info->delalloc_lock); |
ea8c2819 | 7170 | } |
75eff68e | 7171 | spin_unlock(&root->fs_info->delalloc_lock); |
8c8bee1d CM |
7172 | |
7173 | /* the filemap_flush will queue IO into the worker threads, but | |
7174 | * we have to make sure the IO is actually started and that | |
7175 | * ordered extents get created before we return | |
7176 | */ | |
7177 | atomic_inc(&root->fs_info->async_submit_draining); | |
d397712b | 7178 | while (atomic_read(&root->fs_info->nr_async_submits) || |
771ed689 | 7179 | atomic_read(&root->fs_info->async_delalloc_pages)) { |
8c8bee1d | 7180 | wait_event(root->fs_info->async_submit_wait, |
771ed689 CM |
7181 | (atomic_read(&root->fs_info->nr_async_submits) == 0 && |
7182 | atomic_read(&root->fs_info->async_delalloc_pages) == 0)); | |
8c8bee1d CM |
7183 | } |
7184 | atomic_dec(&root->fs_info->async_submit_draining); | |
ea8c2819 CM |
7185 | return 0; |
7186 | } | |
7187 | ||
0019f10d JB |
7188 | int btrfs_start_one_delalloc_inode(struct btrfs_root *root, int delay_iput, |
7189 | int sync) | |
5da9d01b YZ |
7190 | { |
7191 | struct btrfs_inode *binode; | |
7192 | struct inode *inode = NULL; | |
7193 | ||
7194 | spin_lock(&root->fs_info->delalloc_lock); | |
7195 | while (!list_empty(&root->fs_info->delalloc_inodes)) { | |
7196 | binode = list_entry(root->fs_info->delalloc_inodes.next, | |
7197 | struct btrfs_inode, delalloc_inodes); | |
7198 | inode = igrab(&binode->vfs_inode); | |
7199 | if (inode) { | |
7200 | list_move_tail(&binode->delalloc_inodes, | |
7201 | &root->fs_info->delalloc_inodes); | |
7202 | break; | |
7203 | } | |
7204 | ||
7205 | list_del_init(&binode->delalloc_inodes); | |
7206 | cond_resched_lock(&root->fs_info->delalloc_lock); | |
7207 | } | |
7208 | spin_unlock(&root->fs_info->delalloc_lock); | |
7209 | ||
7210 | if (inode) { | |
0019f10d JB |
7211 | if (sync) { |
7212 | filemap_write_and_wait(inode->i_mapping); | |
7213 | /* | |
7214 | * We have to do this because compression doesn't | |
7215 | * actually set PG_writeback until it submits the pages | |
7216 | * for IO, which happens in an async thread, so we could | |
7217 | * race and not actually wait for any writeback pages | |
7218 | * because they've not been submitted yet. Technically | |
7219 | * this could still be the case for the ordered stuff | |
7220 | * since the async thread may not have started to do its | |
7221 | * work yet. If this becomes the case then we need to | |
7222 | * figure out a way to make sure that in writepage we | |
7223 | * wait for any async pages to be submitted before | |
7224 | * returning so that fdatawait does what its supposed to | |
7225 | * do. | |
7226 | */ | |
7227 | btrfs_wait_ordered_range(inode, 0, (u64)-1); | |
7228 | } else { | |
7229 | filemap_flush(inode->i_mapping); | |
7230 | } | |
5da9d01b YZ |
7231 | if (delay_iput) |
7232 | btrfs_add_delayed_iput(inode); | |
7233 | else | |
7234 | iput(inode); | |
7235 | return 1; | |
7236 | } | |
7237 | return 0; | |
7238 | } | |
7239 | ||
39279cc3 CM |
7240 | static int btrfs_symlink(struct inode *dir, struct dentry *dentry, |
7241 | const char *symname) | |
7242 | { | |
7243 | struct btrfs_trans_handle *trans; | |
7244 | struct btrfs_root *root = BTRFS_I(dir)->root; | |
7245 | struct btrfs_path *path; | |
7246 | struct btrfs_key key; | |
1832a6d5 | 7247 | struct inode *inode = NULL; |
39279cc3 CM |
7248 | int err; |
7249 | int drop_inode = 0; | |
7250 | u64 objectid; | |
00e4e6b3 | 7251 | u64 index = 0 ; |
39279cc3 CM |
7252 | int name_len; |
7253 | int datasize; | |
5f39d397 | 7254 | unsigned long ptr; |
39279cc3 | 7255 | struct btrfs_file_extent_item *ei; |
5f39d397 | 7256 | struct extent_buffer *leaf; |
1832a6d5 | 7257 | unsigned long nr = 0; |
39279cc3 CM |
7258 | |
7259 | name_len = strlen(symname) + 1; | |
7260 | if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root)) | |
7261 | return -ENAMETOOLONG; | |
1832a6d5 | 7262 | |
a22285a6 YZ |
7263 | err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid); |
7264 | if (err) | |
7265 | return err; | |
9ed74f2d JB |
7266 | /* |
7267 | * 2 items for inode item and ref | |
7268 | * 2 items for dir items | |
7269 | * 1 item for xattr if selinux is on | |
7270 | */ | |
a22285a6 YZ |
7271 | trans = btrfs_start_transaction(root, 5); |
7272 | if (IS_ERR(trans)) | |
7273 | return PTR_ERR(trans); | |
1832a6d5 | 7274 | |
39279cc3 CM |
7275 | btrfs_set_trans_block_group(trans, dir); |
7276 | ||
aec7477b | 7277 | inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, |
a1b075d2 | 7278 | dentry->d_name.len, dir->i_ino, objectid, |
00e4e6b3 CM |
7279 | BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO, |
7280 | &index); | |
7cf96da3 TI |
7281 | if (IS_ERR(inode)) { |
7282 | err = PTR_ERR(inode); | |
39279cc3 | 7283 | goto out_unlock; |
7cf96da3 | 7284 | } |
39279cc3 | 7285 | |
2a7dba39 | 7286 | err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); |
33268eaf JB |
7287 | if (err) { |
7288 | drop_inode = 1; | |
7289 | goto out_unlock; | |
7290 | } | |
7291 | ||
39279cc3 | 7292 | btrfs_set_trans_block_group(trans, inode); |
a1b075d2 | 7293 | err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); |
39279cc3 CM |
7294 | if (err) |
7295 | drop_inode = 1; | |
7296 | else { | |
7297 | inode->i_mapping->a_ops = &btrfs_aops; | |
04160088 | 7298 | inode->i_mapping->backing_dev_info = &root->fs_info->bdi; |
39279cc3 CM |
7299 | inode->i_fop = &btrfs_file_operations; |
7300 | inode->i_op = &btrfs_file_inode_operations; | |
d1310b2e | 7301 | BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; |
39279cc3 | 7302 | } |
39279cc3 CM |
7303 | btrfs_update_inode_block_group(trans, inode); |
7304 | btrfs_update_inode_block_group(trans, dir); | |
7305 | if (drop_inode) | |
7306 | goto out_unlock; | |
7307 | ||
7308 | path = btrfs_alloc_path(); | |
7309 | BUG_ON(!path); | |
7310 | key.objectid = inode->i_ino; | |
7311 | key.offset = 0; | |
39279cc3 CM |
7312 | btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY); |
7313 | datasize = btrfs_file_extent_calc_inline_size(name_len); | |
7314 | err = btrfs_insert_empty_item(trans, root, path, &key, | |
7315 | datasize); | |
54aa1f4d CM |
7316 | if (err) { |
7317 | drop_inode = 1; | |
7318 | goto out_unlock; | |
7319 | } | |
5f39d397 CM |
7320 | leaf = path->nodes[0]; |
7321 | ei = btrfs_item_ptr(leaf, path->slots[0], | |
7322 | struct btrfs_file_extent_item); | |
7323 | btrfs_set_file_extent_generation(leaf, ei, trans->transid); | |
7324 | btrfs_set_file_extent_type(leaf, ei, | |
39279cc3 | 7325 | BTRFS_FILE_EXTENT_INLINE); |
c8b97818 CM |
7326 | btrfs_set_file_extent_encryption(leaf, ei, 0); |
7327 | btrfs_set_file_extent_compression(leaf, ei, 0); | |
7328 | btrfs_set_file_extent_other_encoding(leaf, ei, 0); | |
7329 | btrfs_set_file_extent_ram_bytes(leaf, ei, name_len); | |
7330 | ||
39279cc3 | 7331 | ptr = btrfs_file_extent_inline_start(ei); |
5f39d397 CM |
7332 | write_extent_buffer(leaf, symname, ptr, name_len); |
7333 | btrfs_mark_buffer_dirty(leaf); | |
39279cc3 | 7334 | btrfs_free_path(path); |
5f39d397 | 7335 | |
39279cc3 CM |
7336 | inode->i_op = &btrfs_symlink_inode_operations; |
7337 | inode->i_mapping->a_ops = &btrfs_symlink_aops; | |
04160088 | 7338 | inode->i_mapping->backing_dev_info = &root->fs_info->bdi; |
d899e052 | 7339 | inode_set_bytes(inode, name_len); |
dbe674a9 | 7340 | btrfs_i_size_write(inode, name_len - 1); |
54aa1f4d CM |
7341 | err = btrfs_update_inode(trans, root, inode); |
7342 | if (err) | |
7343 | drop_inode = 1; | |
39279cc3 CM |
7344 | |
7345 | out_unlock: | |
d3c2fdcf | 7346 | nr = trans->blocks_used; |
ab78c84d | 7347 | btrfs_end_transaction_throttle(trans, root); |
39279cc3 CM |
7348 | if (drop_inode) { |
7349 | inode_dec_link_count(inode); | |
7350 | iput(inode); | |
7351 | } | |
d3c2fdcf | 7352 | btrfs_btree_balance_dirty(root, nr); |
39279cc3 CM |
7353 | return err; |
7354 | } | |
16432985 | 7355 | |
0af3d00b JB |
7356 | static int __btrfs_prealloc_file_range(struct inode *inode, int mode, |
7357 | u64 start, u64 num_bytes, u64 min_size, | |
7358 | loff_t actual_len, u64 *alloc_hint, | |
7359 | struct btrfs_trans_handle *trans) | |
d899e052 | 7360 | { |
d899e052 YZ |
7361 | struct btrfs_root *root = BTRFS_I(inode)->root; |
7362 | struct btrfs_key ins; | |
d899e052 | 7363 | u64 cur_offset = start; |
55a61d1d | 7364 | u64 i_size; |
d899e052 | 7365 | int ret = 0; |
0af3d00b | 7366 | bool own_trans = true; |
d899e052 | 7367 | |
0af3d00b JB |
7368 | if (trans) |
7369 | own_trans = false; | |
d899e052 | 7370 | while (num_bytes > 0) { |
0af3d00b JB |
7371 | if (own_trans) { |
7372 | trans = btrfs_start_transaction(root, 3); | |
7373 | if (IS_ERR(trans)) { | |
7374 | ret = PTR_ERR(trans); | |
7375 | break; | |
7376 | } | |
5a303d5d YZ |
7377 | } |
7378 | ||
efa56464 YZ |
7379 | ret = btrfs_reserve_extent(trans, root, num_bytes, min_size, |
7380 | 0, *alloc_hint, (u64)-1, &ins, 1); | |
5a303d5d | 7381 | if (ret) { |
0af3d00b JB |
7382 | if (own_trans) |
7383 | btrfs_end_transaction(trans, root); | |
a22285a6 | 7384 | break; |
d899e052 | 7385 | } |
5a303d5d | 7386 | |
d899e052 YZ |
7387 | ret = insert_reserved_file_extent(trans, inode, |
7388 | cur_offset, ins.objectid, | |
7389 | ins.offset, ins.offset, | |
920bbbfb | 7390 | ins.offset, 0, 0, 0, |
d899e052 YZ |
7391 | BTRFS_FILE_EXTENT_PREALLOC); |
7392 | BUG_ON(ret); | |
a1ed835e CM |
7393 | btrfs_drop_extent_cache(inode, cur_offset, |
7394 | cur_offset + ins.offset -1, 0); | |
5a303d5d | 7395 | |
d899e052 YZ |
7396 | num_bytes -= ins.offset; |
7397 | cur_offset += ins.offset; | |
efa56464 | 7398 | *alloc_hint = ins.objectid + ins.offset; |
5a303d5d | 7399 | |
d899e052 | 7400 | inode->i_ctime = CURRENT_TIME; |
6cbff00f | 7401 | BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC; |
d899e052 | 7402 | if (!(mode & FALLOC_FL_KEEP_SIZE) && |
efa56464 YZ |
7403 | (actual_len > inode->i_size) && |
7404 | (cur_offset > inode->i_size)) { | |
d1ea6a61 | 7405 | if (cur_offset > actual_len) |
55a61d1d | 7406 | i_size = actual_len; |
d1ea6a61 | 7407 | else |
55a61d1d JB |
7408 | i_size = cur_offset; |
7409 | i_size_write(inode, i_size); | |
7410 | btrfs_ordered_update_i_size(inode, i_size, NULL); | |
5a303d5d YZ |
7411 | } |
7412 | ||
d899e052 YZ |
7413 | ret = btrfs_update_inode(trans, root, inode); |
7414 | BUG_ON(ret); | |
d899e052 | 7415 | |
0af3d00b JB |
7416 | if (own_trans) |
7417 | btrfs_end_transaction(trans, root); | |
5a303d5d | 7418 | } |
d899e052 YZ |
7419 | return ret; |
7420 | } | |
7421 | ||
0af3d00b JB |
7422 | int btrfs_prealloc_file_range(struct inode *inode, int mode, |
7423 | u64 start, u64 num_bytes, u64 min_size, | |
7424 | loff_t actual_len, u64 *alloc_hint) | |
7425 | { | |
7426 | return __btrfs_prealloc_file_range(inode, mode, start, num_bytes, | |
7427 | min_size, actual_len, alloc_hint, | |
7428 | NULL); | |
7429 | } | |
7430 | ||
7431 | int btrfs_prealloc_file_range_trans(struct inode *inode, | |
7432 | struct btrfs_trans_handle *trans, int mode, | |
7433 | u64 start, u64 num_bytes, u64 min_size, | |
7434 | loff_t actual_len, u64 *alloc_hint) | |
7435 | { | |
7436 | return __btrfs_prealloc_file_range(inode, mode, start, num_bytes, | |
7437 | min_size, actual_len, alloc_hint, trans); | |
7438 | } | |
7439 | ||
e6dcd2dc CM |
7440 | static int btrfs_set_page_dirty(struct page *page) |
7441 | { | |
e6dcd2dc CM |
7442 | return __set_page_dirty_nobuffers(page); |
7443 | } | |
7444 | ||
b74c79e9 | 7445 | static int btrfs_permission(struct inode *inode, int mask, unsigned int flags) |
fdebe2bd | 7446 | { |
b83cc969 LZ |
7447 | struct btrfs_root *root = BTRFS_I(inode)->root; |
7448 | ||
7449 | if (btrfs_root_readonly(root) && (mask & MAY_WRITE)) | |
7450 | return -EROFS; | |
6cbff00f | 7451 | if ((BTRFS_I(inode)->flags & BTRFS_INODE_READONLY) && (mask & MAY_WRITE)) |
fdebe2bd | 7452 | return -EACCES; |
b74c79e9 | 7453 | return generic_permission(inode, mask, flags, btrfs_check_acl); |
fdebe2bd | 7454 | } |
39279cc3 | 7455 | |
6e1d5dcc | 7456 | static const struct inode_operations btrfs_dir_inode_operations = { |
3394e160 | 7457 | .getattr = btrfs_getattr, |
39279cc3 CM |
7458 | .lookup = btrfs_lookup, |
7459 | .create = btrfs_create, | |
7460 | .unlink = btrfs_unlink, | |
7461 | .link = btrfs_link, | |
7462 | .mkdir = btrfs_mkdir, | |
7463 | .rmdir = btrfs_rmdir, | |
7464 | .rename = btrfs_rename, | |
7465 | .symlink = btrfs_symlink, | |
7466 | .setattr = btrfs_setattr, | |
618e21d5 | 7467 | .mknod = btrfs_mknod, |
95819c05 CH |
7468 | .setxattr = btrfs_setxattr, |
7469 | .getxattr = btrfs_getxattr, | |
5103e947 | 7470 | .listxattr = btrfs_listxattr, |
95819c05 | 7471 | .removexattr = btrfs_removexattr, |
fdebe2bd | 7472 | .permission = btrfs_permission, |
39279cc3 | 7473 | }; |
6e1d5dcc | 7474 | static const struct inode_operations btrfs_dir_ro_inode_operations = { |
39279cc3 | 7475 | .lookup = btrfs_lookup, |
fdebe2bd | 7476 | .permission = btrfs_permission, |
39279cc3 | 7477 | }; |
76dda93c | 7478 | |
828c0950 | 7479 | static const struct file_operations btrfs_dir_file_operations = { |
39279cc3 CM |
7480 | .llseek = generic_file_llseek, |
7481 | .read = generic_read_dir, | |
cbdf5a24 | 7482 | .readdir = btrfs_real_readdir, |
34287aa3 | 7483 | .unlocked_ioctl = btrfs_ioctl, |
39279cc3 | 7484 | #ifdef CONFIG_COMPAT |
34287aa3 | 7485 | .compat_ioctl = btrfs_ioctl, |
39279cc3 | 7486 | #endif |
6bf13c0c | 7487 | .release = btrfs_release_file, |
e02119d5 | 7488 | .fsync = btrfs_sync_file, |
39279cc3 CM |
7489 | }; |
7490 | ||
d1310b2e | 7491 | static struct extent_io_ops btrfs_extent_io_ops = { |
07157aac | 7492 | .fill_delalloc = run_delalloc_range, |
065631f6 | 7493 | .submit_bio_hook = btrfs_submit_bio_hook, |
239b14b3 | 7494 | .merge_bio_hook = btrfs_merge_bio_hook, |
07157aac | 7495 | .readpage_end_io_hook = btrfs_readpage_end_io_hook, |
e6dcd2dc | 7496 | .writepage_end_io_hook = btrfs_writepage_end_io_hook, |
247e743c | 7497 | .writepage_start_hook = btrfs_writepage_start_hook, |
1259ab75 | 7498 | .readpage_io_failed_hook = btrfs_io_failed_hook, |
b0c68f8b CM |
7499 | .set_bit_hook = btrfs_set_bit_hook, |
7500 | .clear_bit_hook = btrfs_clear_bit_hook, | |
9ed74f2d JB |
7501 | .merge_extent_hook = btrfs_merge_extent_hook, |
7502 | .split_extent_hook = btrfs_split_extent_hook, | |
07157aac CM |
7503 | }; |
7504 | ||
35054394 CM |
7505 | /* |
7506 | * btrfs doesn't support the bmap operation because swapfiles | |
7507 | * use bmap to make a mapping of extents in the file. They assume | |
7508 | * these extents won't change over the life of the file and they | |
7509 | * use the bmap result to do IO directly to the drive. | |
7510 | * | |
7511 | * the btrfs bmap call would return logical addresses that aren't | |
7512 | * suitable for IO and they also will change frequently as COW | |
7513 | * operations happen. So, swapfile + btrfs == corruption. | |
7514 | * | |
7515 | * For now we're avoiding this by dropping bmap. | |
7516 | */ | |
7f09410b | 7517 | static const struct address_space_operations btrfs_aops = { |
39279cc3 CM |
7518 | .readpage = btrfs_readpage, |
7519 | .writepage = btrfs_writepage, | |
b293f02e | 7520 | .writepages = btrfs_writepages, |
3ab2fb5a | 7521 | .readpages = btrfs_readpages, |
16432985 | 7522 | .direct_IO = btrfs_direct_IO, |
a52d9a80 CM |
7523 | .invalidatepage = btrfs_invalidatepage, |
7524 | .releasepage = btrfs_releasepage, | |
e6dcd2dc | 7525 | .set_page_dirty = btrfs_set_page_dirty, |
465fdd97 | 7526 | .error_remove_page = generic_error_remove_page, |
39279cc3 CM |
7527 | }; |
7528 | ||
7f09410b | 7529 | static const struct address_space_operations btrfs_symlink_aops = { |
39279cc3 CM |
7530 | .readpage = btrfs_readpage, |
7531 | .writepage = btrfs_writepage, | |
2bf5a725 CM |
7532 | .invalidatepage = btrfs_invalidatepage, |
7533 | .releasepage = btrfs_releasepage, | |
39279cc3 CM |
7534 | }; |
7535 | ||
6e1d5dcc | 7536 | static const struct inode_operations btrfs_file_inode_operations = { |
39279cc3 CM |
7537 | .getattr = btrfs_getattr, |
7538 | .setattr = btrfs_setattr, | |
95819c05 CH |
7539 | .setxattr = btrfs_setxattr, |
7540 | .getxattr = btrfs_getxattr, | |
5103e947 | 7541 | .listxattr = btrfs_listxattr, |
95819c05 | 7542 | .removexattr = btrfs_removexattr, |
fdebe2bd | 7543 | .permission = btrfs_permission, |
1506fcc8 | 7544 | .fiemap = btrfs_fiemap, |
39279cc3 | 7545 | }; |
6e1d5dcc | 7546 | static const struct inode_operations btrfs_special_inode_operations = { |
618e21d5 JB |
7547 | .getattr = btrfs_getattr, |
7548 | .setattr = btrfs_setattr, | |
fdebe2bd | 7549 | .permission = btrfs_permission, |
95819c05 CH |
7550 | .setxattr = btrfs_setxattr, |
7551 | .getxattr = btrfs_getxattr, | |
33268eaf | 7552 | .listxattr = btrfs_listxattr, |
95819c05 | 7553 | .removexattr = btrfs_removexattr, |
618e21d5 | 7554 | }; |
6e1d5dcc | 7555 | static const struct inode_operations btrfs_symlink_inode_operations = { |
39279cc3 CM |
7556 | .readlink = generic_readlink, |
7557 | .follow_link = page_follow_link_light, | |
7558 | .put_link = page_put_link, | |
f209561a | 7559 | .getattr = btrfs_getattr, |
fdebe2bd | 7560 | .permission = btrfs_permission, |
0279b4cd JO |
7561 | .setxattr = btrfs_setxattr, |
7562 | .getxattr = btrfs_getxattr, | |
7563 | .listxattr = btrfs_listxattr, | |
7564 | .removexattr = btrfs_removexattr, | |
39279cc3 | 7565 | }; |
76dda93c | 7566 | |
82d339d9 | 7567 | const struct dentry_operations btrfs_dentry_operations = { |
76dda93c YZ |
7568 | .d_delete = btrfs_dentry_delete, |
7569 | }; |