/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/compat.h>
#include <linux/bit_spinlock.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/mount.h>
#include <linux/btrfs.h>
#include <linux/blkdev.h>
#include <linux/posix_acl_xattr.h>
#include <linux/uio.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "ordered-data.h"
#include "xattr.h"
#include "tree-log.h"
#include "volumes.h"
#include "compression.h"
#include "locking.h"
#include "free-space-cache.h"
#include "inode-map.h"
#include "backref.h"
#include "hash.h"
#include "props.h"
#include "qgroup.h"
#include "dedupe.h"

struct btrfs_iget_args {
	struct btrfs_key *location;
	struct btrfs_root *root;
};

struct btrfs_dio_data {
	u64 outstanding_extents;
	u64 reserve;
	u64 unsubmitted_oe_range_start;
	u64 unsubmitted_oe_range_end;
	int overwrite;
};

static const struct inode_operations btrfs_dir_inode_operations;
static const struct inode_operations btrfs_symlink_inode_operations;
static const struct inode_operations btrfs_dir_ro_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
static const struct inode_operations btrfs_file_inode_operations;
static const struct address_space_operations btrfs_aops;
static const struct address_space_operations btrfs_symlink_aops;
static const struct file_operations btrfs_dir_file_operations;
static const struct extent_io_ops btrfs_extent_io_ops;

static struct kmem_cache *btrfs_inode_cachep;
struct kmem_cache *btrfs_trans_handle_cachep;
struct kmem_cache *btrfs_transaction_cachep;
struct kmem_cache *btrfs_path_cachep;
struct kmem_cache *btrfs_free_space_cachep;

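/*
 * Note: this table maps the S_IFMT bits of an inode's i_mode to the
 * BTRFS_FT_* values used for the type field of directory entries,
 * indexed as btrfs_type_by_mode[(mode & S_IFMT) >> S_SHIFT].
 */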
#define S_SHIFT 12
static const unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
	[S_IFREG >> S_SHIFT]	= BTRFS_FT_REG_FILE,
	[S_IFDIR >> S_SHIFT]	= BTRFS_FT_DIR,
	[S_IFCHR >> S_SHIFT]	= BTRFS_FT_CHRDEV,
	[S_IFBLK >> S_SHIFT]	= BTRFS_FT_BLKDEV,
	[S_IFIFO >> S_SHIFT]	= BTRFS_FT_FIFO,
	[S_IFSOCK >> S_SHIFT]	= BTRFS_FT_SOCK,
	[S_IFLNK >> S_SHIFT]	= BTRFS_FT_SYMLINK,
};

static int btrfs_setsize(struct inode *inode, struct iattr *attr);
static int btrfs_truncate(struct inode *inode);
static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
static noinline int cow_file_range(struct inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, u64 delalloc_end,
				   int *page_started, unsigned long *nr_written,
				   int unlock, struct btrfs_dedupe_hash *hash);
static struct extent_map *create_io_em(struct inode *inode, u64 start, u64 len,
				       u64 orig_start, u64 block_start,
				       u64 block_len, u64 orig_block_len,
				       u64 ram_bytes, int compress_type,
				       int type);

static int btrfs_dirty_inode(struct inode *inode);

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
void btrfs_test_inode_set_ops(struct inode *inode)
{
	BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
}
#endif

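/*
 * Initialize the security context of a newly created inode: set up the ACLs
 * inherited from the parent directory and the security xattr, all inside the
 * transaction that creates the inode.
 */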
static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
				     struct inode *inode, struct inode *dir,
				     const struct qstr *qstr)
{
	int err;

	err = btrfs_init_acl(trans, inode, dir);
	if (!err)
		err = btrfs_xattr_security_init(trans, inode, dir, qstr);
	return err;
}

/*
 * this does all the hard work for inserting an inline extent into
 * the btree.  The caller should have done a btrfs_drop_extents so that
 * no overlapping inline items exist in the btree
 */
static int insert_inline_extent(struct btrfs_trans_handle *trans,
				struct btrfs_path *path, int extent_inserted,
				struct btrfs_root *root, struct inode *inode,
				u64 start, size_t size, size_t compressed_size,
				int compress_type,
				struct page **compressed_pages)
{
	struct extent_buffer *leaf;
	struct page *page = NULL;
	char *kaddr;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	int err = 0;
	int ret;
	size_t cur_size = size;
	unsigned long offset;

	if (compressed_size && compressed_pages)
		cur_size = compressed_size;

	inode_add_bytes(inode, size);

	if (!extent_inserted) {
		struct btrfs_key key;
		size_t datasize;

		key.objectid = btrfs_ino(BTRFS_I(inode));
		key.offset = start;
		key.type = BTRFS_EXTENT_DATA_KEY;

		datasize = btrfs_file_extent_calc_inline_size(cur_size);
		path->leave_spinning = 1;
		ret = btrfs_insert_empty_item(trans, root, path, &key,
					      datasize);
		if (ret) {
			err = ret;
			goto fail;
		}
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
	ptr = btrfs_file_extent_inline_start(ei);

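	/*
	 * Copy the data into the inline item: either from the array of
	 * compressed pages, or straight from the page cache page that
	 * covers @start when no compression was used.
	 */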
	if (compress_type != BTRFS_COMPRESS_NONE) {
		struct page *cpage;
		int i = 0;
		while (compressed_size > 0) {
			cpage = compressed_pages[i];
			cur_size = min_t(unsigned long, compressed_size,
					 PAGE_SIZE);

			kaddr = kmap_atomic(cpage);
			write_extent_buffer(leaf, kaddr, ptr, cur_size);
			kunmap_atomic(kaddr);

			i++;
			ptr += cur_size;
			compressed_size -= cur_size;
		}
		btrfs_set_file_extent_compression(leaf, ei,
						  compress_type);
	} else {
		page = find_get_page(inode->i_mapping,
				     start >> PAGE_SHIFT);
		btrfs_set_file_extent_compression(leaf, ei, 0);
		kaddr = kmap_atomic(page);
		offset = start & (PAGE_SIZE - 1);
		write_extent_buffer(leaf, kaddr + offset, ptr, size);
		kunmap_atomic(kaddr);
		put_page(page);
	}
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	/*
	 * we're an inline extent, so nobody can
	 * extend the file past i_size without locking
	 * a page we already have locked.
	 *
	 * We must do any isize and inode updates
	 * before we unlock the pages.  Otherwise we
	 * could end up racing with unlink.
	 */
	BTRFS_I(inode)->disk_i_size = inode->i_size;
	ret = btrfs_update_inode(trans, root, inode);

	return ret;
fail:
	return err;
}


/*
 * conditionally insert an inline extent into the file.  This
 * does the checks required to make sure the data is small enough
 * to fit as an inline extent.
 */
static noinline int cow_file_range_inline(struct btrfs_root *root,
					  struct inode *inode, u64 start,
					  u64 end, size_t compressed_size,
					  int compress_type,
					  struct page **compressed_pages)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	u64 isize = i_size_read(inode);
	u64 actual_end = min(end + 1, isize);
	u64 inline_len = actual_end - start;
	u64 aligned_end = ALIGN(end, fs_info->sectorsize);
	u64 data_len = inline_len;
	int ret;
	struct btrfs_path *path;
	int extent_inserted = 0;
	u32 extent_item_size;

	if (compressed_size)
		data_len = compressed_size;

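	/*
	 * Returning 1 (without an error) tells the caller the range was not
	 * inlined and should be written out as regular extents instead.
	 */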
	if (start > 0 ||
	    actual_end > fs_info->sectorsize ||
	    data_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info) ||
	    (!compressed_size &&
	    (actual_end & (fs_info->sectorsize - 1)) == 0) ||
	    end + 1 < isize ||
	    data_len > fs_info->max_inline) {
		return 1;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	trans->block_rsv = &fs_info->delalloc_block_rsv;

	if (compressed_size && compressed_pages)
		extent_item_size = btrfs_file_extent_calc_inline_size(
		   compressed_size);
	else
		extent_item_size = btrfs_file_extent_calc_inline_size(
		    inline_len);

	ret = __btrfs_drop_extents(trans, root, inode, path,
				   start, aligned_end, NULL,
				   1, 1, extent_item_size, &extent_inserted);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	if (isize > actual_end)
		inline_len = min_t(u64, isize, actual_end);
	ret = insert_inline_extent(trans, path, extent_inserted,
				   root, inode, start,
				   inline_len, compressed_size,
				   compress_type, compressed_pages);
	if (ret && ret != -ENOSPC) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	} else if (ret == -ENOSPC) {
		ret = 1;
		goto out;
	}

	set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
	btrfs_delalloc_release_metadata(BTRFS_I(inode), end + 1 - start);
	btrfs_drop_extent_cache(BTRFS_I(inode), start, aligned_end - 1, 0);
out:
	/*
	 * Don't forget to free the reserved space, as for inlined extent
	 * it won't count as data extent, free them directly here.
	 * And at reserve time, it's always aligned to page size, so
	 * just free one page here.
	 */
	btrfs_qgroup_free_data(inode, 0, PAGE_SIZE);
	btrfs_free_path(path);
	btrfs_end_transaction(trans);
	return ret;
}

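/*
 * Work items for the async compression path: an async_cow describes one
 * delalloc range handed to the workqueue, and carries the list of
 * async_extent results that phase two later submits to disk.
 */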
struct async_extent {
	u64 start;
	u64 ram_size;
	u64 compressed_size;
	struct page **pages;
	unsigned long nr_pages;
	int compress_type;
	struct list_head list;
};

struct async_cow {
	struct inode *inode;
	struct btrfs_root *root;
	struct page *locked_page;
	u64 start;
	u64 end;
	struct list_head extents;
	struct btrfs_work work;
};

static noinline int add_async_extent(struct async_cow *cow,
				     u64 start, u64 ram_size,
				     u64 compressed_size,
				     struct page **pages,
				     unsigned long nr_pages,
				     int compress_type)
{
	struct async_extent *async_extent;

	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
	BUG_ON(!async_extent); /* -ENOMEM */
	async_extent->start = start;
	async_extent->ram_size = ram_size;
	async_extent->compressed_size = compressed_size;
	async_extent->pages = pages;
	async_extent->nr_pages = nr_pages;
	async_extent->compress_type = compress_type;
	list_add_tail(&async_extent->list, &cow->extents);
	return 0;
}

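/*
 * Decide whether writeback of this inode should go through the compression
 * path: compress-force always wins, an inode flagged NOCOMPRESS (because of
 * earlier bad ratios) never compresses, and otherwise the compress mount
 * option or the per-inode compression flags enable it.
 */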
static inline int inode_need_compress(struct inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);

	/* force compress */
	if (btrfs_test_opt(fs_info, FORCE_COMPRESS))
		return 1;
	/* bad compression ratios */
	if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
		return 0;
	if (btrfs_test_opt(fs_info, COMPRESS) ||
	    BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS ||
	    BTRFS_I(inode)->force_compress)
		return 1;
	return 0;
}

static inline void inode_should_defrag(struct btrfs_inode *inode,
		u64 start, u64 end, u64 num_bytes, u64 small_write)
{
	/* If this is a small write inside eof, kick off a defrag */
	if (num_bytes < small_write &&
	    (start > 0 || end + 1 < inode->disk_i_size))
		btrfs_add_inode_defrag(NULL, inode);
}

/*
 * we create compressed extents in two phases.  The first
 * phase compresses a range of pages that have already been
 * locked (both pages and state bits are locked).
 *
 * This is done inside an ordered work queue, and the compression
 * is spread across many cpus.  The actual IO submission is step
 * two, and the ordered work queue takes care of making sure that
 * happens in the same order things were put onto the queue by
 * writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an
 * entry onto the work queue to write the uncompressed bytes.  This
 * makes sure that both compressed inodes and uncompressed inodes
 * are written in the same order that the flusher thread sent them
 * down.
 */
static noinline void compress_file_range(struct inode *inode,
					struct page *locked_page,
					u64 start, u64 end,
					struct async_cow *async_cow,
					int *num_added)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 num_bytes;
	u64 blocksize = fs_info->sectorsize;
	u64 actual_end;
	u64 isize = i_size_read(inode);
	int ret = 0;
	struct page **pages = NULL;
	unsigned long nr_pages;
	unsigned long total_compressed = 0;
	unsigned long total_in = 0;
	int i;
	int will_compress;
	int compress_type = fs_info->compress_type;
	int redirty = 0;

	inode_should_defrag(BTRFS_I(inode), start, end, end - start + 1,
			SZ_16K);

	actual_end = min_t(u64, isize, end + 1);
again:
	will_compress = 0;
	nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
	BUILD_BUG_ON((BTRFS_MAX_COMPRESSED % PAGE_SIZE) != 0);
	nr_pages = min_t(unsigned long, nr_pages,
			BTRFS_MAX_COMPRESSED / PAGE_SIZE);

	/*
	 * we don't want to send crud past the end of i_size through
	 * compression, that's just a waste of CPU time.  So, if the
	 * end of the file is before the start of our current
	 * requested range of bytes, we bail out to the uncompressed
	 * cleanup code that can deal with all of this.
	 *
	 * It isn't really the fastest way to fix things, but this is a
	 * very uncommon corner.
	 */
	if (actual_end <= start)
		goto cleanup_and_bail_uncompressed;

	total_compressed = actual_end - start;

	/*
	 * skip compression for a small file range(<=blocksize) that
	 * isn't an inline extent, since it doesn't save disk space at all.
	 */
	if (total_compressed <= blocksize &&
	   (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
		goto cleanup_and_bail_uncompressed;

	total_compressed = min_t(unsigned long, total_compressed,
			BTRFS_MAX_UNCOMPRESSED);
	num_bytes = ALIGN(end - start + 1, blocksize);
	num_bytes = max(blocksize, num_bytes);
	total_in = 0;
	ret = 0;

	/*
	 * we do compression for mount -o compress and when the
	 * inode has not been flagged as nocompress.  This flag can
	 * change at any time if we discover bad compression ratios.
	 */
	if (inode_need_compress(inode)) {
		WARN_ON(pages);
		pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
		if (!pages) {
			/* just bail out to the uncompressed code */
			goto cont;
		}

		if (BTRFS_I(inode)->force_compress)
			compress_type = BTRFS_I(inode)->force_compress;

		/*
		 * we need to call clear_page_dirty_for_io on each
		 * page in the range.  Otherwise applications with the file
		 * mmap'd can wander in and change the page contents while
		 * we are compressing them.
		 *
		 * If the compression fails for any reason, we set the pages
		 * dirty again later on.
		 */
		extent_range_clear_dirty_for_io(inode, start, end);
		redirty = 1;
		ret = btrfs_compress_pages(compress_type,
					   inode->i_mapping, start,
					   pages,
					   &nr_pages,
					   &total_in,
					   &total_compressed);

		if (!ret) {
			unsigned long offset = total_compressed &
				(PAGE_SIZE - 1);
			struct page *page = pages[nr_pages - 1];
			char *kaddr;

			/* zero the tail end of the last page, we might be
			 * sending it down to disk
			 */
			if (offset) {
				kaddr = kmap_atomic(page);
				memset(kaddr + offset, 0,
				       PAGE_SIZE - offset);
				kunmap_atomic(kaddr);
			}
			will_compress = 1;
		}
	}
cont:
	if (start == 0) {
		/* lets try to make an inline extent */
		if (ret || total_in < (actual_end - start)) {
			/* we didn't compress the entire range, try
			 * to make an uncompressed inline extent.
			 */
			ret = cow_file_range_inline(root, inode, start, end,
					    0, BTRFS_COMPRESS_NONE, NULL);
		} else {
			/* try making a compressed inline extent */
			ret = cow_file_range_inline(root, inode, start, end,
						    total_compressed,
						    compress_type, pages);
		}
		if (ret <= 0) {
			unsigned long clear_flags = EXTENT_DELALLOC |
				EXTENT_DEFRAG;
			unsigned long page_error_op;

			clear_flags |= (ret < 0) ? EXTENT_DO_ACCOUNTING : 0;
			page_error_op = ret < 0 ? PAGE_SET_ERROR : 0;

			/*
			 * inline extent creation worked or returned error,
			 * we don't need to create any more async work items.
			 * Unlock and free up our temp pages.
			 */
			extent_clear_unlock_delalloc(inode, start, end, end,
						     NULL, clear_flags,
						     PAGE_UNLOCK |
						     PAGE_CLEAR_DIRTY |
						     PAGE_SET_WRITEBACK |
						     page_error_op |
						     PAGE_END_WRITEBACK);
			btrfs_free_reserved_data_space_noquota(inode, start,
						end - start + 1);
			goto free_pages_out;
		}
	}

	if (will_compress) {
		/*
		 * we aren't doing an inline extent round the compressed size
		 * up to a block size boundary so the allocator does sane
		 * things
		 */
		total_compressed = ALIGN(total_compressed, blocksize);

		/*
		 * one last check to make sure the compression is really a
		 * win, compare the page count read with the blocks on disk
		 */
		total_in = ALIGN(total_in, PAGE_SIZE);
		if (total_compressed >= total_in) {
			will_compress = 0;
		} else {
			num_bytes = total_in;
			*num_added += 1;

			/*
			 * The async work queues will take care of doing actual
			 * allocation on disk for these compressed pages, and
			 * will submit them to the elevator.
			 */
			add_async_extent(async_cow, start, num_bytes,
					total_compressed, pages, nr_pages,
					compress_type);

			if (start + num_bytes < end) {
				start += num_bytes;
				pages = NULL;
				cond_resched();
				goto again;
			}
			return;
		}
	}
	if (pages) {
		/*
		 * the compression code ran but failed to make things smaller,
		 * free any pages it allocated and our page pointer array
		 */
		for (i = 0; i < nr_pages; i++) {
			WARN_ON(pages[i]->mapping);
			put_page(pages[i]);
		}
		kfree(pages);
		pages = NULL;
		total_compressed = 0;
		nr_pages = 0;

		/* flag the file so we don't compress in the future */
		if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) &&
		    !(BTRFS_I(inode)->force_compress)) {
			BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
		}
	}
cleanup_and_bail_uncompressed:
	/*
	 * No compression, but we still need to write the pages in the file
	 * we've been given so far.  redirty the locked page if it corresponds
	 * to our extent and set things up for the async work queue to run
	 * cow_file_range to do the normal delalloc dance.
	 */
	if (page_offset(locked_page) >= start &&
	    page_offset(locked_page) <= end)
		__set_page_dirty_nobuffers(locked_page);
		/* unlocked later on in the async handlers */

	if (redirty)
		extent_range_redirty_for_io(inode, start, end);
	add_async_extent(async_cow, start, end - start + 1, 0, NULL, 0,
			 BTRFS_COMPRESS_NONE);
	*num_added += 1;

	return;

free_pages_out:
	for (i = 0; i < nr_pages; i++) {
		WARN_ON(pages[i]->mapping);
		put_page(pages[i]);
	}
	kfree(pages);
}

static void free_async_extent_pages(struct async_extent *async_extent)
{
	int i;

	if (!async_extent->pages)
		return;

	for (i = 0; i < async_extent->nr_pages; i++) {
		WARN_ON(async_extent->pages[i]->mapping);
		put_page(async_extent->pages[i]);
	}
	kfree(async_extent->pages);
	async_extent->nr_pages = 0;
	async_extent->pages = NULL;
}

/*
 * phase two of compressed writeback.  This is the ordered portion
 * of the code, which only gets called in the order the work was
 * queued.  We walk all the async extents created by compress_file_range
 * and send them down to the disk.
 */
static noinline void submit_compressed_extents(struct inode *inode,
					      struct async_cow *async_cow)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct async_extent *async_extent;
	u64 alloc_hint = 0;
	struct btrfs_key ins;
	struct extent_map *em;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_io_tree *io_tree;
	int ret = 0;

again:
	while (!list_empty(&async_cow->extents)) {
		async_extent = list_entry(async_cow->extents.next,
					  struct async_extent, list);
		list_del(&async_extent->list);

		io_tree = &BTRFS_I(inode)->io_tree;

retry:
		/* did the compression code fall back to uncompressed IO? */
		if (!async_extent->pages) {
			int page_started = 0;
			unsigned long nr_written = 0;

			lock_extent(io_tree, async_extent->start,
				    async_extent->start +
				    async_extent->ram_size - 1);

			/* allocate blocks */
			ret = cow_file_range(inode, async_cow->locked_page,
					     async_extent->start,
					     async_extent->start +
					     async_extent->ram_size - 1,
					     async_extent->start +
					     async_extent->ram_size - 1,
					     &page_started, &nr_written, 0,
					     NULL);

			/* JDM XXX */

			/*
			 * if page_started, cow_file_range inserted an
			 * inline extent and took care of all the unlocking
			 * and IO for us.  Otherwise, we need to submit
			 * all those pages down to the drive.
			 */
			if (!page_started && !ret)
				extent_write_locked_range(io_tree,
						  inode, async_extent->start,
						  async_extent->start +
						  async_extent->ram_size - 1,
						  btrfs_get_extent,
						  WB_SYNC_ALL);
			else if (ret)
				unlock_page(async_cow->locked_page);
			kfree(async_extent);
			cond_resched();
			continue;
		}

		lock_extent(io_tree, async_extent->start,
			    async_extent->start + async_extent->ram_size - 1);

		ret = btrfs_reserve_extent(root, async_extent->ram_size,
					   async_extent->compressed_size,
					   async_extent->compressed_size,
					   0, alloc_hint, &ins, 1, 1);
		if (ret) {
			free_async_extent_pages(async_extent);

			if (ret == -ENOSPC) {
				unlock_extent(io_tree, async_extent->start,
					      async_extent->start +
					      async_extent->ram_size - 1);

				/*
				 * we need to redirty the pages if we decide to
				 * fallback to uncompressed IO, otherwise we
				 * will not submit these pages down to lower
				 * layers.
				 */
				extent_range_redirty_for_io(inode,
						async_extent->start,
						async_extent->start +
						async_extent->ram_size - 1);

				goto retry;
			}
			goto out_free;
		}
		/*
		 * here we're doing allocation and writeback of the
		 * compressed pages
		 */
		em = create_io_em(inode, async_extent->start,
				  async_extent->ram_size, /* len */
				  async_extent->start, /* orig_start */
				  ins.objectid, /* block_start */
				  ins.offset, /* block_len */
				  ins.offset, /* orig_block_len */
				  async_extent->ram_size, /* ram_bytes */
				  async_extent->compress_type,
				  BTRFS_ORDERED_COMPRESSED);
		if (IS_ERR(em))
			/* ret value is not necessary due to void function */
			goto out_free_reserve;
		free_extent_map(em);

		ret = btrfs_add_ordered_extent_compress(inode,
						async_extent->start,
						ins.objectid,
						async_extent->ram_size,
						ins.offset,
						BTRFS_ORDERED_COMPRESSED,
						async_extent->compress_type);
		if (ret) {
			btrfs_drop_extent_cache(BTRFS_I(inode),
						async_extent->start,
						async_extent->start +
						async_extent->ram_size - 1, 0);
			goto out_free_reserve;
		}
		btrfs_dec_block_group_reservations(fs_info, ins.objectid);

		/*
		 * clear dirty, set writeback and unlock the pages.
		 */
		extent_clear_unlock_delalloc(inode, async_extent->start,
				async_extent->start +
				async_extent->ram_size - 1,
				async_extent->start +
				async_extent->ram_size - 1,
				NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
				PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
				PAGE_SET_WRITEBACK);
		ret = btrfs_submit_compressed_write(inode,
				    async_extent->start,
				    async_extent->ram_size,
				    ins.objectid,
				    ins.offset, async_extent->pages,
				    async_extent->nr_pages);
		if (ret) {
			struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
			struct page *p = async_extent->pages[0];
			const u64 start = async_extent->start;
			const u64 end = start + async_extent->ram_size - 1;

			p->mapping = inode->i_mapping;
			tree->ops->writepage_end_io_hook(p, start, end,
							 NULL, 0);
			p->mapping = NULL;
			extent_clear_unlock_delalloc(inode, start, end, end,
						     NULL, 0,
						     PAGE_END_WRITEBACK |
						     PAGE_SET_ERROR);
			free_async_extent_pages(async_extent);
		}
		alloc_hint = ins.objectid + ins.offset;
		kfree(async_extent);
		cond_resched();
	}
	return;
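/*
 * Error paths: out_free_reserve returns the reserved extent to the allocator,
 * then falls through to out_free, which unlocks the range, clears the
 * delalloc/accounting bits and marks the pages as failed writeback.
 */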
out_free_reserve:
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
out_free:
	extent_clear_unlock_delalloc(inode, async_extent->start,
				     async_extent->start +
				     async_extent->ram_size - 1,
				     async_extent->start +
				     async_extent->ram_size - 1,
				     NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
				     EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
				     PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
				     PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK |
				     PAGE_SET_ERROR);
	free_async_extent_pages(async_extent);
	kfree(async_extent);
	goto again;
}

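/*
 * Look for an existing extent map overlapping [start, start + num_bytes) and
 * use its disk block as the allocation hint, so new extents for this inode
 * tend to land near data that is already on disk.
 */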
static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
				      u64 num_bytes)
{
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_map *em;
	u64 alloc_hint = 0;

	read_lock(&em_tree->lock);
	em = search_extent_mapping(em_tree, start, num_bytes);
	if (em) {
		/*
		 * if block start isn't an actual block number then find the
		 * first block in this inode and use that as a hint.  If that
		 * block is also bogus then just don't worry about it.
		 */
		if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
			free_extent_map(em);
			em = search_extent_mapping(em_tree, 0, 0);
			if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
				alloc_hint = em->block_start;
			if (em)
				free_extent_map(em);
		} else {
			alloc_hint = em->block_start;
			free_extent_map(em);
		}
	}
	read_unlock(&em_tree->lock);

	return alloc_hint;
}

/*
 * when extent_io.c finds a delayed allocation range in the file,
 * the call backs end up in this code.  The basic idea is to
 * allocate extents on disk for the range, and create ordered data structs
 * in ram to track those extents.
 *
 * locked_page is the page that writepage had locked already.  We use
 * it to make sure we don't do extra locks or unlocks.
 *
 * *page_started is set to one if we unlock locked_page and do everything
 * required to start IO on it.  It may be clean and already done with
 * IO when we return.
 */
static noinline int cow_file_range(struct inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, u64 delalloc_end,
				   int *page_started, unsigned long *nr_written,
				   int unlock, struct btrfs_dedupe_hash *hash)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 alloc_hint = 0;
	u64 num_bytes;
	unsigned long ram_size;
	u64 disk_num_bytes;
	u64 cur_alloc_size;
	u64 blocksize = fs_info->sectorsize;
	struct btrfs_key ins;
	struct extent_map *em;
	int ret = 0;

	if (btrfs_is_free_space_inode(BTRFS_I(inode))) {
		WARN_ON_ONCE(1);
		ret = -EINVAL;
		goto out_unlock;
	}

	num_bytes = ALIGN(end - start + 1, blocksize);
	num_bytes = max(blocksize, num_bytes);
	disk_num_bytes = num_bytes;

	inode_should_defrag(BTRFS_I(inode), start, end, num_bytes, SZ_64K);

	if (start == 0) {
		/* lets try to make an inline extent */
		ret = cow_file_range_inline(root, inode, start, end, 0,
					BTRFS_COMPRESS_NONE, NULL);
		if (ret == 0) {
			extent_clear_unlock_delalloc(inode, start, end,
				     delalloc_end, NULL,
				     EXTENT_LOCKED | EXTENT_DELALLOC |
				     EXTENT_DEFRAG, PAGE_UNLOCK |
				     PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK |
				     PAGE_END_WRITEBACK);
			btrfs_free_reserved_data_space_noquota(inode, start,
						end - start + 1);
			*nr_written = *nr_written +
			     (end - start + PAGE_SIZE) / PAGE_SIZE;
			*page_started = 1;
			goto out;
		} else if (ret < 0) {
			goto out_unlock;
		}
	}

	BUG_ON(disk_num_bytes >
	       btrfs_super_total_bytes(fs_info->super_copy));

	alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
	btrfs_drop_extent_cache(BTRFS_I(inode), start,
			start + num_bytes - 1, 0);

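	/*
	 * Allocate disk extents for the range, largest chunk first.  Each
	 * successful reservation gets an extent map and an ordered extent
	 * before the covered part of the range has its delalloc bits cleared
	 * and is handed back for writepage to submit.
	 */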
	while (disk_num_bytes > 0) {
		unsigned long op;

		cur_alloc_size = disk_num_bytes;
		ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size,
					   fs_info->sectorsize, 0, alloc_hint,
					   &ins, 1, 1);
		if (ret < 0)
			goto out_unlock;

		ram_size = ins.offset;
		em = create_io_em(inode, start, ins.offset, /* len */
				  start, /* orig_start */
				  ins.objectid, /* block_start */
				  ins.offset, /* block_len */
				  ins.offset, /* orig_block_len */
				  ram_size, /* ram_bytes */
				  BTRFS_COMPRESS_NONE, /* compress_type */
				  BTRFS_ORDERED_REGULAR /* type */);
		if (IS_ERR(em))
			goto out_reserve;
		free_extent_map(em);

		cur_alloc_size = ins.offset;
		ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
					       ram_size, cur_alloc_size, 0);
		if (ret)
			goto out_drop_extent_cache;

		if (root->root_key.objectid ==
		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
			ret = btrfs_reloc_clone_csums(inode, start,
						      cur_alloc_size);
			if (ret)
				goto out_drop_extent_cache;
		}

		btrfs_dec_block_group_reservations(fs_info, ins.objectid);

		if (disk_num_bytes < cur_alloc_size)
			break;

		/* we're not doing compressed IO, don't unlock the first
		 * page (which the caller expects to stay locked), don't
		 * clear any dirty bits and don't set any writeback bits
		 *
		 * Do set the Private2 bit so we know this page was properly
		 * setup for writepage
		 */
		op = unlock ? PAGE_UNLOCK : 0;
		op |= PAGE_SET_PRIVATE2;

		extent_clear_unlock_delalloc(inode, start,
					     start + ram_size - 1,
					     delalloc_end, locked_page,
					     EXTENT_LOCKED | EXTENT_DELALLOC,
					     op);
		disk_num_bytes -= cur_alloc_size;
		num_bytes -= cur_alloc_size;
		alloc_hint = ins.objectid + ins.offset;
		start += cur_alloc_size;
	}
out:
	return ret;

out_drop_extent_cache:
	btrfs_drop_extent_cache(BTRFS_I(inode), start, start + ram_size - 1, 0);
out_reserve:
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
out_unlock:
	extent_clear_unlock_delalloc(inode, start, end, delalloc_end,
				     locked_page,
				     EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
				     EXTENT_DELALLOC | EXTENT_DEFRAG,
				     PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
				     PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK);
	goto out;
}

/*
 * work queue call back to start compression on a file and pages
 */
static noinline void async_cow_start(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	int num_added = 0;
	async_cow = container_of(work, struct async_cow, work);

	compress_file_range(async_cow->inode, async_cow->locked_page,
			    async_cow->start, async_cow->end, async_cow,
			    &num_added);
	if (num_added == 0) {
		btrfs_add_delayed_iput(async_cow->inode);
		async_cow->inode = NULL;
	}
}

/*
 * work queue call back to submit previously compressed pages
 */
static noinline void async_cow_submit(struct btrfs_work *work)
{
	struct btrfs_fs_info *fs_info;
	struct async_cow *async_cow;
	struct btrfs_root *root;
	unsigned long nr_pages;

	async_cow = container_of(work, struct async_cow, work);

	root = async_cow->root;
	fs_info = root->fs_info;
	nr_pages = (async_cow->end - async_cow->start + PAGE_SIZE) >>
		PAGE_SHIFT;

	/*
	 * atomic_sub_return implies a barrier for waitqueue_active
	 */
	if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
	    5 * SZ_1M &&
	    waitqueue_active(&fs_info->async_submit_wait))
		wake_up(&fs_info->async_submit_wait);

	if (async_cow->inode)
		submit_compressed_extents(async_cow->inode, async_cow);
}

static noinline void async_cow_free(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	async_cow = container_of(work, struct async_cow, work);
	if (async_cow->inode)
		btrfs_add_delayed_iput(async_cow->inode);
	kfree(async_cow);
}

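/*
 * Kick off async (possibly compressed) writeback for the range: split it
 * into chunks (up to SZ_512K when compression may be used), queue an
 * async_cow work item per chunk on the delalloc workqueue, and throttle
 * against the global count of in-flight async delalloc pages.
 */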
static int cow_file_range_async(struct inode *inode, struct page *locked_page,
				u64 start, u64 end, int *page_started,
				unsigned long *nr_written)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct async_cow *async_cow;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	unsigned long nr_pages;
	u64 cur_end;

	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
			 1, 0, NULL, GFP_NOFS);
	while (start < end) {
		async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
		BUG_ON(!async_cow); /* -ENOMEM */
		async_cow->inode = igrab(inode);
		async_cow->root = root;
		async_cow->locked_page = locked_page;
		async_cow->start = start;

		if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS &&
		    !btrfs_test_opt(fs_info, FORCE_COMPRESS))
			cur_end = end;
		else
			cur_end = min(end, start + SZ_512K - 1);

		async_cow->end = cur_end;
		INIT_LIST_HEAD(&async_cow->extents);

		btrfs_init_work(&async_cow->work,
				btrfs_delalloc_helper,
				async_cow_start, async_cow_submit,
				async_cow_free);

		nr_pages = (cur_end - start + PAGE_SIZE) >>
			PAGE_SHIFT;
		atomic_add(nr_pages, &fs_info->async_delalloc_pages);

		btrfs_queue_work(fs_info->delalloc_workers, &async_cow->work);

		while (atomic_read(&fs_info->async_submit_draining) &&
		       atomic_read(&fs_info->async_delalloc_pages)) {
			wait_event(fs_info->async_submit_wait,
				   (atomic_read(&fs_info->async_delalloc_pages) ==
				    0));
		}

		*nr_written += nr_pages;
		start = cur_end + 1;
	}
	*page_started = 1;
	return 0;
}

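/*
 * Return 0 only when the csum lookup succeeds and finds no checksums in the
 * byte range; otherwise return 1.  The nocow path uses this to force COW
 * when csums are present, so a given extent never ends up with a mix of
 * checksummed and unchecksummed data.
 */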
static noinline int csum_exist_in_range(struct btrfs_fs_info *fs_info,
					u64 bytenr, u64 num_bytes)
{
	int ret;
	struct btrfs_ordered_sum *sums;
	LIST_HEAD(list);

	ret = btrfs_lookup_csums_range(fs_info->csum_root, bytenr,
				       bytenr + num_bytes - 1, &list, 0);
	if (ret == 0 && list_empty(&list))
		return 0;

	while (!list_empty(&list)) {
		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
		list_del(&sums->list);
		kfree(sums);
	}
	return 1;
}

/*
 * The nocow writeback callback.  This checks for snapshots or COW copies
 * of the extents that exist in the file, and COWs the file as required.
 *
 * If no cow copies or snapshots exist, we write directly to the existing
 * blocks on disk
 */
static noinline int run_delalloc_nocow(struct inode *inode,
				       struct page *locked_page,
			      u64 start, u64 end, int *page_started, int force,
			      unsigned long *nr_written)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key found_key;
	struct extent_map *em;
	u64 cow_start;
	u64 cur_offset;
	u64 extent_end;
	u64 extent_offset;
	u64 disk_bytenr;
	u64 num_bytes;
	u64 disk_num_bytes;
	u64 ram_bytes;
	int extent_type;
	int ret, err;
	int type;
	int nocow;
	int check_prev = 1;
	bool nolock;
	u64 ino = btrfs_ino(BTRFS_I(inode));

	path = btrfs_alloc_path();
	if (!path) {
		extent_clear_unlock_delalloc(inode, start, end, end,
					     locked_page,
					     EXTENT_LOCKED | EXTENT_DELALLOC |
					     EXTENT_DO_ACCOUNTING |
					     EXTENT_DEFRAG, PAGE_UNLOCK |
					     PAGE_CLEAR_DIRTY |
					     PAGE_SET_WRITEBACK |
					     PAGE_END_WRITEBACK);
		return -ENOMEM;
	}

	nolock = btrfs_is_free_space_inode(BTRFS_I(inode));

	cow_start = (u64)-1;
	cur_offset = start;
	while (1) {
		ret = btrfs_lookup_file_extent(NULL, root, path, ino,
					       cur_offset, 0);
		if (ret < 0)
			goto error;
		if (ret > 0 && path->slots[0] > 0 && check_prev) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0] - 1);
			if (found_key.objectid == ino &&
			    found_key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		check_prev = 0;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto error;
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		nocow = 0;
		disk_bytenr = 0;
		num_bytes = 0;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		if (found_key.objectid > ino)
			break;
		if (WARN_ON_ONCE(found_key.objectid < ino) ||
		    found_key.type < BTRFS_EXTENT_DATA_KEY) {
			path->slots[0]++;
			goto next_slot;
		}
		if (found_key.type > BTRFS_EXTENT_DATA_KEY ||
		    found_key.offset > end)
			break;

		if (found_key.offset > cur_offset) {
			extent_end = found_key.offset;
			extent_type = 0;
			goto out_check;
		}

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = found_key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
			disk_num_bytes =
				btrfs_file_extent_disk_num_bytes(leaf, fi);
			if (extent_end <= start) {
				path->slots[0]++;
				goto next_slot;
			}
			if (disk_bytenr == 0)
				goto out_check;
			if (btrfs_file_extent_compression(leaf, fi) ||
			    btrfs_file_extent_encryption(leaf, fi) ||
			    btrfs_file_extent_other_encoding(leaf, fi))
				goto out_check;
			if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
				goto out_check;
			if (btrfs_extent_readonly(fs_info, disk_bytenr))
				goto out_check;
			if (btrfs_cross_ref_exist(root, ino,
						  found_key.offset -
						  extent_offset, disk_bytenr))
				goto out_check;
			disk_bytenr += extent_offset;
			disk_bytenr += cur_offset - found_key.offset;
			num_bytes = min(end + 1, extent_end) - cur_offset;
			/*
			 * if there are pending snapshots for this root,
			 * we fall into common COW way.
			 */
			if (!nolock) {
				err = btrfs_start_write_no_snapshoting(root);
				if (!err)
					goto out_check;
			}
			/*
			 * force cow if csum exists in the range.
			 * this ensure that csum for a given extent are
			 * either valid or do not exist.
			 */
			if (csum_exist_in_range(fs_info, disk_bytenr,
						num_bytes))
				goto out_check;
			if (!btrfs_inc_nocow_writers(fs_info, disk_bytenr))
				goto out_check;
			nocow = 1;
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = found_key.offset +
				btrfs_file_extent_inline_len(leaf,
						     path->slots[0], fi);
			extent_end = ALIGN(extent_end,
					   fs_info->sectorsize);
		} else {
			BUG_ON(1);
		}
out_check:
		if (extent_end <= start) {
			path->slots[0]++;
			if (!nolock && nocow)
				btrfs_end_write_no_snapshoting(root);
			if (nocow)
				btrfs_dec_nocow_writers(fs_info, disk_bytenr);
			goto next_slot;
		}
		if (!nocow) {
			if (cow_start == (u64)-1)
				cow_start = cur_offset;
			cur_offset = extent_end;
			if (cur_offset > end)
				break;
			path->slots[0]++;
			goto next_slot;
		}

		btrfs_release_path(path);
		if (cow_start != (u64)-1) {
			ret = cow_file_range(inode, locked_page,
					     cow_start, found_key.offset - 1,
					     end, page_started, nr_written, 1,
					     NULL);
			if (ret) {
				if (!nolock && nocow)
					btrfs_end_write_no_snapshoting(root);
				if (nocow)
					btrfs_dec_nocow_writers(fs_info,
								disk_bytenr);
				goto error;
			}
			cow_start = (u64)-1;
		}

		if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			u64 orig_start = found_key.offset - extent_offset;

			em = create_io_em(inode, cur_offset, num_bytes,
					  orig_start,
					  disk_bytenr, /* block_start */
					  num_bytes, /* block_len */
					  disk_num_bytes, /* orig_block_len */
					  ram_bytes, BTRFS_COMPRESS_NONE,
					  BTRFS_ORDERED_PREALLOC);
			if (IS_ERR(em)) {
				if (!nolock && nocow)
					btrfs_end_write_no_snapshoting(root);
				if (nocow)
					btrfs_dec_nocow_writers(fs_info,
								disk_bytenr);
				ret = PTR_ERR(em);
				goto error;
			}
			free_extent_map(em);
		}

		if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			type = BTRFS_ORDERED_PREALLOC;
		} else {
			type = BTRFS_ORDERED_NOCOW;
		}

		ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
					       num_bytes, num_bytes, type);
		if (nocow)
			btrfs_dec_nocow_writers(fs_info, disk_bytenr);
		BUG_ON(ret); /* -ENOMEM */

		if (root->root_key.objectid ==
		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
			ret = btrfs_reloc_clone_csums(inode, cur_offset,
						      num_bytes);
			if (ret) {
				if (!nolock && nocow)
					btrfs_end_write_no_snapshoting(root);
				goto error;
			}
		}

		extent_clear_unlock_delalloc(inode, cur_offset,
					     cur_offset + num_bytes - 1, end,
					     locked_page, EXTENT_LOCKED |
					     EXTENT_DELALLOC |
					     EXTENT_CLEAR_DATA_RESV,
					     PAGE_UNLOCK | PAGE_SET_PRIVATE2);

		if (!nolock && nocow)
			btrfs_end_write_no_snapshoting(root);
		cur_offset = extent_end;
		if (cur_offset > end)
			break;
	}
	btrfs_release_path(path);

	if (cur_offset <= end && cow_start == (u64)-1) {
		cow_start = cur_offset;
		cur_offset = end;
	}

	if (cow_start != (u64)-1) {
		ret = cow_file_range(inode, locked_page, cow_start, end, end,
				     page_started, nr_written, 1, NULL);
		if (ret)
			goto error;
	}

error:
	if (ret && cur_offset < end)
		extent_clear_unlock_delalloc(inode, cur_offset, end, end,
					     locked_page, EXTENT_LOCKED |
					     EXTENT_DELALLOC | EXTENT_DEFRAG |
					     EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
					     PAGE_CLEAR_DIRTY |
					     PAGE_SET_WRITEBACK |
					     PAGE_END_WRITEBACK);
	btrfs_free_path(path);
	return ret;
}

47059d93 WS |
1461 | static inline int need_force_cow(struct inode *inode, u64 start, u64 end) |
1462 | { | |
1463 | ||
1464 | if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) && | |
1465 | !(BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)) | |
1466 | return 0; | |
1467 | ||
1468 | /* | |
1469 | * @defrag_bytes is a hint value, no spinlock is held here; |
1470 | * if it is not zero, the file is being defragged. |
1471 | * Force COW if the given extent needs to be defragged. |
1472 | */ | |
1473 | if (BTRFS_I(inode)->defrag_bytes && | |
1474 | test_range_bit(&BTRFS_I(inode)->io_tree, start, end, | |
1475 | EXTENT_DEFRAG, 0, NULL)) | |
1476 | return 1; | |
1477 | ||
1478 | return 0; | |
1479 | } | |
1480 | ||
d352ac68 CM |
1481 | /* |
1482 | * extent_io.c callback to do delayed allocation processing |
1483 | */ | |
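/*
 * Added summary (not in the original source) of the dispatch below:
 * NODATACOW inodes go to run_delalloc_nocow() with force set,
 * PREALLOC inodes with force clear, both only when need_force_cow()
 * does not demand a COW (e.g. the range is being defragged);
 * otherwise the range is COWed directly via cow_file_range(), or
 * handed to cow_file_range_async() when the inode should be
 * compressed.
 */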
c8b97818 | 1484 | static int run_delalloc_range(struct inode *inode, struct page *locked_page, |
771ed689 CM |
1485 | u64 start, u64 end, int *page_started, |
1486 | unsigned long *nr_written) | |
be20aa9d | 1487 | { |
be20aa9d | 1488 | int ret; |
47059d93 | 1489 | int force_cow = need_force_cow(inode, start, end); |
a2135011 | 1490 | |
47059d93 | 1491 | if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW && !force_cow) { |
c8b97818 | 1492 | ret = run_delalloc_nocow(inode, locked_page, start, end, |
d397712b | 1493 | page_started, 1, nr_written); |
47059d93 | 1494 | } else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC && !force_cow) { |
d899e052 | 1495 | ret = run_delalloc_nocow(inode, locked_page, start, end, |
d397712b | 1496 | page_started, 0, nr_written); |
7816030e | 1497 | } else if (!inode_need_compress(inode)) { |
dda3245e WX |
1498 | ret = cow_file_range(inode, locked_page, start, end, end, |
1499 | page_started, nr_written, 1, NULL); | |
7ddf5a42 JB |
1500 | } else { |
1501 | set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, | |
1502 | &BTRFS_I(inode)->runtime_flags); | |
771ed689 | 1503 | ret = cow_file_range_async(inode, locked_page, start, end, |
d397712b | 1504 | page_started, nr_written); |
7ddf5a42 | 1505 | } |
b888db2b CM |
1506 | return ret; |
1507 | } | |
1508 | ||
1bf85046 JM |
1509 | static void btrfs_split_extent_hook(struct inode *inode, |
1510 | struct extent_state *orig, u64 split) | |
9ed74f2d | 1511 | { |
dcab6a3b JB |
1512 | u64 size; |
1513 | ||
0ca1f7ce | 1514 | /* not delalloc, ignore it */ |
9ed74f2d | 1515 | if (!(orig->state & EXTENT_DELALLOC)) |
1bf85046 | 1516 | return; |
9ed74f2d | 1517 | |
dcab6a3b JB |
1518 | size = orig->end - orig->start + 1; |
1519 | if (size > BTRFS_MAX_EXTENT_SIZE) { | |
823bb20a | 1520 | u32 num_extents; |
dcab6a3b JB |
1521 | u64 new_size; |
1522 | ||
1523 | /* | |
ba117213 JB |
1524 | * See the explanation in btrfs_merge_extent_hook, the same |
1525 | * applies here, just in reverse. | |
dcab6a3b JB |
1526 | */ |
1527 | new_size = orig->end - split + 1; | |
823bb20a | 1528 | num_extents = count_max_extents(new_size); |
ba117213 | 1529 | new_size = split - orig->start; |
823bb20a DS |
1530 | num_extents += count_max_extents(new_size); |
1531 | if (count_max_extents(size) >= num_extents) | |
dcab6a3b JB |
1532 | return; |
1533 | } | |
1534 | ||
9e0baf60 JB |
1535 | spin_lock(&BTRFS_I(inode)->lock); |
1536 | BTRFS_I(inode)->outstanding_extents++; | |
1537 | spin_unlock(&BTRFS_I(inode)->lock); | |
9ed74f2d JB |
1538 | } |
1539 | ||
1540 | /* | |
1541 | * extent_io.c merge_extent_hook, used to track merged delayed allocation | |
1542 | * extents, i.e. new extents that are merged onto old |
1543 | * extents such as during sequential writes, so we can properly |
1544 | * account for the metadata space we'll need. |
1545 | */ | |
1bf85046 JM |
1546 | static void btrfs_merge_extent_hook(struct inode *inode, |
1547 | struct extent_state *new, | |
1548 | struct extent_state *other) | |
9ed74f2d | 1549 | { |
dcab6a3b | 1550 | u64 new_size, old_size; |
823bb20a | 1551 | u32 num_extents; |
dcab6a3b | 1552 | |
9ed74f2d JB |
1553 | /* not delalloc, ignore it */ |
1554 | if (!(other->state & EXTENT_DELALLOC)) | |
1bf85046 | 1555 | return; |
9ed74f2d | 1556 | |
8461a3de JB |
1557 | if (new->start > other->start) |
1558 | new_size = new->end - other->start + 1; | |
1559 | else | |
1560 | new_size = other->end - new->start + 1; | |
dcab6a3b JB |
1561 | |
1562 | /* we're not bigger than the max, unreserve the space and go */ | |
1563 | if (new_size <= BTRFS_MAX_EXTENT_SIZE) { | |
1564 | spin_lock(&BTRFS_I(inode)->lock); | |
1565 | BTRFS_I(inode)->outstanding_extents--; | |
1566 | spin_unlock(&BTRFS_I(inode)->lock); | |
1567 | return; | |
1568 | } | |
1569 | ||
1570 | /* | |
ba117213 JB |
1571 | * We have to add up either side to figure out how many extents were |
1572 | * accounted for before we merged into one big extent. If the number of | |
1573 | * extents we accounted for is <= the amount we need for the new range | |
1574 | * then we can return, otherwise drop. Think of it like this | |
1575 | * | |
1576 | * [ 4k][MAX_SIZE] | |
1577 | * | |
1578 | * So we've grown the extent by a MAX_SIZE extent, this would mean we | |
1579 | * need 2 outstanding extents, on one side we have 1 and the other side | |
1580 | * we have 1 so they are == and we can return. But in this case | |
1581 | * | |
1582 | * [MAX_SIZE+4k][MAX_SIZE+4k] | |
1583 | * | |
1584 | * Each range on their own accounts for 2 extents, but merged together | |
1585 | * they are only 3 extents worth of accounting, so we need to drop in | |
1586 | * this case. | |
dcab6a3b | 1587 | */ |
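/*
 * Added illustration (not in the original source), assuming
 * BTRFS_MAX_EXTENT_SIZE is 128M: merging two 128M+4K delalloc ranges
 * gives new_size = 256M+8K, so count_max_extents(new_size) == 3,
 * while the two halves each accounted for 2 extents (4 in total).
 * Since 3 < 4 the check below does not return early, and one
 * outstanding extent is dropped.
 */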
ba117213 | 1588 | old_size = other->end - other->start + 1; |
823bb20a | 1589 | num_extents = count_max_extents(old_size); |
ba117213 | 1590 | old_size = new->end - new->start + 1; |
823bb20a DS |
1591 | num_extents += count_max_extents(old_size); |
1592 | if (count_max_extents(new_size) >= num_extents) | |
dcab6a3b JB |
1593 | return; |
1594 | ||
9e0baf60 JB |
1595 | spin_lock(&BTRFS_I(inode)->lock); |
1596 | BTRFS_I(inode)->outstanding_extents--; | |
1597 | spin_unlock(&BTRFS_I(inode)->lock); | |
9ed74f2d JB |
1598 | } |
1599 | ||
eb73c1b7 MX |
1600 | static void btrfs_add_delalloc_inodes(struct btrfs_root *root, |
1601 | struct inode *inode) | |
1602 | { | |
0b246afa JM |
1603 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
1604 | ||
eb73c1b7 MX |
1605 | spin_lock(&root->delalloc_lock); |
1606 | if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) { | |
1607 | list_add_tail(&BTRFS_I(inode)->delalloc_inodes, | |
1608 | &root->delalloc_inodes); | |
1609 | set_bit(BTRFS_INODE_IN_DELALLOC_LIST, | |
1610 | &BTRFS_I(inode)->runtime_flags); | |
1611 | root->nr_delalloc_inodes++; | |
1612 | if (root->nr_delalloc_inodes == 1) { | |
0b246afa | 1613 | spin_lock(&fs_info->delalloc_root_lock); |
eb73c1b7 MX |
1614 | BUG_ON(!list_empty(&root->delalloc_root)); |
1615 | list_add_tail(&root->delalloc_root, | |
0b246afa JM |
1616 | &fs_info->delalloc_roots); |
1617 | spin_unlock(&fs_info->delalloc_root_lock); | |
eb73c1b7 MX |
1618 | } |
1619 | } | |
1620 | spin_unlock(&root->delalloc_lock); | |
1621 | } | |
1622 | ||
1623 | static void btrfs_del_delalloc_inode(struct btrfs_root *root, | |
9e3e97f4 | 1624 | struct btrfs_inode *inode) |
eb73c1b7 | 1625 | { |
9e3e97f4 | 1626 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); |
0b246afa | 1627 | |
eb73c1b7 | 1628 | spin_lock(&root->delalloc_lock); |
9e3e97f4 NB |
1629 | if (!list_empty(&inode->delalloc_inodes)) { |
1630 | list_del_init(&inode->delalloc_inodes); | |
eb73c1b7 | 1631 | clear_bit(BTRFS_INODE_IN_DELALLOC_LIST, |
9e3e97f4 | 1632 | &inode->runtime_flags); |
eb73c1b7 MX |
1633 | root->nr_delalloc_inodes--; |
1634 | if (!root->nr_delalloc_inodes) { | |
0b246afa | 1635 | spin_lock(&fs_info->delalloc_root_lock); |
eb73c1b7 MX |
1636 | BUG_ON(list_empty(&root->delalloc_root)); |
1637 | list_del_init(&root->delalloc_root); | |
0b246afa | 1638 | spin_unlock(&fs_info->delalloc_root_lock); |
eb73c1b7 MX |
1639 | } |
1640 | } | |
1641 | spin_unlock(&root->delalloc_lock); | |
1642 | } | |
1643 | ||
d352ac68 CM |
1644 | /* |
1645 | * extent_io.c set_bit_hook, used to track delayed allocation | |
1646 | * bytes in this file, and to maintain the list of inodes that | |
1647 | * have pending delalloc work to be done. | |
1648 | */ | |
1bf85046 | 1649 | static void btrfs_set_bit_hook(struct inode *inode, |
9ee49a04 | 1650 | struct extent_state *state, unsigned *bits) |
291d673e | 1651 | { |
9ed74f2d | 1652 | |
0b246afa JM |
1653 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
1654 | ||
47059d93 WS |
1655 | if ((*bits & EXTENT_DEFRAG) && !(*bits & EXTENT_DELALLOC)) |
1656 | WARN_ON(1); | |
75eff68e CM |
1657 | /* |
1658 | * set_bit and clear_bit hooks normally require _irqsave/restore |
27160b6b | 1659 | * but in this case, we are only testing for the DELALLOC |
75eff68e CM |
1660 | * bit, which is only set or cleared with irqs on |
1661 | */ | |
0ca1f7ce | 1662 | if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) { |
291d673e | 1663 | struct btrfs_root *root = BTRFS_I(inode)->root; |
0ca1f7ce | 1664 | u64 len = state->end + 1 - state->start; |
70ddc553 | 1665 | bool do_list = !btrfs_is_free_space_inode(BTRFS_I(inode)); |
9ed74f2d | 1666 | |
9e0baf60 | 1667 | if (*bits & EXTENT_FIRST_DELALLOC) { |
0ca1f7ce | 1668 | *bits &= ~EXTENT_FIRST_DELALLOC; |
9e0baf60 JB |
1669 | } else { |
1670 | spin_lock(&BTRFS_I(inode)->lock); | |
1671 | BTRFS_I(inode)->outstanding_extents++; | |
1672 | spin_unlock(&BTRFS_I(inode)->lock); | |
1673 | } | |
287a0ab9 | 1674 | |
6a3891c5 | 1675 | /* For sanity tests */ |
0b246afa | 1676 | if (btrfs_is_testing(fs_info)) |
6a3891c5 JB |
1677 | return; |
1678 | ||
0b246afa JM |
1679 | __percpu_counter_add(&fs_info->delalloc_bytes, len, |
1680 | fs_info->delalloc_batch); | |
df0af1a5 | 1681 | spin_lock(&BTRFS_I(inode)->lock); |
0ca1f7ce | 1682 | BTRFS_I(inode)->delalloc_bytes += len; |
47059d93 WS |
1683 | if (*bits & EXTENT_DEFRAG) |
1684 | BTRFS_I(inode)->defrag_bytes += len; | |
df0af1a5 | 1685 | if (do_list && !test_bit(BTRFS_INODE_IN_DELALLOC_LIST, |
eb73c1b7 MX |
1686 | &BTRFS_I(inode)->runtime_flags)) |
1687 | btrfs_add_delalloc_inodes(root, inode); | |
df0af1a5 | 1688 | spin_unlock(&BTRFS_I(inode)->lock); |
291d673e | 1689 | } |
291d673e CM |
1690 | } |
1691 | ||
d352ac68 CM |
1692 | /* |
1693 | * extent_io.c clear_bit_hook, see set_bit_hook for why | |
1694 | */ | |
6fc0ef68 | 1695 | static void btrfs_clear_bit_hook(struct btrfs_inode *inode, |
41074888 | 1696 | struct extent_state *state, |
9ee49a04 | 1697 | unsigned *bits) |
291d673e | 1698 | { |
6fc0ef68 | 1699 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); |
47059d93 | 1700 | u64 len = state->end + 1 - state->start; |
823bb20a | 1701 | u32 num_extents = count_max_extents(len); |
47059d93 | 1702 | |
6fc0ef68 | 1703 | spin_lock(&inode->lock); |
47059d93 | 1704 | if ((state->state & EXTENT_DEFRAG) && (*bits & EXTENT_DEFRAG)) |
6fc0ef68 NB |
1705 | inode->defrag_bytes -= len; |
1706 | spin_unlock(&inode->lock); | |
47059d93 | 1707 | |
75eff68e CM |
1708 | /* |
1709 | * set_bit and clear_bit hooks normally require _irqsave/restore |
27160b6b | 1710 | * but in this case, we are only testing for the DELALLOC |
75eff68e CM |
1711 | * bit, which is only set or cleared with irqs on |
1712 | */ | |
0ca1f7ce | 1713 | if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) { |
6fc0ef68 NB |
1714 | struct btrfs_root *root = inode->root; |
1715 | bool do_list = !btrfs_is_free_space_inode(inode); | |
bcbfce8a | 1716 | |
9e0baf60 | 1717 | if (*bits & EXTENT_FIRST_DELALLOC) { |
0ca1f7ce | 1718 | *bits &= ~EXTENT_FIRST_DELALLOC; |
9e0baf60 | 1719 | } else if (!(*bits & EXTENT_DO_ACCOUNTING)) { |
6fc0ef68 NB |
1720 | spin_lock(&inode->lock); |
1721 | inode->outstanding_extents -= num_extents; | |
1722 | spin_unlock(&inode->lock); | |
9e0baf60 | 1723 | } |
0ca1f7ce | 1724 | |
b6d08f06 JB |
1725 | /* |
1726 | * We don't reserve metadata space for space cache inodes so we | |
1727 | * don't need to call btrfs_delalloc_release_metadata if there is an |
1728 | * error. | |
1729 | */ | |
1730 | if (*bits & EXTENT_DO_ACCOUNTING && | |
0b246afa | 1731 | root != fs_info->tree_root) |
6fc0ef68 | 1732 | btrfs_delalloc_release_metadata(inode, len); |
0ca1f7ce | 1733 | |
6a3891c5 | 1734 | /* For sanity tests. */ |
0b246afa | 1735 | if (btrfs_is_testing(fs_info)) |
6a3891c5 JB |
1736 | return; |
1737 | ||
0cb59c99 | 1738 | if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID |
18513091 WX |
1739 | && do_list && !(state->state & EXTENT_NORESERVE) |
1740 | && (*bits & (EXTENT_DO_ACCOUNTING | | |
1741 | EXTENT_CLEAR_DATA_RESV))) | |
6fc0ef68 NB |
1742 | btrfs_free_reserved_data_space_noquota( |
1743 | &inode->vfs_inode, | |
51773bec | 1744 | state->start, len); |
9ed74f2d | 1745 | |
0b246afa JM |
1746 | __percpu_counter_add(&fs_info->delalloc_bytes, -len, |
1747 | fs_info->delalloc_batch); | |
6fc0ef68 NB |
1748 | spin_lock(&inode->lock); |
1749 | inode->delalloc_bytes -= len; | |
1750 | if (do_list && inode->delalloc_bytes == 0 && | |
df0af1a5 | 1751 | test_bit(BTRFS_INODE_IN_DELALLOC_LIST, |
9e3e97f4 NB |
1752 | &inode->runtime_flags)) |
1753 | btrfs_del_delalloc_inode(root, inode); | |
6fc0ef68 | 1754 | spin_unlock(&inode->lock); |
291d673e | 1755 | } |
291d673e CM |
1756 | } |
1757 | ||
d352ac68 CM |
1758 | /* |
1759 | * extent_io.c merge_bio_hook, this must check the chunk tree to make sure | |
1760 | * we don't create bios that span stripes or chunks | |
6f034ece LB |
1761 | * |
1762 | * return 1 if page cannot be merged to bio | |
1763 | * return 0 if page can be merged to bio | |
1764 | * return error otherwise | |
d352ac68 | 1765 | */ |
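/*
 * Added note (not in the original source): btrfs_map_block() below
 * reports in map_length how many bytes starting at this bio's logical
 * address are physically contiguous; if that is less than the current
 * bio size plus the page being added, the page would cross a stripe
 * or chunk boundary, so we return 1 to tell the caller the page
 * cannot be merged into this bio.
 */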
81a75f67 | 1766 | int btrfs_merge_bio_hook(struct page *page, unsigned long offset, |
c8b97818 CM |
1767 | size_t size, struct bio *bio, |
1768 | unsigned long bio_flags) | |
239b14b3 | 1769 | { |
0b246afa JM |
1770 | struct inode *inode = page->mapping->host; |
1771 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); | |
4f024f37 | 1772 | u64 logical = (u64)bio->bi_iter.bi_sector << 9; |
239b14b3 CM |
1773 | u64 length = 0; |
1774 | u64 map_length; | |
239b14b3 CM |
1775 | int ret; |
1776 | ||
771ed689 CM |
1777 | if (bio_flags & EXTENT_BIO_COMPRESSED) |
1778 | return 0; | |
1779 | ||
4f024f37 | 1780 | length = bio->bi_iter.bi_size; |
239b14b3 | 1781 | map_length = length; |
0b246afa JM |
1782 | ret = btrfs_map_block(fs_info, btrfs_op(bio), logical, &map_length, |
1783 | NULL, 0); | |
6f034ece LB |
1784 | if (ret < 0) |
1785 | return ret; | |
d397712b | 1786 | if (map_length < length + size) |
239b14b3 | 1787 | return 1; |
3444a972 | 1788 | return 0; |
239b14b3 CM |
1789 | } |
1790 | ||
d352ac68 CM |
1791 | /* |
1792 | * in order to insert checksums into the metadata in large chunks, | |
1793 | * we wait until bio submission time. All the pages in the bio are | |
1794 | * checksummed and sums are attached onto the ordered extent record. | |
1795 | * | |
1796 | * At IO completion time the csums attached to the ordered extent record |
1797 | * are inserted into the btree | |
1798 | */ | |
81a75f67 MC |
1799 | static int __btrfs_submit_bio_start(struct inode *inode, struct bio *bio, |
1800 | int mirror_num, unsigned long bio_flags, | |
eaf25d93 | 1801 | u64 bio_offset) |
065631f6 | 1802 | { |
065631f6 | 1803 | int ret = 0; |
e015640f | 1804 | |
2ff7e61e | 1805 | ret = btrfs_csum_one_bio(inode, bio, 0, 0); |
79787eaa | 1806 | BUG_ON(ret); /* -ENOMEM */ |
4a69a410 CM |
1807 | return 0; |
1808 | } | |
e015640f | 1809 | |
4a69a410 CM |
1810 | /* |
1811 | * in order to insert checksums into the metadata in large chunks, | |
1812 | * we wait until bio submission time. All the pages in the bio are | |
1813 | * checksummed and sums are attached onto the ordered extent record. | |
1814 | * | |
1815 | * At IO completion time the csums attached to the ordered extent record |
1816 | * are inserted into the btree | |
1817 | */ | |
81a75f67 | 1818 | static int __btrfs_submit_bio_done(struct inode *inode, struct bio *bio, |
eaf25d93 CM |
1819 | int mirror_num, unsigned long bio_flags, |
1820 | u64 bio_offset) | |
4a69a410 | 1821 | { |
2ff7e61e | 1822 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
61891923 SB |
1823 | int ret; |
1824 | ||
2ff7e61e | 1825 | ret = btrfs_map_bio(fs_info, bio, mirror_num, 1); |
4246a0b6 CH |
1826 | if (ret) { |
1827 | bio->bi_error = ret; | |
1828 | bio_endio(bio); | |
1829 | } | |
61891923 | 1830 | return ret; |
44b8bd7e CM |
1831 | } |
1832 | ||
d352ac68 | 1833 | /* |
cad321ad CM |
1834 | * extent_io.c submission hook. This does the right thing for csum calculation |
1835 | * on write, or reading the csums from the tree before a read. |
d352ac68 | 1836 | */ |
81a75f67 | 1837 | static int btrfs_submit_bio_hook(struct inode *inode, struct bio *bio, |
eaf25d93 CM |
1838 | int mirror_num, unsigned long bio_flags, |
1839 | u64 bio_offset) | |
44b8bd7e | 1840 | { |
0b246afa | 1841 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
44b8bd7e | 1842 | struct btrfs_root *root = BTRFS_I(inode)->root; |
0d51e28a | 1843 | enum btrfs_wq_endio_type metadata = BTRFS_WQ_ENDIO_DATA; |
44b8bd7e | 1844 | int ret = 0; |
19b9bdb0 | 1845 | int skip_sum; |
b812ce28 | 1846 | int async = !atomic_read(&BTRFS_I(inode)->sync_writers); |
44b8bd7e | 1847 | |
6cbff00f | 1848 | skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; |
cad321ad | 1849 | |
70ddc553 | 1850 | if (btrfs_is_free_space_inode(BTRFS_I(inode))) |
0d51e28a | 1851 | metadata = BTRFS_WQ_ENDIO_FREE_SPACE; |
0417341e | 1852 | |
37226b21 | 1853 | if (bio_op(bio) != REQ_OP_WRITE) { |
0b246afa | 1854 | ret = btrfs_bio_wq_end_io(fs_info, bio, metadata); |
5fd02043 | 1855 | if (ret) |
61891923 | 1856 | goto out; |
5fd02043 | 1857 | |
d20f7043 | 1858 | if (bio_flags & EXTENT_BIO_COMPRESSED) { |
61891923 SB |
1859 | ret = btrfs_submit_compressed_read(inode, bio, |
1860 | mirror_num, | |
1861 | bio_flags); | |
1862 | goto out; | |
c2db1073 | 1863 | } else if (!skip_sum) { |
2ff7e61e | 1864 | ret = btrfs_lookup_bio_sums(inode, bio, NULL); |
c2db1073 | 1865 | if (ret) |
61891923 | 1866 | goto out; |
c2db1073 | 1867 | } |
4d1b5fb4 | 1868 | goto mapit; |
b812ce28 | 1869 | } else if (async && !skip_sum) { |
17d217fe YZ |
1870 | /* csum items have already been cloned */ |
1871 | if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID) | |
1872 | goto mapit; | |
19b9bdb0 | 1873 | /* we're doing a write, do the async checksumming */ |
0b246afa JM |
1874 | ret = btrfs_wq_submit_bio(fs_info, inode, bio, mirror_num, |
1875 | bio_flags, bio_offset, | |
1876 | __btrfs_submit_bio_start, | |
1877 | __btrfs_submit_bio_done); | |
61891923 | 1878 | goto out; |
b812ce28 | 1879 | } else if (!skip_sum) { |
2ff7e61e | 1880 | ret = btrfs_csum_one_bio(inode, bio, 0, 0); |
b812ce28 JB |
1881 | if (ret) |
1882 | goto out; | |
19b9bdb0 CM |
1883 | } |
1884 | ||
0b86a832 | 1885 | mapit: |
2ff7e61e | 1886 | ret = btrfs_map_bio(fs_info, bio, mirror_num, 0); |
61891923 SB |
1887 | |
1888 | out: | |
4246a0b6 CH |
1889 | if (ret < 0) { |
1890 | bio->bi_error = ret; | |
1891 | bio_endio(bio); | |
1892 | } | |
61891923 | 1893 | return ret; |
065631f6 | 1894 | } |
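/*
 * Added summary (not in the original source) of btrfs_submit_bio_hook()
 * above: reads first register a workqueue for end_io handling, then
 * either go to the compressed-read path or look up the expected csums
 * before being mapped; writes either queue async checksumming through
 * btrfs_wq_submit_bio() with the _start/_done helpers above, or csum
 * the bio inline and fall through to btrfs_map_bio().
 */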
6885f308 | 1895 | |
d352ac68 CM |
1896 | /* |
1897 | * Given a list of ordered sums, record them in the csum tree. This happens |
1898 | * at IO completion time based on sums calculated at bio submission time. | |
1899 | */ | |
ba1da2f4 | 1900 | static noinline int add_pending_csums(struct btrfs_trans_handle *trans, |
df9f628e | 1901 | struct inode *inode, struct list_head *list) |
e6dcd2dc | 1902 | { |
e6dcd2dc CM |
1903 | struct btrfs_ordered_sum *sum; |
1904 | ||
c6e30871 | 1905 | list_for_each_entry(sum, list, list) { |
39847c4d | 1906 | trans->adding_csums = 1; |
d20f7043 CM |
1907 | btrfs_csum_file_blocks(trans, |
1908 | BTRFS_I(inode)->root->fs_info->csum_root, sum); | |
39847c4d | 1909 | trans->adding_csums = 0; |
e6dcd2dc CM |
1910 | } |
1911 | return 0; | |
1912 | } | |
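/*
 * Added note (not in the original source): the sums walked above were
 * attached to the ordered extent at bio submission time (see
 * __btrfs_submit_bio_start() above); btrfs_finish_ordered_io() below
 * calls this at IO completion to write each of them into the csum
 * tree.
 */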
1913 | ||
2ac55d41 | 1914 | int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end, |
ba8b04c1 | 1915 | struct extent_state **cached_state, int dedupe) |
ea8c2819 | 1916 | { |
09cbfeaf | 1917 | WARN_ON((end & (PAGE_SIZE - 1)) == 0); |
ea8c2819 | 1918 | return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end, |
7cd8c752 | 1919 | cached_state); |
ea8c2819 CM |
1920 | } |
1921 | ||
d352ac68 | 1922 | /* see btrfs_writepage_start_hook for details on why this is required */ |
247e743c CM |
1923 | struct btrfs_writepage_fixup { |
1924 | struct page *page; | |
1925 | struct btrfs_work work; | |
1926 | }; | |
1927 | ||
b2950863 | 1928 | static void btrfs_writepage_fixup_worker(struct btrfs_work *work) |
247e743c CM |
1929 | { |
1930 | struct btrfs_writepage_fixup *fixup; | |
1931 | struct btrfs_ordered_extent *ordered; | |
2ac55d41 | 1932 | struct extent_state *cached_state = NULL; |
247e743c CM |
1933 | struct page *page; |
1934 | struct inode *inode; | |
1935 | u64 page_start; | |
1936 | u64 page_end; | |
87826df0 | 1937 | int ret; |
247e743c CM |
1938 | |
1939 | fixup = container_of(work, struct btrfs_writepage_fixup, work); | |
1940 | page = fixup->page; | |
4a096752 | 1941 | again: |
247e743c CM |
1942 | lock_page(page); |
1943 | if (!page->mapping || !PageDirty(page) || !PageChecked(page)) { | |
1944 | ClearPageChecked(page); | |
1945 | goto out_page; | |
1946 | } | |
1947 | ||
1948 | inode = page->mapping->host; | |
1949 | page_start = page_offset(page); | |
09cbfeaf | 1950 | page_end = page_offset(page) + PAGE_SIZE - 1; |
247e743c | 1951 | |
ff13db41 | 1952 | lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, |
d0082371 | 1953 | &cached_state); |
4a096752 CM |
1954 | |
1955 | /* already ordered? We're done */ | |
8b62b72b | 1956 | if (PagePrivate2(page)) |
247e743c | 1957 | goto out; |
4a096752 | 1958 | |
a776c6fa | 1959 | ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start, |
09cbfeaf | 1960 | PAGE_SIZE); |
4a096752 | 1961 | if (ordered) { |
2ac55d41 JB |
1962 | unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, |
1963 | page_end, &cached_state, GFP_NOFS); | |
4a096752 CM |
1964 | unlock_page(page); |
1965 | btrfs_start_ordered_extent(inode, ordered, 1); | |
87826df0 | 1966 | btrfs_put_ordered_extent(ordered); |
4a096752 CM |
1967 | goto again; |
1968 | } | |
247e743c | 1969 | |
7cf5b976 | 1970 | ret = btrfs_delalloc_reserve_space(inode, page_start, |
09cbfeaf | 1971 | PAGE_SIZE); |
87826df0 JM |
1972 | if (ret) { |
1973 | mapping_set_error(page->mapping, ret); | |
1974 | end_extent_writepage(page, ret, page_start, page_end); | |
1975 | ClearPageChecked(page); | |
1976 | goto out; | |
1977 | } | |
1978 | ||
ba8b04c1 QW |
1979 | btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state, |
1980 | 0); | |
247e743c | 1981 | ClearPageChecked(page); |
87826df0 | 1982 | set_page_dirty(page); |
247e743c | 1983 | out: |
2ac55d41 JB |
1984 | unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end, |
1985 | &cached_state, GFP_NOFS); | |
247e743c CM |
1986 | out_page: |
1987 | unlock_page(page); | |
09cbfeaf | 1988 | put_page(page); |
b897abec | 1989 | kfree(fixup); |
247e743c CM |
1990 | } |
1991 | ||
1992 | /* | |
1993 | * There are a few paths in the higher layers of the kernel that directly | |
1994 | * set the page dirty bit without asking the filesystem if it is a | |
1995 | * good idea. This causes problems because we want to make sure COW | |
1996 | * properly happens and the data=ordered rules are followed. | |
1997 | * | |
c8b97818 | 1998 | * In our case any range that doesn't have the ORDERED bit set |
247e743c CM |
1999 | * hasn't been properly set up for IO. We kick off an async process |
2000 | * to fix it up. The async helper will wait for ordered extents, set | |
2001 | * the delalloc bit and make it safe to write the page. | |
2002 | */ | |
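/*
 * Added note (not in the original source) on the fixup flow: if
 * Private2 is already set the page is covered by an ordered extent
 * and nothing is needed; otherwise the page is marked Checked, a
 * fixup work item is queued and -EBUSY is returned so this writepage
 * attempt backs off.  The worker above then waits out any ordered
 * extent, reserves delalloc space, sets the delalloc bit and
 * redirties the page so it is written properly later.
 */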
b2950863 | 2003 | static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end) |
247e743c CM |
2004 | { |
2005 | struct inode *inode = page->mapping->host; | |
0b246afa | 2006 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
247e743c | 2007 | struct btrfs_writepage_fixup *fixup; |
247e743c | 2008 | |
8b62b72b CM |
2009 | /* this page is properly in the ordered list */ |
2010 | if (TestClearPagePrivate2(page)) | |
247e743c CM |
2011 | return 0; |
2012 | ||
2013 | if (PageChecked(page)) | |
2014 | return -EAGAIN; | |
2015 | ||
2016 | fixup = kzalloc(sizeof(*fixup), GFP_NOFS); | |
2017 | if (!fixup) | |
2018 | return -EAGAIN; | |
f421950f | 2019 | |
247e743c | 2020 | SetPageChecked(page); |
09cbfeaf | 2021 | get_page(page); |
9e0af237 LB |
2022 | btrfs_init_work(&fixup->work, btrfs_fixup_helper, |
2023 | btrfs_writepage_fixup_worker, NULL, NULL); | |
247e743c | 2024 | fixup->page = page; |
0b246afa | 2025 | btrfs_queue_work(fs_info->fixup_workers, &fixup->work); |
87826df0 | 2026 | return -EBUSY; |
247e743c CM |
2027 | } |
2028 | ||
d899e052 YZ |
2029 | static int insert_reserved_file_extent(struct btrfs_trans_handle *trans, |
2030 | struct inode *inode, u64 file_pos, | |
2031 | u64 disk_bytenr, u64 disk_num_bytes, | |
2032 | u64 num_bytes, u64 ram_bytes, | |
2033 | u8 compression, u8 encryption, | |
2034 | u16 other_encoding, int extent_type) | |
2035 | { | |
2036 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
2037 | struct btrfs_file_extent_item *fi; | |
2038 | struct btrfs_path *path; | |
2039 | struct extent_buffer *leaf; | |
2040 | struct btrfs_key ins; | |
1acae57b | 2041 | int extent_inserted = 0; |
d899e052 YZ |
2042 | int ret; |
2043 | ||
2044 | path = btrfs_alloc_path(); | |
d8926bb3 MF |
2045 | if (!path) |
2046 | return -ENOMEM; | |
d899e052 | 2047 | |
a1ed835e CM |
2048 | /* |
2049 | * we may be replacing one extent in the tree with another. | |
2050 | * The new extent is pinned in the extent map, and we don't want | |
2051 | * to drop it from the cache until it is completely in the btree. | |
2052 | * | |
2053 | * So, tell btrfs_drop_extents to leave this extent in the cache. | |
2054 | * the caller is expected to unpin it and allow it to be merged | |
2055 | * with the others. | |
2056 | */ | |
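/*
 * Added note (not in the original source): the "unpin" mentioned above
 * is done by the caller; btrfs_finish_ordered_io() further below calls
 * unpin_extent_cache() on this range once the file extent item is
 * safely in the btree.
 */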
1acae57b FDBM |
2057 | ret = __btrfs_drop_extents(trans, root, inode, path, file_pos, |
2058 | file_pos + num_bytes, NULL, 0, | |
2059 | 1, sizeof(*fi), &extent_inserted); | |
79787eaa JM |
2060 | if (ret) |
2061 | goto out; | |
d899e052 | 2062 | |
1acae57b | 2063 | if (!extent_inserted) { |
4a0cc7ca | 2064 | ins.objectid = btrfs_ino(BTRFS_I(inode)); |
1acae57b FDBM |
2065 | ins.offset = file_pos; |
2066 | ins.type = BTRFS_EXTENT_DATA_KEY; | |
2067 | ||
2068 | path->leave_spinning = 1; | |
2069 | ret = btrfs_insert_empty_item(trans, root, path, &ins, | |
2070 | sizeof(*fi)); | |
2071 | if (ret) | |
2072 | goto out; | |
2073 | } | |
d899e052 YZ |
2074 | leaf = path->nodes[0]; |
2075 | fi = btrfs_item_ptr(leaf, path->slots[0], | |
2076 | struct btrfs_file_extent_item); | |
2077 | btrfs_set_file_extent_generation(leaf, fi, trans->transid); | |
2078 | btrfs_set_file_extent_type(leaf, fi, extent_type); | |
2079 | btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr); | |
2080 | btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes); | |
2081 | btrfs_set_file_extent_offset(leaf, fi, 0); | |
2082 | btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes); | |
2083 | btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes); | |
2084 | btrfs_set_file_extent_compression(leaf, fi, compression); | |
2085 | btrfs_set_file_extent_encryption(leaf, fi, encryption); | |
2086 | btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding); | |
b9473439 | 2087 | |
d899e052 | 2088 | btrfs_mark_buffer_dirty(leaf); |
ce195332 | 2089 | btrfs_release_path(path); |
d899e052 YZ |
2090 | |
2091 | inode_add_bytes(inode, num_bytes); | |
d899e052 YZ |
2092 | |
2093 | ins.objectid = disk_bytenr; | |
2094 | ins.offset = disk_num_bytes; | |
2095 | ins.type = BTRFS_EXTENT_ITEM_KEY; | |
2ff7e61e | 2096 | ret = btrfs_alloc_reserved_file_extent(trans, root->root_key.objectid, |
f85b7379 | 2097 | btrfs_ino(BTRFS_I(inode)), file_pos, ram_bytes, &ins); |
297d750b | 2098 | /* |
5846a3c2 QW |
2099 | * Release the reserved range from inode dirty range map, as it is |
2100 | * already moved into delayed_ref_head | |
297d750b QW |
2101 | */ |
2102 | btrfs_qgroup_release_data(inode, file_pos, ram_bytes); | |
79787eaa | 2103 | out: |
d899e052 | 2104 | btrfs_free_path(path); |
b9473439 | 2105 | |
79787eaa | 2106 | return ret; |
d899e052 YZ |
2107 | } |
2108 | ||
38c227d8 LB |
2109 | /* snapshot-aware defrag */ |
2110 | struct sa_defrag_extent_backref { | |
2111 | struct rb_node node; | |
2112 | struct old_sa_defrag_extent *old; | |
2113 | u64 root_id; | |
2114 | u64 inum; | |
2115 | u64 file_pos; | |
2116 | u64 extent_offset; | |
2117 | u64 num_bytes; | |
2118 | u64 generation; | |
2119 | }; | |
2120 | ||
2121 | struct old_sa_defrag_extent { | |
2122 | struct list_head list; | |
2123 | struct new_sa_defrag_extent *new; | |
2124 | ||
2125 | u64 extent_offset; | |
2126 | u64 bytenr; | |
2127 | u64 offset; | |
2128 | u64 len; | |
2129 | int count; | |
2130 | }; | |
2131 | ||
2132 | struct new_sa_defrag_extent { | |
2133 | struct rb_root root; | |
2134 | struct list_head head; | |
2135 | struct btrfs_path *path; | |
2136 | struct inode *inode; | |
2137 | u64 file_pos; | |
2138 | u64 len; | |
2139 | u64 bytenr; | |
2140 | u64 disk_len; | |
2141 | u8 compress_type; | |
2142 | }; | |
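/*
 * Added note (not in the original source) on how these pieces relate:
 * a new_sa_defrag_extent describes the extent just written by an
 * ordered extent; its head list holds the old_sa_defrag_extent
 * entries (the pre-existing extents it overlapped, collected by
 * record_old_file_extents()), and its rb_root collects every
 * sa_defrag_extent_backref found for those old extents, which
 * relink_file_extents() later repoints at the new extent.
 */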
2143 | ||
2144 | static int backref_comp(struct sa_defrag_extent_backref *b1, | |
2145 | struct sa_defrag_extent_backref *b2) | |
2146 | { | |
2147 | if (b1->root_id < b2->root_id) | |
2148 | return -1; | |
2149 | else if (b1->root_id > b2->root_id) | |
2150 | return 1; | |
2151 | ||
2152 | if (b1->inum < b2->inum) | |
2153 | return -1; | |
2154 | else if (b1->inum > b2->inum) | |
2155 | return 1; | |
2156 | ||
2157 | if (b1->file_pos < b2->file_pos) | |
2158 | return -1; | |
2159 | else if (b1->file_pos > b2->file_pos) | |
2160 | return 1; | |
2161 | ||
2162 | /* | |
2163 | * [------------------------------] ===> (a range of space) | |
2164 | * |<--->| |<---->| =============> (fs/file tree A) | |
2165 | * |<---------------------------->| ===> (fs/file tree B) | |
2166 | * | |
2167 | * A range of space can refer to two file extents in one tree while | |
2168 | * refer to only one file extent in another tree. | |
2169 | * | |
2170 | * So we may process a disk offset more than once (two extents in A) |
2171 | * that lands in the same extent (one extent in B), and then insert two |
2172 | * identical backrefs (both referring to the extent in B). |
2173 | */ | |
2174 | return 0; | |
2175 | } | |
2176 | ||
2177 | static void backref_insert(struct rb_root *root, | |
2178 | struct sa_defrag_extent_backref *backref) | |
2179 | { | |
2180 | struct rb_node **p = &root->rb_node; | |
2181 | struct rb_node *parent = NULL; | |
2182 | struct sa_defrag_extent_backref *entry; | |
2183 | int ret; | |
2184 | ||
2185 | while (*p) { | |
2186 | parent = *p; | |
2187 | entry = rb_entry(parent, struct sa_defrag_extent_backref, node); | |
2188 | ||
2189 | ret = backref_comp(backref, entry); | |
2190 | if (ret < 0) | |
2191 | p = &(*p)->rb_left; | |
2192 | else | |
2193 | p = &(*p)->rb_right; | |
2194 | } | |
2195 | ||
2196 | rb_link_node(&backref->node, parent, p); | |
2197 | rb_insert_color(&backref->node, root); | |
2198 | } | |
2199 | ||
2200 | /* | |
2201 | * Note the backref might have changed, and in this case we just return 0. |
2202 | */ | |
2203 | static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id, | |
2204 | void *ctx) | |
2205 | { | |
2206 | struct btrfs_file_extent_item *extent; | |
38c227d8 LB |
2207 | struct old_sa_defrag_extent *old = ctx; |
2208 | struct new_sa_defrag_extent *new = old->new; | |
2209 | struct btrfs_path *path = new->path; | |
2210 | struct btrfs_key key; | |
2211 | struct btrfs_root *root; | |
2212 | struct sa_defrag_extent_backref *backref; | |
2213 | struct extent_buffer *leaf; | |
2214 | struct inode *inode = new->inode; | |
0b246afa | 2215 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
38c227d8 LB |
2216 | int slot; |
2217 | int ret; | |
2218 | u64 extent_offset; | |
2219 | u64 num_bytes; | |
2220 | ||
2221 | if (BTRFS_I(inode)->root->root_key.objectid == root_id && | |
4a0cc7ca | 2222 | inum == btrfs_ino(BTRFS_I(inode))) |
38c227d8 LB |
2223 | return 0; |
2224 | ||
2225 | key.objectid = root_id; | |
2226 | key.type = BTRFS_ROOT_ITEM_KEY; | |
2227 | key.offset = (u64)-1; | |
2228 | ||
38c227d8 LB |
2229 | root = btrfs_read_fs_root_no_name(fs_info, &key); |
2230 | if (IS_ERR(root)) { | |
2231 | if (PTR_ERR(root) == -ENOENT) | |
2232 | return 0; | |
2233 | WARN_ON(1); | |
ab8d0fc4 | 2234 | btrfs_debug(fs_info, "inum=%llu, offset=%llu, root_id=%llu", |
38c227d8 LB |
2235 | inum, offset, root_id); |
2236 | return PTR_ERR(root); | |
2237 | } | |
2238 | ||
2239 | key.objectid = inum; | |
2240 | key.type = BTRFS_EXTENT_DATA_KEY; | |
2241 | if (offset > (u64)-1 << 32) | |
2242 | key.offset = 0; | |
2243 | else | |
2244 | key.offset = offset; | |
2245 | ||
2246 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | |
fae7f21c | 2247 | if (WARN_ON(ret < 0)) |
38c227d8 | 2248 | return ret; |
50f1319c | 2249 | ret = 0; |
38c227d8 LB |
2250 | |
2251 | while (1) { | |
2252 | cond_resched(); | |
2253 | ||
2254 | leaf = path->nodes[0]; | |
2255 | slot = path->slots[0]; | |
2256 | ||
2257 | if (slot >= btrfs_header_nritems(leaf)) { | |
2258 | ret = btrfs_next_leaf(root, path); | |
2259 | if (ret < 0) { | |
2260 | goto out; | |
2261 | } else if (ret > 0) { | |
2262 | ret = 0; | |
2263 | goto out; | |
2264 | } | |
2265 | continue; | |
2266 | } | |
2267 | ||
2268 | path->slots[0]++; | |
2269 | ||
2270 | btrfs_item_key_to_cpu(leaf, &key, slot); | |
2271 | ||
2272 | if (key.objectid > inum) | |
2273 | goto out; | |
2274 | ||
2275 | if (key.objectid < inum || key.type != BTRFS_EXTENT_DATA_KEY) | |
2276 | continue; | |
2277 | ||
2278 | extent = btrfs_item_ptr(leaf, slot, | |
2279 | struct btrfs_file_extent_item); | |
2280 | ||
2281 | if (btrfs_file_extent_disk_bytenr(leaf, extent) != old->bytenr) | |
2282 | continue; | |
2283 | ||
e68afa49 LB |
2284 | /* |
2285 | * 'offset' refers to the exact key.offset, | |
2286 | * NOT the 'offset' field in btrfs_extent_data_ref, i.e. |
2287 | * (key.offset - extent_offset). | |
2288 | */ | |
2289 | if (key.offset != offset) | |
38c227d8 LB |
2290 | continue; |
2291 | ||
e68afa49 | 2292 | extent_offset = btrfs_file_extent_offset(leaf, extent); |
38c227d8 | 2293 | num_bytes = btrfs_file_extent_num_bytes(leaf, extent); |
e68afa49 | 2294 | |
38c227d8 LB |
2295 | if (extent_offset >= old->extent_offset + old->offset + |
2296 | old->len || extent_offset + num_bytes <= | |
2297 | old->extent_offset + old->offset) | |
2298 | continue; | |
38c227d8 LB |
2299 | break; |
2300 | } | |
2301 | ||
2302 | backref = kmalloc(sizeof(*backref), GFP_NOFS); | |
2303 | if (!backref) { | |
2304 | ret = -ENOENT; | |
2305 | goto out; | |
2306 | } | |
2307 | ||
2308 | backref->root_id = root_id; | |
2309 | backref->inum = inum; | |
e68afa49 | 2310 | backref->file_pos = offset; |
38c227d8 LB |
2311 | backref->num_bytes = num_bytes; |
2312 | backref->extent_offset = extent_offset; | |
2313 | backref->generation = btrfs_file_extent_generation(leaf, extent); | |
2314 | backref->old = old; | |
2315 | backref_insert(&new->root, backref); | |
2316 | old->count++; | |
2317 | out: | |
2318 | btrfs_release_path(path); | |
2319 | WARN_ON(ret); | |
2320 | return ret; | |
2321 | } | |
2322 | ||
2323 | static noinline bool record_extent_backrefs(struct btrfs_path *path, | |
2324 | struct new_sa_defrag_extent *new) | |
2325 | { | |
0b246afa | 2326 | struct btrfs_fs_info *fs_info = btrfs_sb(new->inode->i_sb); |
38c227d8 LB |
2327 | struct old_sa_defrag_extent *old, *tmp; |
2328 | int ret; | |
2329 | ||
2330 | new->path = path; | |
2331 | ||
2332 | list_for_each_entry_safe(old, tmp, &new->head, list) { | |
e68afa49 LB |
2333 | ret = iterate_inodes_from_logical(old->bytenr + |
2334 | old->extent_offset, fs_info, | |
38c227d8 LB |
2335 | path, record_one_backref, |
2336 | old); | |
4724b106 JB |
2337 | if (ret < 0 && ret != -ENOENT) |
2338 | return false; | |
38c227d8 LB |
2339 | |
2340 | /* no backref to be processed for this extent */ | |
2341 | if (!old->count) { | |
2342 | list_del(&old->list); | |
2343 | kfree(old); | |
2344 | } | |
2345 | } | |
2346 | ||
2347 | if (list_empty(&new->head)) | |
2348 | return false; | |
2349 | ||
2350 | return true; | |
2351 | } | |
2352 | ||
2353 | static int relink_is_mergable(struct extent_buffer *leaf, | |
2354 | struct btrfs_file_extent_item *fi, | |
116e0024 | 2355 | struct new_sa_defrag_extent *new) |
38c227d8 | 2356 | { |
116e0024 | 2357 | if (btrfs_file_extent_disk_bytenr(leaf, fi) != new->bytenr) |
38c227d8 LB |
2358 | return 0; |
2359 | ||
2360 | if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG) | |
2361 | return 0; | |
2362 | ||
116e0024 LB |
2363 | if (btrfs_file_extent_compression(leaf, fi) != new->compress_type) |
2364 | return 0; | |
2365 | ||
2366 | if (btrfs_file_extent_encryption(leaf, fi) || | |
38c227d8 LB |
2367 | btrfs_file_extent_other_encoding(leaf, fi)) |
2368 | return 0; | |
2369 | ||
2370 | return 1; | |
2371 | } | |
2372 | ||
2373 | /* | |
2374 | * Note the backref might have changed, and in this case we just return 0. |
2375 | */ | |
2376 | static noinline int relink_extent_backref(struct btrfs_path *path, | |
2377 | struct sa_defrag_extent_backref *prev, | |
2378 | struct sa_defrag_extent_backref *backref) | |
2379 | { | |
2380 | struct btrfs_file_extent_item *extent; | |
2381 | struct btrfs_file_extent_item *item; | |
2382 | struct btrfs_ordered_extent *ordered; | |
2383 | struct btrfs_trans_handle *trans; | |
38c227d8 LB |
2384 | struct btrfs_root *root; |
2385 | struct btrfs_key key; | |
2386 | struct extent_buffer *leaf; | |
2387 | struct old_sa_defrag_extent *old = backref->old; | |
2388 | struct new_sa_defrag_extent *new = old->new; | |
0b246afa | 2389 | struct btrfs_fs_info *fs_info = btrfs_sb(new->inode->i_sb); |
38c227d8 LB |
2390 | struct inode *inode; |
2391 | struct extent_state *cached = NULL; | |
2392 | int ret = 0; | |
2393 | u64 start; | |
2394 | u64 len; | |
2395 | u64 lock_start; | |
2396 | u64 lock_end; | |
2397 | bool merge = false; | |
2398 | int index; | |
2399 | ||
2400 | if (prev && prev->root_id == backref->root_id && | |
2401 | prev->inum == backref->inum && | |
2402 | prev->file_pos + prev->num_bytes == backref->file_pos) | |
2403 | merge = true; | |
2404 | ||
2405 | /* step 1: get root */ | |
2406 | key.objectid = backref->root_id; | |
2407 | key.type = BTRFS_ROOT_ITEM_KEY; | |
2408 | key.offset = (u64)-1; | |
2409 | ||
38c227d8 LB |
2410 | index = srcu_read_lock(&fs_info->subvol_srcu); |
2411 | ||
2412 | root = btrfs_read_fs_root_no_name(fs_info, &key); | |
2413 | if (IS_ERR(root)) { | |
2414 | srcu_read_unlock(&fs_info->subvol_srcu, index); | |
2415 | if (PTR_ERR(root) == -ENOENT) | |
2416 | return 0; | |
2417 | return PTR_ERR(root); | |
2418 | } | |
38c227d8 | 2419 | |
bcbba5e6 WS |
2420 | if (btrfs_root_readonly(root)) { |
2421 | srcu_read_unlock(&fs_info->subvol_srcu, index); | |
2422 | return 0; | |
2423 | } | |
2424 | ||
38c227d8 LB |
2425 | /* step 2: get inode */ |
2426 | key.objectid = backref->inum; | |
2427 | key.type = BTRFS_INODE_ITEM_KEY; | |
2428 | key.offset = 0; | |
2429 | ||
2430 | inode = btrfs_iget(fs_info->sb, &key, root, NULL); | |
2431 | if (IS_ERR(inode)) { | |
2432 | srcu_read_unlock(&fs_info->subvol_srcu, index); | |
2433 | return 0; | |
2434 | } | |
2435 | ||
2436 | srcu_read_unlock(&fs_info->subvol_srcu, index); | |
2437 | ||
2438 | /* step 3: relink backref */ | |
2439 | lock_start = backref->file_pos; | |
2440 | lock_end = backref->file_pos + backref->num_bytes - 1; | |
2441 | lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_start, lock_end, | |
ff13db41 | 2442 | &cached); |
38c227d8 LB |
2443 | |
2444 | ordered = btrfs_lookup_first_ordered_extent(inode, lock_end); | |
2445 | if (ordered) { | |
2446 | btrfs_put_ordered_extent(ordered); | |
2447 | goto out_unlock; | |
2448 | } | |
2449 | ||
2450 | trans = btrfs_join_transaction(root); | |
2451 | if (IS_ERR(trans)) { | |
2452 | ret = PTR_ERR(trans); | |
2453 | goto out_unlock; | |
2454 | } | |
2455 | ||
2456 | key.objectid = backref->inum; | |
2457 | key.type = BTRFS_EXTENT_DATA_KEY; | |
2458 | key.offset = backref->file_pos; | |
2459 | ||
2460 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | |
2461 | if (ret < 0) { | |
2462 | goto out_free_path; | |
2463 | } else if (ret > 0) { | |
2464 | ret = 0; | |
2465 | goto out_free_path; | |
2466 | } | |
2467 | ||
2468 | extent = btrfs_item_ptr(path->nodes[0], path->slots[0], | |
2469 | struct btrfs_file_extent_item); | |
2470 | ||
2471 | if (btrfs_file_extent_generation(path->nodes[0], extent) != | |
2472 | backref->generation) | |
2473 | goto out_free_path; | |
2474 | ||
2475 | btrfs_release_path(path); | |
2476 | ||
2477 | start = backref->file_pos; | |
2478 | if (backref->extent_offset < old->extent_offset + old->offset) | |
2479 | start += old->extent_offset + old->offset - | |
2480 | backref->extent_offset; | |
2481 | ||
2482 | len = min(backref->extent_offset + backref->num_bytes, | |
2483 | old->extent_offset + old->offset + old->len); | |
2484 | len -= max(backref->extent_offset, old->extent_offset + old->offset); | |
2485 | ||
2486 | ret = btrfs_drop_extents(trans, root, inode, start, | |
2487 | start + len, 1); | |
2488 | if (ret) | |
2489 | goto out_free_path; | |
2490 | again: | |
4a0cc7ca | 2491 | key.objectid = btrfs_ino(BTRFS_I(inode)); |
38c227d8 LB |
2492 | key.type = BTRFS_EXTENT_DATA_KEY; |
2493 | key.offset = start; | |
2494 | ||
a09a0a70 | 2495 | path->leave_spinning = 1; |
38c227d8 LB |
2496 | if (merge) { |
2497 | struct btrfs_file_extent_item *fi; | |
2498 | u64 extent_len; | |
2499 | struct btrfs_key found_key; | |
2500 | ||
3c9665df | 2501 | ret = btrfs_search_slot(trans, root, &key, path, 0, 1); |
38c227d8 LB |
2502 | if (ret < 0) |
2503 | goto out_free_path; | |
2504 | ||
2505 | path->slots[0]--; | |
2506 | leaf = path->nodes[0]; | |
2507 | btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); | |
2508 | ||
2509 | fi = btrfs_item_ptr(leaf, path->slots[0], | |
2510 | struct btrfs_file_extent_item); | |
2511 | extent_len = btrfs_file_extent_num_bytes(leaf, fi); | |
2512 | ||
116e0024 LB |
2513 | if (extent_len + found_key.offset == start && |
2514 | relink_is_mergable(leaf, fi, new)) { | |
38c227d8 LB |
2515 | btrfs_set_file_extent_num_bytes(leaf, fi, |
2516 | extent_len + len); | |
2517 | btrfs_mark_buffer_dirty(leaf); | |
2518 | inode_add_bytes(inode, len); | |
2519 | ||
2520 | ret = 1; | |
2521 | goto out_free_path; | |
2522 | } else { | |
2523 | merge = false; | |
2524 | btrfs_release_path(path); | |
2525 | goto again; | |
2526 | } | |
2527 | } | |
2528 | ||
2529 | ret = btrfs_insert_empty_item(trans, root, path, &key, | |
2530 | sizeof(*extent)); | |
2531 | if (ret) { | |
66642832 | 2532 | btrfs_abort_transaction(trans, ret); |
38c227d8 LB |
2533 | goto out_free_path; |
2534 | } | |
2535 | ||
2536 | leaf = path->nodes[0]; | |
2537 | item = btrfs_item_ptr(leaf, path->slots[0], | |
2538 | struct btrfs_file_extent_item); | |
2539 | btrfs_set_file_extent_disk_bytenr(leaf, item, new->bytenr); | |
2540 | btrfs_set_file_extent_disk_num_bytes(leaf, item, new->disk_len); | |
2541 | btrfs_set_file_extent_offset(leaf, item, start - new->file_pos); | |
2542 | btrfs_set_file_extent_num_bytes(leaf, item, len); | |
2543 | btrfs_set_file_extent_ram_bytes(leaf, item, new->len); | |
2544 | btrfs_set_file_extent_generation(leaf, item, trans->transid); | |
2545 | btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG); | |
2546 | btrfs_set_file_extent_compression(leaf, item, new->compress_type); | |
2547 | btrfs_set_file_extent_encryption(leaf, item, 0); | |
2548 | btrfs_set_file_extent_other_encoding(leaf, item, 0); | |
2549 | ||
2550 | btrfs_mark_buffer_dirty(leaf); | |
2551 | inode_add_bytes(inode, len); | |
a09a0a70 | 2552 | btrfs_release_path(path); |
38c227d8 | 2553 | |
2ff7e61e | 2554 | ret = btrfs_inc_extent_ref(trans, fs_info, new->bytenr, |
38c227d8 LB |
2555 | new->disk_len, 0, |
2556 | backref->root_id, backref->inum, | |
b06c4bf5 | 2557 | new->file_pos); /* start - extent_offset */ |
38c227d8 | 2558 | if (ret) { |
66642832 | 2559 | btrfs_abort_transaction(trans, ret); |
38c227d8 LB |
2560 | goto out_free_path; |
2561 | } | |
2562 | ||
2563 | ret = 1; | |
2564 | out_free_path: | |
2565 | btrfs_release_path(path); | |
a09a0a70 | 2566 | path->leave_spinning = 0; |
3a45bb20 | 2567 | btrfs_end_transaction(trans); |
38c227d8 LB |
2568 | out_unlock: |
2569 | unlock_extent_cached(&BTRFS_I(inode)->io_tree, lock_start, lock_end, | |
2570 | &cached, GFP_NOFS); | |
2571 | iput(inode); | |
2572 | return ret; | |
2573 | } | |
2574 | ||
6f519564 LB |
2575 | static void free_sa_defrag_extent(struct new_sa_defrag_extent *new) |
2576 | { | |
2577 | struct old_sa_defrag_extent *old, *tmp; | |
2578 | ||
2579 | if (!new) | |
2580 | return; | |
2581 | ||
2582 | list_for_each_entry_safe(old, tmp, &new->head, list) { | |
6f519564 LB |
2583 | kfree(old); |
2584 | } | |
2585 | kfree(new); | |
2586 | } | |
2587 | ||
38c227d8 LB |
2588 | static void relink_file_extents(struct new_sa_defrag_extent *new) |
2589 | { | |
0b246afa | 2590 | struct btrfs_fs_info *fs_info = btrfs_sb(new->inode->i_sb); |
38c227d8 | 2591 | struct btrfs_path *path; |
38c227d8 LB |
2592 | struct sa_defrag_extent_backref *backref; |
2593 | struct sa_defrag_extent_backref *prev = NULL; | |
2594 | struct inode *inode; | |
2595 | struct btrfs_root *root; | |
2596 | struct rb_node *node; | |
2597 | int ret; | |
2598 | ||
2599 | inode = new->inode; | |
2600 | root = BTRFS_I(inode)->root; | |
2601 | ||
2602 | path = btrfs_alloc_path(); | |
2603 | if (!path) | |
2604 | return; | |
2605 | ||
2606 | if (!record_extent_backrefs(path, new)) { | |
2607 | btrfs_free_path(path); | |
2608 | goto out; | |
2609 | } | |
2610 | btrfs_release_path(path); | |
2611 | ||
2612 | while (1) { | |
2613 | node = rb_first(&new->root); | |
2614 | if (!node) | |
2615 | break; | |
2616 | rb_erase(node, &new->root); | |
2617 | ||
2618 | backref = rb_entry(node, struct sa_defrag_extent_backref, node); | |
2619 | ||
2620 | ret = relink_extent_backref(path, prev, backref); | |
2621 | WARN_ON(ret < 0); | |
2622 | ||
2623 | kfree(prev); | |
2624 | ||
2625 | if (ret == 1) | |
2626 | prev = backref; | |
2627 | else | |
2628 | prev = NULL; | |
2629 | cond_resched(); | |
2630 | } | |
2631 | kfree(prev); | |
2632 | ||
2633 | btrfs_free_path(path); | |
38c227d8 | 2634 | out: |
6f519564 LB |
2635 | free_sa_defrag_extent(new); |
2636 | ||
0b246afa JM |
2637 | atomic_dec(&fs_info->defrag_running); |
2638 | wake_up(&fs_info->transaction_wait); | |
38c227d8 LB |
2639 | } |
2640 | ||
2641 | static struct new_sa_defrag_extent * | |
2642 | record_old_file_extents(struct inode *inode, | |
2643 | struct btrfs_ordered_extent *ordered) | |
2644 | { | |
0b246afa | 2645 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
38c227d8 LB |
2646 | struct btrfs_root *root = BTRFS_I(inode)->root; |
2647 | struct btrfs_path *path; | |
2648 | struct btrfs_key key; | |
6f519564 | 2649 | struct old_sa_defrag_extent *old; |
38c227d8 LB |
2650 | struct new_sa_defrag_extent *new; |
2651 | int ret; | |
2652 | ||
2653 | new = kmalloc(sizeof(*new), GFP_NOFS); | |
2654 | if (!new) | |
2655 | return NULL; | |
2656 | ||
2657 | new->inode = inode; | |
2658 | new->file_pos = ordered->file_offset; | |
2659 | new->len = ordered->len; | |
2660 | new->bytenr = ordered->start; | |
2661 | new->disk_len = ordered->disk_len; | |
2662 | new->compress_type = ordered->compress_type; | |
2663 | new->root = RB_ROOT; | |
2664 | INIT_LIST_HEAD(&new->head); | |
2665 | ||
2666 | path = btrfs_alloc_path(); | |
2667 | if (!path) | |
2668 | goto out_kfree; | |
2669 | ||
4a0cc7ca | 2670 | key.objectid = btrfs_ino(BTRFS_I(inode)); |
38c227d8 LB |
2671 | key.type = BTRFS_EXTENT_DATA_KEY; |
2672 | key.offset = new->file_pos; | |
2673 | ||
2674 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | |
2675 | if (ret < 0) | |
2676 | goto out_free_path; | |
2677 | if (ret > 0 && path->slots[0] > 0) | |
2678 | path->slots[0]--; | |
2679 | ||
2680 | /* find out all the old extents for the file range */ | |
2681 | while (1) { | |
2682 | struct btrfs_file_extent_item *extent; | |
2683 | struct extent_buffer *l; | |
2684 | int slot; | |
2685 | u64 num_bytes; | |
2686 | u64 offset; | |
2687 | u64 end; | |
2688 | u64 disk_bytenr; | |
2689 | u64 extent_offset; | |
2690 | ||
2691 | l = path->nodes[0]; | |
2692 | slot = path->slots[0]; | |
2693 | ||
2694 | if (slot >= btrfs_header_nritems(l)) { | |
2695 | ret = btrfs_next_leaf(root, path); | |
2696 | if (ret < 0) | |
6f519564 | 2697 | goto out_free_path; |
38c227d8 LB |
2698 | else if (ret > 0) |
2699 | break; | |
2700 | continue; | |
2701 | } | |
2702 | ||
2703 | btrfs_item_key_to_cpu(l, &key, slot); | |
2704 | ||
4a0cc7ca | 2705 | if (key.objectid != btrfs_ino(BTRFS_I(inode))) |
38c227d8 LB |
2706 | break; |
2707 | if (key.type != BTRFS_EXTENT_DATA_KEY) | |
2708 | break; | |
2709 | if (key.offset >= new->file_pos + new->len) | |
2710 | break; | |
2711 | ||
2712 | extent = btrfs_item_ptr(l, slot, struct btrfs_file_extent_item); | |
2713 | ||
2714 | num_bytes = btrfs_file_extent_num_bytes(l, extent); | |
2715 | if (key.offset + num_bytes < new->file_pos) | |
2716 | goto next; | |
2717 | ||
2718 | disk_bytenr = btrfs_file_extent_disk_bytenr(l, extent); | |
2719 | if (!disk_bytenr) | |
2720 | goto next; | |
2721 | ||
2722 | extent_offset = btrfs_file_extent_offset(l, extent); | |
2723 | ||
2724 | old = kmalloc(sizeof(*old), GFP_NOFS); | |
2725 | if (!old) | |
6f519564 | 2726 | goto out_free_path; |
38c227d8 LB |
2727 | |
2728 | offset = max(new->file_pos, key.offset); | |
2729 | end = min(new->file_pos + new->len, key.offset + num_bytes); | |
2730 | ||
2731 | old->bytenr = disk_bytenr; | |
2732 | old->extent_offset = extent_offset; | |
2733 | old->offset = offset - key.offset; | |
2734 | old->len = end - offset; | |
2735 | old->new = new; | |
2736 | old->count = 0; | |
2737 | list_add_tail(&old->list, &new->head); | |
2738 | next: | |
2739 | path->slots[0]++; | |
2740 | cond_resched(); | |
2741 | } | |
2742 | ||
2743 | btrfs_free_path(path); | |
0b246afa | 2744 | atomic_inc(&fs_info->defrag_running); |
38c227d8 LB |
2745 | |
2746 | return new; | |
2747 | ||
38c227d8 LB |
2748 | out_free_path: |
2749 | btrfs_free_path(path); | |
2750 | out_kfree: | |
6f519564 | 2751 | free_sa_defrag_extent(new); |
38c227d8 LB |
2752 | return NULL; |
2753 | } | |
2754 | ||
2ff7e61e | 2755 | static void btrfs_release_delalloc_bytes(struct btrfs_fs_info *fs_info, |
e570fd27 MX |
2756 | u64 start, u64 len) |
2757 | { | |
2758 | struct btrfs_block_group_cache *cache; | |
2759 | ||
0b246afa | 2760 | cache = btrfs_lookup_block_group(fs_info, start); |
e570fd27 MX |
2761 | ASSERT(cache); |
2762 | ||
2763 | spin_lock(&cache->lock); | |
2764 | cache->delalloc_bytes -= len; | |
2765 | spin_unlock(&cache->lock); | |
2766 | ||
2767 | btrfs_put_block_group(cache); | |
2768 | } | |
2769 | ||
d352ac68 CM |
2770 | /* as ordered data IO finishes, this gets called so we can finish |
2771 | * an ordered extent if the range of bytes in the file it covers is |
2772 | * fully written. | |
2773 | */ | |
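/*
 * Added summary (not in the original source) of the completion work
 * below: NOCOW ordered extents just free their qgroup reservation and
 * update the inode; everything else joins a transaction, marks the
 * preallocated range written or inserts the reserved file extent
 * item, unpins the cached extent map, adds the pending csums and
 * updates the on-disk i_size and inode item.
 */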
5fd02043 | 2774 | static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent) |
e6dcd2dc | 2775 | { |
5fd02043 | 2776 | struct inode *inode = ordered_extent->inode; |
0b246afa | 2777 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
e6dcd2dc | 2778 | struct btrfs_root *root = BTRFS_I(inode)->root; |
0ca1f7ce | 2779 | struct btrfs_trans_handle *trans = NULL; |
e6dcd2dc | 2780 | struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; |
2ac55d41 | 2781 | struct extent_state *cached_state = NULL; |
38c227d8 | 2782 | struct new_sa_defrag_extent *new = NULL; |
261507a0 | 2783 | int compress_type = 0; |
77cef2ec JB |
2784 | int ret = 0; |
2785 | u64 logical_len = ordered_extent->len; | |
82d5902d | 2786 | bool nolock; |
77cef2ec | 2787 | bool truncated = false; |
e6dcd2dc | 2788 | |
70ddc553 | 2789 | nolock = btrfs_is_free_space_inode(BTRFS_I(inode)); |
0cb59c99 | 2790 | |
5fd02043 JB |
2791 | if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) { |
2792 | ret = -EIO; | |
2793 | goto out; | |
2794 | } | |
2795 | ||
7ab7956e NB |
2796 | btrfs_free_io_failure_record(BTRFS_I(inode), |
2797 | ordered_extent->file_offset, | |
2798 | ordered_extent->file_offset + | |
2799 | ordered_extent->len - 1); | |
f612496b | 2800 | |
77cef2ec JB |
2801 | if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) { |
2802 | truncated = true; | |
2803 | logical_len = ordered_extent->truncated_len; | |
2804 | /* Truncated the entire extent, don't bother adding */ | |
2805 | if (!logical_len) | |
2806 | goto out; | |
2807 | } | |
2808 | ||
c2167754 | 2809 | if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) { |
79787eaa | 2810 | BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */ |
94ed938a QW |
2811 | |
2812 | /* | |
2813 | * For the mwrite (mmap + memset to write) case, we still reserve |
2814 | * space for the NOCOW range. |
2815 | * As NOCOW won't cause a new delayed ref, just free the space. |
2816 | */ | |
2817 | btrfs_qgroup_free_data(inode, ordered_extent->file_offset, | |
2818 | ordered_extent->len); | |
6c760c07 JB |
2819 | btrfs_ordered_update_i_size(inode, 0, ordered_extent); |
2820 | if (nolock) | |
2821 | trans = btrfs_join_transaction_nolock(root); | |
2822 | else | |
2823 | trans = btrfs_join_transaction(root); | |
2824 | if (IS_ERR(trans)) { | |
2825 | ret = PTR_ERR(trans); | |
2826 | trans = NULL; | |
2827 | goto out; | |
c2167754 | 2828 | } |
0b246afa | 2829 | trans->block_rsv = &fs_info->delalloc_block_rsv; |
6c760c07 JB |
2830 | ret = btrfs_update_inode_fallback(trans, root, inode); |
2831 | if (ret) /* -ENOMEM or corruption */ | |
66642832 | 2832 | btrfs_abort_transaction(trans, ret); |
c2167754 YZ |
2833 | goto out; |
2834 | } | |
e6dcd2dc | 2835 | |
2ac55d41 JB |
2836 | lock_extent_bits(io_tree, ordered_extent->file_offset, |
2837 | ordered_extent->file_offset + ordered_extent->len - 1, | |
ff13db41 | 2838 | &cached_state); |
e6dcd2dc | 2839 | |
38c227d8 LB |
2840 | ret = test_range_bit(io_tree, ordered_extent->file_offset, |
2841 | ordered_extent->file_offset + ordered_extent->len - 1, | |
2842 | EXTENT_DEFRAG, 1, cached_state); | |
2843 | if (ret) { | |
2844 | u64 last_snapshot = btrfs_root_last_snapshot(&root->root_item); | |
8101c8db | 2845 | if (0 && last_snapshot >= BTRFS_I(inode)->generation) |
38c227d8 LB |
2846 | /* the inode is shared */ |
2847 | new = record_old_file_extents(inode, ordered_extent); | |
2848 | ||
2849 | clear_extent_bit(io_tree, ordered_extent->file_offset, | |
2850 | ordered_extent->file_offset + ordered_extent->len - 1, | |
2851 | EXTENT_DEFRAG, 0, 0, &cached_state, GFP_NOFS); | |
2852 | } | |
2853 | ||
0cb59c99 | 2854 | if (nolock) |
7a7eaa40 | 2855 | trans = btrfs_join_transaction_nolock(root); |
0cb59c99 | 2856 | else |
7a7eaa40 | 2857 | trans = btrfs_join_transaction(root); |
79787eaa JM |
2858 | if (IS_ERR(trans)) { |
2859 | ret = PTR_ERR(trans); | |
2860 | trans = NULL; | |
2861 | goto out_unlock; | |
2862 | } | |
a79b7d4b | 2863 | |
0b246afa | 2864 | trans->block_rsv = &fs_info->delalloc_block_rsv; |
c2167754 | 2865 | |
c8b97818 | 2866 | if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags)) |
261507a0 | 2867 | compress_type = ordered_extent->compress_type; |
d899e052 | 2868 | if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { |
261507a0 | 2869 | BUG_ON(compress_type); |
7a6d7067 | 2870 | ret = btrfs_mark_extent_written(trans, BTRFS_I(inode), |
d899e052 YZ |
2871 | ordered_extent->file_offset, |
2872 | ordered_extent->file_offset + | |
77cef2ec | 2873 | logical_len); |
d899e052 | 2874 | } else { |
0b246afa | 2875 | BUG_ON(root == fs_info->tree_root); |
d899e052 YZ |
2876 | ret = insert_reserved_file_extent(trans, inode, |
2877 | ordered_extent->file_offset, | |
2878 | ordered_extent->start, | |
2879 | ordered_extent->disk_len, | |
77cef2ec | 2880 | logical_len, logical_len, |
261507a0 | 2881 | compress_type, 0, 0, |
d899e052 | 2882 | BTRFS_FILE_EXTENT_REG); |
e570fd27 | 2883 | if (!ret) |
2ff7e61e | 2884 | btrfs_release_delalloc_bytes(fs_info, |
e570fd27 MX |
2885 | ordered_extent->start, |
2886 | ordered_extent->disk_len); | |
d899e052 | 2887 | } |
5dc562c5 JB |
2888 | unpin_extent_cache(&BTRFS_I(inode)->extent_tree, |
2889 | ordered_extent->file_offset, ordered_extent->len, | |
2890 | trans->transid); | |
79787eaa | 2891 | if (ret < 0) { |
66642832 | 2892 | btrfs_abort_transaction(trans, ret); |
5fd02043 | 2893 | goto out_unlock; |
79787eaa | 2894 | } |
2ac55d41 | 2895 | |
df9f628e | 2896 | add_pending_csums(trans, inode, &ordered_extent->list); |
e6dcd2dc | 2897 | |
6c760c07 JB |
2898 | btrfs_ordered_update_i_size(inode, 0, ordered_extent); |
2899 | ret = btrfs_update_inode_fallback(trans, root, inode); | |
2900 | if (ret) { /* -ENOMEM or corruption */ | |
66642832 | 2901 | btrfs_abort_transaction(trans, ret); |
6c760c07 | 2902 | goto out_unlock; |
1ef30be1 JB |
2903 | } |
2904 | ret = 0; | |
5fd02043 JB |
2905 | out_unlock: |
2906 | unlock_extent_cached(io_tree, ordered_extent->file_offset, | |
2907 | ordered_extent->file_offset + | |
2908 | ordered_extent->len - 1, &cached_state, GFP_NOFS); | |
c2167754 | 2909 | out: |
0b246afa | 2910 | if (root != fs_info->tree_root) |
691fa059 NB |
2911 | btrfs_delalloc_release_metadata(BTRFS_I(inode), |
2912 | ordered_extent->len); | |
a698d075 | 2913 | if (trans) |
3a45bb20 | 2914 | btrfs_end_transaction(trans); |
0cb59c99 | 2915 | |
77cef2ec JB |
2916 | if (ret || truncated) { |
2917 | u64 start, end; | |
2918 | ||
2919 | if (truncated) | |
2920 | start = ordered_extent->file_offset + logical_len; | |
2921 | else | |
2922 | start = ordered_extent->file_offset; | |
2923 | end = ordered_extent->file_offset + ordered_extent->len - 1; | |
2924 | clear_extent_uptodate(io_tree, start, end, NULL, GFP_NOFS); | |
2925 | ||
2926 | /* Drop the cache for the part of the extent we didn't write. */ | |
dcdbc059 | 2927 | btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 0); |
5fd02043 | 2928 | |
0bec9ef5 JB |
2929 | /* |
2930 | * If the ordered extent had an IOERR or something else went | |
2931 | * wrong we need to return the space for this ordered extent | |
77cef2ec JB |
2932 | * back to the allocator. We only free the extent in the |
2933 | * truncated case if we didn't write out the extent at all. | |
0bec9ef5 | 2934 | */ |
77cef2ec JB |
2935 | if ((ret || !logical_len) && |
2936 | !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) && | |
0bec9ef5 | 2937 | !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) |
2ff7e61e JM |
2938 | btrfs_free_reserved_extent(fs_info, |
2939 | ordered_extent->start, | |
e570fd27 | 2940 | ordered_extent->disk_len, 1); |
0bec9ef5 JB |
2941 | } |
2942 | ||
2943 | ||
5fd02043 | 2944 | /* |
8bad3c02 LB |
2945 | * This needs to be done to make sure anybody waiting knows we are done |
2946 | * updating everything for this ordered extent. | |
5fd02043 JB |
2947 | */ |
2948 | btrfs_remove_ordered_extent(inode, ordered_extent); | |
2949 | ||
38c227d8 | 2950 | /* for snapshot-aware defrag */ |
6f519564 LB |
2951 | if (new) { |
2952 | if (ret) { | |
2953 | free_sa_defrag_extent(new); | |
0b246afa | 2954 | atomic_dec(&fs_info->defrag_running); |
6f519564 LB |
2955 | } else { |
2956 | relink_file_extents(new); | |
2957 | } | |
2958 | } | |
38c227d8 | 2959 | |
e6dcd2dc CM |
2960 | /* once for us */ |
2961 | btrfs_put_ordered_extent(ordered_extent); | |
2962 | /* once for the tree */ | |
2963 | btrfs_put_ordered_extent(ordered_extent); | |
2964 | ||
5fd02043 JB |
2965 | return ret; |
2966 | } | |
2967 | ||
2968 | static void finish_ordered_fn(struct btrfs_work *work) | |
2969 | { | |
2970 | struct btrfs_ordered_extent *ordered_extent; | |
2971 | ordered_extent = container_of(work, struct btrfs_ordered_extent, work); | |
2972 | btrfs_finish_ordered_io(ordered_extent); | |
e6dcd2dc CM |
2973 | } |
2974 | ||
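finish_ordered_fn() above gets handed only a pointer to the embedded btrfs_work and recovers the surrounding ordered extent with container_of(). As a minimal standalone illustration of that idiom (a userspace sketch; the struct and field names below are invented, not kernel types):

#include <stddef.h>
#include <stdio.h>

/* recover a pointer to the enclosing struct from a pointer to one member */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_item { int pending; };

struct ordered_extent {
	unsigned long long file_offset;
	struct work_item work;	/* embedded, like btrfs_ordered_extent::work */
};

static void finish_fn(struct work_item *w)
{
	/* w points into the middle of an ordered_extent; step back to its start */
	struct ordered_extent *oe = container_of(w, struct ordered_extent, work);

	printf("finishing ordered extent at offset %llu\n", oe->file_offset);
}

int main(void)
{
	struct ordered_extent oe = { .file_offset = 4096 };

	finish_fn(&oe.work);
	return 0;
}

The workqueue machinery never needs to know what its work items are embedded in; container_of() is what turns the work pointer back into the btrfs_ordered_extent that btrfs_finish_ordered_io() expects.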
c3988d63 | 2975 | static void btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end, |
211f90e6 CM |
2976 | struct extent_state *state, int uptodate) |
2977 | { | |
5fd02043 | 2978 | struct inode *inode = page->mapping->host; |
0b246afa | 2979 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
5fd02043 | 2980 | struct btrfs_ordered_extent *ordered_extent = NULL; |
9e0af237 LB |
2981 | struct btrfs_workqueue *wq; |
2982 | btrfs_work_func_t func; | |
5fd02043 | 2983 | |
1abe9b8a | 2984 | trace_btrfs_writepage_end_io_hook(page, start, end, uptodate); |
2985 | ||
8b62b72b | 2986 | ClearPagePrivate2(page); |
5fd02043 JB |
2987 | if (!btrfs_dec_test_ordered_pending(inode, &ordered_extent, start, |
2988 | end - start + 1, uptodate)) | |
c3988d63 | 2989 | return; |
5fd02043 | 2990 | |
70ddc553 | 2991 | if (btrfs_is_free_space_inode(BTRFS_I(inode))) { |
0b246afa | 2992 | wq = fs_info->endio_freespace_worker; |
9e0af237 LB |
2993 | func = btrfs_freespace_write_helper; |
2994 | } else { | |
0b246afa | 2995 | wq = fs_info->endio_write_workers; |
9e0af237 LB |
2996 | func = btrfs_endio_write_helper; |
2997 | } | |
5fd02043 | 2998 | |
9e0af237 LB |
2999 | btrfs_init_work(&ordered_extent->work, func, finish_ordered_fn, NULL, |
3000 | NULL); | |
3001 | btrfs_queue_work(wq, &ordered_extent->work); | |
211f90e6 CM |
3002 | } |
3003 | ||
dc380aea MX |
3004 | static int __readpage_endio_check(struct inode *inode, |
3005 | struct btrfs_io_bio *io_bio, | |
3006 | int icsum, struct page *page, | |
3007 | int pgoff, u64 start, size_t len) | |
3008 | { | |
3009 | char *kaddr; | |
3010 | u32 csum_expected; | |
3011 | u32 csum = ~(u32)0; | |
dc380aea MX |
3012 | |
3013 | csum_expected = *(((u32 *)io_bio->csum) + icsum); | |
3014 | ||
3015 | kaddr = kmap_atomic(page); | |
3016 | csum = btrfs_csum_data(kaddr + pgoff, csum, len); | |
0b5e3daf | 3017 | btrfs_csum_final(csum, (u8 *)&csum); |
dc380aea MX |
3018 | if (csum != csum_expected) |
3019 | goto zeroit; | |
3020 | ||
3021 | kunmap_atomic(kaddr); | |
3022 | return 0; | |
3023 | zeroit: | |
0970a22e | 3024 | btrfs_print_data_csum_error(BTRFS_I(inode), start, csum, csum_expected, |
6f6b643e | 3025 | io_bio->mirror_num); |
dc380aea MX |
3026 | memset(kaddr + pgoff, 1, len); |
3027 | flush_dcache_page(page); | |
3028 | kunmap_atomic(kaddr); | |
3029 | if (csum_expected == 0) | |
3030 | return 0; | |
3031 | return -EIO; | |
3032 | } | |
3033 | ||
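__readpage_endio_check() above seeds the checksum with ~0, runs btrfs_csum_data() over the block and inverts the result in btrfs_csum_final() before comparing it against the value stashed in the io_bio. Data checksums here are CRC32C, so the verification can be sketched in plain userspace C roughly as follows (illustration only: crc32c() is a slow bitwise reference and check_block() is a made-up helper, not kernel API; byte-order handling of the stored csum is glossed over):

#include <stdint.h>
#include <stddef.h>

/* bitwise CRC32C (Castagnoli), reflected polynomial 0x82F63B38 */
static uint32_t crc32c(uint32_t crc, const void *buf, size_t len)
{
	const uint8_t *p = buf;

	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0x82F63B38u : 0);
	}
	return crc;
}

/* mirror the kernel flow: seed with ~0, checksum the block, invert, compare */
static int check_block(const void *data, size_t len, uint32_t csum_expected)
{
	uint32_t csum = crc32c(~(uint32_t)0, data, len);

	csum = ~csum;				/* what btrfs_csum_final() does */
	return csum == csum_expected ? 0 : -1;	/* -1 stands in for -EIO */
}

On a mismatch the real function poisons the page range and returns -EIO, and, as the comment below notes, the code in extent_io.c then tries to find a good copy on another mirror.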
d352ac68 CM |
3034 | /* |
3035 | * when reads are done, we need to check csums to verify the data is correct | |
4a54c8c1 JS |
3036 | * if there's a match, we allow the bio to finish. If not, the code in |
3037 | * extent_io.c will try to find good copies for us. | |
d352ac68 | 3038 | */ |
facc8a22 MX |
3039 | static int btrfs_readpage_end_io_hook(struct btrfs_io_bio *io_bio, |
3040 | u64 phy_offset, struct page *page, | |
3041 | u64 start, u64 end, int mirror) | |
07157aac | 3042 | { |
4eee4fa4 | 3043 | size_t offset = start - page_offset(page); |
07157aac | 3044 | struct inode *inode = page->mapping->host; |
d1310b2e | 3045 | struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; |
ff79f819 | 3046 | struct btrfs_root *root = BTRFS_I(inode)->root; |
d1310b2e | 3047 | |
d20f7043 CM |
3048 | if (PageChecked(page)) { |
3049 | ClearPageChecked(page); | |
dc380aea | 3050 | return 0; |
d20f7043 | 3051 | } |
6cbff00f CH |
3052 | |
3053 | if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) | |
dc380aea | 3054 | return 0; |
17d217fe YZ |
3055 | |
3056 | if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID && | |
9655d298 | 3057 | test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) { |
91166212 | 3058 | clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM); |
b6cda9bc | 3059 | return 0; |
17d217fe | 3060 | } |
d20f7043 | 3061 | |
facc8a22 | 3062 | phy_offset >>= inode->i_sb->s_blocksize_bits; |
dc380aea MX |
3063 | return __readpage_endio_check(inode, io_bio, phy_offset, page, offset, |
3064 | start, (size_t)(end - start + 1)); | |
07157aac | 3065 | } |
b888db2b | 3066 | |
24bbcf04 YZ |
3067 | void btrfs_add_delayed_iput(struct inode *inode) |
3068 | { | |
0b246afa | 3069 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
8089fe62 | 3070 | struct btrfs_inode *binode = BTRFS_I(inode); |
24bbcf04 YZ |
3071 | |
3072 | if (atomic_add_unless(&inode->i_count, -1, 1)) | |
3073 | return; | |
3074 | ||
24bbcf04 | 3075 | spin_lock(&fs_info->delayed_iput_lock); |
8089fe62 DS |
3076 | if (binode->delayed_iput_count == 0) { |
3077 | ASSERT(list_empty(&binode->delayed_iput)); | |
3078 | list_add_tail(&binode->delayed_iput, &fs_info->delayed_iputs); | |
3079 | } else { | |
3080 | binode->delayed_iput_count++; | |
3081 | } | |
24bbcf04 YZ |
3082 | spin_unlock(&fs_info->delayed_iput_lock); |
3083 | } | |
3084 | ||
2ff7e61e | 3085 | void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info) |
24bbcf04 | 3086 | { |
24bbcf04 | 3087 | |
24bbcf04 | 3088 | spin_lock(&fs_info->delayed_iput_lock); |
8089fe62 DS |
3089 | while (!list_empty(&fs_info->delayed_iputs)) { |
3090 | struct btrfs_inode *inode; | |
3091 | ||
3092 | inode = list_first_entry(&fs_info->delayed_iputs, | |
3093 | struct btrfs_inode, delayed_iput); | |
3094 | if (inode->delayed_iput_count) { | |
3095 | inode->delayed_iput_count--; | |
3096 | list_move_tail(&inode->delayed_iput, | |
3097 | &fs_info->delayed_iputs); | |
3098 | } else { | |
3099 | list_del_init(&inode->delayed_iput); | |
3100 | } | |
3101 | spin_unlock(&fs_info->delayed_iput_lock); | |
3102 | iput(&inode->vfs_inode); | |
3103 | spin_lock(&fs_info->delayed_iput_lock); | |
24bbcf04 | 3104 | } |
8089fe62 | 3105 | spin_unlock(&fs_info->delayed_iput_lock); |
24bbcf04 YZ |
3106 | } |
3107 | ||
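The subtle line in btrfs_add_delayed_iput() above is atomic_add_unless(&inode->i_count, -1, 1): it drops a reference only when the count is not already 1, so the last reference is never dropped from this context (presumably because the final iput can kick off inode eviction work that is not safe everywhere this is called from); instead the inode is parked on fs_info->delayed_iputs and released later by btrfs_run_delayed_iputs(). A mutex-based sketch of that primitive's contract (illustration only; the kernel version is lock-free):

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Add 'a' to '*v' unless '*v' is already 'u'.  Returns 1 if the add was
 * performed and 0 otherwise, which is the contract of atomic_add_unless().
 */
static int add_unless(int *v, int a, int u)
{
	int done = 0;

	pthread_mutex_lock(&lock);
	if (*v != u) {
		*v += a;
		done = 1;
	}
	pthread_mutex_unlock(&lock);
	return done;
}

With a reference count of 3, add_unless(&count, -1, 1) drops it to 2 and returns 1; with a count of 1 it refuses, returns 0, and the caller queues the object for a deferred final put instead.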
d68fc57b | 3108 | /* |
42b2aa86 | 3109 | * This is called at transaction commit time. If there are no orphan |
d68fc57b YZ |
3110 | * files in the subvolume, it removes the orphan item and frees the |
3111 | * block_rsv structure. |
3112 | */ | |
3113 | void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans, | |
3114 | struct btrfs_root *root) | |
3115 | { | |
0b246afa | 3116 | struct btrfs_fs_info *fs_info = root->fs_info; |
90290e19 | 3117 | struct btrfs_block_rsv *block_rsv; |
d68fc57b YZ |
3118 | int ret; |
3119 | ||
8a35d95f | 3120 | if (atomic_read(&root->orphan_inodes) || |
d68fc57b YZ |
3121 | root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE) |
3122 | return; | |
3123 | ||
90290e19 | 3124 | spin_lock(&root->orphan_lock); |
8a35d95f | 3125 | if (atomic_read(&root->orphan_inodes)) { |
90290e19 JB |
3126 | spin_unlock(&root->orphan_lock); |
3127 | return; | |
3128 | } | |
3129 | ||
3130 | if (root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE) { | |
3131 | spin_unlock(&root->orphan_lock); | |
3132 | return; | |
3133 | } | |
3134 | ||
3135 | block_rsv = root->orphan_block_rsv; | |
3136 | root->orphan_block_rsv = NULL; | |
3137 | spin_unlock(&root->orphan_lock); | |
3138 | ||
27cdeb70 | 3139 | if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state) && |
d68fc57b | 3140 | btrfs_root_refs(&root->root_item) > 0) { |
0b246afa | 3141 | ret = btrfs_del_orphan_item(trans, fs_info->tree_root, |
d68fc57b | 3142 | root->root_key.objectid); |
4ef31a45 | 3143 | if (ret) |
66642832 | 3144 | btrfs_abort_transaction(trans, ret); |
4ef31a45 | 3145 | else |
27cdeb70 MX |
3146 | clear_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, |
3147 | &root->state); | |
d68fc57b YZ |
3148 | } |
3149 | ||
90290e19 JB |
3150 | if (block_rsv) { |
3151 | WARN_ON(block_rsv->size > 0); | |
2ff7e61e | 3152 | btrfs_free_block_rsv(fs_info, block_rsv); |
d68fc57b YZ |
3153 | } |
3154 | } | |
3155 | ||
7b128766 JB |
3156 | /* |
3157 | * This creates an orphan entry for the given inode in case something goes | |
3158 | * wrong in the middle of an unlink/truncate. | |
d68fc57b YZ |
3159 | * |
3160 | * NOTE: caller of this function should reserve 5 units of metadata for | |
3161 | * this function. | |
7b128766 | 3162 | */ |
73f2e545 NB |
3163 | int btrfs_orphan_add(struct btrfs_trans_handle *trans, |
3164 | struct btrfs_inode *inode) | |
7b128766 | 3165 | { |
73f2e545 NB |
3166 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); |
3167 | struct btrfs_root *root = inode->root; | |
d68fc57b YZ |
3168 | struct btrfs_block_rsv *block_rsv = NULL; |
3169 | int reserve = 0; | |
3170 | int insert = 0; | |
3171 | int ret; | |
7b128766 | 3172 | |
d68fc57b | 3173 | if (!root->orphan_block_rsv) { |
2ff7e61e JM |
3174 | block_rsv = btrfs_alloc_block_rsv(fs_info, |
3175 | BTRFS_BLOCK_RSV_TEMP); | |
b532402e TI |
3176 | if (!block_rsv) |
3177 | return -ENOMEM; | |
d68fc57b | 3178 | } |
7b128766 | 3179 | |
d68fc57b YZ |
3180 | spin_lock(&root->orphan_lock); |
3181 | if (!root->orphan_block_rsv) { | |
3182 | root->orphan_block_rsv = block_rsv; | |
3183 | } else if (block_rsv) { | |
2ff7e61e | 3184 | btrfs_free_block_rsv(fs_info, block_rsv); |
d68fc57b | 3185 | block_rsv = NULL; |
7b128766 | 3186 | } |
7b128766 | 3187 | |
8a35d95f | 3188 | if (!test_and_set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, |
73f2e545 | 3189 | &inode->runtime_flags)) { |
d68fc57b YZ |
3190 | #if 0 |
3191 | /* | |
3192 | * For proper ENOSPC handling, we should do orphan | |
3193 | * cleanup when mounting. But this introduces backward | |
3194 | * compatibility issue. | |
3195 | */ | |
3196 | if (!xchg(&root->orphan_item_inserted, 1)) | |
3197 | insert = 2; | |
3198 | else | |
3199 | insert = 1; | |
3200 | #endif | |
3201 | insert = 1; | |
321f0e70 | 3202 | atomic_inc(&root->orphan_inodes); |
7b128766 JB |
3203 | } |
3204 | ||
72ac3c0d | 3205 | if (!test_and_set_bit(BTRFS_INODE_ORPHAN_META_RESERVED, |
73f2e545 | 3206 | &inode->runtime_flags)) |
d68fc57b | 3207 | reserve = 1; |
d68fc57b | 3208 | spin_unlock(&root->orphan_lock); |
7b128766 | 3209 | |
d68fc57b YZ |
3210 | /* grab metadata reservation from transaction handle */ |
3211 | if (reserve) { | |
73f2e545 | 3212 | ret = btrfs_orphan_reserve_metadata(trans, inode); |
3b6571c1 JB |
3213 | ASSERT(!ret); |
3214 | if (ret) { | |
3215 | atomic_dec(&root->orphan_inodes); | |
3216 | clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED, | |
73f2e545 | 3217 | &inode->runtime_flags); |
3b6571c1 JB |
3218 | if (insert) |
3219 | clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, | |
73f2e545 | 3220 | &inode->runtime_flags); |
3b6571c1 JB |
3221 | return ret; |
3222 | } | |
d68fc57b | 3223 | } |
7b128766 | 3224 | |
d68fc57b YZ |
3225 | /* insert an orphan item to track this unlinked/truncated file */ |
3226 | if (insert >= 1) { | |
73f2e545 | 3227 | ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode)); |
4ef31a45 | 3228 | if (ret) { |
703c88e0 | 3229 | atomic_dec(&root->orphan_inodes); |
4ef31a45 JB |
3230 | if (reserve) { |
3231 | clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED, | |
73f2e545 NB |
3232 | &inode->runtime_flags); |
3233 | btrfs_orphan_release_metadata(inode); | |
4ef31a45 JB |
3234 | } |
3235 | if (ret != -EEXIST) { | |
e8e7cff6 | 3236 | clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, |
73f2e545 | 3237 | &inode->runtime_flags); |
66642832 | 3238 | btrfs_abort_transaction(trans, ret); |
4ef31a45 JB |
3239 | return ret; |
3240 | } | |
79787eaa JM |
3241 | } |
3242 | ret = 0; | |
d68fc57b YZ |
3243 | } |
3244 | ||
3245 | /* insert an orphan item to track subvolume contains orphan files */ | |
3246 | if (insert >= 2) { | |
0b246afa | 3247 | ret = btrfs_insert_orphan_item(trans, fs_info->tree_root, |
d68fc57b | 3248 | root->root_key.objectid); |
79787eaa | 3249 | if (ret && ret != -EEXIST) { |
66642832 | 3250 | btrfs_abort_transaction(trans, ret); |
79787eaa JM |
3251 | return ret; |
3252 | } | |
d68fc57b YZ |
3253 | } |
3254 | return 0; | |
7b128766 JB |
3255 | } |
3256 | ||
3257 | /* | |
3258 | * We have done the truncate/delete so we can go ahead and remove the orphan | |
3259 | * item for this particular inode. | |
3260 | */ | |
48a3b636 | 3261 | static int btrfs_orphan_del(struct btrfs_trans_handle *trans, |
3d6ae7bb | 3262 | struct btrfs_inode *inode) |
7b128766 | 3263 | { |
3d6ae7bb | 3264 | struct btrfs_root *root = inode->root; |
d68fc57b YZ |
3265 | int delete_item = 0; |
3266 | int release_rsv = 0; | |
7b128766 JB |
3267 | int ret = 0; |
3268 | ||
d68fc57b | 3269 | spin_lock(&root->orphan_lock); |
8a35d95f | 3270 | if (test_and_clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, |
3d6ae7bb | 3271 | &inode->runtime_flags)) |
d68fc57b | 3272 | delete_item = 1; |
7b128766 | 3273 | |
72ac3c0d | 3274 | if (test_and_clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED, |
3d6ae7bb | 3275 | &inode->runtime_flags)) |
d68fc57b | 3276 | release_rsv = 1; |
d68fc57b | 3277 | spin_unlock(&root->orphan_lock); |
7b128766 | 3278 | |
703c88e0 | 3279 | if (delete_item) { |
8a35d95f | 3280 | atomic_dec(&root->orphan_inodes); |
703c88e0 FDBM |
3281 | if (trans) |
3282 | ret = btrfs_del_orphan_item(trans, root, | |
3d6ae7bb | 3283 | btrfs_ino(inode)); |
8a35d95f | 3284 | } |
7b128766 | 3285 | |
703c88e0 | 3286 | if (release_rsv) |
3d6ae7bb | 3287 | btrfs_orphan_release_metadata(inode); |
703c88e0 | 3288 | |
4ef31a45 | 3289 | return ret; |
7b128766 JB |
3290 | } |
3291 | ||
3292 | /* | |
3293 | * this cleans up any orphans that may be left on the list from the last use | |
3294 | * of this root. | |
3295 | */ | |
66b4ffd1 | 3296 | int btrfs_orphan_cleanup(struct btrfs_root *root) |
7b128766 | 3297 | { |
0b246afa | 3298 | struct btrfs_fs_info *fs_info = root->fs_info; |
7b128766 JB |
3299 | struct btrfs_path *path; |
3300 | struct extent_buffer *leaf; | |
7b128766 JB |
3301 | struct btrfs_key key, found_key; |
3302 | struct btrfs_trans_handle *trans; | |
3303 | struct inode *inode; | |
8f6d7f4f | 3304 | u64 last_objectid = 0; |
7b128766 JB |
3305 | int ret = 0, nr_unlink = 0, nr_truncate = 0; |
3306 | ||
d68fc57b | 3307 | if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED)) |
66b4ffd1 | 3308 | return 0; |
c71bf099 YZ |
3309 | |
3310 | path = btrfs_alloc_path(); | |
66b4ffd1 JB |
3311 | if (!path) { |
3312 | ret = -ENOMEM; | |
3313 | goto out; | |
3314 | } | |
e4058b54 | 3315 | path->reada = READA_BACK; |
7b128766 JB |
3316 | |
3317 | key.objectid = BTRFS_ORPHAN_OBJECTID; | |
962a298f | 3318 | key.type = BTRFS_ORPHAN_ITEM_KEY; |
7b128766 JB |
3319 | key.offset = (u64)-1; |
3320 | ||
7b128766 JB |
3321 | while (1) { |
3322 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | |
66b4ffd1 JB |
3323 | if (ret < 0) |
3324 | goto out; | |
7b128766 JB |
3325 | |
3326 | /* | |
3327 | * ret == 0 means we found what we were searching for, which |
25985edc | 3328 | * is weird, but possible, so only screw with path if we didn't |
7b128766 JB |
3329 | * find the key and see if we have stuff that matches |
3330 | */ | |
3331 | if (ret > 0) { | |
66b4ffd1 | 3332 | ret = 0; |
7b128766 JB |
3333 | if (path->slots[0] == 0) |
3334 | break; | |
3335 | path->slots[0]--; | |
3336 | } | |
3337 | ||
3338 | /* pull out the item */ | |
3339 | leaf = path->nodes[0]; | |
7b128766 JB |
3340 | btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); |
3341 | ||
3342 | /* make sure the item matches what we want */ | |
3343 | if (found_key.objectid != BTRFS_ORPHAN_OBJECTID) | |
3344 | break; | |
962a298f | 3345 | if (found_key.type != BTRFS_ORPHAN_ITEM_KEY) |
7b128766 JB |
3346 | break; |
3347 | ||
3348 | /* release the path since we're done with it */ | |
b3b4aa74 | 3349 | btrfs_release_path(path); |
7b128766 JB |
3350 | |
3351 | /* | |
3352 | * this is where we are basically btrfs_lookup, without the | |
3353 | * crossing root thing. we store the inode number in the | |
3354 | * offset of the orphan item. | |
3355 | */ | |
8f6d7f4f JB |
3356 | |
3357 | if (found_key.offset == last_objectid) { | |
0b246afa JM |
3358 | btrfs_err(fs_info, |
3359 | "Error removing orphan entry, stopping orphan cleanup"); | |
8f6d7f4f JB |
3360 | ret = -EINVAL; |
3361 | goto out; | |
3362 | } | |
3363 | ||
3364 | last_objectid = found_key.offset; | |
3365 | ||
5d4f98a2 YZ |
3366 | found_key.objectid = found_key.offset; |
3367 | found_key.type = BTRFS_INODE_ITEM_KEY; | |
3368 | found_key.offset = 0; | |
0b246afa | 3369 | inode = btrfs_iget(fs_info->sb, &found_key, root, NULL); |
8c6ffba0 | 3370 | ret = PTR_ERR_OR_ZERO(inode); |
67710892 | 3371 | if (ret && ret != -ENOENT) |
66b4ffd1 | 3372 | goto out; |
7b128766 | 3373 | |
0b246afa | 3374 | if (ret == -ENOENT && root == fs_info->tree_root) { |
f8e9e0b0 AJ |
3375 | struct btrfs_root *dead_root; |
3376 | struct btrfs_fs_info *fs_info = root->fs_info; | |
3377 | int is_dead_root = 0; | |
3378 | ||
3379 | /* | |
3380 | * this is an orphan in the tree root. Currently these | |
3381 | * could come from 2 sources: | |
3382 | * a) a snapshot deletion in progress | |
3383 | * b) a free space cache inode | |
3384 | * We need to distinguish those two, as the snapshot | |
3385 | * orphan must not get deleted. | |
3386 | * find_dead_roots already ran before us, so if this | |
3387 | * is a snapshot deletion, we should find the root | |
3388 | * in the dead_roots list | |
3389 | */ | |
3390 | spin_lock(&fs_info->trans_lock); | |
3391 | list_for_each_entry(dead_root, &fs_info->dead_roots, | |
3392 | root_list) { | |
3393 | if (dead_root->root_key.objectid == | |
3394 | found_key.objectid) { | |
3395 | is_dead_root = 1; | |
3396 | break; | |
3397 | } | |
3398 | } | |
3399 | spin_unlock(&fs_info->trans_lock); | |
3400 | if (is_dead_root) { | |
3401 | /* prevent this orphan from being found again */ | |
3402 | key.offset = found_key.objectid - 1; | |
3403 | continue; | |
3404 | } | |
3405 | } | |
7b128766 | 3406 | /* |
a8c9e576 JB |
3407 | * Inode is already gone but the orphan item is still there, |
3408 | * kill the orphan item. | |
7b128766 | 3409 | */ |
67710892 | 3410 | if (ret == -ENOENT) { |
a8c9e576 | 3411 | trans = btrfs_start_transaction(root, 1); |
66b4ffd1 JB |
3412 | if (IS_ERR(trans)) { |
3413 | ret = PTR_ERR(trans); | |
3414 | goto out; | |
3415 | } | |
0b246afa JM |
3416 | btrfs_debug(fs_info, "auto deleting %Lu", |
3417 | found_key.objectid); | |
a8c9e576 JB |
3418 | ret = btrfs_del_orphan_item(trans, root, |
3419 | found_key.objectid); | |
3a45bb20 | 3420 | btrfs_end_transaction(trans); |
4ef31a45 JB |
3421 | if (ret) |
3422 | goto out; | |
7b128766 JB |
3423 | continue; |
3424 | } | |
3425 | ||
a8c9e576 JB |
3426 | /* |
3427 | * add this inode to the orphan list so btrfs_orphan_del does | |
3428 | * the proper thing when we hit it | |
3429 | */ | |
8a35d95f JB |
3430 | set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, |
3431 | &BTRFS_I(inode)->runtime_flags); | |
925396ec | 3432 | atomic_inc(&root->orphan_inodes); |
a8c9e576 | 3433 | |
7b128766 JB |
3434 | /* if we have links, this was a truncate, lets do that */ |
3435 | if (inode->i_nlink) { | |
fae7f21c | 3436 | if (WARN_ON(!S_ISREG(inode->i_mode))) { |
a41ad394 JB |
3437 | iput(inode); |
3438 | continue; | |
3439 | } | |
7b128766 | 3440 | nr_truncate++; |
f3fe820c JB |
3441 | |
3442 | /* 1 for the orphan item deletion. */ | |
3443 | trans = btrfs_start_transaction(root, 1); | |
3444 | if (IS_ERR(trans)) { | |
c69b26b0 | 3445 | iput(inode); |
f3fe820c JB |
3446 | ret = PTR_ERR(trans); |
3447 | goto out; | |
3448 | } | |
73f2e545 | 3449 | ret = btrfs_orphan_add(trans, BTRFS_I(inode)); |
3a45bb20 | 3450 | btrfs_end_transaction(trans); |
c69b26b0 JB |
3451 | if (ret) { |
3452 | iput(inode); | |
f3fe820c | 3453 | goto out; |
c69b26b0 | 3454 | } |
f3fe820c | 3455 | |
66b4ffd1 | 3456 | ret = btrfs_truncate(inode); |
4a7d0f68 | 3457 | if (ret) |
3d6ae7bb | 3458 | btrfs_orphan_del(NULL, BTRFS_I(inode)); |
7b128766 JB |
3459 | } else { |
3460 | nr_unlink++; | |
3461 | } | |
3462 | ||
3463 | /* this will do delete_inode and everything for us */ | |
3464 | iput(inode); | |
66b4ffd1 JB |
3465 | if (ret) |
3466 | goto out; | |
7b128766 | 3467 | } |
3254c876 MX |
3468 | /* release the path since we're done with it */ |
3469 | btrfs_release_path(path); | |
3470 | ||
d68fc57b YZ |
3471 | root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE; |
3472 | ||
3473 | if (root->orphan_block_rsv) | |
2ff7e61e | 3474 | btrfs_block_rsv_release(fs_info, root->orphan_block_rsv, |
d68fc57b YZ |
3475 | (u64)-1); |
3476 | ||
27cdeb70 MX |
3477 | if (root->orphan_block_rsv || |
3478 | test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) { | |
7a7eaa40 | 3479 | trans = btrfs_join_transaction(root); |
66b4ffd1 | 3480 | if (!IS_ERR(trans)) |
3a45bb20 | 3481 | btrfs_end_transaction(trans); |
d68fc57b | 3482 | } |
7b128766 JB |
3483 | |
3484 | if (nr_unlink) | |
0b246afa | 3485 | btrfs_debug(fs_info, "unlinked %d orphans", nr_unlink); |
7b128766 | 3486 | if (nr_truncate) |
0b246afa | 3487 | btrfs_debug(fs_info, "truncated %d orphans", nr_truncate); |
66b4ffd1 JB |
3488 | |
3489 | out: | |
3490 | if (ret) | |
0b246afa | 3491 | btrfs_err(fs_info, "could not do orphan cleanup %d", ret); |
66b4ffd1 JB |
3492 | btrfs_free_path(path); |
3493 | return ret; | |
7b128766 JB |
3494 | } |
3495 | ||
46a53cca CM |
3496 | /* |
3497 | * very simple check to peek ahead in the leaf looking for xattrs. If we | |
3498 | * don't find any xattrs, we know there can't be any acls. | |
3499 | * | |
3500 | * slot is the slot the inode is in, objectid is the objectid of the inode | |
3501 | */ | |
3502 | static noinline int acls_after_inode_item(struct extent_buffer *leaf, | |
63541927 FDBM |
3503 | int slot, u64 objectid, |
3504 | int *first_xattr_slot) | |
46a53cca CM |
3505 | { |
3506 | u32 nritems = btrfs_header_nritems(leaf); | |
3507 | struct btrfs_key found_key; | |
f23b5a59 JB |
3508 | static u64 xattr_access = 0; |
3509 | static u64 xattr_default = 0; | |
46a53cca CM |
3510 | int scanned = 0; |
3511 | ||
f23b5a59 | 3512 | if (!xattr_access) { |
97d79299 AG |
3513 | xattr_access = btrfs_name_hash(XATTR_NAME_POSIX_ACL_ACCESS, |
3514 | strlen(XATTR_NAME_POSIX_ACL_ACCESS)); | |
3515 | xattr_default = btrfs_name_hash(XATTR_NAME_POSIX_ACL_DEFAULT, | |
3516 | strlen(XATTR_NAME_POSIX_ACL_DEFAULT)); | |
f23b5a59 JB |
3517 | } |
3518 | ||
46a53cca | 3519 | slot++; |
63541927 | 3520 | *first_xattr_slot = -1; |
46a53cca CM |
3521 | while (slot < nritems) { |
3522 | btrfs_item_key_to_cpu(leaf, &found_key, slot); | |
3523 | ||
3524 | /* we found a different objectid, there must not be acls */ | |
3525 | if (found_key.objectid != objectid) | |
3526 | return 0; | |
3527 | ||
3528 | /* we found an xattr, assume we've got an acl */ | |
f23b5a59 | 3529 | if (found_key.type == BTRFS_XATTR_ITEM_KEY) { |
63541927 FDBM |
3530 | if (*first_xattr_slot == -1) |
3531 | *first_xattr_slot = slot; | |
f23b5a59 JB |
3532 | if (found_key.offset == xattr_access || |
3533 | found_key.offset == xattr_default) | |
3534 | return 1; | |
3535 | } | |
46a53cca CM |
3536 | |
3537 | /* | |
3538 | * we found a key greater than an xattr key, there can't | |
3539 | * be any acls later on | |
3540 | */ | |
3541 | if (found_key.type > BTRFS_XATTR_ITEM_KEY) | |
3542 | return 0; | |
3543 | ||
3544 | slot++; | |
3545 | scanned++; | |
3546 | ||
3547 | /* | |
3548 | * it goes inode, inode backrefs, xattrs, extents, | |
3549 | * so if there are a ton of hard links to an inode there can | |
3550 | * be a lot of backrefs. Don't waste time searching too hard, | |
3551 | * this is just an optimization | |
3552 | */ | |
3553 | if (scanned >= 8) | |
3554 | break; | |
3555 | } | |
3556 | /* we hit the end of the leaf before we found an xattr or | |
3557 | * something larger than an xattr. We have to assume the inode | |
3558 | * has acls | |
3559 | */ | |
63541927 FDBM |
3560 | if (*first_xattr_slot == -1) |
3561 | *first_xattr_slot = slot; | |
46a53cca CM |
3562 | return 1; |
3563 | } | |
3564 | ||
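acls_after_inode_item() above works because items in a leaf are sorted by (objectid, type, offset): for one inode the xattr items sort shortly after the inode item and its backrefs, and seeing a key of a higher type proves there are none. A standalone sketch of that peek-ahead over a sorted key array (the struct and the constant are simplified stand-ins, not the on-disk structures):

#include <stdint.h>

struct key { uint64_t objectid; uint8_t type; uint64_t offset; };

#define XATTR_ITEM_KEY	24	/* stands in for BTRFS_XATTR_ITEM_KEY */

/* peek ahead in a sorted key array: 1 = may have acls, 0 = definitely not */
static int may_have_acls(const struct key *keys, int nr, int slot,
			 uint64_t objectid)
{
	int scanned = 0;

	for (slot++; slot < nr && scanned < 8; slot++, scanned++) {
		if (keys[slot].objectid != objectid)
			return 0;	/* next object: no xattrs for ours */
		if (keys[slot].type == XATTR_ITEM_KEY)
			return 1;	/* found an xattr, assume an acl */
		if (keys[slot].type > XATTR_ITEM_KEY)
			return 0;	/* past where xattrs would sort */
	}
	return 1;	/* gave up early or hit the end: assume acls, as above */
}

Like the real function, the sketch bails out after a few slots and errs on the side of assuming ACLs exist: a false positive only costs an extra xattr lookup, while a false negative would silently skip ACL enforcement.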
d352ac68 CM |
3565 | /* |
3566 | * read an inode from the btree into the in-memory inode | |
3567 | */ | |
67710892 | 3568 | static int btrfs_read_locked_inode(struct inode *inode) |
39279cc3 | 3569 | { |
0b246afa | 3570 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
39279cc3 | 3571 | struct btrfs_path *path; |
5f39d397 | 3572 | struct extent_buffer *leaf; |
39279cc3 CM |
3573 | struct btrfs_inode_item *inode_item; |
3574 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
3575 | struct btrfs_key location; | |
67de1176 | 3576 | unsigned long ptr; |
46a53cca | 3577 | int maybe_acls; |
618e21d5 | 3578 | u32 rdev; |
39279cc3 | 3579 | int ret; |
2f7e33d4 | 3580 | bool filled = false; |
63541927 | 3581 | int first_xattr_slot; |
2f7e33d4 MX |
3582 | |
3583 | ret = btrfs_fill_inode(inode, &rdev); | |
3584 | if (!ret) | |
3585 | filled = true; | |
39279cc3 CM |
3586 | |
3587 | path = btrfs_alloc_path(); | |
67710892 FM |
3588 | if (!path) { |
3589 | ret = -ENOMEM; | |
1748f843 | 3590 | goto make_bad; |
67710892 | 3591 | } |
1748f843 | 3592 | |
39279cc3 | 3593 | memcpy(&location, &BTRFS_I(inode)->location, sizeof(location)); |
dc17ff8f | 3594 | |
39279cc3 | 3595 | ret = btrfs_lookup_inode(NULL, root, path, &location, 0); |
67710892 FM |
3596 | if (ret) { |
3597 | if (ret > 0) | |
3598 | ret = -ENOENT; | |
39279cc3 | 3599 | goto make_bad; |
67710892 | 3600 | } |
39279cc3 | 3601 | |
5f39d397 | 3602 | leaf = path->nodes[0]; |
2f7e33d4 MX |
3603 | |
3604 | if (filled) | |
67de1176 | 3605 | goto cache_index; |
2f7e33d4 | 3606 | |
5f39d397 CM |
3607 | inode_item = btrfs_item_ptr(leaf, path->slots[0], |
3608 | struct btrfs_inode_item); | |
5f39d397 | 3609 | inode->i_mode = btrfs_inode_mode(leaf, inode_item); |
bfe86848 | 3610 | set_nlink(inode, btrfs_inode_nlink(leaf, inode_item)); |
2f2f43d3 EB |
3611 | i_uid_write(inode, btrfs_inode_uid(leaf, inode_item)); |
3612 | i_gid_write(inode, btrfs_inode_gid(leaf, inode_item)); | |
6ef06d27 | 3613 | btrfs_i_size_write(BTRFS_I(inode), btrfs_inode_size(leaf, inode_item)); |
5f39d397 | 3614 | |
a937b979 DS |
3615 | inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->atime); |
3616 | inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->atime); | |
5f39d397 | 3617 | |
a937b979 DS |
3618 | inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->mtime); |
3619 | inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->mtime); | |
5f39d397 | 3620 | |
a937b979 DS |
3621 | inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->ctime); |
3622 | inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->ctime); | |
5f39d397 | 3623 | |
9cc97d64 | 3624 | BTRFS_I(inode)->i_otime.tv_sec = |
3625 | btrfs_timespec_sec(leaf, &inode_item->otime); | |
3626 | BTRFS_I(inode)->i_otime.tv_nsec = | |
3627 | btrfs_timespec_nsec(leaf, &inode_item->otime); | |
5f39d397 | 3628 | |
a76a3cd4 | 3629 | inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item)); |
e02119d5 | 3630 | BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item); |
5dc562c5 JB |
3631 | BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item); |
3632 | ||
6e17d30b YD |
3633 | inode->i_version = btrfs_inode_sequence(leaf, inode_item); |
3634 | inode->i_generation = BTRFS_I(inode)->generation; | |
3635 | inode->i_rdev = 0; | |
3636 | rdev = btrfs_inode_rdev(leaf, inode_item); | |
3637 | ||
3638 | BTRFS_I(inode)->index_cnt = (u64)-1; | |
3639 | BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item); | |
3640 | ||
3641 | cache_index: | |
5dc562c5 JB |
3642 | /* |
3643 | * If we were modified in the current generation and evicted from memory | |
3644 | * and then re-read we need to do a full sync since we don't have any | |
3645 | * idea about which extents were modified before we were evicted from | |
3646 | * cache. | |
6e17d30b YD |
3647 | * |
3648 | * This is required for both inode re-read from disk and delayed inode | |
3649 | * in delayed_nodes_tree. | |
5dc562c5 | 3650 | */ |
0b246afa | 3651 | if (BTRFS_I(inode)->last_trans == fs_info->generation) |
5dc562c5 JB |
3652 | set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, |
3653 | &BTRFS_I(inode)->runtime_flags); | |
3654 | ||
bde6c242 FM |
3655 | /* |
3656 | * We don't persist the id of the transaction where an unlink operation | |
3657 | * against the inode was last made. So here we assume the inode might | |
3658 | * have been evicted, and therefore the exact value of last_unlink_trans | |
3659 | * lost, and set it to last_trans to avoid metadata inconsistencies | |
3660 | * between the inode and its parent if the inode is fsync'ed and the log | |
3661 | * replayed. For example, in the scenario: | |
3662 | * | |
3663 | * touch mydir/foo | |
3664 | * ln mydir/foo mydir/bar | |
3665 | * sync | |
3666 | * unlink mydir/bar | |
3667 | * echo 2 > /proc/sys/vm/drop_caches # evicts inode | |
3668 | * xfs_io -c fsync mydir/foo | |
3669 | * <power failure> | |
3670 | * mount fs, triggers fsync log replay | |
3671 | * | |
3672 | * We must make sure that when we fsync our inode foo we also log its | |
3673 | * parent inode, otherwise after log replay the parent still has the | |
3674 | * dentry with the "bar" name but our inode foo has a link count of 1 | |
3675 | * and doesn't have an inode ref with the name "bar" anymore. | |
3676 | * | |
3677 | * Setting last_unlink_trans to last_trans is a pessimistic approach, | |
01327610 | 3678 | * but it guarantees correctness at the expense of occasional full |
bde6c242 FM |
3679 | * transaction commits on fsync if our inode is a directory, or if our |
3680 | * inode is not a directory, logging its parent unnecessarily. | |
3681 | */ | |
3682 | BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans; | |
3683 | ||
67de1176 MX |
3684 | path->slots[0]++; |
3685 | if (inode->i_nlink != 1 || | |
3686 | path->slots[0] >= btrfs_header_nritems(leaf)) | |
3687 | goto cache_acl; | |
3688 | ||
3689 | btrfs_item_key_to_cpu(leaf, &location, path->slots[0]); | |
4a0cc7ca | 3690 | if (location.objectid != btrfs_ino(BTRFS_I(inode))) |
67de1176 MX |
3691 | goto cache_acl; |
3692 | ||
3693 | ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); | |
3694 | if (location.type == BTRFS_INODE_REF_KEY) { | |
3695 | struct btrfs_inode_ref *ref; | |
3696 | ||
3697 | ref = (struct btrfs_inode_ref *)ptr; | |
3698 | BTRFS_I(inode)->dir_index = btrfs_inode_ref_index(leaf, ref); | |
3699 | } else if (location.type == BTRFS_INODE_EXTREF_KEY) { | |
3700 | struct btrfs_inode_extref *extref; | |
3701 | ||
3702 | extref = (struct btrfs_inode_extref *)ptr; | |
3703 | BTRFS_I(inode)->dir_index = btrfs_inode_extref_index(leaf, | |
3704 | extref); | |
3705 | } | |
2f7e33d4 | 3706 | cache_acl: |
46a53cca CM |
3707 | /* |
3708 | * try to precache a NULL acl entry for files that don't have | |
3709 | * any xattrs or acls | |
3710 | */ | |
33345d01 | 3711 | maybe_acls = acls_after_inode_item(leaf, path->slots[0], |
f85b7379 | 3712 | btrfs_ino(BTRFS_I(inode)), &first_xattr_slot); |
63541927 FDBM |
3713 | if (first_xattr_slot != -1) { |
3714 | path->slots[0] = first_xattr_slot; | |
3715 | ret = btrfs_load_inode_props(inode, path); | |
3716 | if (ret) | |
0b246afa | 3717 | btrfs_err(fs_info, |
351fd353 | 3718 | "error loading props for ino %llu (root %llu): %d", |
4a0cc7ca | 3719 | btrfs_ino(BTRFS_I(inode)), |
63541927 FDBM |
3720 | root->root_key.objectid, ret); |
3721 | } | |
3722 | btrfs_free_path(path); | |
3723 | ||
72c04902 AV |
3724 | if (!maybe_acls) |
3725 | cache_no_acl(inode); | |
46a53cca | 3726 | |
39279cc3 | 3727 | switch (inode->i_mode & S_IFMT) { |
39279cc3 CM |
3728 | case S_IFREG: |
3729 | inode->i_mapping->a_ops = &btrfs_aops; | |
d1310b2e | 3730 | BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; |
39279cc3 CM |
3731 | inode->i_fop = &btrfs_file_operations; |
3732 | inode->i_op = &btrfs_file_inode_operations; | |
3733 | break; | |
3734 | case S_IFDIR: | |
3735 | inode->i_fop = &btrfs_dir_file_operations; | |
67ade058 | 3736 | inode->i_op = &btrfs_dir_inode_operations; |
39279cc3 CM |
3737 | break; |
3738 | case S_IFLNK: | |
3739 | inode->i_op = &btrfs_symlink_inode_operations; | |
21fc61c7 | 3740 | inode_nohighmem(inode); |
39279cc3 CM |
3741 | inode->i_mapping->a_ops = &btrfs_symlink_aops; |
3742 | break; | |
618e21d5 | 3743 | default: |
0279b4cd | 3744 | inode->i_op = &btrfs_special_inode_operations; |
618e21d5 JB |
3745 | init_special_inode(inode, inode->i_mode, rdev); |
3746 | break; | |
39279cc3 | 3747 | } |
6cbff00f CH |
3748 | |
3749 | btrfs_update_iflags(inode); | |
67710892 | 3750 | return 0; |
39279cc3 CM |
3751 | |
3752 | make_bad: | |
39279cc3 | 3753 | btrfs_free_path(path); |
39279cc3 | 3754 | make_bad_inode(inode); |
67710892 | 3755 | return ret; |
39279cc3 CM |
3756 | } |
3757 | ||
d352ac68 CM |
3758 | /* |
3759 | * given a leaf and an inode, copy the inode fields into the leaf | |
3760 | */ | |
e02119d5 CM |
3761 | static void fill_inode_item(struct btrfs_trans_handle *trans, |
3762 | struct extent_buffer *leaf, | |
5f39d397 | 3763 | struct btrfs_inode_item *item, |
39279cc3 CM |
3764 | struct inode *inode) |
3765 | { | |
51fab693 LB |
3766 | struct btrfs_map_token token; |
3767 | ||
3768 | btrfs_init_map_token(&token); | |
5f39d397 | 3769 | |
51fab693 LB |
3770 | btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token); |
3771 | btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token); | |
3772 | btrfs_set_token_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size, | |
3773 | &token); | |
3774 | btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token); | |
3775 | btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token); | |
5f39d397 | 3776 | |
a937b979 | 3777 | btrfs_set_token_timespec_sec(leaf, &item->atime, |
51fab693 | 3778 | inode->i_atime.tv_sec, &token); |
a937b979 | 3779 | btrfs_set_token_timespec_nsec(leaf, &item->atime, |
51fab693 | 3780 | inode->i_atime.tv_nsec, &token); |
5f39d397 | 3781 | |
a937b979 | 3782 | btrfs_set_token_timespec_sec(leaf, &item->mtime, |
51fab693 | 3783 | inode->i_mtime.tv_sec, &token); |
a937b979 | 3784 | btrfs_set_token_timespec_nsec(leaf, &item->mtime, |
51fab693 | 3785 | inode->i_mtime.tv_nsec, &token); |
5f39d397 | 3786 | |
a937b979 | 3787 | btrfs_set_token_timespec_sec(leaf, &item->ctime, |
51fab693 | 3788 | inode->i_ctime.tv_sec, &token); |
a937b979 | 3789 | btrfs_set_token_timespec_nsec(leaf, &item->ctime, |
51fab693 | 3790 | inode->i_ctime.tv_nsec, &token); |
5f39d397 | 3791 | |
9cc97d64 | 3792 | btrfs_set_token_timespec_sec(leaf, &item->otime, |
3793 | BTRFS_I(inode)->i_otime.tv_sec, &token); | |
3794 | btrfs_set_token_timespec_nsec(leaf, &item->otime, | |
3795 | BTRFS_I(inode)->i_otime.tv_nsec, &token); | |
3796 | ||
51fab693 LB |
3797 | btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode), |
3798 | &token); | |
3799 | btrfs_set_token_inode_generation(leaf, item, BTRFS_I(inode)->generation, | |
3800 | &token); | |
3801 | btrfs_set_token_inode_sequence(leaf, item, inode->i_version, &token); | |
3802 | btrfs_set_token_inode_transid(leaf, item, trans->transid, &token); | |
3803 | btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token); | |
3804 | btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token); | |
3805 | btrfs_set_token_inode_block_group(leaf, item, 0, &token); | |
39279cc3 CM |
3806 | } |
3807 | ||
d352ac68 CM |
3808 | /* |
3809 | * copy everything in the in-memory inode into the btree. | |
3810 | */ | |
2115133f | 3811 | static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans, |
d397712b | 3812 | struct btrfs_root *root, struct inode *inode) |
39279cc3 CM |
3813 | { |
3814 | struct btrfs_inode_item *inode_item; | |
3815 | struct btrfs_path *path; | |
5f39d397 | 3816 | struct extent_buffer *leaf; |
39279cc3 CM |
3817 | int ret; |
3818 | ||
3819 | path = btrfs_alloc_path(); | |
16cdcec7 MX |
3820 | if (!path) |
3821 | return -ENOMEM; | |
3822 | ||
b9473439 | 3823 | path->leave_spinning = 1; |
16cdcec7 MX |
3824 | ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location, |
3825 | 1); | |
39279cc3 CM |
3826 | if (ret) { |
3827 | if (ret > 0) | |
3828 | ret = -ENOENT; | |
3829 | goto failed; | |
3830 | } | |
3831 | ||
5f39d397 CM |
3832 | leaf = path->nodes[0]; |
3833 | inode_item = btrfs_item_ptr(leaf, path->slots[0], | |
16cdcec7 | 3834 | struct btrfs_inode_item); |
39279cc3 | 3835 | |
e02119d5 | 3836 | fill_inode_item(trans, leaf, inode_item, inode); |
5f39d397 | 3837 | btrfs_mark_buffer_dirty(leaf); |
15ee9bc7 | 3838 | btrfs_set_inode_last_trans(trans, inode); |
39279cc3 CM |
3839 | ret = 0; |
3840 | failed: | |
39279cc3 CM |
3841 | btrfs_free_path(path); |
3842 | return ret; | |
3843 | } | |
3844 | ||
2115133f CM |
3845 | /* |
3846 | * copy everything in the in-memory inode into the btree. | |
3847 | */ | |
3848 | noinline int btrfs_update_inode(struct btrfs_trans_handle *trans, | |
3849 | struct btrfs_root *root, struct inode *inode) | |
3850 | { | |
0b246afa | 3851 | struct btrfs_fs_info *fs_info = root->fs_info; |
2115133f CM |
3852 | int ret; |
3853 | ||
3854 | /* | |
3855 | * If the inode is a free space inode, we can deadlock during commit | |
3856 | * if we put it into the delayed code. | |
3857 | * | |
3858 | * The data relocation inode should also be directly updated | |
3859 | * without delay | |
3860 | */ | |
70ddc553 | 3861 | if (!btrfs_is_free_space_inode(BTRFS_I(inode)) |
1d52c78a | 3862 | && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID |
0b246afa | 3863 | && !test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) { |
8ea05e3a AB |
3864 | btrfs_update_root_times(trans, root); |
3865 | ||
2115133f CM |
3866 | ret = btrfs_delayed_update_inode(trans, root, inode); |
3867 | if (!ret) | |
3868 | btrfs_set_inode_last_trans(trans, inode); | |
3869 | return ret; | |
3870 | } | |
3871 | ||
3872 | return btrfs_update_inode_item(trans, root, inode); | |
3873 | } | |
3874 | ||
be6aef60 JB |
3875 | noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans, |
3876 | struct btrfs_root *root, | |
3877 | struct inode *inode) | |
2115133f CM |
3878 | { |
3879 | int ret; | |
3880 | ||
3881 | ret = btrfs_update_inode(trans, root, inode); | |
3882 | if (ret == -ENOSPC) | |
3883 | return btrfs_update_inode_item(trans, root, inode); | |
3884 | return ret; | |
3885 | } | |
3886 | ||
d352ac68 CM |
3887 | /* |
3888 | * unlink helper that gets used here in inode.c and in the tree logging | |
3889 | * recovery code. It removes a link in a directory with a given name, and |
3890 | * also drops the back refs in the inode to the directory | |
3891 | */ | |
92986796 AV |
3892 | static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans, |
3893 | struct btrfs_root *root, | |
4ec5934e NB |
3894 | struct btrfs_inode *dir, |
3895 | struct btrfs_inode *inode, | |
92986796 | 3896 | const char *name, int name_len) |
39279cc3 | 3897 | { |
0b246afa | 3898 | struct btrfs_fs_info *fs_info = root->fs_info; |
39279cc3 | 3899 | struct btrfs_path *path; |
39279cc3 | 3900 | int ret = 0; |
5f39d397 | 3901 | struct extent_buffer *leaf; |
39279cc3 | 3902 | struct btrfs_dir_item *di; |
5f39d397 | 3903 | struct btrfs_key key; |
aec7477b | 3904 | u64 index; |
4ec5934e NB |
3905 | u64 ino = btrfs_ino(inode); |
3906 | u64 dir_ino = btrfs_ino(dir); | |
39279cc3 CM |
3907 | |
3908 | path = btrfs_alloc_path(); | |
54aa1f4d CM |
3909 | if (!path) { |
3910 | ret = -ENOMEM; | |
554233a6 | 3911 | goto out; |
54aa1f4d CM |
3912 | } |
3913 | ||
b9473439 | 3914 | path->leave_spinning = 1; |
33345d01 | 3915 | di = btrfs_lookup_dir_item(trans, root, path, dir_ino, |
39279cc3 CM |
3916 | name, name_len, -1); |
3917 | if (IS_ERR(di)) { | |
3918 | ret = PTR_ERR(di); | |
3919 | goto err; | |
3920 | } | |
3921 | if (!di) { | |
3922 | ret = -ENOENT; | |
3923 | goto err; | |
3924 | } | |
5f39d397 CM |
3925 | leaf = path->nodes[0]; |
3926 | btrfs_dir_item_key_to_cpu(leaf, di, &key); | |
39279cc3 | 3927 | ret = btrfs_delete_one_dir_name(trans, root, path, di); |
54aa1f4d CM |
3928 | if (ret) |
3929 | goto err; | |
b3b4aa74 | 3930 | btrfs_release_path(path); |
39279cc3 | 3931 | |
67de1176 MX |
3932 | /* |
3933 | * If we don't have a dir index, we have to get it by looking up |
3934 | * the inode ref; since we have the inode ref in hand at that point, |
3935 | * we remove it directly and delayed deletion is unnecessary. |
3936 | * |
3937 | * But if we do have a dir index, there is no need to search for the |
3938 | * inode ref to get it. Since the inode ref is close to the inode |
3939 | * item, it is better to delay its deletion and do it when we |
3940 | * update the inode item. |
3941 | */ | |
4ec5934e NB |
3942 | if (inode->dir_index) { |
3943 | ret = btrfs_delayed_delete_inode_ref(inode); | |
67de1176 | 3944 | if (!ret) { |
4ec5934e | 3945 | index = inode->dir_index; |
67de1176 MX |
3946 | goto skip_backref; |
3947 | } | |
3948 | } | |
3949 | ||
33345d01 LZ |
3950 | ret = btrfs_del_inode_ref(trans, root, name, name_len, ino, |
3951 | dir_ino, &index); | |
aec7477b | 3952 | if (ret) { |
0b246afa | 3953 | btrfs_info(fs_info, |
c2cf52eb | 3954 | "failed to delete reference to %.*s, inode %llu parent %llu", |
c1c9ff7c | 3955 | name_len, name, ino, dir_ino); |
66642832 | 3956 | btrfs_abort_transaction(trans, ret); |
aec7477b JB |
3957 | goto err; |
3958 | } | |
67de1176 | 3959 | skip_backref: |
4ec5934e | 3960 | ret = btrfs_delete_delayed_dir_index(trans, fs_info, dir, index); |
79787eaa | 3961 | if (ret) { |
66642832 | 3962 | btrfs_abort_transaction(trans, ret); |
39279cc3 | 3963 | goto err; |
79787eaa | 3964 | } |
39279cc3 | 3965 | |
4ec5934e NB |
3966 | ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len, inode, |
3967 | dir_ino); | |
79787eaa | 3968 | if (ret != 0 && ret != -ENOENT) { |
66642832 | 3969 | btrfs_abort_transaction(trans, ret); |
79787eaa JM |
3970 | goto err; |
3971 | } | |
e02119d5 | 3972 | |
4ec5934e NB |
3973 | ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len, dir, |
3974 | index); | |
6418c961 CM |
3975 | if (ret == -ENOENT) |
3976 | ret = 0; | |
d4e3991b | 3977 | else if (ret) |
66642832 | 3978 | btrfs_abort_transaction(trans, ret); |
39279cc3 CM |
3979 | err: |
3980 | btrfs_free_path(path); | |
e02119d5 CM |
3981 | if (ret) |
3982 | goto out; | |
3983 | ||
6ef06d27 | 3984 | btrfs_i_size_write(dir, dir->vfs_inode.i_size - name_len * 2); |
4ec5934e NB |
3985 | inode_inc_iversion(&inode->vfs_inode); |
3986 | inode_inc_iversion(&dir->vfs_inode); | |
3987 | inode->vfs_inode.i_ctime = dir->vfs_inode.i_mtime = | |
3988 | dir->vfs_inode.i_ctime = current_time(&inode->vfs_inode); | |
3989 | ret = btrfs_update_inode(trans, root, &dir->vfs_inode); | |
e02119d5 | 3990 | out: |
39279cc3 CM |
3991 | return ret; |
3992 | } | |
3993 | ||
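One accounting detail in __btrfs_unlink_inode() above is easy to miss: the parent directory's i_size is reduced by name_len * 2 because each directory entry is stored twice, once as a DIR_ITEM keyed by the name hash and once as a DIR_INDEX keyed by its sequence number, and both copies account the name length into the directory's size. Unlinking a three-byte name such as "foo" therefore shrinks the directory's i_size by 6; link and create do the same bookkeeping in the opposite direction.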
92986796 AV |
3994 | int btrfs_unlink_inode(struct btrfs_trans_handle *trans, |
3995 | struct btrfs_root *root, | |
4ec5934e | 3996 | struct btrfs_inode *dir, struct btrfs_inode *inode, |
92986796 AV |
3997 | const char *name, int name_len) |
3998 | { | |
3999 | int ret; | |
4000 | ret = __btrfs_unlink_inode(trans, root, dir, inode, name, name_len); | |
4001 | if (!ret) { | |
4ec5934e NB |
4002 | drop_nlink(&inode->vfs_inode); |
4003 | ret = btrfs_update_inode(trans, root, &inode->vfs_inode); | |
92986796 AV |
4004 | } |
4005 | return ret; | |
4006 | } | |
39279cc3 | 4007 | |
a22285a6 YZ |
4008 | /* |
4009 | * helper to start transaction for unlink and rmdir. | |
4010 | * | |
d52be818 JB |
4011 | * unlink and rmdir are special in btrfs: they do not always free space, so |
4012 | * if we cannot make our reservations the normal way, try and see if there is |
4013 | * plenty of slack room in the global reserve to migrate; otherwise we cannot |
4014 | * allow the unlink to occur. | |
a22285a6 | 4015 | */ |
d52be818 | 4016 | static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir) |
4df27c4d | 4017 | { |
a22285a6 | 4018 | struct btrfs_root *root = BTRFS_I(dir)->root; |
4df27c4d | 4019 | |
e70bea5f JB |
4020 | /* |
4021 | * 1 for the possible orphan item | |
4022 | * 1 for the dir item | |
4023 | * 1 for the dir index | |
4024 | * 1 for the inode ref | |
e70bea5f JB |
4025 | * 1 for the inode |
4026 | */ | |
8eab77ff | 4027 | return btrfs_start_transaction_fallback_global_rsv(root, 5, 5); |
a22285a6 YZ |
4028 | } |
4029 | ||
4030 | static int btrfs_unlink(struct inode *dir, struct dentry *dentry) | |
4031 | { | |
4032 | struct btrfs_root *root = BTRFS_I(dir)->root; | |
4033 | struct btrfs_trans_handle *trans; | |
2b0143b5 | 4034 | struct inode *inode = d_inode(dentry); |
a22285a6 | 4035 | int ret; |
a22285a6 | 4036 | |
d52be818 | 4037 | trans = __unlink_start_trans(dir); |
a22285a6 YZ |
4038 | if (IS_ERR(trans)) |
4039 | return PTR_ERR(trans); | |
5f39d397 | 4040 | |
4ec5934e NB |
4041 | btrfs_record_unlink_dir(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)), |
4042 | 0); | |
12fcfd22 | 4043 | |
4ec5934e NB |
4044 | ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir), |
4045 | BTRFS_I(d_inode(dentry)), dentry->d_name.name, | |
4046 | dentry->d_name.len); | |
b532402e TI |
4047 | if (ret) |
4048 | goto out; | |
7b128766 | 4049 | |
a22285a6 | 4050 | if (inode->i_nlink == 0) { |
73f2e545 | 4051 | ret = btrfs_orphan_add(trans, BTRFS_I(inode)); |
b532402e TI |
4052 | if (ret) |
4053 | goto out; | |
a22285a6 | 4054 | } |
7b128766 | 4055 | |
b532402e | 4056 | out: |
3a45bb20 | 4057 | btrfs_end_transaction(trans); |
2ff7e61e | 4058 | btrfs_btree_balance_dirty(root->fs_info); |
39279cc3 CM |
4059 | return ret; |
4060 | } | |
4061 | ||
4df27c4d YZ |
4062 | int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, |
4063 | struct btrfs_root *root, | |
4064 | struct inode *dir, u64 objectid, | |
4065 | const char *name, int name_len) | |
4066 | { | |
0b246afa | 4067 | struct btrfs_fs_info *fs_info = root->fs_info; |
4df27c4d YZ |
4068 | struct btrfs_path *path; |
4069 | struct extent_buffer *leaf; | |
4070 | struct btrfs_dir_item *di; | |
4071 | struct btrfs_key key; | |
4072 | u64 index; | |
4073 | int ret; | |
4a0cc7ca | 4074 | u64 dir_ino = btrfs_ino(BTRFS_I(dir)); |
4df27c4d YZ |
4075 | |
4076 | path = btrfs_alloc_path(); | |
4077 | if (!path) | |
4078 | return -ENOMEM; | |
4079 | ||
33345d01 | 4080 | di = btrfs_lookup_dir_item(trans, root, path, dir_ino, |
4df27c4d | 4081 | name, name_len, -1); |
79787eaa JM |
4082 | if (IS_ERR_OR_NULL(di)) { |
4083 | if (!di) | |
4084 | ret = -ENOENT; | |
4085 | else | |
4086 | ret = PTR_ERR(di); | |
4087 | goto out; | |
4088 | } | |
4df27c4d YZ |
4089 | |
4090 | leaf = path->nodes[0]; | |
4091 | btrfs_dir_item_key_to_cpu(leaf, di, &key); | |
4092 | WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid); | |
4093 | ret = btrfs_delete_one_dir_name(trans, root, path, di); | |
79787eaa | 4094 | if (ret) { |
66642832 | 4095 | btrfs_abort_transaction(trans, ret); |
79787eaa JM |
4096 | goto out; |
4097 | } | |
b3b4aa74 | 4098 | btrfs_release_path(path); |
4df27c4d | 4099 | |
0b246afa JM |
4100 | ret = btrfs_del_root_ref(trans, fs_info, objectid, |
4101 | root->root_key.objectid, dir_ino, | |
4102 | &index, name, name_len); | |
4df27c4d | 4103 | if (ret < 0) { |
79787eaa | 4104 | if (ret != -ENOENT) { |
66642832 | 4105 | btrfs_abort_transaction(trans, ret); |
79787eaa JM |
4106 | goto out; |
4107 | } | |
33345d01 | 4108 | di = btrfs_search_dir_index_item(root, path, dir_ino, |
4df27c4d | 4109 | name, name_len); |
79787eaa JM |
4110 | if (IS_ERR_OR_NULL(di)) { |
4111 | if (!di) | |
4112 | ret = -ENOENT; | |
4113 | else | |
4114 | ret = PTR_ERR(di); | |
66642832 | 4115 | btrfs_abort_transaction(trans, ret); |
79787eaa JM |
4116 | goto out; |
4117 | } | |
4df27c4d YZ |
4118 | |
4119 | leaf = path->nodes[0]; | |
4120 | btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); | |
b3b4aa74 | 4121 | btrfs_release_path(path); |
4df27c4d YZ |
4122 | index = key.offset; |
4123 | } | |
945d8962 | 4124 | btrfs_release_path(path); |
4df27c4d | 4125 | |
e67bbbb9 | 4126 | ret = btrfs_delete_delayed_dir_index(trans, fs_info, BTRFS_I(dir), index); |
79787eaa | 4127 | if (ret) { |
66642832 | 4128 | btrfs_abort_transaction(trans, ret); |
79787eaa JM |
4129 | goto out; |
4130 | } | |
4df27c4d | 4131 | |
6ef06d27 | 4132 | btrfs_i_size_write(BTRFS_I(dir), dir->i_size - name_len * 2); |
0c4d2d95 | 4133 | inode_inc_iversion(dir); |
c2050a45 | 4134 | dir->i_mtime = dir->i_ctime = current_time(dir); |
5a24e84c | 4135 | ret = btrfs_update_inode_fallback(trans, root, dir); |
79787eaa | 4136 | if (ret) |
66642832 | 4137 | btrfs_abort_transaction(trans, ret); |
79787eaa | 4138 | out: |
71d7aed0 | 4139 | btrfs_free_path(path); |
79787eaa | 4140 | return ret; |
4df27c4d YZ |
4141 | } |
4142 | ||
39279cc3 CM |
4143 | static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) |
4144 | { | |
2b0143b5 | 4145 | struct inode *inode = d_inode(dentry); |
1832a6d5 | 4146 | int err = 0; |
39279cc3 | 4147 | struct btrfs_root *root = BTRFS_I(dir)->root; |
39279cc3 | 4148 | struct btrfs_trans_handle *trans; |
44f714da | 4149 | u64 last_unlink_trans; |
39279cc3 | 4150 | |
b3ae244e | 4151 | if (inode->i_size > BTRFS_EMPTY_DIR_SIZE) |
134d4512 | 4152 | return -ENOTEMPTY; |
4a0cc7ca | 4153 | if (btrfs_ino(BTRFS_I(inode)) == BTRFS_FIRST_FREE_OBJECTID) |
b3ae244e | 4154 | return -EPERM; |
134d4512 | 4155 | |
d52be818 | 4156 | trans = __unlink_start_trans(dir); |
a22285a6 | 4157 | if (IS_ERR(trans)) |
5df6a9f6 | 4158 | return PTR_ERR(trans); |
5df6a9f6 | 4159 | |
4a0cc7ca | 4160 | if (unlikely(btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { |
4df27c4d YZ |
4161 | err = btrfs_unlink_subvol(trans, root, dir, |
4162 | BTRFS_I(inode)->location.objectid, | |
4163 | dentry->d_name.name, | |
4164 | dentry->d_name.len); | |
4165 | goto out; | |
4166 | } | |
4167 | ||
73f2e545 | 4168 | err = btrfs_orphan_add(trans, BTRFS_I(inode)); |
7b128766 | 4169 | if (err) |
4df27c4d | 4170 | goto out; |
7b128766 | 4171 | |
44f714da FM |
4172 | last_unlink_trans = BTRFS_I(inode)->last_unlink_trans; |
4173 | ||
39279cc3 | 4174 | /* now the directory is empty */ |
4ec5934e NB |
4175 | err = btrfs_unlink_inode(trans, root, BTRFS_I(dir), |
4176 | BTRFS_I(d_inode(dentry)), dentry->d_name.name, | |
4177 | dentry->d_name.len); | |
44f714da | 4178 | if (!err) { |
6ef06d27 | 4179 | btrfs_i_size_write(BTRFS_I(inode), 0); |
44f714da FM |
4180 | /* |
4181 | * Propagate the last_unlink_trans value of the deleted dir to | |
4182 | * its parent directory. This is to prevent an unrecoverable | |
4183 | * log tree in the case we do something like this: | |
4184 | * 1) create dir foo | |
4185 | * 2) create snapshot under dir foo | |
4186 | * 3) delete the snapshot | |
4187 | * 4) rmdir foo | |
4188 | * 5) mkdir foo | |
4189 | * 6) fsync foo or some file inside foo | |
4190 | */ | |
4191 | if (last_unlink_trans >= trans->transid) | |
4192 | BTRFS_I(dir)->last_unlink_trans = last_unlink_trans; | |
4193 | } | |
4df27c4d | 4194 | out: |
3a45bb20 | 4195 | btrfs_end_transaction(trans); |
2ff7e61e | 4196 | btrfs_btree_balance_dirty(root->fs_info); |
3954401f | 4197 | |
39279cc3 CM |
4198 | return err; |
4199 | } | |
4200 | ||
28f75a0e CM |
4201 | static int truncate_space_check(struct btrfs_trans_handle *trans, |
4202 | struct btrfs_root *root, | |
4203 | u64 bytes_deleted) | |
4204 | { | |
0b246afa | 4205 | struct btrfs_fs_info *fs_info = root->fs_info; |
28f75a0e CM |
4206 | int ret; |
4207 | ||
dc95f7bf JB |
4208 | /* |
4209 | * This is only used to apply pressure to the enospc system; we don't | 
4210 | * intend to use this reservation at all. | |
4211 | */ | |
2ff7e61e | 4212 | bytes_deleted = btrfs_csum_bytes_to_leaves(fs_info, bytes_deleted); |
0b246afa JM |
4213 | bytes_deleted *= fs_info->nodesize; |
4214 | ret = btrfs_block_rsv_add(root, &fs_info->trans_block_rsv, | |
28f75a0e | 4215 | bytes_deleted, BTRFS_RESERVE_NO_FLUSH); |
dc95f7bf | 4216 | if (!ret) { |
0b246afa | 4217 | trace_btrfs_space_reservation(fs_info, "transaction", |
dc95f7bf JB |
4218 | trans->transid, |
4219 | bytes_deleted, 1); | |
28f75a0e | 4220 | trans->bytes_reserved += bytes_deleted; |
dc95f7bf | 4221 | } |
28f75a0e CM |
4222 | return ret; |
4223 | ||
4224 | } | |
4225 | ||
0305cd5f FM |
4226 | static int truncate_inline_extent(struct inode *inode, |
4227 | struct btrfs_path *path, | |
4228 | struct btrfs_key *found_key, | |
4229 | const u64 item_end, | |
4230 | const u64 new_size) | |
4231 | { | |
4232 | struct extent_buffer *leaf = path->nodes[0]; | |
4233 | int slot = path->slots[0]; | |
4234 | struct btrfs_file_extent_item *fi; | |
4235 | u32 size = (u32)(new_size - found_key->offset); | |
4236 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
4237 | ||
4238 | fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); | |
4239 | ||
4240 | if (btrfs_file_extent_compression(leaf, fi) != BTRFS_COMPRESS_NONE) { | |
4241 | loff_t offset = new_size; | |
09cbfeaf | 4242 | loff_t page_end = ALIGN(offset, PAGE_SIZE); |
0305cd5f FM |
4243 | |
4244 | /* | |
4245 | * Zero out the remainder of the last page of our inline extent, | 
4246 | * instead of directly truncating our inline extent here - that | |
4247 | * would be much more complex (decompressing all the data, then | |
4248 | * compressing the truncated data, which might be bigger than | |
4249 | * the size of the inline extent, resize the extent, etc). | |
4250 | * We release the path because to get the page we might need to | |
4251 | * read the extent item from disk (data not in the page cache). | |
4252 | */ | |
4253 | btrfs_release_path(path); | |
9703fefe CR |
4254 | return btrfs_truncate_block(inode, offset, page_end - offset, |
4255 | 0); | |
0305cd5f FM |
4256 | } |
4257 | ||
4258 | btrfs_set_file_extent_ram_bytes(leaf, fi, size); | |
4259 | size = btrfs_file_extent_calc_inline_size(size); | |
2ff7e61e | 4260 | btrfs_truncate_item(root->fs_info, path, size, 1); |
0305cd5f FM |
4261 | |
4262 | if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) | |
4263 | inode_sub_bytes(inode, item_end + 1 - new_size); | |
4264 | ||
4265 | return 0; | |
4266 | } | |
4267 | ||
39279cc3 CM |
4268 | /* |
4269 | * this can truncate away extent items, csum items and directory items. | |
4270 | * It starts at a high offset and removes keys until it can't find | |
d352ac68 | 4271 | * any higher than new_size |
39279cc3 CM |
4272 | * |
4273 | * csum items that cross the new i_size are truncated to the new size | |
4274 | * as well. | |
7b128766 JB |
4275 | * |
4276 | * min_type is the minimum key type to truncate down to. If set to 0, this | |
4277 | * will kill all the items on this inode, including the INODE_ITEM_KEY. | |
39279cc3 | 4278 | */ |
8082510e YZ |
4279 | int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, |
4280 | struct btrfs_root *root, | |
4281 | struct inode *inode, | |
4282 | u64 new_size, u32 min_type) | |
39279cc3 | 4283 | { |
0b246afa | 4284 | struct btrfs_fs_info *fs_info = root->fs_info; |
39279cc3 | 4285 | struct btrfs_path *path; |
5f39d397 | 4286 | struct extent_buffer *leaf; |
39279cc3 | 4287 | struct btrfs_file_extent_item *fi; |
8082510e YZ |
4288 | struct btrfs_key key; |
4289 | struct btrfs_key found_key; | |
39279cc3 | 4290 | u64 extent_start = 0; |
db94535d | 4291 | u64 extent_num_bytes = 0; |
5d4f98a2 | 4292 | u64 extent_offset = 0; |
39279cc3 | 4293 | u64 item_end = 0; |
c1aa4575 | 4294 | u64 last_size = new_size; |
8082510e | 4295 | u32 found_type = (u8)-1; |
39279cc3 CM |
4296 | int found_extent; |
4297 | int del_item; | |
85e21bac CM |
4298 | int pending_del_nr = 0; |
4299 | int pending_del_slot = 0; | |
179e29e4 | 4300 | int extent_type = -1; |
8082510e YZ |
4301 | int ret; |
4302 | int err = 0; | |
4a0cc7ca | 4303 | u64 ino = btrfs_ino(BTRFS_I(inode)); |
28ed1345 | 4304 | u64 bytes_deleted = 0; |
1262133b JB |
4305 | bool be_nice = 0; |
4306 | bool should_throttle = 0; | |
28f75a0e | 4307 | bool should_end = 0; |
8082510e YZ |
4308 | |
4309 | BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY); | |
39279cc3 | 4310 | |
28ed1345 CM |
4311 | /* |
4312 | * for non-free space inodes and ref cows, we want to back off from | |
4313 | * time to time | |
4314 | */ | |
70ddc553 | 4315 | if (!btrfs_is_free_space_inode(BTRFS_I(inode)) && |
28ed1345 CM |
4316 | test_bit(BTRFS_ROOT_REF_COWS, &root->state)) |
4317 | be_nice = 1; | |
4318 | ||
0eb0e19c MF |
4319 | path = btrfs_alloc_path(); |
4320 | if (!path) | |
4321 | return -ENOMEM; | |
e4058b54 | 4322 | path->reada = READA_BACK; |
0eb0e19c | 4323 | |
5dc562c5 JB |
4324 | /* |
4325 | * We want to drop from the next block forward in case this new size is | |
4326 | * not block aligned since we will be keeping the last block of the | |
4327 | * extent just the way it is. | |
4328 | */ | |
27cdeb70 | 4329 | if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) || |
0b246afa | 4330 | root == fs_info->tree_root) |
dcdbc059 | 4331 | btrfs_drop_extent_cache(BTRFS_I(inode), ALIGN(new_size, |
0b246afa | 4332 | fs_info->sectorsize), |
da17066c | 4333 | (u64)-1, 0); |
8082510e | 4334 | |
16cdcec7 MX |
4335 | /* |
4336 | * This function is also used to drop the items in the log tree before | |
4337 | * we relog the inode, so if root != BTRFS_I(inode)->root, it means | |
4338 | * it is used to drop the logged items. So we shouldn't kill the delayed | 
4339 | * items. | |
4340 | */ | |
4341 | if (min_type == 0 && root == BTRFS_I(inode)->root) | |
4ccb5c72 | 4342 | btrfs_kill_delayed_inode_items(BTRFS_I(inode)); |
16cdcec7 | 4343 | |
33345d01 | 4344 | key.objectid = ino; |
39279cc3 | 4345 | key.offset = (u64)-1; |
5f39d397 CM |
4346 | key.type = (u8)-1; |
4347 | ||
85e21bac | 4348 | search_again: |
28ed1345 CM |
4349 | /* |
4350 | * with a 16K leaf size and 128MB extents, you can actually queue | |
4351 | * up a huge file in a single leaf. Most of the time when | 
4352 | * bytes_deleted is > 0, it will be huge by the time we get here | 
4353 | */ | |
ee22184b | 4354 | if (be_nice && bytes_deleted > SZ_32M) { |
3a45bb20 | 4355 | if (btrfs_should_end_transaction(trans)) { |
28ed1345 CM |
4356 | err = -EAGAIN; |
4357 | goto error; | |
4358 | } | |
4359 | } | |
4360 | ||
4361 | ||
b9473439 | 4362 | path->leave_spinning = 1; |
85e21bac | 4363 | ret = btrfs_search_slot(trans, root, &key, path, -1, 1); |
8082510e YZ |
4364 | if (ret < 0) { |
4365 | err = ret; | |
4366 | goto out; | |
4367 | } | |
d397712b | 4368 | |
85e21bac | 4369 | if (ret > 0) { |
e02119d5 CM |
4370 | /* there are no items in the tree for us to truncate, we're |
4371 | * done | |
4372 | */ | |
8082510e YZ |
4373 | if (path->slots[0] == 0) |
4374 | goto out; | |
85e21bac CM |
4375 | path->slots[0]--; |
4376 | } | |
4377 | ||
d397712b | 4378 | while (1) { |
39279cc3 | 4379 | fi = NULL; |
5f39d397 CM |
4380 | leaf = path->nodes[0]; |
4381 | btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); | |
962a298f | 4382 | found_type = found_key.type; |
39279cc3 | 4383 | |
33345d01 | 4384 | if (found_key.objectid != ino) |
39279cc3 | 4385 | break; |
5f39d397 | 4386 | |
85e21bac | 4387 | if (found_type < min_type) |
39279cc3 CM |
4388 | break; |
4389 | ||
5f39d397 | 4390 | item_end = found_key.offset; |
39279cc3 | 4391 | if (found_type == BTRFS_EXTENT_DATA_KEY) { |
5f39d397 | 4392 | fi = btrfs_item_ptr(leaf, path->slots[0], |
39279cc3 | 4393 | struct btrfs_file_extent_item); |
179e29e4 CM |
4394 | extent_type = btrfs_file_extent_type(leaf, fi); |
4395 | if (extent_type != BTRFS_FILE_EXTENT_INLINE) { | |
5f39d397 | 4396 | item_end += |
db94535d | 4397 | btrfs_file_extent_num_bytes(leaf, fi); |
179e29e4 | 4398 | } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { |
179e29e4 | 4399 | item_end += btrfs_file_extent_inline_len(leaf, |
514ac8ad | 4400 | path->slots[0], fi); |
39279cc3 | 4401 | } |
008630c1 | 4402 | item_end--; |
39279cc3 | 4403 | } |
8082510e YZ |
4404 | if (found_type > min_type) { |
4405 | del_item = 1; | |
4406 | } else { | |
91298eec LB |
4407 | if (item_end < new_size) { |
4408 | /* | |
4409 | * With NO_HOLES mode, for the following mapping | |
4410 | * | |
4411 | * [0-4k][hole][8k-12k] | |
4412 | * | |
4413 | * if truncating isize down to 6k, isize ends up | 
4414 | * being 8k. | 
4415 | */ | |
4416 | if (btrfs_fs_incompat(root->fs_info, NO_HOLES)) | |
4417 | last_size = new_size; | |
b888db2b | 4418 | break; |
91298eec | 4419 | } |
8082510e YZ |
4420 | if (found_key.offset >= new_size) |
4421 | del_item = 1; | |
4422 | else | |
4423 | del_item = 0; | |
39279cc3 | 4424 | } |
39279cc3 | 4425 | found_extent = 0; |
39279cc3 | 4426 | /* FIXME, shrink the extent if the ref count is only 1 */ |
179e29e4 CM |
4427 | if (found_type != BTRFS_EXTENT_DATA_KEY) |
4428 | goto delete; | |
4429 | ||
7f4f6e0a JB |
4430 | if (del_item) |
4431 | last_size = found_key.offset; | |
4432 | else | |
4433 | last_size = new_size; | |
4434 | ||
179e29e4 | 4435 | if (extent_type != BTRFS_FILE_EXTENT_INLINE) { |
39279cc3 | 4436 | u64 num_dec; |
db94535d | 4437 | extent_start = btrfs_file_extent_disk_bytenr(leaf, fi); |
f70a9a6b | 4438 | if (!del_item) { |
db94535d CM |
4439 | u64 orig_num_bytes = |
4440 | btrfs_file_extent_num_bytes(leaf, fi); | |
fda2832f QW |
4441 | extent_num_bytes = ALIGN(new_size - |
4442 | found_key.offset, | |
0b246afa | 4443 | fs_info->sectorsize); |
db94535d CM |
4444 | btrfs_set_file_extent_num_bytes(leaf, fi, |
4445 | extent_num_bytes); | |
4446 | num_dec = (orig_num_bytes - | |
9069218d | 4447 | extent_num_bytes); |
27cdeb70 MX |
4448 | if (test_bit(BTRFS_ROOT_REF_COWS, |
4449 | &root->state) && | |
4450 | extent_start != 0) | |
a76a3cd4 | 4451 | inode_sub_bytes(inode, num_dec); |
5f39d397 | 4452 | btrfs_mark_buffer_dirty(leaf); |
39279cc3 | 4453 | } else { |
db94535d CM |
4454 | extent_num_bytes = |
4455 | btrfs_file_extent_disk_num_bytes(leaf, | |
4456 | fi); | |
5d4f98a2 YZ |
4457 | extent_offset = found_key.offset - |
4458 | btrfs_file_extent_offset(leaf, fi); | |
4459 | ||
39279cc3 | 4460 | /* FIXME blocksize != 4096 */ |
9069218d | 4461 | num_dec = btrfs_file_extent_num_bytes(leaf, fi); |
39279cc3 CM |
4462 | if (extent_start != 0) { |
4463 | found_extent = 1; | |
27cdeb70 MX |
4464 | if (test_bit(BTRFS_ROOT_REF_COWS, |
4465 | &root->state)) | |
a76a3cd4 | 4466 | inode_sub_bytes(inode, num_dec); |
e02119d5 | 4467 | } |
39279cc3 | 4468 | } |
9069218d | 4469 | } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { |
c8b97818 CM |
4470 | /* |
4471 | * we can't truncate inline items that have had | |
4472 | * special encodings | |
4473 | */ | |
4474 | if (!del_item && | |
c8b97818 CM |
4475 | btrfs_file_extent_encryption(leaf, fi) == 0 && |
4476 | btrfs_file_extent_other_encoding(leaf, fi) == 0) { | |
514ac8ad CM |
4477 | |
4478 | /* | |
0305cd5f FM |
4479 | * Need to release path in order to truncate a |
4480 | * compressed extent. So delete any accumulated | |
4481 | * extent items so far. | |
514ac8ad | 4482 | */ |
0305cd5f FM |
4483 | if (btrfs_file_extent_compression(leaf, fi) != |
4484 | BTRFS_COMPRESS_NONE && pending_del_nr) { | |
4485 | err = btrfs_del_items(trans, root, path, | |
4486 | pending_del_slot, | |
4487 | pending_del_nr); | |
4488 | if (err) { | |
4489 | btrfs_abort_transaction(trans, | |
0305cd5f FM |
4490 | err); |
4491 | goto error; | |
4492 | } | |
4493 | pending_del_nr = 0; | |
4494 | } | |
4495 | ||
4496 | err = truncate_inline_extent(inode, path, | |
4497 | &found_key, | |
4498 | item_end, | |
4499 | new_size); | |
4500 | if (err) { | |
66642832 | 4501 | btrfs_abort_transaction(trans, err); |
0305cd5f FM |
4502 | goto error; |
4503 | } | |
27cdeb70 MX |
4504 | } else if (test_bit(BTRFS_ROOT_REF_COWS, |
4505 | &root->state)) { | |
0305cd5f | 4506 | inode_sub_bytes(inode, item_end + 1 - new_size); |
9069218d | 4507 | } |
39279cc3 | 4508 | } |
179e29e4 | 4509 | delete: |
39279cc3 | 4510 | if (del_item) { |
85e21bac CM |
4511 | if (!pending_del_nr) { |
4512 | /* no pending yet, add ourselves */ | |
4513 | pending_del_slot = path->slots[0]; | |
4514 | pending_del_nr = 1; | |
4515 | } else if (pending_del_nr && | |
4516 | path->slots[0] + 1 == pending_del_slot) { | |
4517 | /* hop on the pending chunk */ | |
4518 | pending_del_nr++; | |
4519 | pending_del_slot = path->slots[0]; | |
4520 | } else { | |
d397712b | 4521 | BUG(); |
85e21bac | 4522 | } |
39279cc3 CM |
4523 | } else { |
4524 | break; | |
4525 | } | |
28f75a0e CM |
4526 | should_throttle = 0; |
4527 | ||
27cdeb70 MX |
4528 | if (found_extent && |
4529 | (test_bit(BTRFS_ROOT_REF_COWS, &root->state) || | |
0b246afa | 4530 | root == fs_info->tree_root)) { |
b9473439 | 4531 | btrfs_set_path_blocking(path); |
28ed1345 | 4532 | bytes_deleted += extent_num_bytes; |
2ff7e61e | 4533 | ret = btrfs_free_extent(trans, fs_info, extent_start, |
5d4f98a2 YZ |
4534 | extent_num_bytes, 0, |
4535 | btrfs_header_owner(leaf), | |
b06c4bf5 | 4536 | ino, extent_offset); |
39279cc3 | 4537 | BUG_ON(ret); |
2ff7e61e JM |
4538 | if (btrfs_should_throttle_delayed_refs(trans, fs_info)) |
4539 | btrfs_async_run_delayed_refs(fs_info, | |
dd4b857a WX |
4540 | trans->delayed_ref_updates * 2, |
4541 | trans->transid, 0); | |
28f75a0e CM |
4542 | if (be_nice) { |
4543 | if (truncate_space_check(trans, root, | |
4544 | extent_num_bytes)) { | |
4545 | should_end = 1; | |
4546 | } | |
4547 | if (btrfs_should_throttle_delayed_refs(trans, | |
2ff7e61e | 4548 | fs_info)) |
28f75a0e | 4549 | should_throttle = 1; |
28f75a0e | 4550 | } |
39279cc3 | 4551 | } |
85e21bac | 4552 | |
8082510e YZ |
4553 | if (found_type == BTRFS_INODE_ITEM_KEY) |
4554 | break; | |
4555 | ||
4556 | if (path->slots[0] == 0 || | |
1262133b | 4557 | path->slots[0] != pending_del_slot || |
28f75a0e | 4558 | should_throttle || should_end) { |
8082510e YZ |
4559 | if (pending_del_nr) { |
4560 | ret = btrfs_del_items(trans, root, path, | |
4561 | pending_del_slot, | |
4562 | pending_del_nr); | |
79787eaa | 4563 | if (ret) { |
66642832 | 4564 | btrfs_abort_transaction(trans, ret); |
79787eaa JM |
4565 | goto error; |
4566 | } | |
8082510e YZ |
4567 | pending_del_nr = 0; |
4568 | } | |
b3b4aa74 | 4569 | btrfs_release_path(path); |
28f75a0e | 4570 | if (should_throttle) { |
1262133b JB |
4571 | unsigned long updates = trans->delayed_ref_updates; |
4572 | if (updates) { | |
4573 | trans->delayed_ref_updates = 0; | |
2ff7e61e JM |
4574 | ret = btrfs_run_delayed_refs(trans, |
4575 | fs_info, | |
4576 | updates * 2); | |
1262133b JB |
4577 | if (ret && !err) |
4578 | err = ret; | |
4579 | } | |
4580 | } | |
28f75a0e CM |
4581 | /* |
4582 | * if we failed to refill our space rsv, bail out | |
4583 | * and let the transaction restart | |
4584 | */ | |
4585 | if (should_end) { | |
4586 | err = -EAGAIN; | |
4587 | goto error; | |
4588 | } | |
85e21bac | 4589 | goto search_again; |
8082510e YZ |
4590 | } else { |
4591 | path->slots[0]--; | |
85e21bac | 4592 | } |
39279cc3 | 4593 | } |
8082510e | 4594 | out: |
85e21bac CM |
4595 | if (pending_del_nr) { |
4596 | ret = btrfs_del_items(trans, root, path, pending_del_slot, | |
4597 | pending_del_nr); | |
79787eaa | 4598 | if (ret) |
66642832 | 4599 | btrfs_abort_transaction(trans, ret); |
85e21bac | 4600 | } |
79787eaa | 4601 | error: |
c1aa4575 | 4602 | if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) |
7f4f6e0a | 4603 | btrfs_ordered_update_i_size(inode, last_size, NULL); |
28ed1345 | 4604 | |
39279cc3 | 4605 | btrfs_free_path(path); |
28ed1345 | 4606 | |
19fd2df5 LB |
4607 | if (err == 0) { |
4608 | /* only inline file may have last_size != new_size */ | |
4609 | if (new_size >= fs_info->sectorsize || | |
4610 | new_size > fs_info->max_inline) | |
4611 | ASSERT(last_size == new_size); | |
4612 | } | |
4613 | ||
ee22184b | 4614 | if (be_nice && bytes_deleted > SZ_32M) { |
28ed1345 CM |
4615 | unsigned long updates = trans->delayed_ref_updates; |
4616 | if (updates) { | |
4617 | trans->delayed_ref_updates = 0; | |
2ff7e61e JM |
4618 | ret = btrfs_run_delayed_refs(trans, fs_info, |
4619 | updates * 2); | |
28ed1345 CM |
4620 | if (ret && !err) |
4621 | err = ret; | |
4622 | } | |
4623 | } | |
8082510e | 4624 | return err; |
39279cc3 CM |
4625 | } |
4626 | ||
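/*
 * Illustrative sketch, not part of the original file: the retry pattern a
 * caller of btrfs_truncate_inode_items() is expected to follow, modeled on
 * the eviction loop later in this file.  When the function backs off with
 * -EAGAIN or -ENOSPC, the caller ends the transaction, flushes dirty btree
 * metadata and retries with a fresh transaction.  example_truncate_data_items
 * is a hypothetical helper name, not a kernel API.
 */
static int example_truncate_data_items(struct btrfs_root *root,
				       struct inode *inode, u64 new_size)
{
	struct btrfs_trans_handle *trans;
	int ret;

	while (1) {
		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		/* Drop everything beyond new_size but keep the inode item. */
		ret = btrfs_truncate_inode_items(trans, root, inode, new_size,
						 BTRFS_EXTENT_DATA_KEY);

		btrfs_end_transaction(trans);
		btrfs_btree_balance_dirty(root->fs_info);

		if (ret != -ENOSPC && ret != -EAGAIN)
			return ret;
	}
}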
4627 | /* | |
9703fefe | 4628 | * btrfs_truncate_block - read, zero a chunk and write a block |
2aaa6655 JB |
4629 | * @inode - inode that we're zeroing |
4630 | * @from - the offset to start zeroing | |
4631 | * @len - the length to zero, 0 to zero the entire range relative to the | 
4632 | * offset | |
4633 | * @front - zero up to the offset instead of from the offset on | |
4634 | * | |
9703fefe | 4635 | * This will find the block for the "from" offset, cow that block and zero the | 
2aaa6655 | 4636 | * part we want to zero. This is used with truncate and hole punching. |
39279cc3 | 4637 | */ |
9703fefe | 4638 | int btrfs_truncate_block(struct inode *inode, loff_t from, loff_t len, |
2aaa6655 | 4639 | int front) |
39279cc3 | 4640 | { |
0b246afa | 4641 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
2aaa6655 | 4642 | struct address_space *mapping = inode->i_mapping; |
e6dcd2dc CM |
4643 | struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; |
4644 | struct btrfs_ordered_extent *ordered; | |
2ac55d41 | 4645 | struct extent_state *cached_state = NULL; |
e6dcd2dc | 4646 | char *kaddr; |
0b246afa | 4647 | u32 blocksize = fs_info->sectorsize; |
09cbfeaf | 4648 | pgoff_t index = from >> PAGE_SHIFT; |
9703fefe | 4649 | unsigned offset = from & (blocksize - 1); |
39279cc3 | 4650 | struct page *page; |
3b16a4e3 | 4651 | gfp_t mask = btrfs_alloc_write_mask(mapping); |
39279cc3 | 4652 | int ret = 0; |
9703fefe CR |
4653 | u64 block_start; |
4654 | u64 block_end; | |
39279cc3 | 4655 | |
2aaa6655 JB |
4656 | if ((offset & (blocksize - 1)) == 0 && |
4657 | (!len || ((len & (blocksize - 1)) == 0))) | |
39279cc3 | 4658 | goto out; |
9703fefe | 4659 | |
7cf5b976 | 4660 | ret = btrfs_delalloc_reserve_space(inode, |
9703fefe | 4661 | round_down(from, blocksize), blocksize); |
5d5e103a JB |
4662 | if (ret) |
4663 | goto out; | |
39279cc3 | 4664 | |
211c17f5 | 4665 | again: |
3b16a4e3 | 4666 | page = find_or_create_page(mapping, index, mask); |
5d5e103a | 4667 | if (!page) { |
7cf5b976 | 4668 | btrfs_delalloc_release_space(inode, |
9703fefe CR |
4669 | round_down(from, blocksize), |
4670 | blocksize); | |
ac6a2b36 | 4671 | ret = -ENOMEM; |
39279cc3 | 4672 | goto out; |
5d5e103a | 4673 | } |
e6dcd2dc | 4674 | |
9703fefe CR |
4675 | block_start = round_down(from, blocksize); |
4676 | block_end = block_start + blocksize - 1; | |
e6dcd2dc | 4677 | |
39279cc3 | 4678 | if (!PageUptodate(page)) { |
9ebefb18 | 4679 | ret = btrfs_readpage(NULL, page); |
39279cc3 | 4680 | lock_page(page); |
211c17f5 CM |
4681 | if (page->mapping != mapping) { |
4682 | unlock_page(page); | |
09cbfeaf | 4683 | put_page(page); |
211c17f5 CM |
4684 | goto again; |
4685 | } | |
39279cc3 CM |
4686 | if (!PageUptodate(page)) { |
4687 | ret = -EIO; | |
89642229 | 4688 | goto out_unlock; |
39279cc3 CM |
4689 | } |
4690 | } | |
211c17f5 | 4691 | wait_on_page_writeback(page); |
e6dcd2dc | 4692 | |
9703fefe | 4693 | lock_extent_bits(io_tree, block_start, block_end, &cached_state); |
e6dcd2dc CM |
4694 | set_page_extent_mapped(page); |
4695 | ||
9703fefe | 4696 | ordered = btrfs_lookup_ordered_extent(inode, block_start); |
e6dcd2dc | 4697 | if (ordered) { |
9703fefe | 4698 | unlock_extent_cached(io_tree, block_start, block_end, |
2ac55d41 | 4699 | &cached_state, GFP_NOFS); |
e6dcd2dc | 4700 | unlock_page(page); |
09cbfeaf | 4701 | put_page(page); |
eb84ae03 | 4702 | btrfs_start_ordered_extent(inode, ordered, 1); |
e6dcd2dc CM |
4703 | btrfs_put_ordered_extent(ordered); |
4704 | goto again; | |
4705 | } | |
4706 | ||
9703fefe | 4707 | clear_extent_bit(&BTRFS_I(inode)->io_tree, block_start, block_end, |
9e8a4a8b LB |
4708 | EXTENT_DIRTY | EXTENT_DELALLOC | |
4709 | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, | |
2ac55d41 | 4710 | 0, 0, &cached_state, GFP_NOFS); |
5d5e103a | 4711 | |
9703fefe | 4712 | ret = btrfs_set_extent_delalloc(inode, block_start, block_end, |
ba8b04c1 | 4713 | &cached_state, 0); |
9ed74f2d | 4714 | if (ret) { |
9703fefe | 4715 | unlock_extent_cached(io_tree, block_start, block_end, |
2ac55d41 | 4716 | &cached_state, GFP_NOFS); |
9ed74f2d JB |
4717 | goto out_unlock; |
4718 | } | |
4719 | ||
9703fefe | 4720 | if (offset != blocksize) { |
2aaa6655 | 4721 | if (!len) |
9703fefe | 4722 | len = blocksize - offset; |
e6dcd2dc | 4723 | kaddr = kmap(page); |
2aaa6655 | 4724 | if (front) |
9703fefe CR |
4725 | memset(kaddr + (block_start - page_offset(page)), |
4726 | 0, offset); | |
2aaa6655 | 4727 | else |
9703fefe CR |
4728 | memset(kaddr + (block_start - page_offset(page)) + offset, |
4729 | 0, len); | |
e6dcd2dc CM |
4730 | flush_dcache_page(page); |
4731 | kunmap(page); | |
4732 | } | |
247e743c | 4733 | ClearPageChecked(page); |
e6dcd2dc | 4734 | set_page_dirty(page); |
9703fefe | 4735 | unlock_extent_cached(io_tree, block_start, block_end, &cached_state, |
2ac55d41 | 4736 | GFP_NOFS); |
39279cc3 | 4737 | |
89642229 | 4738 | out_unlock: |
5d5e103a | 4739 | if (ret) |
9703fefe CR |
4740 | btrfs_delalloc_release_space(inode, block_start, |
4741 | blocksize); | |
39279cc3 | 4742 | unlock_page(page); |
09cbfeaf | 4743 | put_page(page); |
39279cc3 CM |
4744 | out: |
4745 | return ret; | |
4746 | } | |
4747 | ||
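/*
 * Illustrative sketch, not part of the original file: how a hole-punching
 * style caller can use btrfs_truncate_block() to zero the unaligned head and
 * tail blocks of a range before dropping the fully aligned blocks in between.
 * example_zero_range_edges is a hypothetical helper name, not a kernel API.
 */
static int example_zero_range_edges(struct inode *inode, loff_t offset,
				    loff_t len)
{
	int ret;

	/* Zero from "offset" to the end of its block (len == 0, front == 0). */
	ret = btrfs_truncate_block(inode, offset, 0, 0);
	if (ret)
		return ret;

	/* Zero from the start of the last block up to "offset + len". */
	return btrfs_truncate_block(inode, offset + len, 0, 1);
}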
16e7549f JB |
4748 | static int maybe_insert_hole(struct btrfs_root *root, struct inode *inode, |
4749 | u64 offset, u64 len) | |
4750 | { | |
0b246afa | 4751 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
16e7549f JB |
4752 | struct btrfs_trans_handle *trans; |
4753 | int ret; | |
4754 | ||
4755 | /* | |
4756 | * Still need to make sure the inode looks like it's been updated so | |
4757 | * that any holes get logged if we fsync. | |
4758 | */ | |
0b246afa JM |
4759 | if (btrfs_fs_incompat(fs_info, NO_HOLES)) { |
4760 | BTRFS_I(inode)->last_trans = fs_info->generation; | |
16e7549f JB |
4761 | BTRFS_I(inode)->last_sub_trans = root->log_transid; |
4762 | BTRFS_I(inode)->last_log_commit = root->last_log_commit; | |
4763 | return 0; | |
4764 | } | |
4765 | ||
4766 | /* | |
4767 | * 1 - for the one we're dropping | |
4768 | * 1 - for the one we're adding | |
4769 | * 1 - for updating the inode. | |
4770 | */ | |
4771 | trans = btrfs_start_transaction(root, 3); | |
4772 | if (IS_ERR(trans)) | |
4773 | return PTR_ERR(trans); | |
4774 | ||
4775 | ret = btrfs_drop_extents(trans, root, inode, offset, offset + len, 1); | |
4776 | if (ret) { | |
66642832 | 4777 | btrfs_abort_transaction(trans, ret); |
3a45bb20 | 4778 | btrfs_end_transaction(trans); |
16e7549f JB |
4779 | return ret; |
4780 | } | |
4781 | ||
f85b7379 DS |
4782 | ret = btrfs_insert_file_extent(trans, root, btrfs_ino(BTRFS_I(inode)), |
4783 | offset, 0, 0, len, 0, len, 0, 0, 0); | |
16e7549f | 4784 | if (ret) |
66642832 | 4785 | btrfs_abort_transaction(trans, ret); |
16e7549f JB |
4786 | else |
4787 | btrfs_update_inode(trans, root, inode); | |
3a45bb20 | 4788 | btrfs_end_transaction(trans); |
16e7549f JB |
4789 | return ret; |
4790 | } | |
4791 | ||
695a0d0d JB |
4792 | /* |
4793 | * This function puts in dummy file extents for the area we're creating a hole | |
4794 | * for. So if we are truncating this file to a larger size we need to insert | |
4795 | * these file extents so that btrfs_get_extent will return an EXTENT_MAP_HOLE for | 
4796 | * the range between oldsize and size. | 
4797 | */ | |
a41ad394 | 4798 | int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size) |
39279cc3 | 4799 | { |
0b246afa | 4800 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
9036c102 YZ |
4801 | struct btrfs_root *root = BTRFS_I(inode)->root; |
4802 | struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; | |
a22285a6 | 4803 | struct extent_map *em = NULL; |
2ac55d41 | 4804 | struct extent_state *cached_state = NULL; |
5dc562c5 | 4805 | struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; |
0b246afa JM |
4806 | u64 hole_start = ALIGN(oldsize, fs_info->sectorsize); |
4807 | u64 block_end = ALIGN(size, fs_info->sectorsize); | |
9036c102 YZ |
4808 | u64 last_byte; |
4809 | u64 cur_offset; | |
4810 | u64 hole_size; | |
9ed74f2d | 4811 | int err = 0; |
39279cc3 | 4812 | |
a71754fc | 4813 | /* |
9703fefe CR |
4814 | * If our size started in the middle of a block we need to zero out the |
4815 | * rest of the block before we expand the i_size, otherwise we could | |
a71754fc JB |
4816 | * expose stale data. |
4817 | */ | |
9703fefe | 4818 | err = btrfs_truncate_block(inode, oldsize, 0, 0); |
a71754fc JB |
4819 | if (err) |
4820 | return err; | |
4821 | ||
9036c102 YZ |
4822 | if (size <= hole_start) |
4823 | return 0; | |
4824 | ||
9036c102 YZ |
4825 | while (1) { |
4826 | struct btrfs_ordered_extent *ordered; | |
fa7c1494 | 4827 | |
ff13db41 | 4828 | lock_extent_bits(io_tree, hole_start, block_end - 1, |
d0082371 | 4829 | &cached_state); |
a776c6fa | 4830 | ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), hole_start, |
fa7c1494 | 4831 | block_end - hole_start); |
9036c102 YZ |
4832 | if (!ordered) |
4833 | break; | |
2ac55d41 JB |
4834 | unlock_extent_cached(io_tree, hole_start, block_end - 1, |
4835 | &cached_state, GFP_NOFS); | |
fa7c1494 | 4836 | btrfs_start_ordered_extent(inode, ordered, 1); |
9036c102 YZ |
4837 | btrfs_put_ordered_extent(ordered); |
4838 | } | |
39279cc3 | 4839 | |
9036c102 YZ |
4840 | cur_offset = hole_start; |
4841 | while (1) { | |
fc4f21b1 | 4842 | em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur_offset, |
9036c102 | 4843 | block_end - cur_offset, 0); |
79787eaa JM |
4844 | if (IS_ERR(em)) { |
4845 | err = PTR_ERR(em); | |
f2767956 | 4846 | em = NULL; |
79787eaa JM |
4847 | break; |
4848 | } | |
9036c102 | 4849 | last_byte = min(extent_map_end(em), block_end); |
0b246afa | 4850 | last_byte = ALIGN(last_byte, fs_info->sectorsize); |
8082510e | 4851 | if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) { |
5dc562c5 | 4852 | struct extent_map *hole_em; |
9036c102 | 4853 | hole_size = last_byte - cur_offset; |
9ed74f2d | 4854 | |
16e7549f JB |
4855 | err = maybe_insert_hole(root, inode, cur_offset, |
4856 | hole_size); | |
4857 | if (err) | |
3893e33b | 4858 | break; |
dcdbc059 | 4859 | btrfs_drop_extent_cache(BTRFS_I(inode), cur_offset, |
5dc562c5 JB |
4860 | cur_offset + hole_size - 1, 0); |
4861 | hole_em = alloc_extent_map(); | |
4862 | if (!hole_em) { | |
4863 | set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, | |
4864 | &BTRFS_I(inode)->runtime_flags); | |
4865 | goto next; | |
4866 | } | |
4867 | hole_em->start = cur_offset; | |
4868 | hole_em->len = hole_size; | |
4869 | hole_em->orig_start = cur_offset; | |
8082510e | 4870 | |
5dc562c5 JB |
4871 | hole_em->block_start = EXTENT_MAP_HOLE; |
4872 | hole_em->block_len = 0; | |
b4939680 | 4873 | hole_em->orig_block_len = 0; |
cc95bef6 | 4874 | hole_em->ram_bytes = hole_size; |
0b246afa | 4875 | hole_em->bdev = fs_info->fs_devices->latest_bdev; |
5dc562c5 | 4876 | hole_em->compress_type = BTRFS_COMPRESS_NONE; |
0b246afa | 4877 | hole_em->generation = fs_info->generation; |
8082510e | 4878 | |
5dc562c5 JB |
4879 | while (1) { |
4880 | write_lock(&em_tree->lock); | |
09a2a8f9 | 4881 | err = add_extent_mapping(em_tree, hole_em, 1); |
5dc562c5 JB |
4882 | write_unlock(&em_tree->lock); |
4883 | if (err != -EEXIST) | |
4884 | break; | |
dcdbc059 NB |
4885 | btrfs_drop_extent_cache(BTRFS_I(inode), |
4886 | cur_offset, | |
5dc562c5 JB |
4887 | cur_offset + |
4888 | hole_size - 1, 0); | |
4889 | } | |
4890 | free_extent_map(hole_em); | |
9036c102 | 4891 | } |
16e7549f | 4892 | next: |
9036c102 | 4893 | free_extent_map(em); |
a22285a6 | 4894 | em = NULL; |
9036c102 | 4895 | cur_offset = last_byte; |
8082510e | 4896 | if (cur_offset >= block_end) |
9036c102 YZ |
4897 | break; |
4898 | } | |
a22285a6 | 4899 | free_extent_map(em); |
2ac55d41 JB |
4900 | unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state, |
4901 | GFP_NOFS); | |
9036c102 YZ |
4902 | return err; |
4903 | } | |
39279cc3 | 4904 | |
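/*
 * Worked example for btrfs_cont_expand() above (illustrative, assuming a 4K
 * sectorsize): expanding a file from oldsize = 6000 bytes to size = 20000
 * bytes gives
 *
 *	hole_start = ALIGN(6000, 4096)  = 8192
 *	block_end  = ALIGN(20000, 4096) = 20480
 *
 * btrfs_truncate_block() first zeroes bytes 6000..8191 of the partially used
 * block, and the loop then covers 8192..20479 with hole file extents (or only
 * the last_trans bookkeeping from maybe_insert_hole() when NO_HOLES is set).
 */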
3972f260 | 4905 | static int btrfs_setsize(struct inode *inode, struct iattr *attr) |
8082510e | 4906 | { |
f4a2f4c5 MX |
4907 | struct btrfs_root *root = BTRFS_I(inode)->root; |
4908 | struct btrfs_trans_handle *trans; | |
a41ad394 | 4909 | loff_t oldsize = i_size_read(inode); |
3972f260 ES |
4910 | loff_t newsize = attr->ia_size; |
4911 | int mask = attr->ia_valid; | |
8082510e YZ |
4912 | int ret; |
4913 | ||
3972f260 ES |
4914 | /* |
4915 | * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a | |
4916 | * special case where we need to update the times despite not having | |
4917 | * these flags set. For all other operations the VFS set these flags | |
4918 | * explicitly if it wants a timestamp update. | |
4919 | */ | |
dff6efc3 CH |
4920 | if (newsize != oldsize) { |
4921 | inode_inc_iversion(inode); | |
4922 | if (!(mask & (ATTR_CTIME | ATTR_MTIME))) | |
4923 | inode->i_ctime = inode->i_mtime = | |
c2050a45 | 4924 | current_time(inode); |
dff6efc3 | 4925 | } |
3972f260 | 4926 | |
a41ad394 | 4927 | if (newsize > oldsize) { |
9ea24bbe FM |
4928 | /* |
4929 | * Don't do an expanding truncate while snapshotting is ongoing. | 
4930 | * This is to ensure the snapshot captures a fully consistent | |
4931 | * state of this file - if the snapshot captures this expanding | |
4932 | * truncation, it must capture all writes that happened before | |
4933 | * this truncation. | |
4934 | */ | |
0bc19f90 | 4935 | btrfs_wait_for_snapshot_creation(root); |
a41ad394 | 4936 | ret = btrfs_cont_expand(inode, oldsize, newsize); |
9ea24bbe FM |
4937 | if (ret) { |
4938 | btrfs_end_write_no_snapshoting(root); | |
8082510e | 4939 | return ret; |
9ea24bbe | 4940 | } |
8082510e | 4941 | |
f4a2f4c5 | 4942 | trans = btrfs_start_transaction(root, 1); |
9ea24bbe FM |
4943 | if (IS_ERR(trans)) { |
4944 | btrfs_end_write_no_snapshoting(root); | |
f4a2f4c5 | 4945 | return PTR_ERR(trans); |
9ea24bbe | 4946 | } |
f4a2f4c5 MX |
4947 | |
4948 | i_size_write(inode, newsize); | |
4949 | btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL); | |
27772b68 | 4950 | pagecache_isize_extended(inode, oldsize, newsize); |
f4a2f4c5 | 4951 | ret = btrfs_update_inode(trans, root, inode); |
9ea24bbe | 4952 | btrfs_end_write_no_snapshoting(root); |
3a45bb20 | 4953 | btrfs_end_transaction(trans); |
a41ad394 | 4954 | } else { |
8082510e | 4955 | |
a41ad394 JB |
4956 | /* |
4957 | * We're truncating a file that used to have good data down to | |
4958 | * zero. Make sure it gets into the ordered flush list so that | |
4959 | * any new writes get down to disk quickly. | |
4960 | */ | |
4961 | if (newsize == 0) | |
72ac3c0d JB |
4962 | set_bit(BTRFS_INODE_ORDERED_DATA_CLOSE, |
4963 | &BTRFS_I(inode)->runtime_flags); | |
8082510e | 4964 | |
f3fe820c JB |
4965 | /* |
4966 | * 1 for the orphan item we're going to add | |
4967 | * 1 for the orphan item deletion. | |
4968 | */ | |
4969 | trans = btrfs_start_transaction(root, 2); | |
4970 | if (IS_ERR(trans)) | |
4971 | return PTR_ERR(trans); | |
4972 | ||
4973 | /* | |
4974 | * We need to do this in case we fail at _any_ point during the | |
4975 | * actual truncate. Once we do the truncate_setsize we could | |
4976 | * invalidate pages which forces any outstanding ordered io to | |
4977 | * be instantly completed which will give us extents that need | |
4978 | * to be truncated. If we fail to get an orphan inode down we | |
4979 | * could have left over extents that were never meant to live, | |
01327610 | 4980 | * so we need to guarantee from this point on that everything |
f3fe820c JB |
4981 | * will be consistent. |
4982 | */ | |
73f2e545 | 4983 | ret = btrfs_orphan_add(trans, BTRFS_I(inode)); |
3a45bb20 | 4984 | btrfs_end_transaction(trans); |
f3fe820c JB |
4985 | if (ret) |
4986 | return ret; | |
4987 | ||
a41ad394 JB |
4988 | /* we don't support swapfiles, so vmtruncate shouldn't fail */ |
4989 | truncate_setsize(inode, newsize); | |
2e60a51e MX |
4990 | |
4991 | /* Disable nonlocked read DIO to avoid the endless truncate */ | 
abcefb1e | 4992 | btrfs_inode_block_unlocked_dio(BTRFS_I(inode)); |
2e60a51e | 4993 | inode_dio_wait(inode); |
0b581701 | 4994 | btrfs_inode_resume_unlocked_dio(BTRFS_I(inode)); |
2e60a51e | 4995 | |
a41ad394 | 4996 | ret = btrfs_truncate(inode); |
7f4f6e0a JB |
4997 | if (ret && inode->i_nlink) { |
4998 | int err; | |
4999 | ||
19fd2df5 LB |
5000 | /* To get a stable disk_i_size */ |
5001 | err = btrfs_wait_ordered_range(inode, 0, (u64)-1); | |
5002 | if (err) { | |
3d6ae7bb | 5003 | btrfs_orphan_del(NULL, BTRFS_I(inode)); |
19fd2df5 LB |
5004 | return err; |
5005 | } | |
5006 | ||
7f4f6e0a JB |
5007 | /* |
5008 | * failed to truncate, disk_i_size is only adjusted down | |
5009 | * as we remove extents, so it should represent the true | |
5010 | * size of the inode, so reset the in memory size and | |
5011 | * delete our orphan entry. | |
5012 | */ | |
5013 | trans = btrfs_join_transaction(root); | |
5014 | if (IS_ERR(trans)) { | |
3d6ae7bb | 5015 | btrfs_orphan_del(NULL, BTRFS_I(inode)); |
7f4f6e0a JB |
5016 | return ret; |
5017 | } | |
5018 | i_size_write(inode, BTRFS_I(inode)->disk_i_size); | |
3d6ae7bb | 5019 | err = btrfs_orphan_del(trans, BTRFS_I(inode)); |
7f4f6e0a | 5020 | if (err) |
66642832 | 5021 | btrfs_abort_transaction(trans, err); |
3a45bb20 | 5022 | btrfs_end_transaction(trans); |
7f4f6e0a | 5023 | } |
8082510e YZ |
5024 | } |
5025 | ||
a41ad394 | 5026 | return ret; |
8082510e YZ |
5027 | } |
5028 | ||
9036c102 YZ |
5029 | static int btrfs_setattr(struct dentry *dentry, struct iattr *attr) |
5030 | { | |
2b0143b5 | 5031 | struct inode *inode = d_inode(dentry); |
b83cc969 | 5032 | struct btrfs_root *root = BTRFS_I(inode)->root; |
9036c102 | 5033 | int err; |
39279cc3 | 5034 | |
b83cc969 LZ |
5035 | if (btrfs_root_readonly(root)) |
5036 | return -EROFS; | |
5037 | ||
31051c85 | 5038 | err = setattr_prepare(dentry, attr); |
9036c102 YZ |
5039 | if (err) |
5040 | return err; | |
2bf5a725 | 5041 | |
5a3f23d5 | 5042 | if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { |
3972f260 | 5043 | err = btrfs_setsize(inode, attr); |
8082510e YZ |
5044 | if (err) |
5045 | return err; | |
39279cc3 | 5046 | } |
9036c102 | 5047 | |
1025774c CH |
5048 | if (attr->ia_valid) { |
5049 | setattr_copy(inode, attr); | |
0c4d2d95 | 5050 | inode_inc_iversion(inode); |
22c44fe6 | 5051 | err = btrfs_dirty_inode(inode); |
1025774c | 5052 | |
22c44fe6 | 5053 | if (!err && attr->ia_valid & ATTR_MODE) |
996a710d | 5054 | err = posix_acl_chmod(inode, inode->i_mode); |
1025774c | 5055 | } |
33268eaf | 5056 | |
39279cc3 CM |
5057 | return err; |
5058 | } | |
61295eb8 | 5059 | |
131e404a FDBM |
5060 | /* |
5061 | * While truncating the inode pages during eviction, we get the VFS calling | |
5062 | * btrfs_invalidatepage() against each page of the inode. This is slow because | |
5063 | * the calls to btrfs_invalidatepage() result in a huge number of calls to | 
5064 | * lock_extent_bits() and clear_extent_bit(), which keep merging and splitting | |
5065 | * extent_state structures over and over, wasting lots of time. | |
5066 | * | |
5067 | * Therefore if the inode is being evicted, let btrfs_invalidatepage() skip all | |
5068 | * those expensive operations on a per-page basis and do only the ordered io | 
5069 | * finishing, while we release here the extent_map and extent_state structures, | |
5070 | * without the excessive merging and splitting. | |
5071 | */ | |
5072 | static void evict_inode_truncate_pages(struct inode *inode) | |
5073 | { | |
5074 | struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; | |
5075 | struct extent_map_tree *map_tree = &BTRFS_I(inode)->extent_tree; | |
5076 | struct rb_node *node; | |
5077 | ||
5078 | ASSERT(inode->i_state & I_FREEING); | |
91b0abe3 | 5079 | truncate_inode_pages_final(&inode->i_data); |
131e404a FDBM |
5080 | |
5081 | write_lock(&map_tree->lock); | |
5082 | while (!RB_EMPTY_ROOT(&map_tree->map)) { | |
5083 | struct extent_map *em; | |
5084 | ||
5085 | node = rb_first(&map_tree->map); | |
5086 | em = rb_entry(node, struct extent_map, rb_node); | |
180589ef WS |
5087 | clear_bit(EXTENT_FLAG_PINNED, &em->flags); |
5088 | clear_bit(EXTENT_FLAG_LOGGING, &em->flags); | |
131e404a FDBM |
5089 | remove_extent_mapping(map_tree, em); |
5090 | free_extent_map(em); | |
7064dd5c FM |
5091 | if (need_resched()) { |
5092 | write_unlock(&map_tree->lock); | |
5093 | cond_resched(); | |
5094 | write_lock(&map_tree->lock); | |
5095 | } | |
131e404a FDBM |
5096 | } |
5097 | write_unlock(&map_tree->lock); | |
5098 | ||
6ca07097 FM |
5099 | /* |
5100 | * Keep looping until we have no more ranges in the io tree. | |
5101 | * We can have ongoing bios started by readpages (called from readahead) | |
9c6429d9 FM |
5102 | * that have their endio callback (extent_io.c:end_bio_extent_readpage) |
5103 | * still in progress (unlocked the pages in the bio but did not yet | |
5104 | * unlock the ranges in the io tree). Therefore this means some | 
6ca07097 FM |
5105 | * ranges can still be locked and eviction started because before |
5106 | * submitting those bios, which are executed by a separate task (work | |
5107 | * queue kthread), inode references (inode->i_count) were not taken | |
5108 | * (which would be dropped in the end io callback of each bio). | |
5109 | * Therefore here we effectively end up waiting for those bios and | |
5110 | * anyone else holding locked ranges without having bumped the inode's | |
5111 | * reference count - if we don't do it, when they access the inode's | |
5112 | * io_tree to unlock a range it may be too late, leading to a | 
5113 | * use-after-free issue. | |
5114 | */ | |
131e404a FDBM |
5115 | spin_lock(&io_tree->lock); |
5116 | while (!RB_EMPTY_ROOT(&io_tree->state)) { | |
5117 | struct extent_state *state; | |
5118 | struct extent_state *cached_state = NULL; | |
6ca07097 FM |
5119 | u64 start; |
5120 | u64 end; | |
131e404a FDBM |
5121 | |
5122 | node = rb_first(&io_tree->state); | |
5123 | state = rb_entry(node, struct extent_state, rb_node); | |
6ca07097 FM |
5124 | start = state->start; |
5125 | end = state->end; | |
131e404a FDBM |
5126 | spin_unlock(&io_tree->lock); |
5127 | ||
ff13db41 | 5128 | lock_extent_bits(io_tree, start, end, &cached_state); |
b9d0b389 QW |
5129 | |
5130 | /* | |
5131 | * If still has DELALLOC flag, the extent didn't reach disk, | |
5132 | * and its reserved space won't be freed by delayed_ref. | |
5133 | * So we need to free its reserved space here. | |
5134 | * (Refer to comment in btrfs_invalidatepage, case 2) | |
5135 | * | |
5136 | * Note, end is the bytenr of last byte, so we need + 1 here. | |
5137 | */ | |
5138 | if (state->state & EXTENT_DELALLOC) | |
5139 | btrfs_qgroup_free_data(inode, start, end - start + 1); | |
5140 | ||
6ca07097 | 5141 | clear_extent_bit(io_tree, start, end, |
131e404a FDBM |
5142 | EXTENT_LOCKED | EXTENT_DIRTY | |
5143 | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | | |
5144 | EXTENT_DEFRAG, 1, 1, | |
5145 | &cached_state, GFP_NOFS); | |
131e404a | 5146 | |
7064dd5c | 5147 | cond_resched(); |
131e404a FDBM |
5148 | spin_lock(&io_tree->lock); |
5149 | } | |
5150 | spin_unlock(&io_tree->lock); | |
5151 | } | |
5152 | ||
bd555975 | 5153 | void btrfs_evict_inode(struct inode *inode) |
39279cc3 | 5154 | { |
0b246afa | 5155 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
39279cc3 CM |
5156 | struct btrfs_trans_handle *trans; |
5157 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
726c35fa | 5158 | struct btrfs_block_rsv *rsv, *global_rsv; |
3bce876f | 5159 | int steal_from_global = 0; |
3d48d981 | 5160 | u64 min_size; |
39279cc3 CM |
5161 | int ret; |
5162 | ||
1abe9b8a | 5163 | trace_btrfs_inode_evict(inode); |
5164 | ||
3d48d981 NB |
5165 | if (!root) { |
5166 | kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode)); | |
5167 | return; | |
5168 | } | |
5169 | ||
0b246afa | 5170 | min_size = btrfs_calc_trunc_metadata_size(fs_info, 1); |
3d48d981 | 5171 | |
131e404a FDBM |
5172 | evict_inode_truncate_pages(inode); |
5173 | ||
69e9c6c6 SB |
5174 | if (inode->i_nlink && |
5175 | ((btrfs_root_refs(&root->root_item) != 0 && | |
5176 | root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID) || | |
70ddc553 | 5177 | btrfs_is_free_space_inode(BTRFS_I(inode)))) |
bd555975 AV |
5178 | goto no_delete; |
5179 | ||
39279cc3 | 5180 | if (is_bad_inode(inode)) { |
3d6ae7bb | 5181 | btrfs_orphan_del(NULL, BTRFS_I(inode)); |
39279cc3 CM |
5182 | goto no_delete; |
5183 | } | |
bd555975 | 5184 | /* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */ |
a30e577c JM |
5185 | if (!special_file(inode->i_mode)) |
5186 | btrfs_wait_ordered_range(inode, 0, (u64)-1); | |
5f39d397 | 5187 | |
7ab7956e | 5188 | btrfs_free_io_failure_record(BTRFS_I(inode), 0, (u64)-1); |
f612496b | 5189 | |
0b246afa | 5190 | if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) { |
6bf02314 | 5191 | BUG_ON(test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, |
8a35d95f | 5192 | &BTRFS_I(inode)->runtime_flags)); |
c71bf099 YZ |
5193 | goto no_delete; |
5194 | } | |
5195 | ||
76dda93c | 5196 | if (inode->i_nlink > 0) { |
69e9c6c6 SB |
5197 | BUG_ON(btrfs_root_refs(&root->root_item) != 0 && |
5198 | root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID); | |
76dda93c YZ |
5199 | goto no_delete; |
5200 | } | |
5201 | ||
aa79021f | 5202 | ret = btrfs_commit_inode_delayed_inode(BTRFS_I(inode)); |
0e8c36a9 | 5203 | if (ret) { |
3d6ae7bb | 5204 | btrfs_orphan_del(NULL, BTRFS_I(inode)); |
0e8c36a9 MX |
5205 | goto no_delete; |
5206 | } | |
5207 | ||
2ff7e61e | 5208 | rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP); |
4289a667 | 5209 | if (!rsv) { |
3d6ae7bb | 5210 | btrfs_orphan_del(NULL, BTRFS_I(inode)); |
4289a667 JB |
5211 | goto no_delete; |
5212 | } | |
4a338542 | 5213 | rsv->size = min_size; |
ca7e70f5 | 5214 | rsv->failfast = 1; |
0b246afa | 5215 | global_rsv = &fs_info->global_block_rsv; |
4289a667 | 5216 | |
6ef06d27 | 5217 | btrfs_i_size_write(BTRFS_I(inode), 0); |
5f39d397 | 5218 | |
4289a667 | 5219 | /* |
8407aa46 MX |
5220 | * This is a bit simpler than btrfs_truncate since we've already |
5221 | * reserved our space for our orphan item in the unlink, so we just | |
5222 | * need to reserve some slack space in case we add bytes and update | |
5223 | * inode item when doing the truncate. | |
4289a667 | 5224 | */ |
8082510e | 5225 | while (1) { |
08e007d2 MX |
5226 | ret = btrfs_block_rsv_refill(root, rsv, min_size, |
5227 | BTRFS_RESERVE_FLUSH_LIMIT); | |
726c35fa JB |
5228 | |
5229 | /* | |
5230 | * Try and steal from the global reserve since we will | |
5231 | * likely not use this space anyway; we want to try as | 
5232 | * hard as possible to get this to work. | |
5233 | */ | |
5234 | if (ret) | |
3bce876f JB |
5235 | steal_from_global++; |
5236 | else | |
5237 | steal_from_global = 0; | |
5238 | ret = 0; | |
d68fc57b | 5239 | |
3bce876f JB |
5240 | /* |
5241 | * steal_from_global == 0: we reserved stuff, hooray! | |
5242 | * steal_from_global == 1: we didn't reserve stuff, boo! | |
5243 | * steal_from_global == 2: we've committed, still not a lot of | |
5244 | * room but maybe we'll have room in the global reserve this | |
5245 | * time. | |
5246 | * steal_from_global == 3: abandon all hope! | |
5247 | */ | |
5248 | if (steal_from_global > 2) { | |
0b246afa JM |
5249 | btrfs_warn(fs_info, |
5250 | "Could not get space for a delete, will truncate on mount %d", | |
5251 | ret); | |
3d6ae7bb | 5252 | btrfs_orphan_del(NULL, BTRFS_I(inode)); |
2ff7e61e | 5253 | btrfs_free_block_rsv(fs_info, rsv); |
4289a667 | 5254 | goto no_delete; |
d68fc57b | 5255 | } |
7b128766 | 5256 | |
0e8c36a9 | 5257 | trans = btrfs_join_transaction(root); |
4289a667 | 5258 | if (IS_ERR(trans)) { |
3d6ae7bb | 5259 | btrfs_orphan_del(NULL, BTRFS_I(inode)); |
2ff7e61e | 5260 | btrfs_free_block_rsv(fs_info, rsv); |
4289a667 | 5261 | goto no_delete; |
d68fc57b | 5262 | } |
7b128766 | 5263 | |
3bce876f | 5264 | /* |
01327610 | 5265 | * We can't just steal from the global reserve, we need to make |
3bce876f JB |
5266 | * sure there is room to do it, if not we need to commit and try |
5267 | * again. | |
5268 | */ | |
5269 | if (steal_from_global) { | |
2ff7e61e | 5270 | if (!btrfs_check_space_for_delayed_refs(trans, fs_info)) |
3bce876f | 5271 | ret = btrfs_block_rsv_migrate(global_rsv, rsv, |
25d609f8 | 5272 | min_size, 0); |
3bce876f JB |
5273 | else |
5274 | ret = -ENOSPC; | |
5275 | } | |
5276 | ||
5277 | /* | |
5278 | * Couldn't steal from the global reserve, we have too much | |
5279 | * pending stuff built up, commit the transaction and try it | |
5280 | * again. | |
5281 | */ | |
5282 | if (ret) { | |
3a45bb20 | 5283 | ret = btrfs_commit_transaction(trans); |
3bce876f | 5284 | if (ret) { |
3d6ae7bb | 5285 | btrfs_orphan_del(NULL, BTRFS_I(inode)); |
2ff7e61e | 5286 | btrfs_free_block_rsv(fs_info, rsv); |
3bce876f JB |
5287 | goto no_delete; |
5288 | } | |
5289 | continue; | |
5290 | } else { | |
5291 | steal_from_global = 0; | |
5292 | } | |
5293 | ||
4289a667 JB |
5294 | trans->block_rsv = rsv; |
5295 | ||
d68fc57b | 5296 | ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0); |
28ed1345 | 5297 | if (ret != -ENOSPC && ret != -EAGAIN) |
8082510e | 5298 | break; |
85e21bac | 5299 | |
0b246afa | 5300 | trans->block_rsv = &fs_info->trans_block_rsv; |
3a45bb20 | 5301 | btrfs_end_transaction(trans); |
8082510e | 5302 | trans = NULL; |
2ff7e61e | 5303 | btrfs_btree_balance_dirty(fs_info); |
8082510e | 5304 | } |
5f39d397 | 5305 | |
2ff7e61e | 5306 | btrfs_free_block_rsv(fs_info, rsv); |
4289a667 | 5307 | |
4ef31a45 JB |
5308 | /* |
5309 | * Errors here aren't a big deal; it just means we leave orphan items | 
5310 | * in the tree. They will be cleaned up on the next mount. | |
5311 | */ | |
8082510e | 5312 | if (ret == 0) { |
4289a667 | 5313 | trans->block_rsv = root->orphan_block_rsv; |
3d6ae7bb | 5314 | btrfs_orphan_del(trans, BTRFS_I(inode)); |
4ef31a45 | 5315 | } else { |
3d6ae7bb | 5316 | btrfs_orphan_del(NULL, BTRFS_I(inode)); |
8082510e | 5317 | } |
54aa1f4d | 5318 | |
0b246afa JM |
5319 | trans->block_rsv = &fs_info->trans_block_rsv; |
5320 | if (!(root == fs_info->tree_root || | |
581bb050 | 5321 | root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)) |
4a0cc7ca | 5322 | btrfs_return_ino(root, btrfs_ino(BTRFS_I(inode))); |
581bb050 | 5323 | |
3a45bb20 | 5324 | btrfs_end_transaction(trans); |
2ff7e61e | 5325 | btrfs_btree_balance_dirty(fs_info); |
39279cc3 | 5326 | no_delete: |
f48d1cf5 | 5327 | btrfs_remove_delayed_node(BTRFS_I(inode)); |
dbd5768f | 5328 | clear_inode(inode); |
39279cc3 CM |
5329 | } |
5330 | ||
5331 | /* | |
5332 | * this returns the key found in the dir entry in the location pointer. | |
5333 | * If no dir entries were found, location->objectid is 0. | |
5334 | */ | |
5335 | static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry, | |
5336 | struct btrfs_key *location) | |
5337 | { | |
5338 | const char *name = dentry->d_name.name; | |
5339 | int namelen = dentry->d_name.len; | |
5340 | struct btrfs_dir_item *di; | |
5341 | struct btrfs_path *path; | |
5342 | struct btrfs_root *root = BTRFS_I(dir)->root; | |
0d9f7f3e | 5343 | int ret = 0; |
39279cc3 CM |
5344 | |
5345 | path = btrfs_alloc_path(); | |
d8926bb3 MF |
5346 | if (!path) |
5347 | return -ENOMEM; | |
3954401f | 5348 | |
f85b7379 DS |
5349 | di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(BTRFS_I(dir)), |
5350 | name, namelen, 0); | |
0d9f7f3e Y |
5351 | if (IS_ERR(di)) |
5352 | ret = PTR_ERR(di); | |
d397712b | 5353 | |
c704005d | 5354 | if (IS_ERR_OR_NULL(di)) |
3954401f | 5355 | goto out_err; |
d397712b | 5356 | |
5f39d397 | 5357 | btrfs_dir_item_key_to_cpu(path->nodes[0], di, location); |
39279cc3 | 5358 | out: |
39279cc3 CM |
5359 | btrfs_free_path(path); |
5360 | return ret; | |
3954401f CM |
5361 | out_err: |
5362 | location->objectid = 0; | |
5363 | goto out; | |
39279cc3 CM |
5364 | } |
5365 | ||
5366 | /* | |
5367 | * when we hit a tree root in a directory, the btrfs part of the inode | |
5368 | * needs to be changed to reflect the root directory of the tree root. This | |
5369 | * is kind of like crossing a mount point. | |
5370 | */ | |
2ff7e61e | 5371 | static int fixup_tree_root_location(struct btrfs_fs_info *fs_info, |
4df27c4d YZ |
5372 | struct inode *dir, |
5373 | struct dentry *dentry, | |
5374 | struct btrfs_key *location, | |
5375 | struct btrfs_root **sub_root) | |
39279cc3 | 5376 | { |
4df27c4d YZ |
5377 | struct btrfs_path *path; |
5378 | struct btrfs_root *new_root; | |
5379 | struct btrfs_root_ref *ref; | |
5380 | struct extent_buffer *leaf; | |
1d4c08e0 | 5381 | struct btrfs_key key; |
4df27c4d YZ |
5382 | int ret; |
5383 | int err = 0; | |
39279cc3 | 5384 | |
4df27c4d YZ |
5385 | path = btrfs_alloc_path(); |
5386 | if (!path) { | |
5387 | err = -ENOMEM; | |
5388 | goto out; | |
5389 | } | |
39279cc3 | 5390 | |
4df27c4d | 5391 | err = -ENOENT; |
1d4c08e0 DS |
5392 | key.objectid = BTRFS_I(dir)->root->root_key.objectid; |
5393 | key.type = BTRFS_ROOT_REF_KEY; | |
5394 | key.offset = location->objectid; | |
5395 | ||
0b246afa | 5396 | ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); |
4df27c4d YZ |
5397 | if (ret) { |
5398 | if (ret < 0) | |
5399 | err = ret; | |
5400 | goto out; | |
5401 | } | |
39279cc3 | 5402 | |
4df27c4d YZ |
5403 | leaf = path->nodes[0]; |
5404 | ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); | |
4a0cc7ca | 5405 | if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(BTRFS_I(dir)) || |
4df27c4d YZ |
5406 | btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len) |
5407 | goto out; | |
39279cc3 | 5408 | |
4df27c4d YZ |
5409 | ret = memcmp_extent_buffer(leaf, dentry->d_name.name, |
5410 | (unsigned long)(ref + 1), | |
5411 | dentry->d_name.len); | |
5412 | if (ret) | |
5413 | goto out; | |
5414 | ||
b3b4aa74 | 5415 | btrfs_release_path(path); |
4df27c4d | 5416 | |
0b246afa | 5417 | new_root = btrfs_read_fs_root_no_name(fs_info, location); |
4df27c4d YZ |
5418 | if (IS_ERR(new_root)) { |
5419 | err = PTR_ERR(new_root); | |
5420 | goto out; | |
5421 | } | |
5422 | ||
4df27c4d YZ |
5423 | *sub_root = new_root; |
5424 | location->objectid = btrfs_root_dirid(&new_root->root_item); | |
5425 | location->type = BTRFS_INODE_ITEM_KEY; | |
5426 | location->offset = 0; | |
5427 | err = 0; | |
5428 | out: | |
5429 | btrfs_free_path(path); | |
5430 | return err; | |
39279cc3 CM |
5431 | } |
5432 | ||
5d4f98a2 YZ |
5433 | static void inode_tree_add(struct inode *inode) |
5434 | { | |
5435 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
5436 | struct btrfs_inode *entry; | |
03e860bd NP |
5437 | struct rb_node **p; |
5438 | struct rb_node *parent; | |
cef21937 | 5439 | struct rb_node *new = &BTRFS_I(inode)->rb_node; |
4a0cc7ca | 5440 | u64 ino = btrfs_ino(BTRFS_I(inode)); |
5d4f98a2 | 5441 | |
1d3382cb | 5442 | if (inode_unhashed(inode)) |
76dda93c | 5443 | return; |
e1409cef | 5444 | parent = NULL; |
5d4f98a2 | 5445 | spin_lock(&root->inode_lock); |
e1409cef | 5446 | p = &root->inode_tree.rb_node; |
5d4f98a2 YZ |
5447 | while (*p) { |
5448 | parent = *p; | |
5449 | entry = rb_entry(parent, struct btrfs_inode, rb_node); | |
5450 | ||
4a0cc7ca | 5451 | if (ino < btrfs_ino(BTRFS_I(&entry->vfs_inode))) |
03e860bd | 5452 | p = &parent->rb_left; |
4a0cc7ca | 5453 | else if (ino > btrfs_ino(BTRFS_I(&entry->vfs_inode))) |
03e860bd | 5454 | p = &parent->rb_right; |
5d4f98a2 YZ |
5455 | else { |
5456 | WARN_ON(!(entry->vfs_inode.i_state & | |
a4ffdde6 | 5457 | (I_WILL_FREE | I_FREEING))); |
cef21937 | 5458 | rb_replace_node(parent, new, &root->inode_tree); |
03e860bd NP |
5459 | RB_CLEAR_NODE(parent); |
5460 | spin_unlock(&root->inode_lock); | |
cef21937 | 5461 | return; |
5d4f98a2 YZ |
5462 | } |
5463 | } | |
cef21937 FDBM |
5464 | rb_link_node(new, parent, p); |
5465 | rb_insert_color(new, &root->inode_tree); | |
5d4f98a2 YZ |
5466 | spin_unlock(&root->inode_lock); |
5467 | } | |
5468 | ||
5469 | static void inode_tree_del(struct inode *inode) | |
5470 | { | |
0b246afa | 5471 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
5d4f98a2 | 5472 | struct btrfs_root *root = BTRFS_I(inode)->root; |
76dda93c | 5473 | int empty = 0; |
5d4f98a2 | 5474 | |
03e860bd | 5475 | spin_lock(&root->inode_lock); |
5d4f98a2 | 5476 | if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) { |
5d4f98a2 | 5477 | rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree); |
5d4f98a2 | 5478 | RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node); |
76dda93c | 5479 | empty = RB_EMPTY_ROOT(&root->inode_tree); |
5d4f98a2 | 5480 | } |
03e860bd | 5481 | spin_unlock(&root->inode_lock); |
76dda93c | 5482 | |
69e9c6c6 | 5483 | if (empty && btrfs_root_refs(&root->root_item) == 0) { |
0b246afa | 5484 | synchronize_srcu(&fs_info->subvol_srcu); |
76dda93c YZ |
5485 | spin_lock(&root->inode_lock); |
5486 | empty = RB_EMPTY_ROOT(&root->inode_tree); | |
5487 | spin_unlock(&root->inode_lock); | |
5488 | if (empty) | |
5489 | btrfs_add_dead_root(root); | |
5490 | } | |
5491 | } | |
5492 | ||
143bede5 | 5493 | void btrfs_invalidate_inodes(struct btrfs_root *root) |
76dda93c | 5494 | { |
0b246afa | 5495 | struct btrfs_fs_info *fs_info = root->fs_info; |
76dda93c YZ |
5496 | struct rb_node *node; |
5497 | struct rb_node *prev; | |
5498 | struct btrfs_inode *entry; | |
5499 | struct inode *inode; | |
5500 | u64 objectid = 0; | |
5501 | ||
0b246afa | 5502 | if (!test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) |
7813b3db | 5503 | WARN_ON(btrfs_root_refs(&root->root_item) != 0); |
76dda93c YZ |
5504 | |
5505 | spin_lock(&root->inode_lock); | |
5506 | again: | |
5507 | node = root->inode_tree.rb_node; | |
5508 | prev = NULL; | |
5509 | while (node) { | |
5510 | prev = node; | |
5511 | entry = rb_entry(node, struct btrfs_inode, rb_node); | |
5512 | ||
4a0cc7ca | 5513 | if (objectid < btrfs_ino(BTRFS_I(&entry->vfs_inode))) |
76dda93c | 5514 | node = node->rb_left; |
4a0cc7ca | 5515 | else if (objectid > btrfs_ino(BTRFS_I(&entry->vfs_inode))) |
76dda93c YZ |
5516 | node = node->rb_right; |
5517 | else | |
5518 | break; | |
5519 | } | |
5520 | if (!node) { | |
5521 | while (prev) { | |
5522 | entry = rb_entry(prev, struct btrfs_inode, rb_node); | |
4a0cc7ca | 5523 | if (objectid <= btrfs_ino(BTRFS_I(&entry->vfs_inode))) { |
76dda93c YZ |
5524 | node = prev; |
5525 | break; | |
5526 | } | |
5527 | prev = rb_next(prev); | |
5528 | } | |
5529 | } | |
5530 | while (node) { | |
5531 | entry = rb_entry(node, struct btrfs_inode, rb_node); | |
4a0cc7ca | 5532 | objectid = btrfs_ino(BTRFS_I(&entry->vfs_inode)) + 1; |
76dda93c YZ |
5533 | inode = igrab(&entry->vfs_inode); |
5534 | if (inode) { | |
5535 | spin_unlock(&root->inode_lock); | |
5536 | if (atomic_read(&inode->i_count) > 1) | |
5537 | d_prune_aliases(inode); | |
5538 | /* | |
45321ac5 | 5539 | * btrfs_drop_inode will have it removed from |
76dda93c YZ |
5540 | * the inode cache when its usage count |
5541 | * hits zero. | |
5542 | */ | |
5543 | iput(inode); | |
5544 | cond_resched(); | |
5545 | spin_lock(&root->inode_lock); | |
5546 | goto again; | |
5547 | } | |
5548 | ||
5549 | if (cond_resched_lock(&root->inode_lock)) | |
5550 | goto again; | |
5551 | ||
5552 | node = rb_next(node); | |
5553 | } | |
5554 | spin_unlock(&root->inode_lock); | |
5d4f98a2 YZ |
5555 | } |
5556 | ||
e02119d5 CM |
5557 | static int btrfs_init_locked_inode(struct inode *inode, void *p) |
5558 | { | |
5559 | struct btrfs_iget_args *args = p; | |
90d3e592 CM |
5560 | inode->i_ino = args->location->objectid; |
5561 | memcpy(&BTRFS_I(inode)->location, args->location, | |
5562 | sizeof(*args->location)); | |
e02119d5 | 5563 | BTRFS_I(inode)->root = args->root; |
39279cc3 CM |
5564 | return 0; |
5565 | } | |
5566 | ||
5567 | static int btrfs_find_actor(struct inode *inode, void *opaque) | |
5568 | { | |
5569 | struct btrfs_iget_args *args = opaque; | |
90d3e592 | 5570 | return args->location->objectid == BTRFS_I(inode)->location.objectid && |
d397712b | 5571 | args->root == BTRFS_I(inode)->root; |
39279cc3 CM |
5572 | } |
5573 | ||
5d4f98a2 | 5574 | static struct inode *btrfs_iget_locked(struct super_block *s, |
90d3e592 | 5575 | struct btrfs_key *location, |
5d4f98a2 | 5576 | struct btrfs_root *root) |
39279cc3 CM |
5577 | { |
5578 | struct inode *inode; | |
5579 | struct btrfs_iget_args args; | |
90d3e592 | 5580 | unsigned long hashval = btrfs_inode_hash(location->objectid, root); |
778ba82b | 5581 | |
90d3e592 | 5582 | args.location = location; |
39279cc3 CM |
5583 | args.root = root; |
5584 | ||
778ba82b | 5585 | inode = iget5_locked(s, hashval, btrfs_find_actor, |
39279cc3 CM |
5586 | btrfs_init_locked_inode, |
5587 | (void *)&args); | |
5588 | return inode; | |
5589 | } | |
5590 | ||
1a54ef8c BR |
5591 | /* Get an inode object given its location and corresponding root. |
5592 | * Returns in *new whether the inode was read from disk | |
5593 | */ | |
5594 | struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, | |
73f73415 | 5595 | struct btrfs_root *root, int *new) |
1a54ef8c BR |
5596 | { |
5597 | struct inode *inode; | |
5598 | ||
90d3e592 | 5599 | inode = btrfs_iget_locked(s, location, root); |
1a54ef8c | 5600 | if (!inode) |
5d4f98a2 | 5601 | return ERR_PTR(-ENOMEM); |
1a54ef8c BR |
5602 | |
5603 | if (inode->i_state & I_NEW) { | |
67710892 FM |
5604 | int ret; |
5605 | ||
5606 | ret = btrfs_read_locked_inode(inode); | |
1748f843 MF |
5607 | if (!is_bad_inode(inode)) { |
5608 | inode_tree_add(inode); | |
5609 | unlock_new_inode(inode); | |
5610 | if (new) | |
5611 | *new = 1; | |
5612 | } else { | |
e0b6d65b ST |
5613 | unlock_new_inode(inode); |
5614 | iput(inode); | |
67710892 FM |
5615 | ASSERT(ret < 0); |
5616 | inode = ERR_PTR(ret < 0 ? ret : -ESTALE); | |
1748f843 MF |
5617 | } |
5618 | } | |
5619 | ||
1a54ef8c BR |
5620 | return inode; |
5621 | } | |
5622 | ||
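/*
 * Illustrative sketch (not part of the original file): a typical caller
 * builds an inode-item key and checks the result with IS_ERR(), e.g.
 *
 *	struct btrfs_key loc = {
 *		.objectid = ino,
 *		.type     = BTRFS_INODE_ITEM_KEY,
 *		.offset   = 0,
 *	};
 *	struct inode *inode = btrfs_iget(sb, &loc, root, NULL);
 *
 *	if (IS_ERR(inode))
 *		return PTR_ERR(inode);
 *
 * 'sb', 'ino' and 'root' are assumed to come from the caller's context;
 * btrfs_lookup_dentry() below uses exactly this pattern for the
 * BTRFS_INODE_ITEM_KEY case.
 */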
4df27c4d YZ |
5623 | static struct inode *new_simple_dir(struct super_block *s, |
5624 | struct btrfs_key *key, | |
5625 | struct btrfs_root *root) | |
5626 | { | |
5627 | struct inode *inode = new_inode(s); | |
5628 | ||
5629 | if (!inode) | |
5630 | return ERR_PTR(-ENOMEM); | |
5631 | ||
4df27c4d YZ |
5632 | BTRFS_I(inode)->root = root; |
5633 | memcpy(&BTRFS_I(inode)->location, key, sizeof(*key)); | |
72ac3c0d | 5634 | set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags); |
4df27c4d YZ |
5635 | |
5636 | inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID; | |
848cce0d | 5637 | inode->i_op = &btrfs_dir_ro_inode_operations; |
1fdf4194 | 5638 | inode->i_opflags &= ~IOP_XATTR; |
4df27c4d YZ |
5639 | inode->i_fop = &simple_dir_operations; |
5640 | inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO; | |
c2050a45 | 5641 | inode->i_mtime = current_time(inode); |
9cc97d64 | 5642 | inode->i_atime = inode->i_mtime; |
5643 | inode->i_ctime = inode->i_mtime; | |
5644 | BTRFS_I(inode)->i_otime = inode->i_mtime; | |
4df27c4d YZ |
5645 | |
5646 | return inode; | |
5647 | } | |
5648 | ||
3de4586c | 5649 | struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) |
39279cc3 | 5650 | { |
0b246afa | 5651 | struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); |
d397712b | 5652 | struct inode *inode; |
4df27c4d | 5653 | struct btrfs_root *root = BTRFS_I(dir)->root; |
39279cc3 CM |
5654 | struct btrfs_root *sub_root = root; |
5655 | struct btrfs_key location; | |
76dda93c | 5656 | int index; |
b4aff1f8 | 5657 | int ret = 0; |
39279cc3 CM |
5658 | |
5659 | if (dentry->d_name.len > BTRFS_NAME_LEN) | |
5660 | return ERR_PTR(-ENAMETOOLONG); | |
5f39d397 | 5661 | |
39e3c955 | 5662 | ret = btrfs_inode_by_name(dir, dentry, &location); |
39279cc3 CM |
5663 | if (ret < 0) |
5664 | return ERR_PTR(ret); | |
5f39d397 | 5665 | |
4df27c4d | 5666 | if (location.objectid == 0) |
5662344b | 5667 | return ERR_PTR(-ENOENT); |
4df27c4d YZ |
5668 | |
5669 | if (location.type == BTRFS_INODE_ITEM_KEY) { | |
73f73415 | 5670 | inode = btrfs_iget(dir->i_sb, &location, root, NULL); |
4df27c4d YZ |
5671 | return inode; |
5672 | } | |
5673 | ||
5674 | BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY); | |
5675 | ||
0b246afa | 5676 | index = srcu_read_lock(&fs_info->subvol_srcu); |
2ff7e61e | 5677 | ret = fixup_tree_root_location(fs_info, dir, dentry, |
4df27c4d YZ |
5678 | &location, &sub_root); |
5679 | if (ret < 0) { | |
5680 | if (ret != -ENOENT) | |
5681 | inode = ERR_PTR(ret); | |
5682 | else | |
5683 | inode = new_simple_dir(dir->i_sb, &location, sub_root); | |
5684 | } else { | |
73f73415 | 5685 | inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL); |
39279cc3 | 5686 | } |
0b246afa | 5687 | srcu_read_unlock(&fs_info->subvol_srcu, index); |
76dda93c | 5688 | |
34d19bad | 5689 | if (!IS_ERR(inode) && root != sub_root) { |
0b246afa | 5690 | down_read(&fs_info->cleanup_work_sem); |
c71bf099 | 5691 | if (!(inode->i_sb->s_flags & MS_RDONLY)) |
66b4ffd1 | 5692 | ret = btrfs_orphan_cleanup(sub_root); |
0b246afa | 5693 | up_read(&fs_info->cleanup_work_sem); |
01cd3367 JB |
5694 | if (ret) { |
5695 | iput(inode); | |
66b4ffd1 | 5696 | inode = ERR_PTR(ret); |
01cd3367 | 5697 | } |
c71bf099 YZ |
5698 | } |
5699 | ||
3de4586c CM |
5700 | return inode; |
5701 | } | |
5702 | ||
fe15ce44 | 5703 | static int btrfs_dentry_delete(const struct dentry *dentry) |
76dda93c YZ |
5704 | { |
5705 | struct btrfs_root *root; | |
2b0143b5 | 5706 | struct inode *inode = d_inode(dentry); |
76dda93c | 5707 | |
848cce0d | 5708 | if (!inode && !IS_ROOT(dentry)) |
2b0143b5 | 5709 | inode = d_inode(dentry->d_parent); |
76dda93c | 5710 | |
848cce0d LZ |
5711 | if (inode) { |
5712 | root = BTRFS_I(inode)->root; | |
efefb143 YZ |
5713 | if (btrfs_root_refs(&root->root_item) == 0) |
5714 | return 1; | |
848cce0d | 5715 | |
4a0cc7ca | 5716 | if (btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) |
848cce0d | 5717 | return 1; |
efefb143 | 5718 | } |
76dda93c YZ |
5719 | return 0; |
5720 | } | |
5721 | ||
b4aff1f8 JB |
5722 | static void btrfs_dentry_release(struct dentry *dentry) |
5723 | { | |
944a4515 | 5724 | kfree(dentry->d_fsdata); |
b4aff1f8 JB |
5725 | } |
5726 | ||
3de4586c | 5727 | static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry, |
00cd8dd3 | 5728 | unsigned int flags) |
3de4586c | 5729 | { |
5662344b | 5730 | struct inode *inode; |
a66e7cc6 | 5731 | |
5662344b TI |
5732 | inode = btrfs_lookup_dentry(dir, dentry); |
5733 | if (IS_ERR(inode)) { | |
5734 | if (PTR_ERR(inode) == -ENOENT) | |
5735 | inode = NULL; | |
5736 | else | |
5737 | return ERR_CAST(inode); | |
5738 | } | |
5739 | ||
41d28bca | 5740 | return d_splice_alias(inode, dentry); |
39279cc3 CM |
5741 | } |
5742 | ||
16cdcec7 | 5743 | unsigned char btrfs_filetype_table[] = { |
39279cc3 CM |
5744 | DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK |
5745 | }; | |
5746 | ||
9cdda8d3 | 5747 | static int btrfs_real_readdir(struct file *file, struct dir_context *ctx) |
39279cc3 | 5748 | { |
9cdda8d3 | 5749 | struct inode *inode = file_inode(file); |
2ff7e61e | 5750 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
39279cc3 CM |
5751 | struct btrfs_root *root = BTRFS_I(inode)->root; |
5752 | struct btrfs_item *item; | |
5753 | struct btrfs_dir_item *di; | |
5754 | struct btrfs_key key; | |
5f39d397 | 5755 | struct btrfs_key found_key; |
39279cc3 | 5756 | struct btrfs_path *path; |
16cdcec7 MX |
5757 | struct list_head ins_list; |
5758 | struct list_head del_list; | |
39279cc3 | 5759 | int ret; |
5f39d397 | 5760 | struct extent_buffer *leaf; |
39279cc3 | 5761 | int slot; |
39279cc3 CM |
5762 | unsigned char d_type; |
5763 | int over = 0; | |
5f39d397 CM |
5764 | char tmp_name[32]; |
5765 | char *name_ptr; | |
5766 | int name_len; | |
02dbfc99 | 5767 | bool put = false; |
c2951f32 | 5768 | struct btrfs_key location; |
5f39d397 | 5769 | |
9cdda8d3 AV |
5770 | if (!dir_emit_dots(file, ctx)) |
5771 | return 0; | |
5772 | ||
49593bfa | 5773 | path = btrfs_alloc_path(); |
16cdcec7 MX |
5774 | if (!path) |
5775 | return -ENOMEM; | |
ff5714cc | 5776 | |
e4058b54 | 5777 | path->reada = READA_FORWARD; |
49593bfa | 5778 | |
c2951f32 JM |
5779 | INIT_LIST_HEAD(&ins_list); |
5780 | INIT_LIST_HEAD(&del_list); | |
5781 | put = btrfs_readdir_get_delayed_items(inode, &ins_list, &del_list); | |
16cdcec7 | 5782 | |
c2951f32 | 5783 | key.type = BTRFS_DIR_INDEX_KEY; |
9cdda8d3 | 5784 | key.offset = ctx->pos; |
4a0cc7ca | 5785 | key.objectid = btrfs_ino(BTRFS_I(inode)); |
5f39d397 | 5786 | |
39279cc3 CM |
5787 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); |
5788 | if (ret < 0) | |
5789 | goto err; | |
49593bfa DW |
5790 | |
5791 | while (1) { | |
5f39d397 | 5792 | leaf = path->nodes[0]; |
39279cc3 | 5793 | slot = path->slots[0]; |
b9e03af0 LZ |
5794 | if (slot >= btrfs_header_nritems(leaf)) { |
5795 | ret = btrfs_next_leaf(root, path); | |
5796 | if (ret < 0) | |
5797 | goto err; | |
5798 | else if (ret > 0) | |
5799 | break; | |
5800 | continue; | |
39279cc3 | 5801 | } |
3de4586c | 5802 | |
dd3cc16b | 5803 | item = btrfs_item_nr(slot); |
5f39d397 CM |
5804 | btrfs_item_key_to_cpu(leaf, &found_key, slot); |
5805 | ||
5806 | if (found_key.objectid != key.objectid) | |
39279cc3 | 5807 | break; |
c2951f32 | 5808 | if (found_key.type != BTRFS_DIR_INDEX_KEY) |
39279cc3 | 5809 | break; |
9cdda8d3 | 5810 | if (found_key.offset < ctx->pos) |
b9e03af0 | 5811 | goto next; |
c2951f32 | 5812 | if (btrfs_should_delete_dir_index(&del_list, found_key.offset)) |
16cdcec7 | 5813 | goto next; |
5f39d397 | 5814 | |
9cdda8d3 | 5815 | ctx->pos = found_key.offset; |
49593bfa | 5816 | |
39279cc3 | 5817 | di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item); |
2ff7e61e | 5818 | if (verify_dir_item(fs_info, leaf, di)) |
c2951f32 | 5819 | goto next; |
22a94d44 | 5820 | |
c2951f32 JM |
5821 | name_len = btrfs_dir_name_len(leaf, di); |
5822 | if (name_len <= sizeof(tmp_name)) { | |
5823 | name_ptr = tmp_name; | |
5824 | } else { | |
5825 | name_ptr = kmalloc(name_len, GFP_KERNEL); | |
5826 | if (!name_ptr) { | |
5827 | ret = -ENOMEM; | |
5828 | goto err; | |
5f39d397 | 5829 | } |
c2951f32 JM |
5830 | } |
5831 | read_extent_buffer(leaf, name_ptr, (unsigned long)(di + 1), | |
5832 | name_len); | |
3de4586c | 5833 | |
c2951f32 JM |
5834 | d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)]; |
5835 | btrfs_dir_item_key_to_cpu(leaf, di, &location); | |
fede766f | 5836 | |
c2951f32 JM |
5837 | over = !dir_emit(ctx, name_ptr, name_len, location.objectid, |
5838 | d_type); | |
5f39d397 | 5839 | |
c2951f32 JM |
5840 | if (name_ptr != tmp_name) |
5841 | kfree(name_ptr); | |
5f39d397 | 5842 | |
c2951f32 JM |
5843 | if (over) |
5844 | goto nopos; | |
d2fbb2b5 | 5845 | ctx->pos++; |
b9e03af0 LZ |
5846 | next: |
5847 | path->slots[0]++; | |
39279cc3 | 5848 | } |
49593bfa | 5849 | |
d2fbb2b5 | 5850 | ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list); |
c2951f32 | 5851 | if (ret) |
bc4ef759 DS |
5852 | goto nopos; |
5853 | ||
db62efbb ZB |
5854 | /* |
5855 | * Stop new entries from being returned after we return the last | |
5856 | * entry. | |
5857 | * | |
5858 | * New directory entries are assigned a strictly increasing | |
5859 | * offset. This means that new entries created during readdir | |
5860 | * are *guaranteed* to be seen in the future by that readdir. | |
5861 | * This has broken buggy programs which operate on names as | |
5862 | * they're returned by readdir. Until we re-use freed offsets | |
5863 | * we have this hack to stop new entries from being returned | |
5864 | * under the assumption that they'll never reach this huge | |
5865 | * offset. | |
5866 | * | |
5867 | * This is being careful not to overflow 32bit loff_t unless the | |
5868 | * last entry requires it because doing so has broken 32bit apps | |
5869 | * in the past. | |
5870 | */ | |
c2951f32 JM |
5871 | if (ctx->pos >= INT_MAX) |
5872 | ctx->pos = LLONG_MAX; | |
5873 | else | |
5874 | ctx->pos = INT_MAX; | |
39279cc3 CM |
5875 | nopos: |
5876 | ret = 0; | |
5877 | err: | |
02dbfc99 OS |
5878 | if (put) |
5879 | btrfs_readdir_put_delayed_items(inode, &ins_list, &del_list); | |
39279cc3 | 5880 | btrfs_free_path(path); |
39279cc3 CM |
5881 | return ret; |
5882 | } | |
5883 | ||
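/*
 * Worked example (illustrative, not part of the original file): with real
 * entries at dir-index offsets 2, 3 and 7, a readdir resumed at
 * ctx->pos == 4 searches from (ino, BTRFS_DIR_INDEX_KEY, 4), so offsets 2
 * and 3 are never revisited; the entry at offset 7 is emitted, ctx->pos
 * becomes 8, and once the on-disk and delayed items are exhausted ctx->pos
 * is parked at INT_MAX (it is only bumped to LLONG_MAX once it has already
 * passed INT_MAX), as the long comment above explains.
 */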
a9185b41 | 5884 | int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc) |
39279cc3 CM |
5885 | { |
5886 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
5887 | struct btrfs_trans_handle *trans; | |
5888 | int ret = 0; | |
0af3d00b | 5889 | bool nolock = false; |
39279cc3 | 5890 | |
72ac3c0d | 5891 | if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags)) |
4ca8b41e CM |
5892 | return 0; |
5893 | ||
70ddc553 NB |
5894 | if (btrfs_fs_closing(root->fs_info) && |
5895 | btrfs_is_free_space_inode(BTRFS_I(inode))) | |
82d5902d | 5896 | nolock = true; |
0af3d00b | 5897 | |
a9185b41 | 5898 | if (wbc->sync_mode == WB_SYNC_ALL) { |
0af3d00b | 5899 | if (nolock) |
7a7eaa40 | 5900 | trans = btrfs_join_transaction_nolock(root); |
0af3d00b | 5901 | else |
7a7eaa40 | 5902 | trans = btrfs_join_transaction(root); |
3612b495 TI |
5903 | if (IS_ERR(trans)) |
5904 | return PTR_ERR(trans); | |
3a45bb20 | 5905 | ret = btrfs_commit_transaction(trans); |
39279cc3 CM |
5906 | } |
5907 | return ret; | |
5908 | } | |
5909 | ||
5910 | /* | |
54aa1f4d | 5911 | * This is somewhat expensive, updating the tree every time the |
39279cc3 CM |
5912 | * inode changes. But it is most likely to find the inode in cache. | |
5913 | * FIXME: needs more benchmarking; there is no reason other than performance | |
5914 | * to keep or drop this code. | |
5915 | */ | |
48a3b636 | 5916 | static int btrfs_dirty_inode(struct inode *inode) |
39279cc3 | 5917 | { |
2ff7e61e | 5918 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
39279cc3 CM |
5919 | struct btrfs_root *root = BTRFS_I(inode)->root; |
5920 | struct btrfs_trans_handle *trans; | |
8929ecfa YZ |
5921 | int ret; |
5922 | ||
72ac3c0d | 5923 | if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags)) |
22c44fe6 | 5924 | return 0; |
39279cc3 | 5925 | |
7a7eaa40 | 5926 | trans = btrfs_join_transaction(root); |
22c44fe6 JB |
5927 | if (IS_ERR(trans)) |
5928 | return PTR_ERR(trans); | |
8929ecfa YZ |
5929 | |
5930 | ret = btrfs_update_inode(trans, root, inode); | |
94b60442 CM |
5931 | if (ret && ret == -ENOSPC) { |
5932 | /* whoops, let's try again with the full transaction */ | |
3a45bb20 | 5933 | btrfs_end_transaction(trans); |
94b60442 | 5934 | trans = btrfs_start_transaction(root, 1); |
22c44fe6 JB |
5935 | if (IS_ERR(trans)) |
5936 | return PTR_ERR(trans); | |
8929ecfa | 5937 | |
94b60442 | 5938 | ret = btrfs_update_inode(trans, root, inode); |
94b60442 | 5939 | } |
3a45bb20 | 5940 | btrfs_end_transaction(trans); |
16cdcec7 | 5941 | if (BTRFS_I(inode)->delayed_node) |
2ff7e61e | 5942 | btrfs_balance_delayed_items(fs_info); |
22c44fe6 JB |
5943 | |
5944 | return ret; | |
5945 | } | |
5946 | ||
5947 | /* | |
5948 | * This is a copy of file_update_time. We need this so we can return an error | |
5949 | * on ENOSPC when updating the inode for file writes and mmap writes. | |
5950 | */ | |
e41f941a JB |
5951 | static int btrfs_update_time(struct inode *inode, struct timespec *now, |
5952 | int flags) | |
22c44fe6 | 5953 | { |
2bc55652 AB |
5954 | struct btrfs_root *root = BTRFS_I(inode)->root; |
5955 | ||
5956 | if (btrfs_root_readonly(root)) | |
5957 | return -EROFS; | |
5958 | ||
e41f941a | 5959 | if (flags & S_VERSION) |
22c44fe6 | 5960 | inode_inc_iversion(inode); |
e41f941a JB |
5961 | if (flags & S_CTIME) |
5962 | inode->i_ctime = *now; | |
5963 | if (flags & S_MTIME) | |
5964 | inode->i_mtime = *now; | |
5965 | if (flags & S_ATIME) | |
5966 | inode->i_atime = *now; | |
5967 | return btrfs_dirty_inode(inode); | |
39279cc3 CM |
5968 | } |
5969 | ||
d352ac68 CM |
5970 | /* |
5971 | * find the highest existing sequence number in a directory | |
5972 | * and then set the in-memory index_cnt variable to reflect | |
5973 | * free sequence numbers | |
5974 | */ | |
4c570655 | 5975 | static int btrfs_set_inode_index_count(struct btrfs_inode *inode) |
aec7477b | 5976 | { |
4c570655 | 5977 | struct btrfs_root *root = inode->root; |
aec7477b JB |
5978 | struct btrfs_key key, found_key; |
5979 | struct btrfs_path *path; | |
5980 | struct extent_buffer *leaf; | |
5981 | int ret; | |
5982 | ||
4c570655 | 5983 | key.objectid = btrfs_ino(inode); |
962a298f | 5984 | key.type = BTRFS_DIR_INDEX_KEY; |
aec7477b JB |
5985 | key.offset = (u64)-1; |
5986 | ||
5987 | path = btrfs_alloc_path(); | |
5988 | if (!path) | |
5989 | return -ENOMEM; | |
5990 | ||
5991 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | |
5992 | if (ret < 0) | |
5993 | goto out; | |
5994 | /* FIXME: we should be able to handle this */ | |
5995 | if (ret == 0) | |
5996 | goto out; | |
5997 | ret = 0; | |
5998 | ||
5999 | /* | |
6000 | * MAGIC NUMBER EXPLANATION: | |
6001 | * since we search a directory based on f_pos we have to start at 2 | |
6002 | * since '.' and '..' have f_pos of 0 and 1 respectively, so everybody | |
6003 | * else has to start at 2 | |
6004 | */ | |
6005 | if (path->slots[0] == 0) { | |
4c570655 | 6006 | inode->index_cnt = 2; |
aec7477b JB |
6007 | goto out; |
6008 | } | |
6009 | ||
6010 | path->slots[0]--; | |
6011 | ||
6012 | leaf = path->nodes[0]; | |
6013 | btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); | |
6014 | ||
4c570655 | 6015 | if (found_key.objectid != btrfs_ino(inode) || |
962a298f | 6016 | found_key.type != BTRFS_DIR_INDEX_KEY) { |
4c570655 | 6017 | inode->index_cnt = 2; |
aec7477b JB |
6018 | goto out; |
6019 | } | |
6020 | ||
4c570655 | 6021 | inode->index_cnt = found_key.offset + 1; |
aec7477b JB |
6022 | out: |
6023 | btrfs_free_path(path); | |
6024 | return ret; | |
6025 | } | |
6026 | ||
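/*
 * Worked example (illustrative, not part of the original file): in an
 * empty directory the search above finds no BTRFS_DIR_INDEX_KEY item, so
 * index_cnt starts at 2 (f_pos 0 and 1 are reserved for '.' and '..').
 * If the highest existing index item is (ino, BTRFS_DIR_INDEX_KEY, 9),
 * index_cnt becomes 10 and the next btrfs_set_inode_index() call below
 * hands out index 10.
 */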
d352ac68 CM |
6027 | /* |
6028 | * helper to find a free sequence number in a given directory. The current | |
6029 | * code is very simple; later versions will do smarter things in the btree | |
6030 | */ | |
877574e2 | 6031 | int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index) |
aec7477b JB |
6032 | { |
6033 | int ret = 0; | |
6034 | ||
877574e2 NB |
6035 | if (dir->index_cnt == (u64)-1) { |
6036 | ret = btrfs_inode_delayed_dir_index_count(dir); | |
16cdcec7 | 6037 | if (ret) { |
877574e2 | 6038 | ret = btrfs_set_inode_index_count(dir); |
16cdcec7 MX |
6039 | if (ret) |
6040 | return ret; | |
6041 | } | |
aec7477b JB |
6042 | } |
6043 | ||
877574e2 NB |
6044 | *index = dir->index_cnt; |
6045 | dir->index_cnt++; | |
aec7477b JB |
6046 | |
6047 | return ret; | |
6048 | } | |
6049 | ||
b0d5d10f CM |
6050 | static int btrfs_insert_inode_locked(struct inode *inode) |
6051 | { | |
6052 | struct btrfs_iget_args args; | |
6053 | args.location = &BTRFS_I(inode)->location; | |
6054 | args.root = BTRFS_I(inode)->root; | |
6055 | ||
6056 | return insert_inode_locked4(inode, | |
6057 | btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root), | |
6058 | btrfs_find_actor, &args); | |
6059 | } | |
6060 | ||
39279cc3 CM |
6061 | static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, |
6062 | struct btrfs_root *root, | |
aec7477b | 6063 | struct inode *dir, |
9c58309d | 6064 | const char *name, int name_len, |
175a4eb7 AV |
6065 | u64 ref_objectid, u64 objectid, |
6066 | umode_t mode, u64 *index) | |
39279cc3 | 6067 | { |
0b246afa | 6068 | struct btrfs_fs_info *fs_info = root->fs_info; |
39279cc3 | 6069 | struct inode *inode; |
5f39d397 | 6070 | struct btrfs_inode_item *inode_item; |
39279cc3 | 6071 | struct btrfs_key *location; |
5f39d397 | 6072 | struct btrfs_path *path; |
9c58309d CM |
6073 | struct btrfs_inode_ref *ref; |
6074 | struct btrfs_key key[2]; | |
6075 | u32 sizes[2]; | |
ef3b9af5 | 6076 | int nitems = name ? 2 : 1; |
9c58309d | 6077 | unsigned long ptr; |
39279cc3 | 6078 | int ret; |
39279cc3 | 6079 | |
5f39d397 | 6080 | path = btrfs_alloc_path(); |
d8926bb3 MF |
6081 | if (!path) |
6082 | return ERR_PTR(-ENOMEM); | |
5f39d397 | 6083 | |
0b246afa | 6084 | inode = new_inode(fs_info->sb); |
8fb27640 YS |
6085 | if (!inode) { |
6086 | btrfs_free_path(path); | |
39279cc3 | 6087 | return ERR_PTR(-ENOMEM); |
8fb27640 | 6088 | } |
39279cc3 | 6089 | |
5762b5c9 FM |
6090 | /* |
6091 | * O_TMPFILE, set link count to 0, so that after this point, | |
6092 | * we fill in an inode item with the correct link count. | |
6093 | */ | |
6094 | if (!name) | |
6095 | set_nlink(inode, 0); | |
6096 | ||
581bb050 LZ |
6097 | /* |
6098 | * we have to initialize this early, so we can reclaim the inode | |
6099 | * number if we fail afterwards in this function. | |
6100 | */ | |
6101 | inode->i_ino = objectid; | |
6102 | ||
ef3b9af5 | 6103 | if (dir && name) { |
1abe9b8a | 6104 | trace_btrfs_inode_request(dir); |
6105 | ||
877574e2 | 6106 | ret = btrfs_set_inode_index(BTRFS_I(dir), index); |
09771430 | 6107 | if (ret) { |
8fb27640 | 6108 | btrfs_free_path(path); |
09771430 | 6109 | iput(inode); |
aec7477b | 6110 | return ERR_PTR(ret); |
09771430 | 6111 | } |
ef3b9af5 FM |
6112 | } else if (dir) { |
6113 | *index = 0; | |
aec7477b JB |
6114 | } |
6115 | /* | |
6116 | * index_cnt is ignored for everything but a dir, | |
6117 | * btrfs_set_inode_index_count has an explanation for the magic |
6118 | * number | |
6119 | */ | |
6120 | BTRFS_I(inode)->index_cnt = 2; | |
67de1176 | 6121 | BTRFS_I(inode)->dir_index = *index; |
39279cc3 | 6122 | BTRFS_I(inode)->root = root; |
e02119d5 | 6123 | BTRFS_I(inode)->generation = trans->transid; |
76195853 | 6124 | inode->i_generation = BTRFS_I(inode)->generation; |
b888db2b | 6125 | |
5dc562c5 JB |
6126 | /* |
6127 | * We could have gotten an inode number from somebody who was fsynced | |
6128 | * and then removed in this same transaction, so let's just set full | |
6129 | * sync since it will be a full sync anyway and this will blow away the | |
6130 | * old info in the log. | |
6131 | */ | |
6132 | set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags); | |
6133 | ||
9c58309d | 6134 | key[0].objectid = objectid; |
962a298f | 6135 | key[0].type = BTRFS_INODE_ITEM_KEY; |
9c58309d CM |
6136 | key[0].offset = 0; |
6137 | ||
9c58309d | 6138 | sizes[0] = sizeof(struct btrfs_inode_item); |
ef3b9af5 FM |
6139 | |
6140 | if (name) { | |
6141 | /* | |
6142 | * Start new inodes with an inode_ref. This is slightly more | |
6143 | * efficient for small numbers of hard links since they will | |
6144 | * be packed into one item. Extended refs will kick in if we | |
6145 | * add more hard links than can fit in the ref item. | |
6146 | */ | |
6147 | key[1].objectid = objectid; | |
962a298f | 6148 | key[1].type = BTRFS_INODE_REF_KEY; |
ef3b9af5 FM |
6149 | key[1].offset = ref_objectid; |
6150 | ||
6151 | sizes[1] = name_len + sizeof(*ref); | |
6152 | } | |
9c58309d | 6153 | |
b0d5d10f CM |
6154 | location = &BTRFS_I(inode)->location; |
6155 | location->objectid = objectid; | |
6156 | location->offset = 0; | |
962a298f | 6157 | location->type = BTRFS_INODE_ITEM_KEY; |
b0d5d10f CM |
6158 | |
6159 | ret = btrfs_insert_inode_locked(inode); | |
6160 | if (ret < 0) | |
6161 | goto fail; | |
6162 | ||
b9473439 | 6163 | path->leave_spinning = 1; |
ef3b9af5 | 6164 | ret = btrfs_insert_empty_items(trans, root, path, key, sizes, nitems); |
9c58309d | 6165 | if (ret != 0) |
b0d5d10f | 6166 | goto fail_unlock; |
5f39d397 | 6167 | |
ecc11fab | 6168 | inode_init_owner(inode, dir, mode); |
a76a3cd4 | 6169 | inode_set_bytes(inode, 0); |
9cc97d64 | 6170 | |
c2050a45 | 6171 | inode->i_mtime = current_time(inode); |
9cc97d64 | 6172 | inode->i_atime = inode->i_mtime; |
6173 | inode->i_ctime = inode->i_mtime; | |
6174 | BTRFS_I(inode)->i_otime = inode->i_mtime; | |
6175 | ||
5f39d397 CM |
6176 | inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0], |
6177 | struct btrfs_inode_item); | |
b159fa28 | 6178 | memzero_extent_buffer(path->nodes[0], (unsigned long)inode_item, |
293f7e07 | 6179 | sizeof(*inode_item)); |
e02119d5 | 6180 | fill_inode_item(trans, path->nodes[0], inode_item, inode); |
9c58309d | 6181 | |
ef3b9af5 FM |
6182 | if (name) { |
6183 | ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1, | |
6184 | struct btrfs_inode_ref); | |
6185 | btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len); | |
6186 | btrfs_set_inode_ref_index(path->nodes[0], ref, *index); | |
6187 | ptr = (unsigned long)(ref + 1); | |
6188 | write_extent_buffer(path->nodes[0], name, ptr, name_len); | |
6189 | } | |
9c58309d | 6190 | |
5f39d397 CM |
6191 | btrfs_mark_buffer_dirty(path->nodes[0]); |
6192 | btrfs_free_path(path); | |
6193 | ||
6cbff00f CH |
6194 | btrfs_inherit_iflags(inode, dir); |
6195 | ||
569254b0 | 6196 | if (S_ISREG(mode)) { |
0b246afa | 6197 | if (btrfs_test_opt(fs_info, NODATASUM)) |
94272164 | 6198 | BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM; |
0b246afa | 6199 | if (btrfs_test_opt(fs_info, NODATACOW)) |
f2bdf9a8 JB |
6200 | BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW | |
6201 | BTRFS_INODE_NODATASUM; | |
94272164 CM |
6202 | } |
6203 | ||
5d4f98a2 | 6204 | inode_tree_add(inode); |
1abe9b8a | 6205 | |
6206 | trace_btrfs_inode_new(inode); | |
1973f0fa | 6207 | btrfs_set_inode_last_trans(trans, inode); |
1abe9b8a | 6208 | |
8ea05e3a AB |
6209 | btrfs_update_root_times(trans, root); |
6210 | ||
63541927 FDBM |
6211 | ret = btrfs_inode_inherit_props(trans, inode, dir); |
6212 | if (ret) | |
0b246afa | 6213 | btrfs_err(fs_info, |
63541927 | 6214 | "error inheriting props for ino %llu (root %llu): %d", |
f85b7379 | 6215 | btrfs_ino(BTRFS_I(inode)), root->root_key.objectid, ret); |
63541927 | 6216 | |
39279cc3 | 6217 | return inode; |
b0d5d10f CM |
6218 | |
6219 | fail_unlock: | |
6220 | unlock_new_inode(inode); | |
5f39d397 | 6221 | fail: |
ef3b9af5 | 6222 | if (dir && name) |
aec7477b | 6223 | BTRFS_I(dir)->index_cnt--; |
5f39d397 | 6224 | btrfs_free_path(path); |
09771430 | 6225 | iput(inode); |
5f39d397 | 6226 | return ERR_PTR(ret); |
39279cc3 CM |
6227 | } |
6228 | ||
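/*
 * Illustrative sketch (not part of the original file): for a named create,
 * btrfs_new_inode() above inserts two items back to back in one leaf:
 *
 *	key[0] = (objectid, BTRFS_INODE_ITEM_KEY, 0)             inode item
 *	key[1] = (objectid, BTRFS_INODE_REF_KEY, ref_objectid)   ref + name
 *
 * with sizes[1] = name_len + sizeof(struct btrfs_inode_ref), so short
 * names piggy-back on the ref item and extended refs only kick in when
 * additional hard links no longer fit.  An O_TMPFILE create (name == NULL)
 * inserts only key[0] and starts with nlink == 0.
 */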
6229 | static inline u8 btrfs_inode_type(struct inode *inode) | |
6230 | { | |
6231 | return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT]; | |
6232 | } | |
6233 | ||
d352ac68 CM |
6234 | /* |
6235 | * utility function to add 'inode' into 'parent_inode' with | |
6236 | * a given name and a given sequence number. | |
6237 | * if 'add_backref' is true, also insert a backref from the | |
6238 | * inode to the parent directory. | |
6239 | */ | |
e02119d5 | 6240 | int btrfs_add_link(struct btrfs_trans_handle *trans, |
db0a669f | 6241 | struct btrfs_inode *parent_inode, struct btrfs_inode *inode, |
e02119d5 | 6242 | const char *name, int name_len, int add_backref, u64 index) |
39279cc3 | 6243 | { |
db0a669f | 6244 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); |
4df27c4d | 6245 | int ret = 0; |
39279cc3 | 6246 | struct btrfs_key key; |
db0a669f NB |
6247 | struct btrfs_root *root = parent_inode->root; |
6248 | u64 ino = btrfs_ino(inode); | |
6249 | u64 parent_ino = btrfs_ino(parent_inode); | |
5f39d397 | 6250 | |
33345d01 | 6251 | if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { |
db0a669f | 6252 | memcpy(&key, &inode->root->root_key, sizeof(key)); |
4df27c4d | 6253 | } else { |
33345d01 | 6254 | key.objectid = ino; |
962a298f | 6255 | key.type = BTRFS_INODE_ITEM_KEY; |
4df27c4d YZ |
6256 | key.offset = 0; |
6257 | } | |
6258 | ||
33345d01 | 6259 | if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { |
0b246afa JM |
6260 | ret = btrfs_add_root_ref(trans, fs_info, key.objectid, |
6261 | root->root_key.objectid, parent_ino, | |
6262 | index, name, name_len); | |
4df27c4d | 6263 | } else if (add_backref) { |
33345d01 LZ |
6264 | ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino, |
6265 | parent_ino, index); | |
4df27c4d | 6266 | } |
39279cc3 | 6267 | |
79787eaa JM |
6268 | /* Nothing to clean up yet */ |
6269 | if (ret) | |
6270 | return ret; | |
4df27c4d | 6271 | |
79787eaa | 6272 | ret = btrfs_insert_dir_item(trans, root, name, name_len, |
db0a669f NB |
6273 | parent_inode, &key, |
6274 | btrfs_inode_type(&inode->vfs_inode), index); | |
9c52057c | 6275 | if (ret == -EEXIST || ret == -EOVERFLOW) |
79787eaa JM |
6276 | goto fail_dir_item; |
6277 | else if (ret) { | |
66642832 | 6278 | btrfs_abort_transaction(trans, ret); |
79787eaa | 6279 | return ret; |
39279cc3 | 6280 | } |
79787eaa | 6281 | |
db0a669f | 6282 | btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size + |
79787eaa | 6283 | name_len * 2); |
db0a669f NB |
6284 | inode_inc_iversion(&parent_inode->vfs_inode); |
6285 | parent_inode->vfs_inode.i_mtime = parent_inode->vfs_inode.i_ctime = | |
6286 | current_time(&parent_inode->vfs_inode); | |
6287 | ret = btrfs_update_inode(trans, root, &parent_inode->vfs_inode); | |
79787eaa | 6288 | if (ret) |
66642832 | 6289 | btrfs_abort_transaction(trans, ret); |
39279cc3 | 6290 | return ret; |
fe66a05a CM |
6291 | |
6292 | fail_dir_item: | |
6293 | if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { | |
6294 | u64 local_index; | |
6295 | int err; | |
0b246afa JM |
6296 | err = btrfs_del_root_ref(trans, fs_info, key.objectid, |
6297 | root->root_key.objectid, parent_ino, | |
6298 | &local_index, name, name_len); | |
fe66a05a CM |
6299 | |
6300 | } else if (add_backref) { | |
6301 | u64 local_index; | |
6302 | int err; | |
6303 | ||
6304 | err = btrfs_del_inode_ref(trans, root, name, name_len, | |
6305 | ino, parent_ino, &local_index); | |
6306 | } | |
6307 | return ret; | |
39279cc3 CM |
6308 | } |
6309 | ||
6310 | static int btrfs_add_nondir(struct btrfs_trans_handle *trans, | |
cef415af NB |
6311 | struct btrfs_inode *dir, struct dentry *dentry, |
6312 | struct btrfs_inode *inode, int backref, u64 index) | |
39279cc3 | 6313 | { |
cef415af | 6314 | int err = btrfs_add_link(trans, dir, inode, |
a1b075d2 JB |
6315 | dentry->d_name.name, dentry->d_name.len, |
6316 | backref, index); | |
39279cc3 CM |
6317 | if (err > 0) |
6318 | err = -EEXIST; | |
6319 | return err; | |
6320 | } | |
6321 | ||
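/*
 * Illustrative sketch (not part of the original file): linking a regular
 * file "foo" into directory 'dir' at index 42 (i.e. not the subvolume-root
 * case) makes btrfs_add_link() insert roughly the following items in the
 * subvolume tree:
 *
 *	(ino(foo), BTRFS_INODE_REF_KEY, ino(dir))      back reference
 *	(ino(dir), BTRFS_DIR_ITEM_KEY,  hash("foo"))   lookup by name
 *	(ino(dir), BTRFS_DIR_INDEX_KEY, 42)            readdir ordering
 *
 * (the last two via btrfs_insert_dir_item()), and it bumps the directory's
 * i_size by 2 * strlen("foo") as done above.
 */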
618e21d5 | 6322 | static int btrfs_mknod(struct inode *dir, struct dentry *dentry, |
1a67aafb | 6323 | umode_t mode, dev_t rdev) |
618e21d5 | 6324 | { |
2ff7e61e | 6325 | struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); |
618e21d5 JB |
6326 | struct btrfs_trans_handle *trans; |
6327 | struct btrfs_root *root = BTRFS_I(dir)->root; | |
1832a6d5 | 6328 | struct inode *inode = NULL; |
618e21d5 JB |
6329 | int err; |
6330 | int drop_inode = 0; | |
6331 | u64 objectid; | |
00e4e6b3 | 6332 | u64 index = 0; |
618e21d5 | 6333 | |
9ed74f2d JB |
6334 | /* |
6335 | * 2 for inode item and ref | |
6336 | * 2 for dir items | |
6337 | * 1 for xattr if selinux is on | |
6338 | */ | |
a22285a6 YZ |
6339 | trans = btrfs_start_transaction(root, 5); |
6340 | if (IS_ERR(trans)) | |
6341 | return PTR_ERR(trans); | |
1832a6d5 | 6342 | |
581bb050 LZ |
6343 | err = btrfs_find_free_ino(root, &objectid); |
6344 | if (err) | |
6345 | goto out_unlock; | |
6346 | ||
aec7477b | 6347 | inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, |
f85b7379 DS |
6348 | dentry->d_name.len, btrfs_ino(BTRFS_I(dir)), objectid, |
6349 | mode, &index); | |
7cf96da3 TI |
6350 | if (IS_ERR(inode)) { |
6351 | err = PTR_ERR(inode); | |
618e21d5 | 6352 | goto out_unlock; |
7cf96da3 | 6353 | } |
618e21d5 | 6354 | |
ad19db71 CS |
6355 | /* |
6356 | * If the active LSM wants to access the inode during | |
6357 | * d_instantiate it needs these. Smack checks to see | |
6358 | * if the filesystem supports xattrs by looking at the | |
6359 | * ops vector. | |
6360 | */ | |
ad19db71 | 6361 | inode->i_op = &btrfs_special_inode_operations; |
b0d5d10f CM |
6362 | init_special_inode(inode, inode->i_mode, rdev); |
6363 | ||
6364 | err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); | |
618e21d5 | 6365 | if (err) |
b0d5d10f CM |
6366 | goto out_unlock_inode; |
6367 | ||
cef415af NB |
6368 | err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, BTRFS_I(inode), |
6369 | 0, index); | |
b0d5d10f CM |
6370 | if (err) { |
6371 | goto out_unlock_inode; | |
6372 | } else { | |
1b4ab1bb | 6373 | btrfs_update_inode(trans, root, inode); |
b0d5d10f | 6374 | unlock_new_inode(inode); |
08c422c2 | 6375 | d_instantiate(dentry, inode); |
618e21d5 | 6376 | } |
b0d5d10f | 6377 | |
618e21d5 | 6378 | out_unlock: |
3a45bb20 | 6379 | btrfs_end_transaction(trans); |
2ff7e61e JM |
6380 | btrfs_balance_delayed_items(fs_info); |
6381 | btrfs_btree_balance_dirty(fs_info); | |
618e21d5 JB |
6382 | if (drop_inode) { |
6383 | inode_dec_link_count(inode); | |
6384 | iput(inode); | |
6385 | } | |
618e21d5 | 6386 | return err; |
b0d5d10f CM |
6387 | |
6388 | out_unlock_inode: | |
6389 | drop_inode = 1; | |
6390 | unlock_new_inode(inode); | |
6391 | goto out_unlock; | |
6392 | ||
618e21d5 JB |
6393 | } |
6394 | ||
39279cc3 | 6395 | static int btrfs_create(struct inode *dir, struct dentry *dentry, |
ebfc3b49 | 6396 | umode_t mode, bool excl) |
39279cc3 | 6397 | { |
2ff7e61e | 6398 | struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); |
39279cc3 CM |
6399 | struct btrfs_trans_handle *trans; |
6400 | struct btrfs_root *root = BTRFS_I(dir)->root; | |
1832a6d5 | 6401 | struct inode *inode = NULL; |
43baa579 | 6402 | int drop_inode_on_err = 0; |
a22285a6 | 6403 | int err; |
39279cc3 | 6404 | u64 objectid; |
00e4e6b3 | 6405 | u64 index = 0; |
39279cc3 | 6406 | |
9ed74f2d JB |
6407 | /* |
6408 | * 2 for inode item and ref | |
6409 | * 2 for dir items | |
6410 | * 1 for xattr if selinux is on | |
6411 | */ | |
a22285a6 YZ |
6412 | trans = btrfs_start_transaction(root, 5); |
6413 | if (IS_ERR(trans)) | |
6414 | return PTR_ERR(trans); | |
9ed74f2d | 6415 | |
581bb050 LZ |
6416 | err = btrfs_find_free_ino(root, &objectid); |
6417 | if (err) | |
6418 | goto out_unlock; | |
6419 | ||
aec7477b | 6420 | inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, |
f85b7379 DS |
6421 | dentry->d_name.len, btrfs_ino(BTRFS_I(dir)), objectid, |
6422 | mode, &index); | |
7cf96da3 TI |
6423 | if (IS_ERR(inode)) { |
6424 | err = PTR_ERR(inode); | |
39279cc3 | 6425 | goto out_unlock; |
7cf96da3 | 6426 | } |
43baa579 | 6427 | drop_inode_on_err = 1; |
ad19db71 CS |
6428 | /* |
6429 | * If the active LSM wants to access the inode during | |
6430 | * d_instantiate it needs these. Smack checks to see | |
6431 | * if the filesystem supports xattrs by looking at the | |
6432 | * ops vector. | |
6433 | */ | |
6434 | inode->i_fop = &btrfs_file_operations; | |
6435 | inode->i_op = &btrfs_file_inode_operations; | |
b0d5d10f | 6436 | inode->i_mapping->a_ops = &btrfs_aops; |
b0d5d10f CM |
6437 | |
6438 | err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); | |
6439 | if (err) | |
6440 | goto out_unlock_inode; | |
6441 | ||
6442 | err = btrfs_update_inode(trans, root, inode); | |
6443 | if (err) | |
6444 | goto out_unlock_inode; | |
ad19db71 | 6445 | |
cef415af NB |
6446 | err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, BTRFS_I(inode), |
6447 | 0, index); | |
39279cc3 | 6448 | if (err) |
b0d5d10f | 6449 | goto out_unlock_inode; |
43baa579 | 6450 | |
43baa579 | 6451 | BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; |
b0d5d10f | 6452 | unlock_new_inode(inode); |
43baa579 FB |
6453 | d_instantiate(dentry, inode); |
6454 | ||
39279cc3 | 6455 | out_unlock: |
3a45bb20 | 6456 | btrfs_end_transaction(trans); |
43baa579 | 6457 | if (err && drop_inode_on_err) { |
39279cc3 CM |
6458 | inode_dec_link_count(inode); |
6459 | iput(inode); | |
6460 | } | |
2ff7e61e JM |
6461 | btrfs_balance_delayed_items(fs_info); |
6462 | btrfs_btree_balance_dirty(fs_info); | |
39279cc3 | 6463 | return err; |
b0d5d10f CM |
6464 | |
6465 | out_unlock_inode: | |
6466 | unlock_new_inode(inode); | |
6467 | goto out_unlock; | |
6468 | ||
39279cc3 CM |
6469 | } |
6470 | ||
6471 | static int btrfs_link(struct dentry *old_dentry, struct inode *dir, | |
6472 | struct dentry *dentry) | |
6473 | { | |
271dba45 | 6474 | struct btrfs_trans_handle *trans = NULL; |
39279cc3 | 6475 | struct btrfs_root *root = BTRFS_I(dir)->root; |
2b0143b5 | 6476 | struct inode *inode = d_inode(old_dentry); |
2ff7e61e | 6477 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
00e4e6b3 | 6478 | u64 index; |
39279cc3 CM |
6479 | int err; |
6480 | int drop_inode = 0; | |
6481 | ||
4a8be425 TH |
6482 | /* do not allow sys_link's with other subvols of the same device */ |
6483 | if (root->objectid != BTRFS_I(inode)->root->objectid) | |
3ab3564f | 6484 | return -EXDEV; |
4a8be425 | 6485 | |
f186373f | 6486 | if (inode->i_nlink >= BTRFS_LINK_MAX) |
c055e99e | 6487 | return -EMLINK; |
4a8be425 | 6488 | |
877574e2 | 6489 | err = btrfs_set_inode_index(BTRFS_I(dir), &index); |
aec7477b JB |
6490 | if (err) |
6491 | goto fail; | |
6492 | ||
a22285a6 | 6493 | /* |
7e6b6465 | 6494 | * 2 items for inode and inode ref |
a22285a6 | 6495 | * 2 items for dir items |
7e6b6465 | 6496 | * 1 item for parent inode |
a22285a6 | 6497 | */ |
7e6b6465 | 6498 | trans = btrfs_start_transaction(root, 5); |
a22285a6 YZ |
6499 | if (IS_ERR(trans)) { |
6500 | err = PTR_ERR(trans); | |
271dba45 | 6501 | trans = NULL; |
a22285a6 YZ |
6502 | goto fail; |
6503 | } | |
5f39d397 | 6504 | |
67de1176 MX |
6505 | /* There are several dir indexes for this inode, clear the cache. */ |
6506 | BTRFS_I(inode)->dir_index = 0ULL; | |
8b558c5f | 6507 | inc_nlink(inode); |
0c4d2d95 | 6508 | inode_inc_iversion(inode); |
c2050a45 | 6509 | inode->i_ctime = current_time(inode); |
7de9c6ee | 6510 | ihold(inode); |
e9976151 | 6511 | set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags); |
aec7477b | 6512 | |
cef415af NB |
6513 | err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, BTRFS_I(inode), |
6514 | 1, index); | |
5f39d397 | 6515 | |
a5719521 | 6516 | if (err) { |
54aa1f4d | 6517 | drop_inode = 1; |
a5719521 | 6518 | } else { |
10d9f309 | 6519 | struct dentry *parent = dentry->d_parent; |
a5719521 | 6520 | err = btrfs_update_inode(trans, root, inode); |
79787eaa JM |
6521 | if (err) |
6522 | goto fail; | |
ef3b9af5 FM |
6523 | if (inode->i_nlink == 1) { |
6524 | /* | |
6525 | * If new hard link count is 1, it's a file created | |
6526 | * with open(2) O_TMPFILE flag. | |
6527 | */ | |
3d6ae7bb | 6528 | err = btrfs_orphan_del(trans, BTRFS_I(inode)); |
ef3b9af5 FM |
6529 | if (err) |
6530 | goto fail; | |
6531 | } | |
08c422c2 | 6532 | d_instantiate(dentry, inode); |
9ca5fbfb | 6533 | btrfs_log_new_name(trans, BTRFS_I(inode), NULL, parent); |
a5719521 | 6534 | } |
39279cc3 | 6535 | |
2ff7e61e | 6536 | btrfs_balance_delayed_items(fs_info); |
1832a6d5 | 6537 | fail: |
271dba45 | 6538 | if (trans) |
3a45bb20 | 6539 | btrfs_end_transaction(trans); |
39279cc3 CM |
6540 | if (drop_inode) { |
6541 | inode_dec_link_count(inode); | |
6542 | iput(inode); | |
6543 | } | |
2ff7e61e | 6544 | btrfs_btree_balance_dirty(fs_info); |
39279cc3 CM |
6545 | return err; |
6546 | } | |
6547 | ||
18bb1db3 | 6548 | static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) |
39279cc3 | 6549 | { |
2ff7e61e | 6550 | struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); |
b9d86667 | 6551 | struct inode *inode = NULL; |
39279cc3 CM |
6552 | struct btrfs_trans_handle *trans; |
6553 | struct btrfs_root *root = BTRFS_I(dir)->root; | |
6554 | int err = 0; | |
6555 | int drop_on_err = 0; | |
b9d86667 | 6556 | u64 objectid = 0; |
00e4e6b3 | 6557 | u64 index = 0; |
39279cc3 | 6558 | |
9ed74f2d JB |
6559 | /* |
6560 | * 2 items for inode and ref | |
6561 | * 2 items for dir items | |
6562 | * 1 for xattr if selinux is on | |
6563 | */ | |
a22285a6 YZ |
6564 | trans = btrfs_start_transaction(root, 5); |
6565 | if (IS_ERR(trans)) | |
6566 | return PTR_ERR(trans); | |
39279cc3 | 6567 | |
581bb050 LZ |
6568 | err = btrfs_find_free_ino(root, &objectid); |
6569 | if (err) | |
6570 | goto out_fail; | |
6571 | ||
aec7477b | 6572 | inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, |
f85b7379 DS |
6573 | dentry->d_name.len, btrfs_ino(BTRFS_I(dir)), objectid, |
6574 | S_IFDIR | mode, &index); | |
39279cc3 CM |
6575 | if (IS_ERR(inode)) { |
6576 | err = PTR_ERR(inode); | |
6577 | goto out_fail; | |
6578 | } | |
5f39d397 | 6579 | |
39279cc3 | 6580 | drop_on_err = 1; |
b0d5d10f CM |
6581 | /* these must be set before we unlock the inode */ |
6582 | inode->i_op = &btrfs_dir_inode_operations; | |
6583 | inode->i_fop = &btrfs_dir_file_operations; | |
33268eaf | 6584 | |
2a7dba39 | 6585 | err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); |
33268eaf | 6586 | if (err) |
b0d5d10f | 6587 | goto out_fail_inode; |
39279cc3 | 6588 | |
6ef06d27 | 6589 | btrfs_i_size_write(BTRFS_I(inode), 0); |
39279cc3 CM |
6590 | err = btrfs_update_inode(trans, root, inode); |
6591 | if (err) | |
b0d5d10f | 6592 | goto out_fail_inode; |
5f39d397 | 6593 | |
db0a669f NB |
6594 | err = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), |
6595 | dentry->d_name.name, | |
6596 | dentry->d_name.len, 0, index); | |
39279cc3 | 6597 | if (err) |
b0d5d10f | 6598 | goto out_fail_inode; |
5f39d397 | 6599 | |
39279cc3 | 6600 | d_instantiate(dentry, inode); |
b0d5d10f CM |
6601 | /* |
6602 | * mkdir is special. We're unlocking after we call d_instantiate | |
6603 | * to avoid a race with nfsd calling d_instantiate. | |
6604 | */ | |
6605 | unlock_new_inode(inode); | |
39279cc3 | 6606 | drop_on_err = 0; |
39279cc3 CM |
6607 | |
6608 | out_fail: | |
3a45bb20 | 6609 | btrfs_end_transaction(trans); |
c7cfb8a5 WS |
6610 | if (drop_on_err) { |
6611 | inode_dec_link_count(inode); | |
39279cc3 | 6612 | iput(inode); |
c7cfb8a5 | 6613 | } |
2ff7e61e JM |
6614 | btrfs_balance_delayed_items(fs_info); |
6615 | btrfs_btree_balance_dirty(fs_info); | |
39279cc3 | 6616 | return err; |
b0d5d10f CM |
6617 | |
6618 | out_fail_inode: | |
6619 | unlock_new_inode(inode); | |
6620 | goto out_fail; | |
39279cc3 CM |
6621 | } |
6622 | ||
e6c4efd8 QW |
6623 | /* Find the next extent map after the given one; the caller must hold the extent map tree lock */ | |
6624 | static struct extent_map *next_extent_map(struct extent_map *em) | |
6625 | { | |
6626 | struct rb_node *next; | |
6627 | ||
6628 | next = rb_next(&em->rb_node); | |
6629 | if (!next) | |
6630 | return NULL; | |
6631 | return container_of(next, struct extent_map, rb_node); | |
6632 | } | |
6633 | ||
6634 | static struct extent_map *prev_extent_map(struct extent_map *em) | |
6635 | { | |
6636 | struct rb_node *prev; | |
6637 | ||
6638 | prev = rb_prev(&em->rb_node); | |
6639 | if (!prev) | |
6640 | return NULL; | |
6641 | return container_of(prev, struct extent_map, rb_node); | |
6642 | } | |
6643 | ||
d352ac68 | 6644 | /* helper for btrfs_get_extent. Given an existing extent in the tree |
e6c4efd8 | 6645 | * (the one nearest to map_start) and a new extent we want to insert, |
d352ac68 | 6646 | * deal with any overlap and insert the best-fitting trimmed extent |
e6c4efd8 | 6647 | * into the tree. |
d352ac68 | 6648 | */ |
3b951516 CM |
6649 | static int merge_extent_mapping(struct extent_map_tree *em_tree, |
6650 | struct extent_map *existing, | |
e6dcd2dc | 6651 | struct extent_map *em, |
51f395ad | 6652 | u64 map_start) |
3b951516 | 6653 | { |
e6c4efd8 QW |
6654 | struct extent_map *prev; |
6655 | struct extent_map *next; | |
6656 | u64 start; | |
6657 | u64 end; | |
3b951516 | 6658 | u64 start_diff; |
3b951516 | 6659 | |
e6dcd2dc | 6660 | BUG_ON(map_start < em->start || map_start >= extent_map_end(em)); |
e6c4efd8 QW |
6661 | |
6662 | if (existing->start > map_start) { | |
6663 | next = existing; | |
6664 | prev = prev_extent_map(next); | |
6665 | } else { | |
6666 | prev = existing; | |
6667 | next = next_extent_map(prev); | |
6668 | } | |
6669 | ||
6670 | start = prev ? extent_map_end(prev) : em->start; | |
6671 | start = max_t(u64, start, em->start); | |
6672 | end = next ? next->start : extent_map_end(em); | |
6673 | end = min_t(u64, end, extent_map_end(em)); | |
6674 | start_diff = start - em->start; | |
6675 | em->start = start; | |
6676 | em->len = end - start; | |
c8b97818 CM |
6677 | if (em->block_start < EXTENT_MAP_LAST_BYTE && |
6678 | !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) { | |
e6dcd2dc | 6679 | em->block_start += start_diff; |
c8b97818 CM |
6680 | em->block_len -= start_diff; |
6681 | } | |
09a2a8f9 | 6682 | return add_extent_mapping(em_tree, em, 0); |
3b951516 CM |
6683 | } |
6684 | ||
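/*
 * Worked example (illustrative, not part of the original file): the new em
 * covers [0, 16K) but an existing map already covers [4K, 8K) and
 * map_start is 8K.  'existing' becomes prev, start = 8K (end of prev),
 * end = 16K (there is no next map), so the new em is trimmed to [8K, 16K)
 * and, for a regular uncompressed extent, block_start is advanced by
 * start_diff = 8K before the trimmed map is inserted.
 */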
c8b97818 | 6685 | static noinline int uncompress_inline(struct btrfs_path *path, |
e40da0e5 | 6686 | struct page *page, |
c8b97818 CM |
6687 | size_t pg_offset, u64 extent_offset, |
6688 | struct btrfs_file_extent_item *item) | |
6689 | { | |
6690 | int ret; | |
6691 | struct extent_buffer *leaf = path->nodes[0]; | |
6692 | char *tmp; | |
6693 | size_t max_size; | |
6694 | unsigned long inline_size; | |
6695 | unsigned long ptr; | |
261507a0 | 6696 | int compress_type; |
c8b97818 CM |
6697 | |
6698 | WARN_ON(pg_offset != 0); | |
261507a0 | 6699 | compress_type = btrfs_file_extent_compression(leaf, item); |
c8b97818 CM |
6700 | max_size = btrfs_file_extent_ram_bytes(leaf, item); |
6701 | inline_size = btrfs_file_extent_inline_item_len(leaf, | |
dd3cc16b | 6702 | btrfs_item_nr(path->slots[0])); |
c8b97818 | 6703 | tmp = kmalloc(inline_size, GFP_NOFS); |
8d413713 TI |
6704 | if (!tmp) |
6705 | return -ENOMEM; | |
c8b97818 CM |
6706 | ptr = btrfs_file_extent_inline_start(item); |
6707 | ||
6708 | read_extent_buffer(leaf, tmp, ptr, inline_size); | |
6709 | ||
09cbfeaf | 6710 | max_size = min_t(unsigned long, PAGE_SIZE, max_size); |
261507a0 LZ |
6711 | ret = btrfs_decompress(compress_type, tmp, page, |
6712 | extent_offset, inline_size, max_size); | |
c8b97818 | 6713 | kfree(tmp); |
166ae5a4 | 6714 | return ret; |
c8b97818 CM |
6715 | } |
6716 | ||
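/*
 * Worked example (illustrative, not part of the original file): an inline
 * extent holding 3000 bytes of file data compressed down to 800 bytes has
 * ram_bytes == 3000 and an inline item length of 800; with 4K pages the
 * call above decompresses at most min(PAGE_SIZE, 3000) bytes of it into
 * the page, starting at extent_offset.
 */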
d352ac68 CM |
6717 | /* |
6718 | * a bit scary, this does extent mapping from logical file offset to the disk. | |
d397712b CM |
6719 | * the ugly parts come from merging extents from the disk with the in-ram |
6720 | * representation. This gets more complex because of the data=ordered code, | |
d352ac68 CM |
6721 | * where the in-ram extents might be locked pending data=ordered completion. |
6722 | * | |
6723 | * This also copies inline extents directly into the page. | |
6724 | */ | |
d397712b | 6725 | |
fc4f21b1 NB |
6726 | struct extent_map *btrfs_get_extent(struct btrfs_inode *inode, |
6727 | struct page *page, | |
6728 | size_t pg_offset, u64 start, u64 len, | |
6729 | int create) | |
a52d9a80 | 6730 | { |
fc4f21b1 | 6731 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); |
a52d9a80 CM |
6732 | int ret; |
6733 | int err = 0; | |
a52d9a80 CM |
6734 | u64 extent_start = 0; |
6735 | u64 extent_end = 0; | |
fc4f21b1 | 6736 | u64 objectid = btrfs_ino(inode); |
a52d9a80 | 6737 | u32 found_type; |
f421950f | 6738 | struct btrfs_path *path = NULL; |
fc4f21b1 | 6739 | struct btrfs_root *root = inode->root; |
a52d9a80 | 6740 | struct btrfs_file_extent_item *item; |
5f39d397 CM |
6741 | struct extent_buffer *leaf; |
6742 | struct btrfs_key found_key; | |
a52d9a80 | 6743 | struct extent_map *em = NULL; |
fc4f21b1 NB |
6744 | struct extent_map_tree *em_tree = &inode->extent_tree; |
6745 | struct extent_io_tree *io_tree = &inode->io_tree; | |
a52d9a80 | 6746 | struct btrfs_trans_handle *trans = NULL; |
7ffbb598 | 6747 | const bool new_inline = !page || create; |
a52d9a80 | 6748 | |
a52d9a80 | 6749 | again: |
890871be | 6750 | read_lock(&em_tree->lock); |
d1310b2e | 6751 | em = lookup_extent_mapping(em_tree, start, len); |
a061fc8d | 6752 | if (em) |
0b246afa | 6753 | em->bdev = fs_info->fs_devices->latest_bdev; |
890871be | 6754 | read_unlock(&em_tree->lock); |
d1310b2e | 6755 | |
a52d9a80 | 6756 | if (em) { |
e1c4b745 CM |
6757 | if (em->start > start || em->start + em->len <= start) |
6758 | free_extent_map(em); | |
6759 | else if (em->block_start == EXTENT_MAP_INLINE && page) | |
70dec807 CM |
6760 | free_extent_map(em); |
6761 | else | |
6762 | goto out; | |
a52d9a80 | 6763 | } |
172ddd60 | 6764 | em = alloc_extent_map(); |
a52d9a80 | 6765 | if (!em) { |
d1310b2e CM |
6766 | err = -ENOMEM; |
6767 | goto out; | |
a52d9a80 | 6768 | } |
0b246afa | 6769 | em->bdev = fs_info->fs_devices->latest_bdev; |
d1310b2e | 6770 | em->start = EXTENT_MAP_HOLE; |
445a6944 | 6771 | em->orig_start = EXTENT_MAP_HOLE; |
d1310b2e | 6772 | em->len = (u64)-1; |
c8b97818 | 6773 | em->block_len = (u64)-1; |
f421950f CM |
6774 | |
6775 | if (!path) { | |
6776 | path = btrfs_alloc_path(); | |
026fd317 JB |
6777 | if (!path) { |
6778 | err = -ENOMEM; | |
6779 | goto out; | |
6780 | } | |
6781 | /* | |
6782 | * Chances are we'll be called again, so go ahead and do | |
6783 | * readahead | |
6784 | */ | |
e4058b54 | 6785 | path->reada = READA_FORWARD; |
f421950f CM |
6786 | } |
6787 | ||
179e29e4 CM |
6788 | ret = btrfs_lookup_file_extent(trans, root, path, |
6789 | objectid, start, trans != NULL); | |
a52d9a80 CM |
6790 | if (ret < 0) { |
6791 | err = ret; | |
6792 | goto out; | |
6793 | } | |
6794 | ||
6795 | if (ret != 0) { | |
6796 | if (path->slots[0] == 0) | |
6797 | goto not_found; | |
6798 | path->slots[0]--; | |
6799 | } | |
6800 | ||
5f39d397 CM |
6801 | leaf = path->nodes[0]; |
6802 | item = btrfs_item_ptr(leaf, path->slots[0], | |
a52d9a80 | 6803 | struct btrfs_file_extent_item); |
a52d9a80 | 6804 | /* are we inside the extent that was found? */ |
5f39d397 | 6805 | btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); |
962a298f | 6806 | found_type = found_key.type; |
5f39d397 | 6807 | if (found_key.objectid != objectid || |
a52d9a80 | 6808 | found_type != BTRFS_EXTENT_DATA_KEY) { |
25a50341 JB |
6809 | /* |
6810 | * If we backup past the first extent we want to move forward | |
6811 | * and see if there is an extent in front of us, otherwise we'll | |
6812 | * say there is a hole for our whole search range which can | |
6813 | * cause problems. | |
6814 | */ | |
6815 | extent_end = start; | |
6816 | goto next; | |
a52d9a80 CM |
6817 | } |
6818 | ||
5f39d397 CM |
6819 | found_type = btrfs_file_extent_type(leaf, item); |
6820 | extent_start = found_key.offset; | |
d899e052 YZ |
6821 | if (found_type == BTRFS_FILE_EXTENT_REG || |
6822 | found_type == BTRFS_FILE_EXTENT_PREALLOC) { | |
a52d9a80 | 6823 | extent_end = extent_start + |
db94535d | 6824 | btrfs_file_extent_num_bytes(leaf, item); |
9036c102 YZ |
6825 | } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { |
6826 | size_t size; | |
514ac8ad | 6827 | size = btrfs_file_extent_inline_len(leaf, path->slots[0], item); |
da17066c | 6828 | extent_end = ALIGN(extent_start + size, |
0b246afa | 6829 | fs_info->sectorsize); |
9036c102 | 6830 | } |
25a50341 | 6831 | next: |
9036c102 YZ |
6832 | if (start >= extent_end) { |
6833 | path->slots[0]++; | |
6834 | if (path->slots[0] >= btrfs_header_nritems(leaf)) { | |
6835 | ret = btrfs_next_leaf(root, path); | |
6836 | if (ret < 0) { | |
6837 | err = ret; | |
6838 | goto out; | |
a52d9a80 | 6839 | } |
9036c102 YZ |
6840 | if (ret > 0) |
6841 | goto not_found; | |
6842 | leaf = path->nodes[0]; | |
a52d9a80 | 6843 | } |
9036c102 YZ |
6844 | btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); |
6845 | if (found_key.objectid != objectid || | |
6846 | found_key.type != BTRFS_EXTENT_DATA_KEY) | |
6847 | goto not_found; | |
6848 | if (start + len <= found_key.offset) | |
6849 | goto not_found; | |
e2eca69d WS |
6850 | if (start > found_key.offset) |
6851 | goto next; | |
9036c102 | 6852 | em->start = start; |
70c8a91c | 6853 | em->orig_start = start; |
9036c102 YZ |
6854 | em->len = found_key.offset - start; |
6855 | goto not_found_em; | |
6856 | } | |
6857 | ||
fc4f21b1 | 6858 | btrfs_extent_item_to_extent_map(inode, path, item, |
9cdc5124 | 6859 | new_inline, em); |
7ffbb598 | 6860 | |
d899e052 YZ |
6861 | if (found_type == BTRFS_FILE_EXTENT_REG || |
6862 | found_type == BTRFS_FILE_EXTENT_PREALLOC) { | |
a52d9a80 CM |
6863 | goto insert; |
6864 | } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { | |
5f39d397 | 6865 | unsigned long ptr; |
a52d9a80 | 6866 | char *map; |
3326d1b0 CM |
6867 | size_t size; |
6868 | size_t extent_offset; | |
6869 | size_t copy_size; | |
a52d9a80 | 6870 | |
7ffbb598 | 6871 | if (new_inline) |
689f9346 | 6872 | goto out; |
5f39d397 | 6873 | |
514ac8ad | 6874 | size = btrfs_file_extent_inline_len(leaf, path->slots[0], item); |
9036c102 | 6875 | extent_offset = page_offset(page) + pg_offset - extent_start; |
09cbfeaf KS |
6876 | copy_size = min_t(u64, PAGE_SIZE - pg_offset, |
6877 | size - extent_offset); | |
3326d1b0 | 6878 | em->start = extent_start + extent_offset; |
0b246afa | 6879 | em->len = ALIGN(copy_size, fs_info->sectorsize); |
b4939680 | 6880 | em->orig_block_len = em->len; |
70c8a91c | 6881 | em->orig_start = em->start; |
689f9346 | 6882 | ptr = btrfs_file_extent_inline_start(item) + extent_offset; |
179e29e4 | 6883 | if (create == 0 && !PageUptodate(page)) { |
261507a0 LZ |
6884 | if (btrfs_file_extent_compression(leaf, item) != |
6885 | BTRFS_COMPRESS_NONE) { | |
e40da0e5 | 6886 | ret = uncompress_inline(path, page, pg_offset, |
c8b97818 | 6887 | extent_offset, item); |
166ae5a4 ZB |
6888 | if (ret) { |
6889 | err = ret; | |
6890 | goto out; | |
6891 | } | |
c8b97818 CM |
6892 | } else { |
6893 | map = kmap(page); | |
6894 | read_extent_buffer(leaf, map + pg_offset, ptr, | |
6895 | copy_size); | |
09cbfeaf | 6896 | if (pg_offset + copy_size < PAGE_SIZE) { |
93c82d57 | 6897 | memset(map + pg_offset + copy_size, 0, |
09cbfeaf | 6898 | PAGE_SIZE - pg_offset - |
93c82d57 CM |
6899 | copy_size); |
6900 | } | |
c8b97818 CM |
6901 | kunmap(page); |
6902 | } | |
179e29e4 CM |
6903 | flush_dcache_page(page); |
6904 | } else if (create && PageUptodate(page)) { | |
6bf7e080 | 6905 | BUG(); |
179e29e4 CM |
6906 | if (!trans) { |
6907 | kunmap(page); | |
6908 | free_extent_map(em); | |
6909 | em = NULL; | |
ff5714cc | 6910 | |
b3b4aa74 | 6911 | btrfs_release_path(path); |
7a7eaa40 | 6912 | trans = btrfs_join_transaction(root); |
ff5714cc | 6913 | |
3612b495 TI |
6914 | if (IS_ERR(trans)) |
6915 | return ERR_CAST(trans); | |
179e29e4 CM |
6916 | goto again; |
6917 | } | |
c8b97818 | 6918 | map = kmap(page); |
70dec807 | 6919 | write_extent_buffer(leaf, map + pg_offset, ptr, |
179e29e4 | 6920 | copy_size); |
c8b97818 | 6921 | kunmap(page); |
179e29e4 | 6922 | btrfs_mark_buffer_dirty(leaf); |
a52d9a80 | 6923 | } |
d1310b2e | 6924 | set_extent_uptodate(io_tree, em->start, |
507903b8 | 6925 | extent_map_end(em) - 1, NULL, GFP_NOFS); |
a52d9a80 | 6926 | goto insert; |
a52d9a80 CM |
6927 | } |
6928 | not_found: | |
6929 | em->start = start; | |
70c8a91c | 6930 | em->orig_start = start; |
d1310b2e | 6931 | em->len = len; |
a52d9a80 | 6932 | not_found_em: |
5f39d397 | 6933 | em->block_start = EXTENT_MAP_HOLE; |
9036c102 | 6934 | set_bit(EXTENT_FLAG_VACANCY, &em->flags); |
a52d9a80 | 6935 | insert: |
b3b4aa74 | 6936 | btrfs_release_path(path); |
d1310b2e | 6937 | if (em->start > start || extent_map_end(em) <= start) { |
0b246afa | 6938 | btrfs_err(fs_info, |
5d163e0e JM |
6939 | "bad extent! em: [%llu %llu] passed [%llu %llu]", |
6940 | em->start, em->len, start, len); | |
a52d9a80 CM |
6941 | err = -EIO; |
6942 | goto out; | |
6943 | } | |
d1310b2e CM |
6944 | |
6945 | err = 0; | |
890871be | 6946 | write_lock(&em_tree->lock); |
09a2a8f9 | 6947 | ret = add_extent_mapping(em_tree, em, 0); |
3b951516 CM |
6948 | /* it is possible that someone inserted the extent into the tree |
6949 | * while we had the lock dropped. It is also possible that | |
6950 | * an overlapping map exists in the tree | |
6951 | */ | |
a52d9a80 | 6952 | if (ret == -EEXIST) { |
3b951516 | 6953 | struct extent_map *existing; |
e6dcd2dc CM |
6954 | |
6955 | ret = 0; | |
6956 | ||
e6c4efd8 QW |
6957 | existing = search_extent_mapping(em_tree, start, len); |
6958 | /* | |
6959 | * existing will always be non-NULL, since there must be | |
6960 | * an extent causing the -EEXIST. | |
6961 | */ | |
8dff9c85 | 6962 | if (existing->start == em->start && |
8e2bd3b7 | 6963 | extent_map_end(existing) >= extent_map_end(em) && |
8dff9c85 CM |
6964 | em->block_start == existing->block_start) { |
6965 | /* | |
8e2bd3b7 OS |
6966 | * The existing extent map already encompasses the |
6967 | * entire extent map we tried to add. | |
8dff9c85 CM |
6968 | */ |
6969 | free_extent_map(em); | |
6970 | em = existing; | |
6971 | err = 0; | |
6972 | ||
6973 | } else if (start >= extent_map_end(existing) || | |
32be3a1a | 6974 | start <= existing->start) { |
e6c4efd8 QW |
6975 | /* |
6976 | * The existing extent map is the one nearest to | |
6977 | * the [start, start + len) range which overlaps | |
6978 | */ | |
6979 | err = merge_extent_mapping(em_tree, existing, | |
6980 | em, start); | |
e1c4b745 | 6981 | free_extent_map(existing); |
e6c4efd8 | 6982 | if (err) { |
3b951516 CM |
6983 | free_extent_map(em); |
6984 | em = NULL; | |
6985 | } | |
6986 | } else { | |
6987 | free_extent_map(em); | |
6988 | em = existing; | |
e6dcd2dc | 6989 | err = 0; |
a52d9a80 | 6990 | } |
a52d9a80 | 6991 | } |
890871be | 6992 | write_unlock(&em_tree->lock); |
a52d9a80 | 6993 | out: |
1abe9b8a | 6994 | |
fc4f21b1 | 6995 | trace_btrfs_get_extent(root, inode, em); |
1abe9b8a | 6996 | |
527afb44 | 6997 | btrfs_free_path(path); |
a52d9a80 | 6998 | if (trans) { |
3a45bb20 | 6999 | ret = btrfs_end_transaction(trans); |
d397712b | 7000 | if (!err) |
a52d9a80 CM |
7001 | err = ret; |
7002 | } | |
a52d9a80 CM |
7003 | if (err) { |
7004 | free_extent_map(em); | |
a52d9a80 CM |
7005 | return ERR_PTR(err); |
7006 | } | |
79787eaa | 7007 | BUG_ON(!em); /* Error is always set */ |
a52d9a80 CM |
7008 | return em; |
7009 | } | |
7010 | ||
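/*
 * Fiemap variant of btrfs_get_extent(): when the plain lookup returns a
 * hole or a prealloc extent, also scan the io tree for delalloc bytes
 * hiding behind it.  If any are found, a temporary extent map with
 * block_start == EXTENT_MAP_DELALLOC is returned (clipped against the
 * hole and the caller's range) so that not-yet-written data is reported.
 */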
fc4f21b1 NB |
7011 | struct extent_map *btrfs_get_extent_fiemap(struct btrfs_inode *inode, |
7012 | struct page *page, | |
7013 | size_t pg_offset, u64 start, u64 len, | |
7014 | int create) | |
ec29ed5b CM |
7015 | { |
7016 | struct extent_map *em; | |
7017 | struct extent_map *hole_em = NULL; | |
7018 | u64 range_start = start; | |
7019 | u64 end; | |
7020 | u64 found; | |
7021 | u64 found_end; | |
7022 | int err = 0; | |
7023 | ||
7024 | em = btrfs_get_extent(inode, page, pg_offset, start, len, create); | |
7025 | if (IS_ERR(em)) | |
7026 | return em; | |
7027 | if (em) { | |
7028 | /* | |
f9e4fb53 LB |
7029 | * if our em maps to |
7030 | * - a hole or | |
7031 | * - a pre-alloc extent, | |
7032 | * there might actually be delalloc bytes behind it. | |
ec29ed5b | 7033 | */ |
f9e4fb53 LB |
7034 | if (em->block_start != EXTENT_MAP_HOLE && |
7035 | !test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) | |
ec29ed5b CM |
7036 | return em; |
7037 | else | |
7038 | hole_em = em; | |
7039 | } | |
7040 | ||
7041 | /* check to see if we've wrapped (len == -1 or similar) */ | |
7042 | end = start + len; | |
7043 | if (end < start) | |
7044 | end = (u64)-1; | |
7045 | else | |
7046 | end -= 1; | |
7047 | ||
7048 | em = NULL; | |
7049 | ||
7050 | /* ok, we didn't find anything, let's look for delalloc */ | |
fc4f21b1 | 7051 | found = count_range_bits(&inode->io_tree, &range_start, |
ec29ed5b CM |
7052 | end, len, EXTENT_DELALLOC, 1); |
7053 | found_end = range_start + found; | |
7054 | if (found_end < range_start) | |
7055 | found_end = (u64)-1; | |
7056 | ||
7057 | /* | |
7058 | * we didn't find anything useful, return | |
7059 | * the original results from get_extent() | |
7060 | */ | |
7061 | if (range_start > end || found_end <= start) { | |
7062 | em = hole_em; | |
7063 | hole_em = NULL; | |
7064 | goto out; | |
7065 | } | |
7066 | ||
7067 | /* adjust the range_start to make sure it doesn't | |
7068 | * go backwards from the start they passed in | |
7069 | */ | |
67871254 | 7070 | range_start = max(start, range_start); |
ec29ed5b CM |
7071 | found = found_end - range_start; |
7072 | ||
7073 | if (found > 0) { | |
7074 | u64 hole_start = start; | |
7075 | u64 hole_len = len; | |
7076 | ||
172ddd60 | 7077 | em = alloc_extent_map(); |
ec29ed5b CM |
7078 | if (!em) { |
7079 | err = -ENOMEM; | |
7080 | goto out; | |
7081 | } | |
7082 | /* | |
7083 | * when btrfs_get_extent can't find anything it | |
7084 | * returns one huge hole | |
7085 | * | |
7086 | * make sure what it found really fits our range, and | |
7087 | * adjust to make sure it is based on the start from | |
7088 | * the caller | |
7089 | */ | |
7090 | if (hole_em) { | |
7091 | u64 calc_end = extent_map_end(hole_em); | |
7092 | ||
7093 | if (calc_end <= start || (hole_em->start > end)) { | |
7094 | free_extent_map(hole_em); | |
7095 | hole_em = NULL; | |
7096 | } else { | |
7097 | hole_start = max(hole_em->start, start); | |
7098 | hole_len = calc_end - hole_start; | |
7099 | } | |
7100 | } | |
7101 | em->bdev = NULL; | |
7102 | if (hole_em && range_start > hole_start) { | |
7103 | /* our hole starts before our delalloc, so we | |
7104 | * have to return just the parts of the hole | |
7105 | * that go until the delalloc starts | |
7106 | */ | |
7107 | em->len = min(hole_len, | |
7108 | range_start - hole_start); | |
7109 | em->start = hole_start; | |
7110 | em->orig_start = hole_start; | |
7111 | /* | |
7112 | * don't adjust block start at all, | |
7113 | * it is fixed at EXTENT_MAP_HOLE | |
7114 | */ | |
7115 | em->block_start = hole_em->block_start; | |
7116 | em->block_len = hole_len; | |
f9e4fb53 LB |
7117 | if (test_bit(EXTENT_FLAG_PREALLOC, &hole_em->flags)) |
7118 | set_bit(EXTENT_FLAG_PREALLOC, &em->flags); | |
ec29ed5b CM |
7119 | } else { |
7120 | em->start = range_start; | |
7121 | em->len = found; | |
7122 | em->orig_start = range_start; | |
7123 | em->block_start = EXTENT_MAP_DELALLOC; | |
7124 | em->block_len = found; | |
7125 | } | |
7126 | } else if (hole_em) { | |
7127 | return hole_em; | |
7128 | } | |
7129 | out: | |
7130 | ||
7131 | free_extent_map(hole_em); | |
7132 | if (err) { | |
7133 | free_extent_map(em); | |
7134 | return ERR_PTR(err); | |
7135 | } | |
7136 | return em; | |
7137 | } | |
7138 | ||
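/*
 * Direct IO write helper: insert a pinned extent map via create_io_em()
 * (skipped for NOCOW, where a suitable map already exists) and queue the
 * matching ordered extent.  If the ordered extent cannot be added, the
 * cached extent map for the range is dropped again and the error is
 * returned as an ERR_PTR.
 */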
5f9a8a51 FM |
7139 | static struct extent_map *btrfs_create_dio_extent(struct inode *inode, |
7140 | const u64 start, | |
7141 | const u64 len, | |
7142 | const u64 orig_start, | |
7143 | const u64 block_start, | |
7144 | const u64 block_len, | |
7145 | const u64 orig_block_len, | |
7146 | const u64 ram_bytes, | |
7147 | const int type) | |
7148 | { | |
7149 | struct extent_map *em = NULL; | |
7150 | int ret; | |
7151 | ||
5f9a8a51 | 7152 | if (type != BTRFS_ORDERED_NOCOW) { |
6f9994db LB |
7153 | em = create_io_em(inode, start, len, orig_start, |
7154 | block_start, block_len, orig_block_len, | |
7155 | ram_bytes, | |
7156 | BTRFS_COMPRESS_NONE, /* compress_type */ | |
7157 | type); | |
5f9a8a51 FM |
7158 | if (IS_ERR(em)) |
7159 | goto out; | |
7160 | } | |
7161 | ret = btrfs_add_ordered_extent_dio(inode, start, block_start, | |
7162 | len, block_len, type); | |
7163 | if (ret) { | |
7164 | if (em) { | |
7165 | free_extent_map(em); | |
dcdbc059 | 7166 | btrfs_drop_extent_cache(BTRFS_I(inode), start, |
5f9a8a51 FM |
7167 | start + len - 1, 0); |
7168 | } | |
7169 | em = ERR_PTR(ret); | |
7170 | } | |
7171 | out: | |
5f9a8a51 FM |
7172 | |
7173 | return em; | |
7174 | } | |
7175 | ||
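/*
 * Allocate a fresh data extent for a direct IO write: reserve space with
 * btrfs_reserve_extent(), record it as a BTRFS_ORDERED_REGULAR extent via
 * btrfs_create_dio_extent(), and release the reservation again if that
 * fails.
 */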
4b46fce2 JB |
7176 | static struct extent_map *btrfs_new_extent_direct(struct inode *inode, |
7177 | u64 start, u64 len) | |
7178 | { | |
0b246afa | 7179 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
4b46fce2 | 7180 | struct btrfs_root *root = BTRFS_I(inode)->root; |
70c8a91c | 7181 | struct extent_map *em; |
4b46fce2 JB |
7182 | struct btrfs_key ins; |
7183 | u64 alloc_hint; | |
7184 | int ret; | |
4b46fce2 | 7185 | |
4b46fce2 | 7186 | alloc_hint = get_extent_allocation_hint(inode, start, len); |
0b246afa | 7187 | ret = btrfs_reserve_extent(root, len, len, fs_info->sectorsize, |
da17066c | 7188 | 0, alloc_hint, &ins, 1, 1); |
00361589 JB |
7189 | if (ret) |
7190 | return ERR_PTR(ret); | |
4b46fce2 | 7191 | |
5f9a8a51 FM |
7192 | em = btrfs_create_dio_extent(inode, start, ins.offset, start, |
7193 | ins.objectid, ins.offset, ins.offset, | |
6288d6ea | 7194 | ins.offset, BTRFS_ORDERED_REGULAR); |
0b246afa | 7195 | btrfs_dec_block_group_reservations(fs_info, ins.objectid); |
5f9a8a51 | 7196 | if (IS_ERR(em)) |
2ff7e61e JM |
7197 | btrfs_free_reserved_extent(fs_info, ins.objectid, |
7198 | ins.offset, 1); | |
de0ee0ed | 7199 | |
4b46fce2 JB |
7200 | return em; |
7201 | } | |
7202 | ||
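/*
 * The checks below require that the target range maps to a regular or
 * prealloc extent (regular extents additionally require the inode to be
 * NODATACOW) with no compression, encryption or other encoding, that the
 * extent is not on a read-only block group, that no other file references
 * it (btrfs_cross_ref_exist) and that no csums exist for the bytes we are
 * about to overwrite; prealloc extents must also be free of pending
 * delalloc in the range.
 */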
46bfbb5c CM |
7203 | /* |
7204 | * returns 1 when the nocow is safe, < 0 on error, 0 if the | |
7205 | * block must be COWed | |
7206 | */ | |
00361589 | 7207 | noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len, |
7ee9e440 JB |
7208 | u64 *orig_start, u64 *orig_block_len, |
7209 | u64 *ram_bytes) | |
46bfbb5c | 7210 | { |
2ff7e61e | 7211 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
46bfbb5c CM |
7212 | struct btrfs_path *path; |
7213 | int ret; | |
7214 | struct extent_buffer *leaf; | |
7215 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
7b2b7085 | 7216 | struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; |
46bfbb5c CM |
7217 | struct btrfs_file_extent_item *fi; |
7218 | struct btrfs_key key; | |
7219 | u64 disk_bytenr; | |
7220 | u64 backref_offset; | |
7221 | u64 extent_end; | |
7222 | u64 num_bytes; | |
7223 | int slot; | |
7224 | int found_type; | |
7ee9e440 | 7225 | bool nocow = (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW); |
e77751aa | 7226 | |
46bfbb5c CM |
7227 | path = btrfs_alloc_path(); |
7228 | if (!path) | |
7229 | return -ENOMEM; | |
7230 | ||
f85b7379 DS |
7231 | ret = btrfs_lookup_file_extent(NULL, root, path, |
7232 | btrfs_ino(BTRFS_I(inode)), offset, 0); | |
46bfbb5c CM |
7233 | if (ret < 0) |
7234 | goto out; | |
7235 | ||
7236 | slot = path->slots[0]; | |
7237 | if (ret == 1) { | |
7238 | if (slot == 0) { | |
7239 | /* can't find the item, must cow */ | |
7240 | ret = 0; | |
7241 | goto out; | |
7242 | } | |
7243 | slot--; | |
7244 | } | |
7245 | ret = 0; | |
7246 | leaf = path->nodes[0]; | |
7247 | btrfs_item_key_to_cpu(leaf, &key, slot); | |
4a0cc7ca | 7248 | if (key.objectid != btrfs_ino(BTRFS_I(inode)) || |
46bfbb5c CM |
7249 | key.type != BTRFS_EXTENT_DATA_KEY) { |
7250 | /* not our file or wrong item type, must cow */ | |
7251 | goto out; | |
7252 | } | |
7253 | ||
7254 | if (key.offset > offset) { | |
7255 | /* Wrong offset, must cow */ | |
7256 | goto out; | |
7257 | } | |
7258 | ||
7259 | fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); | |
7260 | found_type = btrfs_file_extent_type(leaf, fi); | |
7261 | if (found_type != BTRFS_FILE_EXTENT_REG && | |
7262 | found_type != BTRFS_FILE_EXTENT_PREALLOC) { | |
7263 | /* not a regular extent, must cow */ | |
7264 | goto out; | |
7265 | } | |
7ee9e440 JB |
7266 | |
7267 | if (!nocow && found_type == BTRFS_FILE_EXTENT_REG) | |
7268 | goto out; | |
7269 | ||
e77751aa MX |
7270 | extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi); |
7271 | if (extent_end <= offset) | |
7272 | goto out; | |
7273 | ||
46bfbb5c | 7274 | disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); |
7ee9e440 JB |
7275 | if (disk_bytenr == 0) |
7276 | goto out; | |
7277 | ||
7278 | if (btrfs_file_extent_compression(leaf, fi) || | |
7279 | btrfs_file_extent_encryption(leaf, fi) || | |
7280 | btrfs_file_extent_other_encoding(leaf, fi)) | |
7281 | goto out; | |
7282 | ||
46bfbb5c CM |
7283 | backref_offset = btrfs_file_extent_offset(leaf, fi); |
7284 | ||
7ee9e440 JB |
7285 | if (orig_start) { |
7286 | *orig_start = key.offset - backref_offset; | |
7287 | *orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi); | |
7288 | *ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi); | |
7289 | } | |
eb384b55 | 7290 | |
2ff7e61e | 7291 | if (btrfs_extent_readonly(fs_info, disk_bytenr)) |
46bfbb5c | 7292 | goto out; |
7b2b7085 MX |
7293 | |
7294 | num_bytes = min(offset + *len, extent_end) - offset; | |
7295 | if (!nocow && found_type == BTRFS_FILE_EXTENT_PREALLOC) { | |
7296 | u64 range_end; | |
7297 | ||
da17066c JM |
7298 | range_end = round_up(offset + num_bytes, |
7299 | root->fs_info->sectorsize) - 1; | |
7b2b7085 MX |
7300 | ret = test_range_bit(io_tree, offset, range_end, |
7301 | EXTENT_DELALLOC, 0, NULL); | |
7302 | if (ret) { | |
7303 | ret = -EAGAIN; | |
7304 | goto out; | |
7305 | } | |
7306 | } | |
7307 | ||
1bda19eb | 7308 | btrfs_release_path(path); |
46bfbb5c CM |
7309 | |
7310 | /* | |
7311 | * look for other files referencing this extent, if we | |
7312 | * find any we must cow | |
7313 | */ | |
00361589 | 7314 | |
e4c3b2dc | 7315 | ret = btrfs_cross_ref_exist(root, btrfs_ino(BTRFS_I(inode)), |
00361589 | 7316 | key.offset - backref_offset, disk_bytenr); |
00361589 JB |
7317 | if (ret) { |
7318 | ret = 0; | |
7319 | goto out; | |
7320 | } | |
46bfbb5c CM |
7321 | |
7322 | /* | |
7323 | * adjust disk_bytenr and num_bytes to cover just the bytes | |
7324 | * in this extent we are about to write. If there | |
7325 | * are any csums in that range we have to cow in order | |
7326 | * to keep the csums correct | |
7327 | */ | |
7328 | disk_bytenr += backref_offset; | |
7329 | disk_bytenr += offset - key.offset; | |
2ff7e61e JM |
7330 | if (csum_exist_in_range(fs_info, disk_bytenr, num_bytes)) |
7331 | goto out; | |
46bfbb5c CM |
7332 | /* |
7333 | * all of the above have passed, it is safe to overwrite this extent | |
7334 | * without cow | |
7335 | */ | |
eb384b55 | 7336 | *len = num_bytes; |
46bfbb5c CM |
7337 | ret = 1; |
7338 | out: | |
7339 | btrfs_free_path(path); | |
7340 | return ret; | |
7341 | } | |
7342 | ||
fc4adbff AG |
7343 | bool btrfs_page_exists_in_range(struct inode *inode, loff_t start, loff_t end) |
7344 | { | |
7345 | struct radix_tree_root *root = &inode->i_mapping->page_tree; | |
7346 | int found = false; | |
7347 | void **pagep = NULL; | |
7348 | struct page *page = NULL; | |
7349 | int start_idx; | |
7350 | int end_idx; | |
7351 | ||
09cbfeaf | 7352 | start_idx = start >> PAGE_SHIFT; |
fc4adbff AG |
7353 | |
7354 | /* | |
7355 | * end is the last byte in the last page. end == start is legal | |
7356 | */ | |
09cbfeaf | 7357 | end_idx = end >> PAGE_SHIFT; |
fc4adbff AG |
7358 | |
7359 | rcu_read_lock(); | |
7360 | ||
7361 | /* Most of the code in this while loop is lifted from | |
7362 | * find_get_page. It's been modified to begin searching from a | |
7363 | * page and return just the first page found in that range. If the | |
7364 | * found idx is less than or equal to the end idx then we know that | |
7365 | * a page exists. If no pages are found or if those pages are | |
7366 | * outside of the range then we're fine (yay!) */ | |
7367 | while (page == NULL && | |
7368 | radix_tree_gang_lookup_slot(root, &pagep, NULL, start_idx, 1)) { | |
7369 | page = radix_tree_deref_slot(pagep); | |
7370 | if (unlikely(!page)) | |
7371 | break; | |
7372 | ||
7373 | if (radix_tree_exception(page)) { | |
809f9016 FM |
7374 | if (radix_tree_deref_retry(page)) { |
7375 | page = NULL; | |
fc4adbff | 7376 | continue; |
809f9016 | 7377 | } |
fc4adbff AG |
7378 | /* |
7379 | * Otherwise, shmem/tmpfs must be storing a swap entry | |
7380 | * here as an exceptional entry: so return it without | |
7381 | * attempting to raise page count. | |
7382 | */ | |
6fdef6d4 | 7383 | page = NULL; |
fc4adbff AG |
7384 | break; /* TODO: Is this relevant for this use case? */ |
7385 | } | |
7386 | ||
91405151 FM |
7387 | if (!page_cache_get_speculative(page)) { |
7388 | page = NULL; | |
fc4adbff | 7389 | continue; |
91405151 | 7390 | } |
fc4adbff AG |
7391 | |
7392 | /* | |
7393 | * Has the page moved? | |
7394 | * This is part of the lockless pagecache protocol. See | |
7395 | * include/linux/pagemap.h for details. | |
7396 | */ | |
7397 | if (unlikely(page != *pagep)) { | |
09cbfeaf | 7398 | put_page(page); |
fc4adbff AG |
7399 | page = NULL; |
7400 | } | |
7401 | } | |
7402 | ||
7403 | if (page) { | |
7404 | if (page->index <= end_idx) | |
7405 | found = true; | |
09cbfeaf | 7406 | put_page(page); |
fc4adbff AG |
7407 | } |
7408 | ||
7409 | rcu_read_unlock(); | |
7410 | return found; | |
7411 | } | |
7412 | ||
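/*
 * Lock [lockstart, lockend] in the io tree for a direct IO.  The loop
 * only exits with the range locked once there is no ordered extent left
 * in it and, for writes, no buffered pages either.  When waiting would
 * risk the deadlocks described in the comments below, -ENOTBLK is
 * returned so the caller falls back to buffered IO.
 */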
eb838e73 JB |
7413 | static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend, |
7414 | struct extent_state **cached_state, int writing) | |
7415 | { | |
7416 | struct btrfs_ordered_extent *ordered; | |
7417 | int ret = 0; | |
7418 | ||
7419 | while (1) { | |
7420 | lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, | |
ff13db41 | 7421 | cached_state); |
eb838e73 JB |
7422 | /* |
7423 | * We're concerned with the entire range that we're going to be | |
01327610 | 7424 | * doing DIO to, so we need to make sure there's no ordered |
eb838e73 JB |
7425 | * extents in this range. |
7426 | */ | |
a776c6fa | 7427 | ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), lockstart, |
eb838e73 JB |
7428 | lockend - lockstart + 1); |
7429 | ||
7430 | /* | |
7431 | * We need to make sure there are no buffered pages in this | |
7432 | * range either, we could have raced between the invalidate in | |
7433 | * generic_file_direct_write and locking the extent. The | |
7434 | * invalidate needs to happen so that reads after a write do not | |
7435 | * get stale data. | |
7436 | */ | |
fc4adbff AG |
7437 | if (!ordered && |
7438 | (!writing || | |
7439 | !btrfs_page_exists_in_range(inode, lockstart, lockend))) | |
eb838e73 JB |
7440 | break; |
7441 | ||
7442 | unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend, | |
7443 | cached_state, GFP_NOFS); | |
7444 | ||
7445 | if (ordered) { | |
ade77029 FM |
7446 | /* |
7447 | * If we are doing a DIO read and the ordered extent we | |
7448 | * found is for a buffered write, we cannot wait for it | |
7449 | * to complete and retry, because if we do so we can | |
7450 | * deadlock with concurrent buffered writes on page | |
7451 | * locks. This happens only if our DIO read covers more | |
7452 | * than one extent map, if at this point it has already | |
7453 | * created an ordered extent for a previous extent map | |
7454 | * and locked its range in the inode's io tree, and a | |
7455 | * concurrent write against that previous extent map's | |
7456 | * range and this range started (we unlock the ranges | |
7457 | * in the io tree only when the bios complete and | |
7458 | * buffered writes always lock pages before attempting | |
7459 | * to lock range in the io tree). | |
7460 | */ | |
7461 | if (writing || | |
7462 | test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) | |
7463 | btrfs_start_ordered_extent(inode, ordered, 1); | |
7464 | else | |
7465 | ret = -ENOTBLK; | |
eb838e73 JB |
7466 | btrfs_put_ordered_extent(ordered); |
7467 | } else { | |
eb838e73 | 7468 | /* |
b850ae14 FM |
7469 | * We could trigger writeback for this range (and wait |
7470 | * for it to complete) and then invalidate the pages for | |
7471 | * this range (through invalidate_inode_pages2_range()), | |
7472 | * but that can lead us to a deadlock with a concurrent | |
7473 | * call to readpages() (a buffered read or a defrag call | |
7474 | * triggered a readahead) on a page lock due to an | |
7475 | * ordered dio extent we created before but did not have | |
7476 | * yet a corresponding bio submitted (hence it cannot | |
7477 | * complete), which makes readpages() wait for that | |
7478 | * ordered extent to complete while holding a lock on | |
7479 | * that page. | |
eb838e73 | 7480 | */ |
b850ae14 | 7481 | ret = -ENOTBLK; |
eb838e73 JB |
7482 | } |
7483 | ||
ade77029 FM |
7484 | if (ret) |
7485 | break; | |
7486 | ||
eb838e73 JB |
7487 | cond_resched(); |
7488 | } | |
7489 | ||
7490 | return ret; | |
7491 | } | |
7492 | ||
6f9994db LB |
7493 | /* The callers of this must take lock_extent() */ |
7494 | static struct extent_map *create_io_em(struct inode *inode, u64 start, u64 len, | |
7495 | u64 orig_start, u64 block_start, | |
7496 | u64 block_len, u64 orig_block_len, | |
7497 | u64 ram_bytes, int compress_type, | |
7498 | int type) | |
69ffb543 JB |
7499 | { |
7500 | struct extent_map_tree *em_tree; | |
7501 | struct extent_map *em; | |
7502 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
7503 | int ret; | |
7504 | ||
6f9994db LB |
7505 | ASSERT(type == BTRFS_ORDERED_PREALLOC || |
7506 | type == BTRFS_ORDERED_COMPRESSED || | |
7507 | type == BTRFS_ORDERED_NOCOW || | |
1af4a0aa | 7508 | type == BTRFS_ORDERED_REGULAR); |
6f9994db | 7509 | |
69ffb543 JB |
7510 | em_tree = &BTRFS_I(inode)->extent_tree; |
7511 | em = alloc_extent_map(); | |
7512 | if (!em) | |
7513 | return ERR_PTR(-ENOMEM); | |
7514 | ||
7515 | em->start = start; | |
7516 | em->orig_start = orig_start; | |
7517 | em->len = len; | |
7518 | em->block_len = block_len; | |
7519 | em->block_start = block_start; | |
7520 | em->bdev = root->fs_info->fs_devices->latest_bdev; | |
b4939680 | 7521 | em->orig_block_len = orig_block_len; |
cc95bef6 | 7522 | em->ram_bytes = ram_bytes; |
70c8a91c | 7523 | em->generation = -1; |
69ffb543 | 7524 | set_bit(EXTENT_FLAG_PINNED, &em->flags); |
1af4a0aa | 7525 | if (type == BTRFS_ORDERED_PREALLOC) { |
b11e234d | 7526 | set_bit(EXTENT_FLAG_FILLING, &em->flags); |
1af4a0aa | 7527 | } else if (type == BTRFS_ORDERED_COMPRESSED) { |
6f9994db LB |
7528 | set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); |
7529 | em->compress_type = compress_type; | |
7530 | } | |
69ffb543 JB |
7531 | |
7532 | do { | |
dcdbc059 | 7533 | btrfs_drop_extent_cache(BTRFS_I(inode), em->start, |
69ffb543 JB |
7534 | em->start + em->len - 1, 0); |
7535 | write_lock(&em_tree->lock); | |
09a2a8f9 | 7536 | ret = add_extent_mapping(em_tree, em, 1); |
69ffb543 | 7537 | write_unlock(&em_tree->lock); |
6f9994db LB |
7538 | /* |
7539 | * The caller has taken lock_extent(), so nothing else should be | |
7540 | * able to race with us to add this em. | |
7541 | */ | |
69ffb543 JB |
7542 | } while (ret == -EEXIST); |
7543 | ||
7544 | if (ret) { | |
7545 | free_extent_map(em); | |
7546 | return ERR_PTR(ret); | |
7547 | } | |
7548 | ||
6f9994db | 7549 | /* em now holds 2 refs, the caller needs to do free_extent_map once. */ |
69ffb543 JB |
7550 | return em; |
7551 | } | |
7552 | ||
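/*
 * Re-balance the outstanding_extents accounting after a DIO write mapped
 * 'len' bytes: consume extents from the reservation tracked in dio_data
 * when possible, otherwise bump the inode's counter for the extra extents
 * a split write will need.
 */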
9c9464cc FM |
7553 | static void adjust_dio_outstanding_extents(struct inode *inode, |
7554 | struct btrfs_dio_data *dio_data, | |
7555 | const u64 len) | |
7556 | { | |
823bb20a | 7557 | unsigned num_extents = count_max_extents(len); |
9c9464cc | 7558 | |
9c9464cc FM |
7559 | /* |
7560 | * If we have an outstanding_extents count still set then we're | |
7561 | * within our reservation, otherwise we need to adjust our inode | |
7562 | * counter appropriately. | |
7563 | */ | |
c2931667 | 7564 | if (dio_data->outstanding_extents >= num_extents) { |
9c9464cc FM |
7565 | dio_data->outstanding_extents -= num_extents; |
7566 | } else { | |
c2931667 LB |
7567 | /* |
7568 | * If dio write length has been split due to no large enough | |
7569 | * contiguous space, we need to compensate our inode counter | |
7570 | * appropriately. | |
7571 | */ | |
7572 | u64 num_needed = num_extents - dio_data->outstanding_extents; | |
7573 | ||
9c9464cc | 7574 | spin_lock(&BTRFS_I(inode)->lock); |
c2931667 | 7575 | BTRFS_I(inode)->outstanding_extents += num_needed; |
9c9464cc FM |
7576 | spin_unlock(&BTRFS_I(inode)->lock); |
7577 | } | |
7578 | } | |
7579 | ||
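/*
 * get_block_t callback handed to the generic direct IO code.  It locks
 * the range, looks up (or creates) the extent covering [start, start+len)
 * and fills in the buffer_head.  Inline and compressed extents return
 * -ENOTBLK to force the buffered path; NODATACOW/prealloc writes reuse
 * the existing extent when can_nocow_extent() allows it, everything else
 * is allocated (COWed) through btrfs_new_extent_direct().
 */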
4b46fce2 JB |
7580 | static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, |
7581 | struct buffer_head *bh_result, int create) | |
7582 | { | |
0b246afa | 7583 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
4b46fce2 | 7584 | struct extent_map *em; |
eb838e73 | 7585 | struct extent_state *cached_state = NULL; |
50745b0a | 7586 | struct btrfs_dio_data *dio_data = NULL; |
4b46fce2 | 7587 | u64 start = iblock << inode->i_blkbits; |
eb838e73 | 7588 | u64 lockstart, lockend; |
4b46fce2 | 7589 | u64 len = bh_result->b_size; |
eb838e73 | 7590 | int unlock_bits = EXTENT_LOCKED; |
0934856d | 7591 | int ret = 0; |
eb838e73 | 7592 | |
172a5049 | 7593 | if (create) |
3266789f | 7594 | unlock_bits |= EXTENT_DIRTY; |
172a5049 | 7595 | else |
0b246afa | 7596 | len = min_t(u64, len, fs_info->sectorsize); |
eb838e73 | 7597 | |
c329861d JB |
7598 | lockstart = start; |
7599 | lockend = start + len - 1; | |
7600 | ||
e1cbbfa5 JB |
7601 | if (current->journal_info) { |
7602 | /* | |
7603 | * Need to pull our outstanding extents and set journal_info to NULL so | |
01327610 | 7604 | * that anything that needs to check if there's a transaction doesn't get |
e1cbbfa5 JB |
7605 | * confused. |
7606 | */ | |
50745b0a | 7607 | dio_data = current->journal_info; |
e1cbbfa5 JB |
7608 | current->journal_info = NULL; |
7609 | } | |
7610 | ||
eb838e73 JB |
7611 | /* |
7612 | * If this errors out it's because we couldn't invalidate pagecache for | |
7613 | * this range and we need to fallback to buffered. | |
7614 | */ | |
9c9464cc FM |
7615 | if (lock_extent_direct(inode, lockstart, lockend, &cached_state, |
7616 | create)) { | |
7617 | ret = -ENOTBLK; | |
7618 | goto err; | |
7619 | } | |
eb838e73 | 7620 | |
fc4f21b1 | 7621 | em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len, 0); |
eb838e73 JB |
7622 | if (IS_ERR(em)) { |
7623 | ret = PTR_ERR(em); | |
7624 | goto unlock_err; | |
7625 | } | |
4b46fce2 JB |
7626 | |
7627 | /* | |
7628 | * Ok for INLINE and COMPRESSED extents we need to fallback on buffered | |
7629 | * io. INLINE is special, and we could probably kludge it in here, but | |
7630 | * it's still buffered so for safety let's just fall back to the generic | |
7631 | * buffered path. | |
7632 | * | |
7633 | * For COMPRESSED we _have_ to read the entire extent in so we can | |
7634 | * decompress it, so there will be buffering required no matter what we | |
7635 | * do, so go ahead and fallback to buffered. | |
7636 | * | |
01327610 | 7637 | * We return -ENOTBLK because that's what makes DIO go ahead and go back |
4b46fce2 JB |
7638 | * to buffered IO. Don't blame me, this is the price we pay for using |
7639 | * the generic code. | |
7640 | */ | |
7641 | if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) || | |
7642 | em->block_start == EXTENT_MAP_INLINE) { | |
7643 | free_extent_map(em); | |
eb838e73 JB |
7644 | ret = -ENOTBLK; |
7645 | goto unlock_err; | |
4b46fce2 JB |
7646 | } |
7647 | ||
7648 | /* Just a good old fashioned hole, return */ | |
7649 | if (!create && (em->block_start == EXTENT_MAP_HOLE || | |
7650 | test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) { | |
7651 | free_extent_map(em); | |
eb838e73 | 7652 | goto unlock_err; |
4b46fce2 JB |
7653 | } |
7654 | ||
7655 | /* | |
7656 | * We don't allocate a new extent in the following cases | |
7657 | * | |
7658 | * 1) The inode is marked as NODATACOW. In this case we'll just use the | |
7659 | * existing extent. | |
7660 | * 2) The extent is marked as PREALLOC. We're good to go here and can | |
7661 | * just use the extent. | |
7662 | * | |
7663 | */ | |
46bfbb5c | 7664 | if (!create) { |
eb838e73 JB |
7665 | len = min(len, em->len - (start - em->start)); |
7666 | lockstart = start + len; | |
7667 | goto unlock; | |
46bfbb5c | 7668 | } |
4b46fce2 JB |
7669 | |
7670 | if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) || | |
7671 | ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) && | |
7672 | em->block_start != EXTENT_MAP_HOLE)) { | |
4b46fce2 | 7673 | int type; |
eb384b55 | 7674 | u64 block_start, orig_start, orig_block_len, ram_bytes; |
4b46fce2 JB |
7675 | |
7676 | if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) | |
7677 | type = BTRFS_ORDERED_PREALLOC; | |
7678 | else | |
7679 | type = BTRFS_ORDERED_NOCOW; | |
46bfbb5c | 7680 | len = min(len, em->len - (start - em->start)); |
4b46fce2 | 7681 | block_start = em->block_start + (start - em->start); |
46bfbb5c | 7682 | |
00361589 | 7683 | if (can_nocow_extent(inode, start, &len, &orig_start, |
f78c436c | 7684 | &orig_block_len, &ram_bytes) == 1 && |
0b246afa | 7685 | btrfs_inc_nocow_writers(fs_info, block_start)) { |
5f9a8a51 | 7686 | struct extent_map *em2; |
0b901916 | 7687 | |
5f9a8a51 FM |
7688 | em2 = btrfs_create_dio_extent(inode, start, len, |
7689 | orig_start, block_start, | |
7690 | len, orig_block_len, | |
7691 | ram_bytes, type); | |
0b246afa | 7692 | btrfs_dec_nocow_writers(fs_info, block_start); |
69ffb543 JB |
7693 | if (type == BTRFS_ORDERED_PREALLOC) { |
7694 | free_extent_map(em); | |
5f9a8a51 | 7695 | em = em2; |
69ffb543 | 7696 | } |
5f9a8a51 FM |
7697 | if (em2 && IS_ERR(em2)) { |
7698 | ret = PTR_ERR(em2); | |
eb838e73 | 7699 | goto unlock_err; |
46bfbb5c | 7700 | } |
18513091 WX |
7701 | /* |
7702 | * For inode marked NODATACOW or extent marked PREALLOC, | |
7703 | * use the existing or preallocated extent, so does not | |
7704 | * need to adjust btrfs_space_info's bytes_may_use. | |
7705 | */ | |
7706 | btrfs_free_reserved_data_space_noquota(inode, | |
7707 | start, len); | |
46bfbb5c | 7708 | goto unlock; |
4b46fce2 | 7709 | } |
4b46fce2 | 7710 | } |
00361589 | 7711 | |
46bfbb5c CM |
7712 | /* |
7713 | * this will cow the extent, reset the len in case we changed | |
7714 | * it above | |
7715 | */ | |
7716 | len = bh_result->b_size; | |
70c8a91c JB |
7717 | free_extent_map(em); |
7718 | em = btrfs_new_extent_direct(inode, start, len); | |
eb838e73 JB |
7719 | if (IS_ERR(em)) { |
7720 | ret = PTR_ERR(em); | |
7721 | goto unlock_err; | |
7722 | } | |
46bfbb5c CM |
7723 | len = min(len, em->len - (start - em->start)); |
7724 | unlock: | |
4b46fce2 JB |
7725 | bh_result->b_blocknr = (em->block_start + (start - em->start)) >> |
7726 | inode->i_blkbits; | |
46bfbb5c | 7727 | bh_result->b_size = len; |
4b46fce2 JB |
7728 | bh_result->b_bdev = em->bdev; |
7729 | set_buffer_mapped(bh_result); | |
c3473e83 JB |
7730 | if (create) { |
7731 | if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) | |
7732 | set_buffer_new(bh_result); | |
7733 | ||
7734 | /* | |
7735 | * Need to update the i_size under the extent lock so buffered | |
7736 | * readers will get the updated i_size when we unlock. | |
7737 | */ | |
4aaedfb0 | 7738 | if (!dio_data->overwrite && start + len > i_size_read(inode)) |
c3473e83 | 7739 | i_size_write(inode, start + len); |
0934856d | 7740 | |
9c9464cc | 7741 | adjust_dio_outstanding_extents(inode, dio_data, len); |
50745b0a | 7742 | WARN_ON(dio_data->reserve < len); |
7743 | dio_data->reserve -= len; | |
f28a4928 | 7744 | dio_data->unsubmitted_oe_range_end = start + len; |
50745b0a | 7745 | current->journal_info = dio_data; |
c3473e83 | 7746 | } |
4b46fce2 | 7747 | |
eb838e73 JB |
7748 | /* |
7749 | * In the case of write we need to clear and unlock the entire range, | |
7750 | * in the case of read we need to unlock only the end area that we | |
7751 | * aren't using if there is any left over space. | |
7752 | */ | |
24c03fa5 | 7753 | if (lockstart < lockend) { |
0934856d MX |
7754 | clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, |
7755 | lockend, unlock_bits, 1, 0, | |
7756 | &cached_state, GFP_NOFS); | |
24c03fa5 | 7757 | } else { |
eb838e73 | 7758 | free_extent_state(cached_state); |
24c03fa5 | 7759 | } |
eb838e73 | 7760 | |
4b46fce2 JB |
7761 | free_extent_map(em); |
7762 | ||
7763 | return 0; | |
eb838e73 JB |
7764 | |
7765 | unlock_err: | |
eb838e73 JB |
7766 | clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend, |
7767 | unlock_bits, 1, 0, &cached_state, GFP_NOFS); | |
9c9464cc | 7768 | err: |
50745b0a | 7769 | if (dio_data) |
7770 | current->journal_info = dio_data; | |
9c9464cc FM |
7771 | /* |
7772 | * Compensate the delalloc release we do in btrfs_direct_IO() when we | |
7773 | * write less data than expected, so that we don't underflow our inode's | |
7774 | * outstanding extents counter. | |
7775 | */ | |
7776 | if (create && dio_data) | |
7777 | adjust_dio_outstanding_extents(inode, dio_data, len); | |
7778 | ||
eb838e73 | 7779 | return ret; |
4b46fce2 JB |
7780 | } |
7781 | ||
8b110e39 | 7782 | static inline int submit_dio_repair_bio(struct inode *inode, struct bio *bio, |
81a75f67 | 7783 | int mirror_num) |
8b110e39 | 7784 | { |
2ff7e61e | 7785 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
8b110e39 MX |
7786 | int ret; |
7787 | ||
37226b21 | 7788 | BUG_ON(bio_op(bio) == REQ_OP_WRITE); |
8b110e39 MX |
7789 | |
7790 | bio_get(bio); | |
7791 | ||
2ff7e61e | 7792 | ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DIO_REPAIR); |
8b110e39 MX |
7793 | if (ret) |
7794 | goto err; | |
7795 | ||
2ff7e61e | 7796 | ret = btrfs_map_bio(fs_info, bio, mirror_num, 0); |
8b110e39 MX |
7797 | err: |
7798 | bio_put(bio); | |
7799 | return ret; | |
7800 | } | |
7801 | ||
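/*
 * Same policy as the buffered read-repair path: give up immediately when
 * only one copy of the data exists, otherwise advance this_mirror
 * (skipping the mirror that just failed) and report whether another copy
 * is still worth trying.
 */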
7802 | static int btrfs_check_dio_repairable(struct inode *inode, | |
7803 | struct bio *failed_bio, | |
7804 | struct io_failure_record *failrec, | |
7805 | int failed_mirror) | |
7806 | { | |
ab8d0fc4 | 7807 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
8b110e39 MX |
7808 | int num_copies; |
7809 | ||
ab8d0fc4 | 7810 | num_copies = btrfs_num_copies(fs_info, failrec->logical, failrec->len); |
8b110e39 MX |
7811 | if (num_copies == 1) { |
7812 | /* | |
7813 | * we only have a single copy of the data, so don't bother with | |
7814 | * all the retry and error correction code that follows. no | |
7815 | * matter what the error is, it is very likely to persist. | |
7816 | */ | |
ab8d0fc4 JM |
7817 | btrfs_debug(fs_info, |
7818 | "Check DIO Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d", | |
7819 | num_copies, failrec->this_mirror, failed_mirror); | |
8b110e39 MX |
7820 | return 0; |
7821 | } | |
7822 | ||
7823 | failrec->failed_mirror = failed_mirror; | |
7824 | failrec->this_mirror++; | |
7825 | if (failrec->this_mirror == failed_mirror) | |
7826 | failrec->this_mirror++; | |
7827 | ||
7828 | if (failrec->this_mirror > num_copies) { | |
ab8d0fc4 JM |
7829 | btrfs_debug(fs_info, |
7830 | "Check DIO Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d", | |
7831 | num_copies, failrec->this_mirror, failed_mirror); | |
8b110e39 MX |
7832 | return 0; |
7833 | } | |
7834 | ||
7835 | return 1; | |
7836 | } | |
7837 | ||
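/*
 * Repair a single failed sector of a direct IO read: record the failure,
 * check that another mirror is available, clone a one-sector repair bio
 * (REQ_FAILFAST_DEV when the original bio spanned more than one sector)
 * and resubmit it to the next mirror via submit_dio_repair_bio().
 */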
7838 | static int dio_read_error(struct inode *inode, struct bio *failed_bio, | |
2dabb324 CR |
7839 | struct page *page, unsigned int pgoff, |
7840 | u64 start, u64 end, int failed_mirror, | |
7841 | bio_end_io_t *repair_endio, void *repair_arg) | |
8b110e39 MX |
7842 | { |
7843 | struct io_failure_record *failrec; | |
7844 | struct bio *bio; | |
7845 | int isector; | |
70fd7614 | 7846 | int read_mode = 0; |
8b110e39 MX |
7847 | int ret; |
7848 | ||
37226b21 | 7849 | BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE); |
8b110e39 MX |
7850 | |
7851 | ret = btrfs_get_io_failure_record(inode, start, end, &failrec); | |
7852 | if (ret) | |
7853 | return ret; | |
7854 | ||
7855 | ret = btrfs_check_dio_repairable(inode, failed_bio, failrec, | |
7856 | failed_mirror); | |
7857 | if (!ret) { | |
4ac1f4ac | 7858 | free_io_failure(BTRFS_I(inode), failrec); |
8b110e39 MX |
7859 | return -EIO; |
7860 | } | |
7861 | ||
2dabb324 CR |
7862 | if ((failed_bio->bi_vcnt > 1) |
7863 | || (failed_bio->bi_io_vec->bv_len | |
da17066c | 7864 | > btrfs_inode_sectorsize(inode))) |
70fd7614 | 7865 | read_mode |= REQ_FAILFAST_DEV; |
8b110e39 MX |
7866 | |
7867 | isector = start - btrfs_io_bio(failed_bio)->logical; | |
7868 | isector >>= inode->i_sb->s_blocksize_bits; | |
7869 | bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page, | |
2dabb324 | 7870 | pgoff, isector, repair_endio, repair_arg); |
8b110e39 | 7871 | if (!bio) { |
4ac1f4ac | 7872 | free_io_failure(BTRFS_I(inode), failrec); |
8b110e39 MX |
7873 | return -EIO; |
7874 | } | |
37226b21 | 7875 | bio_set_op_attrs(bio, REQ_OP_READ, read_mode); |
8b110e39 MX |
7876 | |
7877 | btrfs_debug(BTRFS_I(inode)->root->fs_info, | |
7878 | "Repair DIO Read Error: submitting new dio read[%#x] to this_mirror=%d, in_validation=%d\n", | |
7879 | read_mode, failrec->this_mirror, failrec->in_validation); | |
7880 | ||
81a75f67 | 7881 | ret = submit_dio_repair_bio(inode, bio, failrec->this_mirror); |
8b110e39 | 7882 | if (ret) { |
4ac1f4ac | 7883 | free_io_failure(BTRFS_I(inode), failrec); |
8b110e39 MX |
7884 | bio_put(bio); |
7885 | } | |
7886 | ||
7887 | return ret; | |
7888 | } | |
7889 | ||
7890 | struct btrfs_retry_complete { | |
7891 | struct completion done; | |
7892 | struct inode *inode; | |
7893 | u64 start; | |
7894 | int uptodate; | |
7895 | }; | |
7896 | ||
4246a0b6 | 7897 | static void btrfs_retry_endio_nocsum(struct bio *bio) |
8b110e39 MX |
7898 | { |
7899 | struct btrfs_retry_complete *done = bio->bi_private; | |
2dabb324 | 7900 | struct inode *inode; |
8b110e39 MX |
7901 | struct bio_vec *bvec; |
7902 | int i; | |
7903 | ||
4246a0b6 | 7904 | if (bio->bi_error) |
8b110e39 MX |
7905 | goto end; |
7906 | ||
2dabb324 CR |
7907 | ASSERT(bio->bi_vcnt == 1); |
7908 | inode = bio->bi_io_vec->bv_page->mapping->host; | |
da17066c | 7909 | ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(inode)); |
2dabb324 | 7910 | |
8b110e39 MX |
7911 | done->uptodate = 1; |
7912 | bio_for_each_segment_all(bvec, bio, i) | |
b30cb441 | 7913 | clean_io_failure(BTRFS_I(done->inode), done->start, bvec->bv_page, 0); |
8b110e39 MX |
7914 | end: |
7915 | complete(&done->done); | |
7916 | bio_put(bio); | |
7917 | } | |
7918 | ||
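/*
 * Read-repair for nodatasum inodes: there is no checksum to verify, so on
 * an I/O error every sector of the bio is simply resubmitted through
 * dio_read_error(), retrying other mirrors until a read completes.
 */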
7919 | static int __btrfs_correct_data_nocsum(struct inode *inode, | |
7920 | struct btrfs_io_bio *io_bio) | |
4b46fce2 | 7921 | { |
2dabb324 | 7922 | struct btrfs_fs_info *fs_info; |
2c30c71b | 7923 | struct bio_vec *bvec; |
8b110e39 | 7924 | struct btrfs_retry_complete done; |
4b46fce2 | 7925 | u64 start; |
2dabb324 CR |
7926 | unsigned int pgoff; |
7927 | u32 sectorsize; | |
7928 | int nr_sectors; | |
2c30c71b | 7929 | int i; |
c1dc0896 | 7930 | int ret; |
4b46fce2 | 7931 | |
2dabb324 | 7932 | fs_info = BTRFS_I(inode)->root->fs_info; |
da17066c | 7933 | sectorsize = fs_info->sectorsize; |
2dabb324 | 7934 | |
8b110e39 MX |
7935 | start = io_bio->logical; |
7936 | done.inode = inode; | |
7937 | ||
7938 | bio_for_each_segment_all(bvec, &io_bio->bio, i) { | |
2dabb324 CR |
7939 | nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec->bv_len); |
7940 | pgoff = bvec->bv_offset; | |
7941 | ||
7942 | next_block_or_try_again: | |
8b110e39 MX |
7943 | done.uptodate = 0; |
7944 | done.start = start; | |
7945 | init_completion(&done.done); | |
7946 | ||
2dabb324 CR |
7947 | ret = dio_read_error(inode, &io_bio->bio, bvec->bv_page, |
7948 | pgoff, start, start + sectorsize - 1, | |
7949 | io_bio->mirror_num, | |
7950 | btrfs_retry_endio_nocsum, &done); | |
8b110e39 MX |
7951 | if (ret) |
7952 | return ret; | |
7953 | ||
7954 | wait_for_completion(&done.done); | |
7955 | ||
7956 | if (!done.uptodate) { | |
7957 | /* We might have another mirror, so try again */ | |
2dabb324 | 7958 | goto next_block_or_try_again; |
8b110e39 MX |
7959 | } |
7960 | ||
2dabb324 CR |
7961 | start += sectorsize; |
7962 | ||
7963 | if (--nr_sectors) { | |
7964 | pgoff += sectorsize; | |
7965 | goto next_block_or_try_again; | |
7966 | } | |
8b110e39 MX |
7967 | } |
7968 | ||
7969 | return 0; | |
7970 | } | |
7971 | ||
4246a0b6 | 7972 | static void btrfs_retry_endio(struct bio *bio) |
8b110e39 MX |
7973 | { |
7974 | struct btrfs_retry_complete *done = bio->bi_private; | |
7975 | struct btrfs_io_bio *io_bio = btrfs_io_bio(bio); | |
2dabb324 | 7976 | struct inode *inode; |
8b110e39 | 7977 | struct bio_vec *bvec; |
2dabb324 | 7978 | u64 start; |
8b110e39 MX |
7979 | int uptodate; |
7980 | int ret; | |
7981 | int i; | |
7982 | ||
4246a0b6 | 7983 | if (bio->bi_error) |
8b110e39 MX |
7984 | goto end; |
7985 | ||
7986 | uptodate = 1; | |
2dabb324 CR |
7987 | |
7988 | start = done->start; | |
7989 | ||
7990 | ASSERT(bio->bi_vcnt == 1); | |
7991 | inode = bio->bi_io_vec->bv_page->mapping->host; | |
da17066c | 7992 | ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(inode)); |
2dabb324 | 7993 | |
8b110e39 MX |
7994 | bio_for_each_segment_all(bvec, bio, i) { |
7995 | ret = __readpage_endio_check(done->inode, io_bio, i, | |
2dabb324 CR |
7996 | bvec->bv_page, bvec->bv_offset, |
7997 | done->start, bvec->bv_len); | |
8b110e39 | 7998 | if (!ret) |
b30cb441 | 7999 | clean_io_failure(BTRFS_I(done->inode), done->start, |
2dabb324 | 8000 | bvec->bv_page, bvec->bv_offset); |
8b110e39 MX |
8001 | else |
8002 | uptodate = 0; | |
8003 | } | |
8004 | ||
8005 | done->uptodate = uptodate; | |
8006 | end: | |
8007 | complete(&done->done); | |
8008 | bio_put(bio); | |
8009 | } | |
8010 | ||
8011 | static int __btrfs_subio_endio_read(struct inode *inode, | |
8012 | struct btrfs_io_bio *io_bio, int err) | |
8013 | { | |
2dabb324 | 8014 | struct btrfs_fs_info *fs_info; |
8b110e39 MX |
8015 | struct bio_vec *bvec; |
8016 | struct btrfs_retry_complete done; | |
8017 | u64 start; | |
8018 | u64 offset = 0; | |
2dabb324 CR |
8019 | u32 sectorsize; |
8020 | int nr_sectors; | |
8021 | unsigned int pgoff; | |
8022 | int csum_pos; | |
8b110e39 MX |
8023 | int i; |
8024 | int ret; | |
dc380aea | 8025 | |
2dabb324 | 8026 | fs_info = BTRFS_I(inode)->root->fs_info; |
da17066c | 8027 | sectorsize = fs_info->sectorsize; |
2dabb324 | 8028 | |
8b110e39 | 8029 | err = 0; |
c1dc0896 | 8030 | start = io_bio->logical; |
8b110e39 MX |
8031 | done.inode = inode; |
8032 | ||
c1dc0896 | 8033 | bio_for_each_segment_all(bvec, &io_bio->bio, i) { |
2dabb324 CR |
8034 | nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec->bv_len); |
8035 | ||
8036 | pgoff = bvec->bv_offset; | |
8037 | next_block: | |
8038 | csum_pos = BTRFS_BYTES_TO_BLKS(fs_info, offset); | |
8039 | ret = __readpage_endio_check(inode, io_bio, csum_pos, | |
8040 | bvec->bv_page, pgoff, start, | |
8041 | sectorsize); | |
8b110e39 MX |
8042 | if (likely(!ret)) |
8043 | goto next; | |
8044 | try_again: | |
8045 | done.uptodate = 0; | |
8046 | done.start = start; | |
8047 | init_completion(&done.done); | |
8048 | ||
2dabb324 CR |
8049 | ret = dio_read_error(inode, &io_bio->bio, bvec->bv_page, |
8050 | pgoff, start, start + sectorsize - 1, | |
8051 | io_bio->mirror_num, | |
8052 | btrfs_retry_endio, &done); | |
8b110e39 MX |
8053 | if (ret) { |
8054 | err = ret; | |
8055 | goto next; | |
8056 | } | |
8057 | ||
8058 | wait_for_completion(&done.done); | |
8059 | ||
8060 | if (!done.uptodate) { | |
8061 | /* We might have another mirror, so try again */ | |
8062 | goto try_again; | |
8063 | } | |
8064 | next: | |
2dabb324 CR |
8065 | offset += sectorsize; |
8066 | start += sectorsize; | |
8067 | ||
8068 | ASSERT(nr_sectors); | |
8069 | ||
8070 | if (--nr_sectors) { | |
8071 | pgoff += sectorsize; | |
8072 | goto next_block; | |
8073 | } | |
2c30c71b | 8074 | } |
c1dc0896 MX |
8075 | |
8076 | return err; | |
8077 | } | |
8078 | ||
8b110e39 MX |
8079 | static int btrfs_subio_endio_read(struct inode *inode, |
8080 | struct btrfs_io_bio *io_bio, int err) | |
8081 | { | |
8082 | bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; | |
8083 | ||
8084 | if (skip_csum) { | |
8085 | if (unlikely(err)) | |
8086 | return __btrfs_correct_data_nocsum(inode, io_bio); | |
8087 | else | |
8088 | return 0; | |
8089 | } else { | |
8090 | return __btrfs_subio_endio_read(inode, io_bio, err); | |
8091 | } | |
8092 | } | |
8093 | ||
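/*
 * Final completion handler for a direct IO read: run the per-sector
 * verification/repair in btrfs_subio_endio_read() if the original bio was
 * submitted as-is, unlock the io tree range, then propagate the result to
 * the original dio_bio through dio_end_io() and free the dip.
 */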
4246a0b6 | 8094 | static void btrfs_endio_direct_read(struct bio *bio) |
c1dc0896 MX |
8095 | { |
8096 | struct btrfs_dio_private *dip = bio->bi_private; | |
8097 | struct inode *inode = dip->inode; | |
8098 | struct bio *dio_bio; | |
8099 | struct btrfs_io_bio *io_bio = btrfs_io_bio(bio); | |
4246a0b6 | 8100 | int err = bio->bi_error; |
c1dc0896 | 8101 | |
8b110e39 MX |
8102 | if (dip->flags & BTRFS_DIO_ORIG_BIO_SUBMITTED) |
8103 | err = btrfs_subio_endio_read(inode, io_bio, err); | |
c1dc0896 | 8104 | |
4b46fce2 | 8105 | unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset, |
d0082371 | 8106 | dip->logical_offset + dip->bytes - 1); |
9be3395b | 8107 | dio_bio = dip->dio_bio; |
4b46fce2 | 8108 | |
4b46fce2 | 8109 | kfree(dip); |
c0da7aa1 | 8110 | |
1636d1d7 | 8111 | dio_bio->bi_error = bio->bi_error; |
4246a0b6 | 8112 | dio_end_io(dio_bio, bio->bi_error); |
23ea8e5a MX |
8113 | |
8114 | if (io_bio->end_io) | |
8115 | io_bio->end_io(io_bio, err); | |
9be3395b | 8116 | bio_put(bio); |
4b46fce2 JB |
8117 | } |
8118 | ||
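/*
 * A direct IO write bio may span several ordered extents.  Walk the whole
 * [offset, offset + bytes) range, marking each ordered extent complete via
 * btrfs_dec_test_first_ordered_pending() and queueing finish_ordered_fn()
 * on the endio-write workqueue, until the full range is accounted for.
 */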
14543774 FM |
8119 | static void btrfs_endio_direct_write_update_ordered(struct inode *inode, |
8120 | const u64 offset, | |
8121 | const u64 bytes, | |
8122 | const int uptodate) | |
4b46fce2 | 8123 | { |
0b246afa | 8124 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
4b46fce2 | 8125 | struct btrfs_ordered_extent *ordered = NULL; |
14543774 FM |
8126 | u64 ordered_offset = offset; |
8127 | u64 ordered_bytes = bytes; | |
4b46fce2 JB |
8128 | int ret; |
8129 | ||
163cf09c CM |
8130 | again: |
8131 | ret = btrfs_dec_test_first_ordered_pending(inode, &ordered, | |
8132 | &ordered_offset, | |
4246a0b6 | 8133 | ordered_bytes, |
14543774 | 8134 | uptodate); |
4b46fce2 | 8135 | if (!ret) |
163cf09c | 8136 | goto out_test; |
4b46fce2 | 8137 | |
9e0af237 LB |
8138 | btrfs_init_work(&ordered->work, btrfs_endio_write_helper, |
8139 | finish_ordered_fn, NULL, NULL); | |
0b246afa | 8140 | btrfs_queue_work(fs_info->endio_write_workers, &ordered->work); |
163cf09c CM |
8141 | out_test: |
8142 | /* | |
8143 | * our bio might span multiple ordered extents. If we haven't | |
8144 | * completed the accounting for the whole dio, go back and try again | |
8145 | */ | |
14543774 FM |
8146 | if (ordered_offset < offset + bytes) { |
8147 | ordered_bytes = offset + bytes - ordered_offset; | |
5fd02043 | 8148 | ordered = NULL; |
163cf09c CM |
8149 | goto again; |
8150 | } | |
14543774 FM |
8151 | } |
8152 | ||
8153 | static void btrfs_endio_direct_write(struct bio *bio) | |
8154 | { | |
8155 | struct btrfs_dio_private *dip = bio->bi_private; | |
8156 | struct bio *dio_bio = dip->dio_bio; | |
8157 | ||
8158 | btrfs_endio_direct_write_update_ordered(dip->inode, | |
8159 | dip->logical_offset, | |
8160 | dip->bytes, | |
8161 | !bio->bi_error); | |
4b46fce2 | 8162 | |
4b46fce2 | 8163 | kfree(dip); |
c0da7aa1 | 8164 | |
1636d1d7 | 8165 | dio_bio->bi_error = bio->bi_error; |
4246a0b6 | 8166 | dio_end_io(dio_bio, bio->bi_error); |
9be3395b | 8167 | bio_put(bio); |
4b46fce2 JB |
8168 | } |
8169 | ||
81a75f67 | 8170 | static int __btrfs_submit_bio_start_direct_io(struct inode *inode, |
eaf25d93 CM |
8171 | struct bio *bio, int mirror_num, |
8172 | unsigned long bio_flags, u64 offset) | |
8173 | { | |
8174 | int ret; | |
2ff7e61e | 8175 | ret = btrfs_csum_one_bio(inode, bio, offset, 1); |
79787eaa | 8176 | BUG_ON(ret); /* -ENOMEM */ |
eaf25d93 CM |
8177 | return 0; |
8178 | } | |
8179 | ||
4246a0b6 | 8180 | static void btrfs_end_dio_bio(struct bio *bio) |
e65e1535 MX |
8181 | { |
8182 | struct btrfs_dio_private *dip = bio->bi_private; | |
4246a0b6 | 8183 | int err = bio->bi_error; |
e65e1535 | 8184 | |
8b110e39 MX |
8185 | if (err) |
8186 | btrfs_warn(BTRFS_I(dip->inode)->root->fs_info, | |
6296b960 | 8187 | "direct IO failed ino %llu rw %d,%u sector %#Lx len %u err no %d", |
f85b7379 DS |
8188 | btrfs_ino(BTRFS_I(dip->inode)), bio_op(bio), |
8189 | bio->bi_opf, | |
8b110e39 MX |
8190 | (unsigned long long)bio->bi_iter.bi_sector, |
8191 | bio->bi_iter.bi_size, err); | |
8192 | ||
8193 | if (dip->subio_endio) | |
8194 | err = dip->subio_endio(dip->inode, btrfs_io_bio(bio), err); | |
c1dc0896 MX |
8195 | |
8196 | if (err) { | |
e65e1535 MX |
8197 | dip->errors = 1; |
8198 | ||
8199 | /* | |
8200 | * before the atomic variable goes to zero, we must make sure | |
8201 | * dip->errors is perceived to be set. | |
8202 | */ | |
4e857c58 | 8203 | smp_mb__before_atomic(); |
e65e1535 MX |
8204 | } |
8205 | ||
8206 | /* if there are more bios still pending for this dio, just exit */ | |
8207 | if (!atomic_dec_and_test(&dip->pending_bios)) | |
8208 | goto out; | |
8209 | ||
9be3395b | 8210 | if (dip->errors) { |
e65e1535 | 8211 | bio_io_error(dip->orig_bio); |
9be3395b | 8212 | } else { |
4246a0b6 CH |
8213 | dip->dio_bio->bi_error = 0; |
8214 | bio_endio(dip->orig_bio); | |
e65e1535 MX |
8215 | } |
8216 | out: | |
8217 | bio_put(bio); | |
8218 | } | |
8219 | ||
8220 | static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev, | |
8221 | u64 first_sector, gfp_t gfp_flags) | |
8222 | { | |
da2f0f74 | 8223 | struct bio *bio; |
22365979 | 8224 | bio = btrfs_bio_alloc(bdev, first_sector, BIO_MAX_PAGES, gfp_flags); |
da2f0f74 CM |
8225 | if (bio) |
8226 | bio_associate_current(bio); | |
8227 | return bio; | |
e65e1535 MX |
8228 | } |
8229 | ||
2ff7e61e | 8230 | static inline int btrfs_lookup_and_bind_dio_csum(struct inode *inode, |
c1dc0896 MX |
8231 | struct btrfs_dio_private *dip, |
8232 | struct bio *bio, | |
8233 | u64 file_offset) | |
8234 | { | |
8235 | struct btrfs_io_bio *io_bio = btrfs_io_bio(bio); | |
8236 | struct btrfs_io_bio *orig_io_bio = btrfs_io_bio(dip->orig_bio); | |
8237 | int ret; | |
8238 | ||
8239 | /* | |
8240 | * We load all the csum data we need when we submit | |
8241 | * the first bio to reduce the csum tree search and | |
8242 | * contention. | |
8243 | */ | |
8244 | if (dip->logical_offset == file_offset) { | |
2ff7e61e | 8245 | ret = btrfs_lookup_bio_sums_dio(inode, dip->orig_bio, |
c1dc0896 MX |
8246 | file_offset); |
8247 | if (ret) | |
8248 | return ret; | |
8249 | } | |
8250 | ||
8251 | if (bio == dip->orig_bio) | |
8252 | return 0; | |
8253 | ||
8254 | file_offset -= dip->logical_offset; | |
8255 | file_offset >>= inode->i_sb->s_blocksize_bits; | |
8256 | io_bio->csum = (u8 *)(((u32 *)orig_io_bio->csum) + file_offset); | |
8257 | ||
8258 | return 0; | |
8259 | } | |
8260 | ||
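/*
 * Submit one piece of a direct IO: reads get an endio workqueue hook
 * first; writes either defer csum generation to the async helpers
 * (btrfs_wq_submit_bio) or compute it inline, while reads bind the csums
 * looked up for the original bio; finally the bio is mapped to the
 * device(s) with btrfs_map_bio().
 */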
e65e1535 | 8261 | static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode, |
81a75f67 | 8262 | u64 file_offset, int skip_sum, |
c329861d | 8263 | int async_submit) |
e65e1535 | 8264 | { |
0b246afa | 8265 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
facc8a22 | 8266 | struct btrfs_dio_private *dip = bio->bi_private; |
37226b21 | 8267 | bool write = bio_op(bio) == REQ_OP_WRITE; |
e65e1535 MX |
8268 | int ret; |
8269 | ||
b812ce28 JB |
8270 | if (async_submit) |
8271 | async_submit = !atomic_read(&BTRFS_I(inode)->sync_writers); | |
8272 | ||
e65e1535 | 8273 | bio_get(bio); |
5fd02043 JB |
8274 | |
8275 | if (!write) { | |
0b246afa | 8276 | ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA); |
5fd02043 JB |
8277 | if (ret) |
8278 | goto err; | |
8279 | } | |
e65e1535 | 8280 | |
1ae39938 JB |
8281 | if (skip_sum) |
8282 | goto map; | |
8283 | ||
8284 | if (write && async_submit) { | |
0b246afa JM |
8285 | ret = btrfs_wq_submit_bio(fs_info, inode, bio, 0, 0, |
8286 | file_offset, | |
8287 | __btrfs_submit_bio_start_direct_io, | |
8288 | __btrfs_submit_bio_done); | |
e65e1535 | 8289 | goto err; |
1ae39938 JB |
8290 | } else if (write) { |
8291 | /* | |
8292 | * If we aren't doing async submit, calculate the csum of the | |
8293 | * bio now. | |
8294 | */ | |
2ff7e61e | 8295 | ret = btrfs_csum_one_bio(inode, bio, file_offset, 1); |
1ae39938 JB |
8296 | if (ret) |
8297 | goto err; | |
23ea8e5a | 8298 | } else { |
2ff7e61e | 8299 | ret = btrfs_lookup_and_bind_dio_csum(inode, dip, bio, |
c1dc0896 | 8300 | file_offset); |
c2db1073 TI |
8301 | if (ret) |
8302 | goto err; | |
8303 | } | |
1ae39938 | 8304 | map: |
2ff7e61e | 8305 | ret = btrfs_map_bio(fs_info, bio, 0, async_submit); |
e65e1535 MX |
8306 | err: |
8307 | bio_put(bio); | |
8308 | return ret; | |
8309 | } | |
8310 | ||
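/*
 * Split the original direct IO bio so that no child bio crosses the
 * stripe length reported by btrfs_map_block().  Each child takes a
 * reference on dip->pending_bios and is submitted with
 * __btrfs_submit_dio_bio(); async checksumming is disabled for RAID5/6
 * profiles because it interferes with collecting full stripe writes.
 */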
81a75f67 | 8311 | static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip, |
e65e1535 MX |
8312 | int skip_sum) |
8313 | { | |
8314 | struct inode *inode = dip->inode; | |
0b246afa | 8315 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
e65e1535 | 8316 | struct btrfs_root *root = BTRFS_I(inode)->root; |
e65e1535 MX |
8317 | struct bio *bio; |
8318 | struct bio *orig_bio = dip->orig_bio; | |
6a2de22f | 8319 | struct bio_vec *bvec; |
4f024f37 | 8320 | u64 start_sector = orig_bio->bi_iter.bi_sector; |
e65e1535 MX |
8321 | u64 file_offset = dip->logical_offset; |
8322 | u64 submit_len = 0; | |
8323 | u64 map_length; | |
0b246afa | 8324 | u32 blocksize = fs_info->sectorsize; |
1ae39938 | 8325 | int async_submit = 0; |
5f4dc8fc CR |
8326 | int nr_sectors; |
8327 | int ret; | |
6a2de22f | 8328 | int i, j; |
e65e1535 | 8329 | |
4f024f37 | 8330 | map_length = orig_bio->bi_iter.bi_size; |
0b246afa JM |
8331 | ret = btrfs_map_block(fs_info, btrfs_op(orig_bio), start_sector << 9, |
8332 | &map_length, NULL, 0); | |
7a5c3c9b | 8333 | if (ret) |
e65e1535 | 8334 | return -EIO; |
facc8a22 | 8335 | |
4f024f37 | 8336 | if (map_length >= orig_bio->bi_iter.bi_size) { |
02f57c7a | 8337 | bio = orig_bio; |
c1dc0896 | 8338 | dip->flags |= BTRFS_DIO_ORIG_BIO_SUBMITTED; |
02f57c7a JB |
8339 | goto submit; |
8340 | } | |
8341 | ||
53b381b3 | 8342 | /* async crcs make it difficult to collect full stripe writes. */ |
ffe2d203 | 8343 | if (btrfs_get_alloc_profile(root, 1) & BTRFS_BLOCK_GROUP_RAID56_MASK) |
53b381b3 DW |
8344 | async_submit = 0; |
8345 | else | |
8346 | async_submit = 1; | |
8347 | ||
02f57c7a JB |
8348 | bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS); |
8349 | if (!bio) | |
8350 | return -ENOMEM; | |
7a5c3c9b | 8351 | |
ef295ecf | 8352 | bio->bi_opf = orig_bio->bi_opf; |
02f57c7a JB |
8353 | bio->bi_private = dip; |
8354 | bio->bi_end_io = btrfs_end_dio_bio; | |
c1dc0896 | 8355 | btrfs_io_bio(bio)->logical = file_offset; |
02f57c7a JB |
8356 | atomic_inc(&dip->pending_bios); |
8357 | ||
6a2de22f | 8358 | bio_for_each_segment_all(bvec, orig_bio, j) { |
0b246afa | 8359 | nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec->bv_len); |
5f4dc8fc CR |
8360 | i = 0; |
8361 | next_block: | |
8362 | if (unlikely(map_length < submit_len + blocksize || | |
8363 | bio_add_page(bio, bvec->bv_page, blocksize, | |
8364 | bvec->bv_offset + (i * blocksize)) < blocksize)) { | |
e65e1535 MX |
8365 | /* |
8366 | * inc the count before we submit the bio so | |
8367 | * we know the end IO handler won't happen before | |
8368 | * we inc the count. Otherwise, the dip might get freed | |
8369 | * before we're done setting it up | |
8370 | */ | |
8371 | atomic_inc(&dip->pending_bios); | |
81a75f67 | 8372 | ret = __btrfs_submit_dio_bio(bio, inode, |
e65e1535 | 8373 | file_offset, skip_sum, |
c329861d | 8374 | async_submit); |
e65e1535 MX |
8375 | if (ret) { |
8376 | bio_put(bio); | |
8377 | atomic_dec(&dip->pending_bios); | |
8378 | goto out_err; | |
8379 | } | |
8380 | ||
e65e1535 MX |
8381 | start_sector += submit_len >> 9; |
8382 | file_offset += submit_len; | |
8383 | ||
8384 | submit_len = 0; | |
e65e1535 MX |
8385 | |
8386 | bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, | |
8387 | start_sector, GFP_NOFS); | |
8388 | if (!bio) | |
8389 | goto out_err; | |
ef295ecf | 8390 | bio->bi_opf = orig_bio->bi_opf; |
e65e1535 MX |
8391 | bio->bi_private = dip; |
8392 | bio->bi_end_io = btrfs_end_dio_bio; | |
c1dc0896 | 8393 | btrfs_io_bio(bio)->logical = file_offset; |
e65e1535 | 8394 | |
4f024f37 | 8395 | map_length = orig_bio->bi_iter.bi_size; |
0b246afa | 8396 | ret = btrfs_map_block(fs_info, btrfs_op(orig_bio), |
3ec706c8 | 8397 | start_sector << 9, |
e65e1535 MX |
8398 | &map_length, NULL, 0); |
8399 | if (ret) { | |
8400 | bio_put(bio); | |
8401 | goto out_err; | |
8402 | } | |
5f4dc8fc CR |
8403 | |
8404 | goto next_block; | |
e65e1535 | 8405 | } else { |
5f4dc8fc CR |
8406 | submit_len += blocksize; |
8407 | if (--nr_sectors) { | |
8408 | i++; | |
8409 | goto next_block; | |
8410 | } | |
e65e1535 MX |
8411 | } |
8412 | } | |
8413 | ||
02f57c7a | 8414 | submit: |
81a75f67 | 8415 | ret = __btrfs_submit_dio_bio(bio, inode, file_offset, skip_sum, |
c329861d | 8416 | async_submit); |
e65e1535 MX |
8417 | if (!ret) |
8418 | return 0; | |
8419 | ||
8420 | bio_put(bio); | |
8421 | out_err: | |
8422 | dip->errors = 1; | |
8423 | /* | |
8424 | * before the atomic variable goes to zero, we must | |
8425 | * make sure dip->errors is perceived to be set. | |
8426 | */ | |
4e857c58 | 8427 | smp_mb__before_atomic(); |
e65e1535 MX |
8428 | if (atomic_dec_and_test(&dip->pending_bios)) |
8429 | bio_io_error(dip->orig_bio); | |
8430 | ||
8431 | /* bio_end_io() will handle error, so we needn't return it */ | |
8432 | return 0; | |
8433 | } | |
8434 | ||
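/*
 * Entry point called by the generic direct IO code for each bio: clone
 * dio_bio, allocate the btrfs_dio_private bookkeeping, pick the read or
 * write endio handler and hand everything to btrfs_submit_direct_hook().
 * The free_ordered label unwinds whatever part of that setup succeeded.
 */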
8a4c1e42 MC |
8435 | static void btrfs_submit_direct(struct bio *dio_bio, struct inode *inode, |
8436 | loff_t file_offset) | |
4b46fce2 | 8437 | { |
61de718f FM |
8438 | struct btrfs_dio_private *dip = NULL; |
8439 | struct bio *io_bio = NULL; | |
23ea8e5a | 8440 | struct btrfs_io_bio *btrfs_bio; |
4b46fce2 | 8441 | int skip_sum; |
8a4c1e42 | 8442 | bool write = (bio_op(dio_bio) == REQ_OP_WRITE); |
4b46fce2 JB |
8443 | int ret = 0; |
8444 | ||
8445 | skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; | |
8446 | ||
9be3395b | 8447 | io_bio = btrfs_bio_clone(dio_bio, GFP_NOFS); |
9be3395b CM |
8448 | if (!io_bio) { |
8449 | ret = -ENOMEM; | |
8450 | goto free_ordered; | |
8451 | } | |
8452 | ||
c1dc0896 | 8453 | dip = kzalloc(sizeof(*dip), GFP_NOFS); |
4b46fce2 JB |
8454 | if (!dip) { |
8455 | ret = -ENOMEM; | |
61de718f | 8456 | goto free_ordered; |
4b46fce2 | 8457 | } |
4b46fce2 | 8458 | |
9be3395b | 8459 | dip->private = dio_bio->bi_private; |
4b46fce2 JB |
8460 | dip->inode = inode; |
8461 | dip->logical_offset = file_offset; | |
4f024f37 KO |
8462 | dip->bytes = dio_bio->bi_iter.bi_size; |
8463 | dip->disk_bytenr = (u64)dio_bio->bi_iter.bi_sector << 9; | |
9be3395b | 8464 | io_bio->bi_private = dip; |
9be3395b CM |
8465 | dip->orig_bio = io_bio; |
8466 | dip->dio_bio = dio_bio; | |
e65e1535 | 8467 | atomic_set(&dip->pending_bios, 0); |
c1dc0896 MX |
8468 | btrfs_bio = btrfs_io_bio(io_bio); |
8469 | btrfs_bio->logical = file_offset; | |
4b46fce2 | 8470 | |
c1dc0896 | 8471 | if (write) { |
9be3395b | 8472 | io_bio->bi_end_io = btrfs_endio_direct_write; |
c1dc0896 | 8473 | } else { |
9be3395b | 8474 | io_bio->bi_end_io = btrfs_endio_direct_read; |
c1dc0896 MX |
8475 | dip->subio_endio = btrfs_subio_endio_read; |
8476 | } | |
4b46fce2 | 8477 | |
f28a4928 FM |
8478 | /* |
8479 | * Reset the range for unsubmitted ordered extents (to a 0 length range) | |
8480 | * even if we fail to submit a bio, because in such case we do the | |
8481 | * corresponding error handling below and it must not be done a second | |
8482 | * time by btrfs_direct_IO(). | |
8483 | */ | |
8484 | if (write) { | |
8485 | struct btrfs_dio_data *dio_data = current->journal_info; | |
8486 | ||
8487 | dio_data->unsubmitted_oe_range_end = dip->logical_offset + | |
8488 | dip->bytes; | |
8489 | dio_data->unsubmitted_oe_range_start = | |
8490 | dio_data->unsubmitted_oe_range_end; | |
8491 | } | |
8492 | ||
81a75f67 | 8493 | ret = btrfs_submit_direct_hook(dip, skip_sum); |
e65e1535 | 8494 | if (!ret) |
eaf25d93 | 8495 | return; |
9be3395b | 8496 | |
23ea8e5a MX |
8497 | if (btrfs_bio->end_io) |
8498 | btrfs_bio->end_io(btrfs_bio, ret); | |
9be3395b | 8499 | |
4b46fce2 JB |
8500 | free_ordered: |
8501 | /* | |
61de718f FM |
8502 | * If we arrived here it means we either failed to submit the dip, |
8503 | * or we failed to clone the dio_bio, or failed to allocate the |
8504 | * dip. If we cloned the dio_bio and allocated the dip, we can just | |
8505 | * call bio_endio against our io_bio so that we get proper resource | |
8506 | * cleanup if we fail to submit the dip, otherwise, we must do the | |
8507 | * same as btrfs_endio_direct_[write|read] because we can't call these | |
8508 | * callbacks - they require an allocated dip and a clone of dio_bio. | |
4b46fce2 | 8509 | */ |
61de718f | 8510 | if (io_bio && dip) { |
4246a0b6 CH |
8511 | io_bio->bi_error = -EIO; |
8512 | bio_endio(io_bio); | |
61de718f FM |
8513 | /* |
8514 | * The end io callbacks free our dip, do the final put on io_bio | |
8515 | * and all the cleanup and final put for dio_bio (through | |
8516 | * dio_end_io()). | |
8517 | */ | |
8518 | dip = NULL; | |
8519 | io_bio = NULL; | |
8520 | } else { | |
14543774 FM |
8521 | if (write) |
8522 | btrfs_endio_direct_write_update_ordered(inode, | |
8523 | file_offset, | |
8524 | dio_bio->bi_iter.bi_size, | |
8525 | 0); | |
8526 | else | |
61de718f FM |
8527 | unlock_extent(&BTRFS_I(inode)->io_tree, file_offset, |
8528 | file_offset + dio_bio->bi_iter.bi_size - 1); | |
14543774 | 8529 | |
4246a0b6 | 8530 | dio_bio->bi_error = -EIO; |
61de718f FM |
8531 | /* |
8532 | * Releases and cleans up our dio_bio, no need to bio_put() | |
8533 | * nor bio_endio()/bio_io_error() against dio_bio. | |
8534 | */ | |
8535 | dio_end_io(dio_bio, ret); | |
4b46fce2 | 8536 | } |
61de718f FM |
8537 | if (io_bio) |
8538 | bio_put(io_bio); | |
8539 | kfree(dip); | |
4b46fce2 JB |
8540 | } |
8541 | ||
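/*
 * Sanity check a direct IO request: both the file offset and the iovec
 * alignment must match the sector size, and a read must not reuse the
 * same iov_base across segments (that would confuse csum verification).
 */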
2ff7e61e JM |
8542 | static ssize_t check_direct_IO(struct btrfs_fs_info *fs_info, |
8543 | struct kiocb *iocb, | |
8544 | const struct iov_iter *iter, loff_t offset) | |
5a5f79b5 CM |
8545 | { |
8546 | int seg; | |
a1b75f7d | 8547 | int i; |
0b246afa | 8548 | unsigned int blocksize_mask = fs_info->sectorsize - 1; |
5a5f79b5 | 8549 | ssize_t retval = -EINVAL; |
5a5f79b5 CM |
8550 | |
8551 | if (offset & blocksize_mask) | |
8552 | goto out; | |
8553 | ||
28060d5d AV |
8554 | if (iov_iter_alignment(iter) & blocksize_mask) |
8555 | goto out; | |
a1b75f7d | 8556 | |
28060d5d | 8557 | /* If this is a write we don't need to check anymore */ |
cd27e455 | 8558 | if (iov_iter_rw(iter) != READ || !iter_is_iovec(iter)) |
28060d5d AV |
8559 | return 0; |
8560 | /* | |
8561 | * Check to make sure we don't have duplicate iov_base's in this | |
8562 | * iovec, if so return EINVAL, otherwise we'll get csum errors | |
8563 | * when reading back. | |
8564 | */ | |
8565 | for (seg = 0; seg < iter->nr_segs; seg++) { | |
8566 | for (i = seg + 1; i < iter->nr_segs; i++) { | |
8567 | if (iter->iov[seg].iov_base == iter->iov[i].iov_base) | |
a1b75f7d JB |
8568 | goto out; |
8569 | } | |
5a5f79b5 CM |
8570 | } |
8571 | retval = 0; | |
8572 | out: | |
8573 | return retval; | |
8574 | } | |
eb838e73 | 8575 | |
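/*
 * ->direct_IO entry point: flush any compressed writeback, reserve
 * delalloc space for writes (tracked via current->journal_info), then let
 * __blockdev_direct_IO() drive btrfs_get_blocks_direct and
 * btrfs_submit_direct. Unused reservations are released afterwards.
 */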
c8b8e32d | 8576 | static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) |
16432985 | 8577 | { |
4b46fce2 JB |
8578 | struct file *file = iocb->ki_filp; |
8579 | struct inode *inode = file->f_mapping->host; | |
0b246afa | 8580 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
50745b0a | 8581 | struct btrfs_dio_data dio_data = { 0 }; |
c8b8e32d | 8582 | loff_t offset = iocb->ki_pos; |
0934856d | 8583 | size_t count = 0; |
2e60a51e | 8584 | int flags = 0; |
38851cc1 MX |
8585 | bool wakeup = true; |
8586 | bool relock = false; | |
0934856d | 8587 | ssize_t ret; |
4b46fce2 | 8588 | |
2ff7e61e | 8589 | if (check_direct_IO(fs_info, iocb, iter, offset)) |
5a5f79b5 | 8590 | return 0; |
3f7c579c | 8591 | |
fe0f07d0 | 8592 | inode_dio_begin(inode); |
4e857c58 | 8593 | smp_mb__after_atomic(); |
38851cc1 | 8594 | |
0e267c44 | 8595 | /* |
41bd9ca4 MX |
8596 | * The generic stuff only does filemap_write_and_wait_range, which |
8597 | * isn't enough if we've written compressed pages to this area, so | |
8598 | * we need to flush the dirty pages again to make absolutely sure | |
8599 | * that any outstanding dirty pages are on disk. | |
0e267c44 | 8600 | */ |
a6cbcd4a | 8601 | count = iov_iter_count(iter); |
41bd9ca4 MX |
8602 | if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, |
8603 | &BTRFS_I(inode)->runtime_flags)) | |
9a025a08 WS |
8604 | filemap_fdatawrite_range(inode->i_mapping, offset, |
8605 | offset + count - 1); | |
0e267c44 | 8606 | |
6f673763 | 8607 | if (iov_iter_rw(iter) == WRITE) { |
38851cc1 MX |
8608 | /* |
8609 | * If the write DIO is beyond the EOF, we need to update |
8610 | * the isize, but it is protected by i_mutex. So we can |
8611 | * not unlock the i_mutex in this case. |
8612 | */ | |
8613 | if (offset + count <= inode->i_size) { | |
4aaedfb0 | 8614 | dio_data.overwrite = 1; |
5955102c | 8615 | inode_unlock(inode); |
38851cc1 MX |
8616 | relock = true; |
8617 | } | |
7cf5b976 | 8618 | ret = btrfs_delalloc_reserve_space(inode, offset, count); |
0934856d | 8619 | if (ret) |
38851cc1 | 8620 | goto out; |
823bb20a | 8621 | dio_data.outstanding_extents = count_max_extents(count); |
e1cbbfa5 JB |
8622 | |
8623 | /* | |
8624 | * We need to know how many extents we reserved so that we can | |
8625 | * do the accounting properly if we go over the number we | |
8626 | * originally calculated. Abuse current->journal_info for this. | |
8627 | */ | |
da17066c | 8628 | dio_data.reserve = round_up(count, |
0b246afa | 8629 | fs_info->sectorsize); |
f28a4928 FM |
8630 | dio_data.unsubmitted_oe_range_start = (u64)offset; |
8631 | dio_data.unsubmitted_oe_range_end = (u64)offset; | |
50745b0a | 8632 | current->journal_info = &dio_data; |
97dcdea0 | 8633 | down_read(&BTRFS_I(inode)->dio_sem); |
ee39b432 DS |
8634 | } else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK, |
8635 | &BTRFS_I(inode)->runtime_flags)) { | |
fe0f07d0 | 8636 | inode_dio_end(inode); |
38851cc1 MX |
8637 | flags = DIO_LOCKING | DIO_SKIP_HOLES; |
8638 | wakeup = false; | |
0934856d MX |
8639 | } |
8640 | ||
17f8c842 | 8641 | ret = __blockdev_direct_IO(iocb, inode, |
0b246afa | 8642 | fs_info->fs_devices->latest_bdev, |
c8b8e32d | 8643 | iter, btrfs_get_blocks_direct, NULL, |
17f8c842 | 8644 | btrfs_submit_direct, flags); |
6f673763 | 8645 | if (iov_iter_rw(iter) == WRITE) { |
97dcdea0 | 8646 | up_read(&BTRFS_I(inode)->dio_sem); |
e1cbbfa5 | 8647 | current->journal_info = NULL; |
ddba1bfc | 8648 | if (ret < 0 && ret != -EIOCBQUEUED) { |
50745b0a | 8649 | if (dio_data.reserve) |
7cf5b976 QW |
8650 | btrfs_delalloc_release_space(inode, offset, |
8651 | dio_data.reserve); | |
f28a4928 FM |
8652 | /* |
8653 | * On error we might have left some ordered extents | |
8654 | * without submitting corresponding bios for them, so | |
8655 | * clean them up to avoid other tasks getting them |
8656 | * and waiting for them to complete forever. | |
8657 | */ | |
8658 | if (dio_data.unsubmitted_oe_range_start < | |
8659 | dio_data.unsubmitted_oe_range_end) | |
8660 | btrfs_endio_direct_write_update_ordered(inode, | |
8661 | dio_data.unsubmitted_oe_range_start, | |
8662 | dio_data.unsubmitted_oe_range_end - | |
8663 | dio_data.unsubmitted_oe_range_start, | |
8664 | 0); | |
ddba1bfc | 8665 | } else if (ret >= 0 && (size_t)ret < count) |
7cf5b976 QW |
8666 | btrfs_delalloc_release_space(inode, offset, |
8667 | count - (size_t)ret); | |
0934856d | 8668 | } |
38851cc1 | 8669 | out: |
2e60a51e | 8670 | if (wakeup) |
fe0f07d0 | 8671 | inode_dio_end(inode); |
38851cc1 | 8672 | if (relock) |
5955102c | 8673 | inode_lock(inode); |
0934856d MX |
8674 | |
8675 | return ret; | |
16432985 CM |
8676 | } |
8677 | ||
05dadc09 TI |
8678 | #define BTRFS_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC) |
8679 | ||
1506fcc8 YS |
8680 | static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, |
8681 | __u64 start, __u64 len) | |
8682 | { | |
05dadc09 TI |
8683 | int ret; |
8684 | ||
8685 | ret = fiemap_check_flags(fieinfo, BTRFS_FIEMAP_FLAGS); | |
8686 | if (ret) | |
8687 | return ret; | |
8688 | ||
ec29ed5b | 8689 | return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent_fiemap); |
1506fcc8 YS |
8690 | } |
8691 | ||
a52d9a80 | 8692 | int btrfs_readpage(struct file *file, struct page *page) |
9ebefb18 | 8693 | { |
d1310b2e CM |
8694 | struct extent_io_tree *tree; |
8695 | tree = &BTRFS_I(page->mapping->host)->io_tree; | |
8ddc7d9c | 8696 | return extent_read_full_page(tree, page, btrfs_get_extent, 0); |
9ebefb18 | 8697 | } |
1832a6d5 | 8698 | |
a52d9a80 | 8699 | static int btrfs_writepage(struct page *page, struct writeback_control *wbc) |
39279cc3 | 8700 | { |
d1310b2e | 8701 | struct extent_io_tree *tree; |
be7bd730 JB |
8702 | struct inode *inode = page->mapping->host; |
8703 | int ret; | |
b888db2b CM |
8704 | |
8705 | if (current->flags & PF_MEMALLOC) { | |
8706 | redirty_page_for_writepage(wbc, page); | |
8707 | unlock_page(page); | |
8708 | return 0; | |
8709 | } | |
be7bd730 JB |
8710 | |
8711 | /* | |
8712 | * If we are under memory pressure we will call this directly from the |
8713 | * VM, so we need to make sure we have the inode referenced for the |
8714 | * ordered extent. If not, just return as if we didn't do anything. |
8715 | */ | |
8716 | if (!igrab(inode)) { | |
8717 | redirty_page_for_writepage(wbc, page); | |
8718 | return AOP_WRITEPAGE_ACTIVATE; | |
8719 | } | |
d1310b2e | 8720 | tree = &BTRFS_I(page->mapping->host)->io_tree; |
be7bd730 JB |
8721 | ret = extent_write_full_page(tree, page, btrfs_get_extent, wbc); |
8722 | btrfs_add_delayed_iput(inode); | |
8723 | return ret; | |
9ebefb18 CM |
8724 | } |
8725 | ||
48a3b636 ES |
8726 | static int btrfs_writepages(struct address_space *mapping, |
8727 | struct writeback_control *wbc) | |
b293f02e | 8728 | { |
d1310b2e | 8729 | struct extent_io_tree *tree; |
771ed689 | 8730 | |
d1310b2e | 8731 | tree = &BTRFS_I(mapping->host)->io_tree; |
b293f02e CM |
8732 | return extent_writepages(tree, mapping, btrfs_get_extent, wbc); |
8733 | } | |
8734 | ||
3ab2fb5a CM |
8735 | static int |
8736 | btrfs_readpages(struct file *file, struct address_space *mapping, | |
8737 | struct list_head *pages, unsigned nr_pages) | |
8738 | { | |
d1310b2e CM |
8739 | struct extent_io_tree *tree; |
8740 | tree = &BTRFS_I(mapping->host)->io_tree; | |
3ab2fb5a CM |
8741 | return extent_readpages(tree, mapping, pages, nr_pages, |
8742 | btrfs_get_extent); | |
8743 | } | |
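/*
 * Try to release the extent map and io state attached to this page; on
 * success drop the page's private reference so the page can be freed.
 */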
e6dcd2dc | 8744 | static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags) |
9ebefb18 | 8745 | { |
d1310b2e CM |
8746 | struct extent_io_tree *tree; |
8747 | struct extent_map_tree *map; | |
a52d9a80 | 8748 | int ret; |
8c2383c3 | 8749 | |
d1310b2e CM |
8750 | tree = &BTRFS_I(page->mapping->host)->io_tree; |
8751 | map = &BTRFS_I(page->mapping->host)->extent_tree; | |
70dec807 | 8752 | ret = try_release_extent_mapping(map, tree, page, gfp_flags); |
a52d9a80 CM |
8753 | if (ret == 1) { |
8754 | ClearPagePrivate(page); | |
8755 | set_page_private(page, 0); | |
09cbfeaf | 8756 | put_page(page); |
39279cc3 | 8757 | } |
a52d9a80 | 8758 | return ret; |
39279cc3 CM |
8759 | } |
8760 | ||
e6dcd2dc CM |
8761 | static int btrfs_releasepage(struct page *page, gfp_t gfp_flags) |
8762 | { | |
98509cfc CM |
8763 | if (PageWriteback(page) || PageDirty(page)) |
8764 | return 0; | |
3ba7ab22 | 8765 | return __btrfs_releasepage(page, gfp_flags); |
e6dcd2dc CM |
8766 | } |
8767 | ||
d47992f8 LC |
8768 | static void btrfs_invalidatepage(struct page *page, unsigned int offset, |
8769 | unsigned int length) | |
39279cc3 | 8770 | { |
5fd02043 | 8771 | struct inode *inode = page->mapping->host; |
d1310b2e | 8772 | struct extent_io_tree *tree; |
e6dcd2dc | 8773 | struct btrfs_ordered_extent *ordered; |
2ac55d41 | 8774 | struct extent_state *cached_state = NULL; |
e6dcd2dc | 8775 | u64 page_start = page_offset(page); |
09cbfeaf | 8776 | u64 page_end = page_start + PAGE_SIZE - 1; |
dbfdb6d1 CR |
8777 | u64 start; |
8778 | u64 end; | |
131e404a | 8779 | int inode_evicting = inode->i_state & I_FREEING; |
39279cc3 | 8780 | |
8b62b72b CM |
8781 | /* |
8782 | * we have the page locked, so new writeback can't start, | |
8783 | * and the dirty bit won't be cleared while we are here. | |
8784 | * | |
8785 | * Wait for IO on this page so that we can safely clear | |
8786 | * the PagePrivate2 bit and do ordered accounting | |
8787 | */ | |
e6dcd2dc | 8788 | wait_on_page_writeback(page); |
8b62b72b | 8789 | |
5fd02043 | 8790 | tree = &BTRFS_I(inode)->io_tree; |
e6dcd2dc CM |
8791 | if (offset) { |
8792 | btrfs_releasepage(page, GFP_NOFS); | |
8793 | return; | |
8794 | } | |
131e404a FDBM |
8795 | |
8796 | if (!inode_evicting) | |
ff13db41 | 8797 | lock_extent_bits(tree, page_start, page_end, &cached_state); |
dbfdb6d1 CR |
8798 | again: |
8799 | start = page_start; | |
a776c6fa | 8800 | ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), start, |
dbfdb6d1 | 8801 | page_end - start + 1); |
e6dcd2dc | 8802 | if (ordered) { |
dbfdb6d1 | 8803 | end = min(page_end, ordered->file_offset + ordered->len - 1); |
eb84ae03 CM |
8804 | /* |
8805 | * IO on this page will never be started, so we need | |
8806 | * to account for any ordered extents now | |
8807 | */ | |
131e404a | 8808 | if (!inode_evicting) |
dbfdb6d1 | 8809 | clear_extent_bit(tree, start, end, |
131e404a FDBM |
8810 | EXTENT_DIRTY | EXTENT_DELALLOC | |
8811 | EXTENT_LOCKED | EXTENT_DO_ACCOUNTING | | |
8812 | EXTENT_DEFRAG, 1, 0, &cached_state, | |
8813 | GFP_NOFS); | |
8b62b72b CM |
8814 | /* |
8815 | * whoever cleared the private bit is responsible | |
8816 | * for the finish_ordered_io | |
8817 | */ | |
77cef2ec JB |
8818 | if (TestClearPagePrivate2(page)) { |
8819 | struct btrfs_ordered_inode_tree *tree; | |
8820 | u64 new_len; | |
8821 | ||
8822 | tree = &BTRFS_I(inode)->ordered_tree; | |
8823 | ||
8824 | spin_lock_irq(&tree->lock); | |
8825 | set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags); | |
dbfdb6d1 | 8826 | new_len = start - ordered->file_offset; |
77cef2ec JB |
8827 | if (new_len < ordered->truncated_len) |
8828 | ordered->truncated_len = new_len; | |
8829 | spin_unlock_irq(&tree->lock); | |
8830 | ||
8831 | if (btrfs_dec_test_ordered_pending(inode, &ordered, | |
dbfdb6d1 CR |
8832 | start, |
8833 | end - start + 1, 1)) | |
77cef2ec | 8834 | btrfs_finish_ordered_io(ordered); |
8b62b72b | 8835 | } |
e6dcd2dc | 8836 | btrfs_put_ordered_extent(ordered); |
131e404a FDBM |
8837 | if (!inode_evicting) { |
8838 | cached_state = NULL; | |
dbfdb6d1 | 8839 | lock_extent_bits(tree, start, end, |
131e404a FDBM |
8840 | &cached_state); |
8841 | } | |
dbfdb6d1 CR |
8842 | |
8843 | start = end + 1; | |
8844 | if (start < page_end) | |
8845 | goto again; | |
131e404a FDBM |
8846 | } |
8847 | ||
b9d0b389 QW |
8848 | /* |
8849 | * Qgroup reserved space handler | |
8850 | * Page here will be either | |
8851 | * 1) Already written to disk | |
8852 | * In this case, its reserved space is released from data rsv map | |
8853 | * and will eventually be freed by the delayed_ref handler. |
8854 | * So even if we call qgroup_free_data(), it won't decrease reserved |
8855 | * space. |
8856 | * 2) Not written to disk |
0b34c261 GR |
8857 | * This means the reserved space should be freed here. However, |
8858 | * if a truncate invalidates the page (by clearing PageDirty) |
8859 | * and the page was accounted for while allocating the extent |
8860 | * in btrfs_check_data_free_space(), we let the delayed_ref handler |
8861 | * free the entire extent. |
b9d0b389 | 8862 | */ |
0b34c261 GR |
8863 | if (PageDirty(page)) |
8864 | btrfs_qgroup_free_data(inode, page_start, PAGE_SIZE); | |
131e404a FDBM |
8865 | if (!inode_evicting) { |
8866 | clear_extent_bit(tree, page_start, page_end, | |
8867 | EXTENT_LOCKED | EXTENT_DIRTY | | |
8868 | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | | |
8869 | EXTENT_DEFRAG, 1, 1, | |
8870 | &cached_state, GFP_NOFS); | |
8871 | ||
8872 | __btrfs_releasepage(page, GFP_NOFS); | |
e6dcd2dc | 8873 | } |
e6dcd2dc | 8874 | |
4a096752 | 8875 | ClearPageChecked(page); |
9ad6b7bc | 8876 | if (PagePrivate(page)) { |
9ad6b7bc CM |
8877 | ClearPagePrivate(page); |
8878 | set_page_private(page, 0); | |
09cbfeaf | 8879 | put_page(page); |
9ad6b7bc | 8880 | } |
39279cc3 CM |
8881 | } |
8882 | ||
9ebefb18 CM |
8883 | /* |
8884 | * btrfs_page_mkwrite() is not allowed to change the file size as it gets | |
8885 | * called from a page fault handler when a page is first dirtied. Hence we must | |
8886 | * be careful to check for EOF conditions here. We set the page up correctly | |
8887 | * for a written page which means we get ENOSPC checking when writing into | |
8888 | * holes and correct delalloc and unwritten extent mapping on filesystems that | |
8889 | * support these features. | |
8890 | * | |
8891 | * We are not allowed to take the i_mutex here so we have to play games to | |
8892 | * protect against truncate races as the page could now be beyond EOF. Because | |
8893 | * vmtruncate() writes the inode size before removing pages, once we have the | |
8894 | * page lock we can determine safely if the page is beyond EOF. If it is not | |
8895 | * beyond EOF, then the page is guaranteed safe against truncation until we | |
8896 | * unlock the page. | |
8897 | */ | |
c2ec175c | 8898 | int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) |
9ebefb18 | 8899 | { |
c2ec175c | 8900 | struct page *page = vmf->page; |
496ad9aa | 8901 | struct inode *inode = file_inode(vma->vm_file); |
0b246afa | 8902 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
e6dcd2dc CM |
8903 | struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; |
8904 | struct btrfs_ordered_extent *ordered; | |
2ac55d41 | 8905 | struct extent_state *cached_state = NULL; |
e6dcd2dc CM |
8906 | char *kaddr; |
8907 | unsigned long zero_start; | |
9ebefb18 | 8908 | loff_t size; |
1832a6d5 | 8909 | int ret; |
9998eb70 | 8910 | int reserved = 0; |
d0b7da88 | 8911 | u64 reserved_space; |
a52d9a80 | 8912 | u64 page_start; |
e6dcd2dc | 8913 | u64 page_end; |
d0b7da88 CR |
8914 | u64 end; |
8915 | ||
09cbfeaf | 8916 | reserved_space = PAGE_SIZE; |
9ebefb18 | 8917 | |
b2b5ef5c | 8918 | sb_start_pagefault(inode->i_sb); |
df480633 | 8919 | page_start = page_offset(page); |
09cbfeaf | 8920 | page_end = page_start + PAGE_SIZE - 1; |
d0b7da88 | 8921 | end = page_end; |
df480633 | 8922 | |
d0b7da88 CR |
8923 | /* |
8924 | * Reserving delalloc space after obtaining the page lock can lead to | |
8925 | * deadlock. For example, if a dirty page is locked by this function | |
8926 | * and the call to btrfs_delalloc_reserve_space() ends up triggering | |
8927 | * dirty page write out, then the btrfs_writepage() function could | |
8928 | * end up waiting indefinitely to get a lock on the page currently | |
8929 | * being processed by btrfs_page_mkwrite() function. | |
8930 | */ | |
7cf5b976 | 8931 | ret = btrfs_delalloc_reserve_space(inode, page_start, |
d0b7da88 | 8932 | reserved_space); |
9998eb70 | 8933 | if (!ret) { |
e41f941a | 8934 | ret = file_update_time(vma->vm_file); |
9998eb70 CM |
8935 | reserved = 1; |
8936 | } | |
56a76f82 NP |
8937 | if (ret) { |
8938 | if (ret == -ENOMEM) | |
8939 | ret = VM_FAULT_OOM; | |
8940 | else /* -ENOSPC, -EIO, etc */ | |
8941 | ret = VM_FAULT_SIGBUS; | |
9998eb70 CM |
8942 | if (reserved) |
8943 | goto out; | |
8944 | goto out_noreserve; | |
56a76f82 | 8945 | } |
1832a6d5 | 8946 | |
56a76f82 | 8947 | ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */ |
e6dcd2dc | 8948 | again: |
9ebefb18 | 8949 | lock_page(page); |
9ebefb18 | 8950 | size = i_size_read(inode); |
a52d9a80 | 8951 | |
9ebefb18 | 8952 | if ((page->mapping != inode->i_mapping) || |
e6dcd2dc | 8953 | (page_start >= size)) { |
9ebefb18 CM |
8954 | /* page got truncated out from underneath us */ |
8955 | goto out_unlock; | |
8956 | } | |
e6dcd2dc CM |
8957 | wait_on_page_writeback(page); |
8958 | ||
ff13db41 | 8959 | lock_extent_bits(io_tree, page_start, page_end, &cached_state); |
e6dcd2dc CM |
8960 | set_page_extent_mapped(page); |
8961 | ||
eb84ae03 CM |
8962 | /* |
8963 | * we can't set the delalloc bits if there are pending ordered | |
8964 | * extents. Drop our locks and wait for them to finish | |
8965 | */ | |
a776c6fa NB |
8966 | ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start, |
8967 | PAGE_SIZE); | |
e6dcd2dc | 8968 | if (ordered) { |
2ac55d41 JB |
8969 | unlock_extent_cached(io_tree, page_start, page_end, |
8970 | &cached_state, GFP_NOFS); | |
e6dcd2dc | 8971 | unlock_page(page); |
eb84ae03 | 8972 | btrfs_start_ordered_extent(inode, ordered, 1); |
e6dcd2dc CM |
8973 | btrfs_put_ordered_extent(ordered); |
8974 | goto again; | |
8975 | } | |
8976 | ||
09cbfeaf | 8977 | if (page->index == ((size - 1) >> PAGE_SHIFT)) { |
da17066c | 8978 | reserved_space = round_up(size - page_start, |
0b246afa | 8979 | fs_info->sectorsize); |
09cbfeaf | 8980 | if (reserved_space < PAGE_SIZE) { |
d0b7da88 CR |
8981 | end = page_start + reserved_space - 1; |
8982 | spin_lock(&BTRFS_I(inode)->lock); | |
8983 | BTRFS_I(inode)->outstanding_extents++; | |
8984 | spin_unlock(&BTRFS_I(inode)->lock); | |
8985 | btrfs_delalloc_release_space(inode, page_start, | |
09cbfeaf | 8986 | PAGE_SIZE - reserved_space); |
d0b7da88 CR |
8987 | } |
8988 | } | |
8989 | ||
fbf19087 | 8990 | /* |
5416034f LB |
8991 | * page_mkwrite gets called when the page is first dirtied after it's |
8992 | * faulted in, but write(2) could also dirty a page and set delalloc |
8993 | * bits, so in this case, for space accounting reasons, we still need to |
8994 | * clear any delalloc bits within this page range since we have to | |
8995 | * reserve data&meta space before lock_page() (see above comments). | |
fbf19087 | 8996 | */ |
d0b7da88 | 8997 | clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, end, |
9e8a4a8b LB |
8998 | EXTENT_DIRTY | EXTENT_DELALLOC | |
8999 | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, | |
2ac55d41 | 9000 | 0, 0, &cached_state, GFP_NOFS); |
fbf19087 | 9001 | |
d0b7da88 | 9002 | ret = btrfs_set_extent_delalloc(inode, page_start, end, |
ba8b04c1 | 9003 | &cached_state, 0); |
9ed74f2d | 9004 | if (ret) { |
2ac55d41 JB |
9005 | unlock_extent_cached(io_tree, page_start, page_end, |
9006 | &cached_state, GFP_NOFS); | |
9ed74f2d JB |
9007 | ret = VM_FAULT_SIGBUS; |
9008 | goto out_unlock; | |
9009 | } | |
e6dcd2dc | 9010 | ret = 0; |
9ebefb18 CM |
9011 | |
9012 | /* page is wholly or partially inside EOF */ | |
09cbfeaf KS |
9013 | if (page_start + PAGE_SIZE > size) |
9014 | zero_start = size & ~PAGE_MASK; | |
9ebefb18 | 9015 | else |
09cbfeaf | 9016 | zero_start = PAGE_SIZE; |
9ebefb18 | 9017 | |
09cbfeaf | 9018 | if (zero_start != PAGE_SIZE) { |
e6dcd2dc | 9019 | kaddr = kmap(page); |
09cbfeaf | 9020 | memset(kaddr + zero_start, 0, PAGE_SIZE - zero_start); |
e6dcd2dc CM |
9021 | flush_dcache_page(page); |
9022 | kunmap(page); | |
9023 | } | |
247e743c | 9024 | ClearPageChecked(page); |
e6dcd2dc | 9025 | set_page_dirty(page); |
50a9b214 | 9026 | SetPageUptodate(page); |
5a3f23d5 | 9027 | |
0b246afa | 9028 | BTRFS_I(inode)->last_trans = fs_info->generation; |
257c62e1 | 9029 | BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid; |
46d8bc34 | 9030 | BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->root->last_log_commit; |
257c62e1 | 9031 | |
2ac55d41 | 9032 | unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS); |
9ebefb18 CM |
9033 | |
9034 | out_unlock: | |
b2b5ef5c JK |
9035 | if (!ret) { |
9036 | sb_end_pagefault(inode->i_sb); | |
50a9b214 | 9037 | return VM_FAULT_LOCKED; |
b2b5ef5c | 9038 | } |
9ebefb18 | 9039 | unlock_page(page); |
1832a6d5 | 9040 | out: |
d0b7da88 | 9041 | btrfs_delalloc_release_space(inode, page_start, reserved_space); |
9998eb70 | 9042 | out_noreserve: |
b2b5ef5c | 9043 | sb_end_pagefault(inode->i_sb); |
9ebefb18 CM |
9044 | return ret; |
9045 | } | |
9046 | ||
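/*
 * Shrink an inode's items down to the current i_size. Runs
 * btrfs_truncate_inode_items() in a loop, restarting the transaction and
 * refilling the block reservation whenever it runs out of space.
 */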
a41ad394 | 9047 | static int btrfs_truncate(struct inode *inode) |
39279cc3 | 9048 | { |
0b246afa | 9049 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
39279cc3 | 9050 | struct btrfs_root *root = BTRFS_I(inode)->root; |
fcb80c2a | 9051 | struct btrfs_block_rsv *rsv; |
a71754fc | 9052 | int ret = 0; |
3893e33b | 9053 | int err = 0; |
39279cc3 | 9054 | struct btrfs_trans_handle *trans; |
0b246afa JM |
9055 | u64 mask = fs_info->sectorsize - 1; |
9056 | u64 min_size = btrfs_calc_trunc_metadata_size(fs_info, 1); | |
39279cc3 | 9057 | |
0ef8b726 JB |
9058 | ret = btrfs_wait_ordered_range(inode, inode->i_size & (~mask), |
9059 | (u64)-1); | |
9060 | if (ret) | |
9061 | return ret; | |
39279cc3 | 9062 | |
fcb80c2a | 9063 | /* |
01327610 | 9064 | * Yes ladies and gentlemen, this is indeed ugly. The fact is we have |
fcb80c2a JB |
9065 | * 3 things going on here |
9066 | * | |
9067 | * 1) We need to reserve space for our orphan item and the space to | |
9068 | * delete our orphan item. Lord knows we don't want to have a dangling | |
9069 | * orphan item because we didn't reserve space to remove it. | |
9070 | * | |
9071 | * 2) We need to reserve space to update our inode. | |
9072 | * | |
9073 | * 3) We need to have something to cache all the space that is going to | |
9074 | * be freed up by the truncate operation, but also have some slack |
9075 | * space reserved in case it uses space during the truncate (thank you | |
9076 | * very much snapshotting). | |
9077 | * | |
01327610 | 9078 | * And we need these to all be separate. The fact is we can use a lot of |
fcb80c2a | 9079 | * space doing the truncate, and we have no earthly idea how much space |
01327610 | 9080 | * we will use, so we need the truncate reservation to be separate so it |
fcb80c2a JB |
9081 | * doesn't end up using space reserved for updating the inode or |
9082 | * removing the orphan item. We also need to be able to stop the | |
9083 | * transaction and start a new one, which means we need to be able to | |
9084 | * update the inode several times, and we have no way of knowing how |
9085 | * many times that will be, so we can't just reserve 1 item for the | |
01327610 | 9086 | * entirety of the operation, so that has to be done separately as well. |
fcb80c2a JB |
9087 | * Then there is the orphan item, which does indeed need to be held on |
9088 | * to for the whole operation, and we need nobody to touch this reserved | |
9089 | * space except the orphan code. | |
9090 | * | |
9091 | * So that leaves us with | |
9092 | * | |
9093 | * 1) root->orphan_block_rsv - for the orphan deletion. | |
9094 | * 2) rsv - for the truncate reservation, which we will steal from the | |
9095 | * transaction reservation. | |
9096 | * 3) fs_info->trans_block_rsv - this will have 1 item's worth left for |
9097 | * updating the inode. | |
9098 | */ | |
2ff7e61e | 9099 | rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP); |
fcb80c2a JB |
9100 | if (!rsv) |
9101 | return -ENOMEM; | |
4a338542 | 9102 | rsv->size = min_size; |
ca7e70f5 | 9103 | rsv->failfast = 1; |
f0cd846e | 9104 | |
907cbceb | 9105 | /* |
07127184 | 9106 | * 1 for the truncate slack space |
907cbceb JB |
9107 | * 1 for updating the inode. |
9108 | */ | |
f3fe820c | 9109 | trans = btrfs_start_transaction(root, 2); |
fcb80c2a JB |
9110 | if (IS_ERR(trans)) { |
9111 | err = PTR_ERR(trans); | |
9112 | goto out; | |
9113 | } | |
f0cd846e | 9114 | |
907cbceb | 9115 | /* Migrate the slack space for the truncate to our reserve */ |
0b246afa | 9116 | ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv, |
25d609f8 | 9117 | min_size, 0); |
fcb80c2a | 9118 | BUG_ON(ret); |
f0cd846e | 9119 | |
5dc562c5 JB |
9120 | /* |
9121 | * So if we truncate and then write and fsync we normally would just | |
9122 | * write the extents that changed, which is a problem if we need to | |
9123 | * first truncate that entire inode. So set this flag so we write out | |
9124 | * all of the extents in the inode to the sync log so we're completely | |
9125 | * safe. | |
9126 | */ | |
9127 | set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags); | |
ca7e70f5 | 9128 | trans->block_rsv = rsv; |
907cbceb | 9129 | |
8082510e YZ |
9130 | while (1) { |
9131 | ret = btrfs_truncate_inode_items(trans, root, inode, | |
9132 | inode->i_size, | |
9133 | BTRFS_EXTENT_DATA_KEY); | |
28ed1345 | 9134 | if (ret != -ENOSPC && ret != -EAGAIN) { |
3893e33b | 9135 | err = ret; |
8082510e | 9136 | break; |
3893e33b | 9137 | } |
39279cc3 | 9138 | |
0b246afa | 9139 | trans->block_rsv = &fs_info->trans_block_rsv; |
8082510e | 9140 | ret = btrfs_update_inode(trans, root, inode); |
3893e33b JB |
9141 | if (ret) { |
9142 | err = ret; | |
9143 | break; | |
9144 | } | |
ca7e70f5 | 9145 | |
3a45bb20 | 9146 | btrfs_end_transaction(trans); |
2ff7e61e | 9147 | btrfs_btree_balance_dirty(fs_info); |
ca7e70f5 JB |
9148 | |
9149 | trans = btrfs_start_transaction(root, 2); | |
9150 | if (IS_ERR(trans)) { | |
9151 | ret = err = PTR_ERR(trans); | |
9152 | trans = NULL; | |
9153 | break; | |
9154 | } | |
9155 | ||
47b5d646 | 9156 | btrfs_block_rsv_release(fs_info, rsv, -1); |
0b246afa | 9157 | ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, |
25d609f8 | 9158 | rsv, min_size, 0); |
ca7e70f5 JB |
9159 | BUG_ON(ret); /* shouldn't happen */ |
9160 | trans->block_rsv = rsv; | |
8082510e YZ |
9161 | } |
9162 | ||
9163 | if (ret == 0 && inode->i_nlink > 0) { | |
fcb80c2a | 9164 | trans->block_rsv = root->orphan_block_rsv; |
3d6ae7bb | 9165 | ret = btrfs_orphan_del(trans, BTRFS_I(inode)); |
3893e33b JB |
9166 | if (ret) |
9167 | err = ret; | |
8082510e YZ |
9168 | } |
9169 | ||
917c16b2 | 9170 | if (trans) { |
0b246afa | 9171 | trans->block_rsv = &fs_info->trans_block_rsv; |
917c16b2 CM |
9172 | ret = btrfs_update_inode(trans, root, inode); |
9173 | if (ret && !err) | |
9174 | err = ret; | |
7b128766 | 9175 | |
3a45bb20 | 9176 | ret = btrfs_end_transaction(trans); |
2ff7e61e | 9177 | btrfs_btree_balance_dirty(fs_info); |
917c16b2 | 9178 | } |
fcb80c2a | 9179 | out: |
2ff7e61e | 9180 | btrfs_free_block_rsv(fs_info, rsv); |
fcb80c2a | 9181 | |
3893e33b JB |
9182 | if (ret && !err) |
9183 | err = ret; | |
a41ad394 | 9184 | |
3893e33b | 9185 | return err; |
39279cc3 CM |
9186 | } |
9187 | ||
d352ac68 CM |
9188 | /* |
9189 | * create a new subvolume directory/inode (helper for the ioctl). | |
9190 | */ | |
d2fb3437 | 9191 | int btrfs_create_subvol_root(struct btrfs_trans_handle *trans, |
63541927 FDBM |
9192 | struct btrfs_root *new_root, |
9193 | struct btrfs_root *parent_root, | |
9194 | u64 new_dirid) | |
39279cc3 | 9195 | { |
39279cc3 | 9196 | struct inode *inode; |
76dda93c | 9197 | int err; |
00e4e6b3 | 9198 | u64 index = 0; |
39279cc3 | 9199 | |
12fc9d09 FA |
9200 | inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, |
9201 | new_dirid, new_dirid, | |
9202 | S_IFDIR | (~current_umask() & S_IRWXUGO), | |
9203 | &index); | |
54aa1f4d | 9204 | if (IS_ERR(inode)) |
f46b5a66 | 9205 | return PTR_ERR(inode); |
39279cc3 CM |
9206 | inode->i_op = &btrfs_dir_inode_operations; |
9207 | inode->i_fop = &btrfs_dir_file_operations; | |
9208 | ||
bfe86848 | 9209 | set_nlink(inode, 1); |
6ef06d27 | 9210 | btrfs_i_size_write(BTRFS_I(inode), 0); |
b0d5d10f | 9211 | unlock_new_inode(inode); |
3b96362c | 9212 | |
63541927 FDBM |
9213 | err = btrfs_subvol_inherit_props(trans, new_root, parent_root); |
9214 | if (err) | |
9215 | btrfs_err(new_root->fs_info, | |
351fd353 | 9216 | "error inheriting subvolume %llu properties: %d", |
63541927 FDBM |
9217 | new_root->root_key.objectid, err); |
9218 | ||
76dda93c | 9219 | err = btrfs_update_inode(trans, new_root, inode); |
cb8e7090 | 9220 | |
76dda93c | 9221 | iput(inode); |
ce598979 | 9222 | return err; |
39279cc3 CM |
9223 | } |
9224 | ||
39279cc3 CM |
9225 | struct inode *btrfs_alloc_inode(struct super_block *sb) |
9226 | { | |
9227 | struct btrfs_inode *ei; | |
2ead6ae7 | 9228 | struct inode *inode; |
39279cc3 CM |
9229 | |
9230 | ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS); | |
9231 | if (!ei) | |
9232 | return NULL; | |
2ead6ae7 YZ |
9233 | |
9234 | ei->root = NULL; | |
2ead6ae7 | 9235 | ei->generation = 0; |
15ee9bc7 | 9236 | ei->last_trans = 0; |
257c62e1 | 9237 | ei->last_sub_trans = 0; |
e02119d5 | 9238 | ei->logged_trans = 0; |
2ead6ae7 | 9239 | ei->delalloc_bytes = 0; |
47059d93 | 9240 | ei->defrag_bytes = 0; |
2ead6ae7 YZ |
9241 | ei->disk_i_size = 0; |
9242 | ei->flags = 0; | |
7709cde3 | 9243 | ei->csum_bytes = 0; |
2ead6ae7 | 9244 | ei->index_cnt = (u64)-1; |
67de1176 | 9245 | ei->dir_index = 0; |
2ead6ae7 | 9246 | ei->last_unlink_trans = 0; |
46d8bc34 | 9247 | ei->last_log_commit = 0; |
8089fe62 | 9248 | ei->delayed_iput_count = 0; |
2ead6ae7 | 9249 | |
9e0baf60 JB |
9250 | spin_lock_init(&ei->lock); |
9251 | ei->outstanding_extents = 0; | |
9252 | ei->reserved_extents = 0; | |
2ead6ae7 | 9253 | |
72ac3c0d | 9254 | ei->runtime_flags = 0; |
261507a0 | 9255 | ei->force_compress = BTRFS_COMPRESS_NONE; |
2ead6ae7 | 9256 | |
16cdcec7 MX |
9257 | ei->delayed_node = NULL; |
9258 | ||
9cc97d64 | 9259 | ei->i_otime.tv_sec = 0; |
9260 | ei->i_otime.tv_nsec = 0; | |
9261 | ||
2ead6ae7 | 9262 | inode = &ei->vfs_inode; |
a8067e02 | 9263 | extent_map_tree_init(&ei->extent_tree); |
f993c883 DS |
9264 | extent_io_tree_init(&ei->io_tree, &inode->i_data); |
9265 | extent_io_tree_init(&ei->io_failure_tree, &inode->i_data); | |
0b32f4bb JB |
9266 | ei->io_tree.track_uptodate = 1; |
9267 | ei->io_failure_tree.track_uptodate = 1; | |
b812ce28 | 9268 | atomic_set(&ei->sync_writers, 0); |
2ead6ae7 | 9269 | mutex_init(&ei->log_mutex); |
f248679e | 9270 | mutex_init(&ei->delalloc_mutex); |
e6dcd2dc | 9271 | btrfs_ordered_inode_tree_init(&ei->ordered_tree); |
2ead6ae7 | 9272 | INIT_LIST_HEAD(&ei->delalloc_inodes); |
8089fe62 | 9273 | INIT_LIST_HEAD(&ei->delayed_iput); |
2ead6ae7 | 9274 | RB_CLEAR_NODE(&ei->rb_node); |
5f9a8a51 | 9275 | init_rwsem(&ei->dio_sem); |
2ead6ae7 YZ |
9276 | |
9277 | return inode; | |
39279cc3 CM |
9278 | } |
9279 | ||
aaedb55b JB |
9280 | #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS |
9281 | void btrfs_test_destroy_inode(struct inode *inode) | |
9282 | { | |
dcdbc059 | 9283 | btrfs_drop_extent_cache(BTRFS_I(inode), 0, (u64)-1, 0); |
aaedb55b JB |
9284 | kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode)); |
9285 | } | |
9286 | #endif | |
9287 | ||
fa0d7e3d NP |
9288 | static void btrfs_i_callback(struct rcu_head *head) |
9289 | { | |
9290 | struct inode *inode = container_of(head, struct inode, i_rcu); | |
fa0d7e3d NP |
9291 | kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode)); |
9292 | } | |
9293 | ||
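/*
 * Final teardown of an in-memory inode: warn about anything still
 * accounted against it, drop ordered extents that were never completed,
 * clear the cached extent maps and free the inode via RCU.
 */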
39279cc3 CM |
9294 | void btrfs_destroy_inode(struct inode *inode) |
9295 | { | |
0b246afa | 9296 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
e6dcd2dc | 9297 | struct btrfs_ordered_extent *ordered; |
5a3f23d5 CM |
9298 | struct btrfs_root *root = BTRFS_I(inode)->root; |
9299 | ||
b3d9b7a3 | 9300 | WARN_ON(!hlist_empty(&inode->i_dentry)); |
39279cc3 | 9301 | WARN_ON(inode->i_data.nrpages); |
9e0baf60 JB |
9302 | WARN_ON(BTRFS_I(inode)->outstanding_extents); |
9303 | WARN_ON(BTRFS_I(inode)->reserved_extents); | |
7709cde3 JB |
9304 | WARN_ON(BTRFS_I(inode)->delalloc_bytes); |
9305 | WARN_ON(BTRFS_I(inode)->csum_bytes); | |
47059d93 | 9306 | WARN_ON(BTRFS_I(inode)->defrag_bytes); |
39279cc3 | 9307 | |
a6dbd429 JB |
9308 | /* |
9309 | * This can happen where we create an inode, but somebody else also | |
9310 | * created the same inode and we need to destroy the one we already | |
9311 | * created. | |
9312 | */ | |
9313 | if (!root) | |
9314 | goto free; | |
9315 | ||
8a35d95f JB |
9316 | if (test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, |
9317 | &BTRFS_I(inode)->runtime_flags)) { | |
0b246afa | 9318 | btrfs_info(fs_info, "inode %llu still on the orphan list", |
4a0cc7ca | 9319 | btrfs_ino(BTRFS_I(inode))); |
8a35d95f | 9320 | atomic_dec(&root->orphan_inodes); |
7b128766 | 9321 | } |
7b128766 | 9322 | |
d397712b | 9323 | while (1) { |
e6dcd2dc CM |
9324 | ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1); |
9325 | if (!ordered) | |
9326 | break; | |
9327 | else { | |
0b246afa | 9328 | btrfs_err(fs_info, |
5d163e0e JM |
9329 | "found ordered extent %llu %llu on inode cleanup", |
9330 | ordered->file_offset, ordered->len); | |
e6dcd2dc CM |
9331 | btrfs_remove_ordered_extent(inode, ordered); |
9332 | btrfs_put_ordered_extent(ordered); | |
9333 | btrfs_put_ordered_extent(ordered); | |
9334 | } | |
9335 | } | |
56fa9d07 | 9336 | btrfs_qgroup_check_reserved_leak(inode); |
5d4f98a2 | 9337 | inode_tree_del(inode); |
dcdbc059 | 9338 | btrfs_drop_extent_cache(BTRFS_I(inode), 0, (u64)-1, 0); |
a6dbd429 | 9339 | free: |
fa0d7e3d | 9340 | call_rcu(&inode->i_rcu, btrfs_i_callback); |
39279cc3 CM |
9341 | } |
9342 | ||
45321ac5 | 9343 | int btrfs_drop_inode(struct inode *inode) |
76dda93c YZ |
9344 | { |
9345 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
45321ac5 | 9346 | |
6379ef9f NA |
9347 | if (root == NULL) |
9348 | return 1; | |
9349 | ||
fa6ac876 | 9350 | /* the snap/subvol tree is on deleting */ |
69e9c6c6 | 9351 | if (btrfs_root_refs(&root->root_item) == 0) |
45321ac5 | 9352 | return 1; |
76dda93c | 9353 | else |
45321ac5 | 9354 | return generic_drop_inode(inode); |
76dda93c YZ |
9355 | } |
9356 | ||
0ee0fda0 | 9357 | static void init_once(void *foo) |
39279cc3 CM |
9358 | { |
9359 | struct btrfs_inode *ei = (struct btrfs_inode *) foo; | |
9360 | ||
9361 | inode_init_once(&ei->vfs_inode); | |
9362 | } | |
9363 | ||
9364 | void btrfs_destroy_cachep(void) | |
9365 | { | |
8c0a8537 KS |
9366 | /* |
9367 | * Make sure all delayed rcu free inodes are flushed before we | |
9368 | * destroy cache. | |
9369 | */ | |
9370 | rcu_barrier(); | |
5598e900 KM |
9371 | kmem_cache_destroy(btrfs_inode_cachep); |
9372 | kmem_cache_destroy(btrfs_trans_handle_cachep); | |
9373 | kmem_cache_destroy(btrfs_transaction_cachep); | |
9374 | kmem_cache_destroy(btrfs_path_cachep); | |
9375 | kmem_cache_destroy(btrfs_free_space_cachep); | |
39279cc3 CM |
9376 | } |
9377 | ||
9378 | int btrfs_init_cachep(void) | |
9379 | { | |
837e1972 | 9380 | btrfs_inode_cachep = kmem_cache_create("btrfs_inode", |
9601e3f6 | 9381 | sizeof(struct btrfs_inode), 0, |
5d097056 VD |
9382 | SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD | SLAB_ACCOUNT, |
9383 | init_once); | |
39279cc3 CM |
9384 | if (!btrfs_inode_cachep) |
9385 | goto fail; | |
9601e3f6 | 9386 | |
837e1972 | 9387 | btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle", |
9601e3f6 | 9388 | sizeof(struct btrfs_trans_handle), 0, |
fba4b697 | 9389 | SLAB_TEMPORARY | SLAB_MEM_SPREAD, NULL); |
39279cc3 CM |
9390 | if (!btrfs_trans_handle_cachep) |
9391 | goto fail; | |
9601e3f6 | 9392 | |
837e1972 | 9393 | btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction", |
9601e3f6 | 9394 | sizeof(struct btrfs_transaction), 0, |
fba4b697 | 9395 | SLAB_TEMPORARY | SLAB_MEM_SPREAD, NULL); |
39279cc3 CM |
9396 | if (!btrfs_transaction_cachep) |
9397 | goto fail; | |
9601e3f6 | 9398 | |
837e1972 | 9399 | btrfs_path_cachep = kmem_cache_create("btrfs_path", |
9601e3f6 | 9400 | sizeof(struct btrfs_path), 0, |
fba4b697 | 9401 | SLAB_MEM_SPREAD, NULL); |
39279cc3 CM |
9402 | if (!btrfs_path_cachep) |
9403 | goto fail; | |
9601e3f6 | 9404 | |
837e1972 | 9405 | btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space", |
dc89e982 | 9406 | sizeof(struct btrfs_free_space), 0, |
fba4b697 | 9407 | SLAB_MEM_SPREAD, NULL); |
dc89e982 JB |
9408 | if (!btrfs_free_space_cachep) |
9409 | goto fail; | |
9410 | ||
39279cc3 CM |
9411 | return 0; |
9412 | fail: | |
9413 | btrfs_destroy_cachep(); | |
9414 | return -ENOMEM; | |
9415 | } | |
9416 | ||
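/*
 * getattr: report the root's anonymous device as st_dev and add in-flight
 * delalloc bytes to st_blocks so not-yet-written data is accounted for.
 */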
9417 | static int btrfs_getattr(struct vfsmount *mnt, | |
9418 | struct dentry *dentry, struct kstat *stat) | |
9419 | { | |
df0af1a5 | 9420 | u64 delalloc_bytes; |
2b0143b5 | 9421 | struct inode *inode = d_inode(dentry); |
fadc0d8b DS |
9422 | u32 blocksize = inode->i_sb->s_blocksize; |
9423 | ||
39279cc3 | 9424 | generic_fillattr(inode, stat); |
0ee5dc67 | 9425 | stat->dev = BTRFS_I(inode)->root->anon_dev; |
df0af1a5 MX |
9426 | |
9427 | spin_lock(&BTRFS_I(inode)->lock); | |
9428 | delalloc_bytes = BTRFS_I(inode)->delalloc_bytes; | |
9429 | spin_unlock(&BTRFS_I(inode)->lock); | |
fadc0d8b | 9430 | stat->blocks = (ALIGN(inode_get_bytes(inode), blocksize) + |
df0af1a5 | 9431 | ALIGN(delalloc_bytes, blocksize)) >> 9; |
39279cc3 CM |
9432 | return 0; |
9433 | } | |
9434 | ||
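/*
 * RENAME_EXCHANGE: atomically swap two directory entries. Both names are
 * unlinked and re-added within one transaction; the tree-log is pinned
 * (or a full commit is forced when a subvolume is involved) so log replay
 * never sees only half of the swap.
 */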
cdd1fedf DF |
9435 | static int btrfs_rename_exchange(struct inode *old_dir, |
9436 | struct dentry *old_dentry, | |
9437 | struct inode *new_dir, | |
9438 | struct dentry *new_dentry) | |
9439 | { | |
0b246afa | 9440 | struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb); |
cdd1fedf DF |
9441 | struct btrfs_trans_handle *trans; |
9442 | struct btrfs_root *root = BTRFS_I(old_dir)->root; | |
9443 | struct btrfs_root *dest = BTRFS_I(new_dir)->root; | |
9444 | struct inode *new_inode = new_dentry->d_inode; | |
9445 | struct inode *old_inode = old_dentry->d_inode; | |
c2050a45 | 9446 | struct timespec ctime = current_time(old_inode); |
cdd1fedf | 9447 | struct dentry *parent; |
4a0cc7ca NB |
9448 | u64 old_ino = btrfs_ino(BTRFS_I(old_inode)); |
9449 | u64 new_ino = btrfs_ino(BTRFS_I(new_inode)); | |
cdd1fedf DF |
9450 | u64 old_idx = 0; |
9451 | u64 new_idx = 0; | |
9452 | u64 root_objectid; | |
9453 | int ret; | |
86e8aa0e FM |
9454 | bool root_log_pinned = false; |
9455 | bool dest_log_pinned = false; | |
cdd1fedf DF |
9456 | |
9457 | /* we only allow rename subvolume link between subvolumes */ | |
9458 | if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest) | |
9459 | return -EXDEV; | |
9460 | ||
9461 | /* close the race window with snapshot create/destroy ioctl */ | |
9462 | if (old_ino == BTRFS_FIRST_FREE_OBJECTID) | |
0b246afa | 9463 | down_read(&fs_info->subvol_sem); |
cdd1fedf | 9464 | if (new_ino == BTRFS_FIRST_FREE_OBJECTID) |
0b246afa | 9465 | down_read(&fs_info->subvol_sem); |
cdd1fedf DF |
9466 | |
9467 | /* | |
9468 | * We want to reserve the absolute worst case amount of items. So if | |
9469 | * both inodes are subvols and we need to unlink them then that would | |
9470 | * require 4 item modifications, but if they are both normal inodes it | |
9471 | * would require 5 item modifications, so we'll assume they are normal |
9472 | * inodes. So 5 * 2 is 10, plus 2 for the new links, so 12 total items | |
9473 | * should cover the worst case number of items we'll modify. | |
9474 | */ | |
9475 | trans = btrfs_start_transaction(root, 12); | |
9476 | if (IS_ERR(trans)) { | |
9477 | ret = PTR_ERR(trans); | |
9478 | goto out_notrans; | |
9479 | } | |
9480 | ||
9481 | /* | |
9482 | * We need to find a free sequence number both in the source and | |
9483 | * in the destination directory for the exchange. | |
9484 | */ | |
877574e2 | 9485 | ret = btrfs_set_inode_index(BTRFS_I(new_dir), &old_idx); |
cdd1fedf DF |
9486 | if (ret) |
9487 | goto out_fail; | |
877574e2 | 9488 | ret = btrfs_set_inode_index(BTRFS_I(old_dir), &new_idx); |
cdd1fedf DF |
9489 | if (ret) |
9490 | goto out_fail; | |
9491 | ||
9492 | BTRFS_I(old_inode)->dir_index = 0ULL; | |
9493 | BTRFS_I(new_inode)->dir_index = 0ULL; | |
9494 | ||
9495 | /* Reference for the source. */ | |
9496 | if (old_ino == BTRFS_FIRST_FREE_OBJECTID) { | |
9497 | /* force full log commit if subvolume involved. */ | |
0b246afa | 9498 | btrfs_set_log_full_commit(fs_info, trans); |
cdd1fedf | 9499 | } else { |
376e5a57 FM |
9500 | btrfs_pin_log_trans(root); |
9501 | root_log_pinned = true; | |
cdd1fedf DF |
9502 | ret = btrfs_insert_inode_ref(trans, dest, |
9503 | new_dentry->d_name.name, | |
9504 | new_dentry->d_name.len, | |
9505 | old_ino, | |
f85b7379 DS |
9506 | btrfs_ino(BTRFS_I(new_dir)), |
9507 | old_idx); | |
cdd1fedf DF |
9508 | if (ret) |
9509 | goto out_fail; | |
cdd1fedf DF |
9510 | } |
9511 | ||
9512 | /* And now for the dest. */ | |
9513 | if (new_ino == BTRFS_FIRST_FREE_OBJECTID) { | |
9514 | /* force full log commit if subvolume involved. */ | |
0b246afa | 9515 | btrfs_set_log_full_commit(fs_info, trans); |
cdd1fedf | 9516 | } else { |
376e5a57 FM |
9517 | btrfs_pin_log_trans(dest); |
9518 | dest_log_pinned = true; | |
cdd1fedf DF |
9519 | ret = btrfs_insert_inode_ref(trans, root, |
9520 | old_dentry->d_name.name, | |
9521 | old_dentry->d_name.len, | |
9522 | new_ino, | |
f85b7379 DS |
9523 | btrfs_ino(BTRFS_I(old_dir)), |
9524 | new_idx); | |
cdd1fedf DF |
9525 | if (ret) |
9526 | goto out_fail; | |
cdd1fedf DF |
9527 | } |
9528 | ||
9529 | /* Update inode version and ctime/mtime. */ | |
9530 | inode_inc_iversion(old_dir); | |
9531 | inode_inc_iversion(new_dir); | |
9532 | inode_inc_iversion(old_inode); | |
9533 | inode_inc_iversion(new_inode); | |
9534 | old_dir->i_ctime = old_dir->i_mtime = ctime; | |
9535 | new_dir->i_ctime = new_dir->i_mtime = ctime; | |
9536 | old_inode->i_ctime = ctime; | |
9537 | new_inode->i_ctime = ctime; | |
9538 | ||
9539 | if (old_dentry->d_parent != new_dentry->d_parent) { | |
f85b7379 DS |
9540 | btrfs_record_unlink_dir(trans, BTRFS_I(old_dir), |
9541 | BTRFS_I(old_inode), 1); | |
9542 | btrfs_record_unlink_dir(trans, BTRFS_I(new_dir), | |
9543 | BTRFS_I(new_inode), 1); | |
cdd1fedf DF |
9544 | } |
9545 | ||
9546 | /* src is a subvolume */ | |
9547 | if (old_ino == BTRFS_FIRST_FREE_OBJECTID) { | |
9548 | root_objectid = BTRFS_I(old_inode)->root->root_key.objectid; | |
9549 | ret = btrfs_unlink_subvol(trans, root, old_dir, | |
9550 | root_objectid, | |
9551 | old_dentry->d_name.name, | |
9552 | old_dentry->d_name.len); | |
9553 | } else { /* src is an inode */ | |
4ec5934e NB |
9554 | ret = __btrfs_unlink_inode(trans, root, BTRFS_I(old_dir), |
9555 | BTRFS_I(old_dentry->d_inode), | |
cdd1fedf DF |
9556 | old_dentry->d_name.name, |
9557 | old_dentry->d_name.len); | |
9558 | if (!ret) | |
9559 | ret = btrfs_update_inode(trans, root, old_inode); | |
9560 | } | |
9561 | if (ret) { | |
66642832 | 9562 | btrfs_abort_transaction(trans, ret); |
cdd1fedf DF |
9563 | goto out_fail; |
9564 | } | |
9565 | ||
9566 | /* dest is a subvolume */ | |
9567 | if (new_ino == BTRFS_FIRST_FREE_OBJECTID) { | |
9568 | root_objectid = BTRFS_I(new_inode)->root->root_key.objectid; | |
9569 | ret = btrfs_unlink_subvol(trans, dest, new_dir, | |
9570 | root_objectid, | |
9571 | new_dentry->d_name.name, | |
9572 | new_dentry->d_name.len); | |
9573 | } else { /* dest is an inode */ | |
4ec5934e NB |
9574 | ret = __btrfs_unlink_inode(trans, dest, BTRFS_I(new_dir), |
9575 | BTRFS_I(new_dentry->d_inode), | |
cdd1fedf DF |
9576 | new_dentry->d_name.name, |
9577 | new_dentry->d_name.len); | |
9578 | if (!ret) | |
9579 | ret = btrfs_update_inode(trans, dest, new_inode); | |
9580 | } | |
9581 | if (ret) { | |
66642832 | 9582 | btrfs_abort_transaction(trans, ret); |
cdd1fedf DF |
9583 | goto out_fail; |
9584 | } | |
9585 | ||
db0a669f | 9586 | ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode), |
cdd1fedf DF |
9587 | new_dentry->d_name.name, |
9588 | new_dentry->d_name.len, 0, old_idx); | |
9589 | if (ret) { | |
66642832 | 9590 | btrfs_abort_transaction(trans, ret); |
cdd1fedf DF |
9591 | goto out_fail; |
9592 | } | |
9593 | ||
db0a669f | 9594 | ret = btrfs_add_link(trans, BTRFS_I(old_dir), BTRFS_I(new_inode), |
cdd1fedf DF |
9595 | old_dentry->d_name.name, |
9596 | old_dentry->d_name.len, 0, new_idx); | |
9597 | if (ret) { | |
66642832 | 9598 | btrfs_abort_transaction(trans, ret); |
cdd1fedf DF |
9599 | goto out_fail; |
9600 | } | |
9601 | ||
9602 | if (old_inode->i_nlink == 1) | |
9603 | BTRFS_I(old_inode)->dir_index = old_idx; | |
9604 | if (new_inode->i_nlink == 1) | |
9605 | BTRFS_I(new_inode)->dir_index = new_idx; | |
9606 | ||
86e8aa0e | 9607 | if (root_log_pinned) { |
cdd1fedf | 9608 | parent = new_dentry->d_parent; |
f85b7379 DS |
9609 | btrfs_log_new_name(trans, BTRFS_I(old_inode), BTRFS_I(old_dir), |
9610 | parent); | |
cdd1fedf | 9611 | btrfs_end_log_trans(root); |
86e8aa0e | 9612 | root_log_pinned = false; |
cdd1fedf | 9613 | } |
86e8aa0e | 9614 | if (dest_log_pinned) { |
cdd1fedf | 9615 | parent = old_dentry->d_parent; |
f85b7379 DS |
9616 | btrfs_log_new_name(trans, BTRFS_I(new_inode), BTRFS_I(new_dir), |
9617 | parent); | |
cdd1fedf | 9618 | btrfs_end_log_trans(dest); |
86e8aa0e | 9619 | dest_log_pinned = false; |
cdd1fedf DF |
9620 | } |
9621 | out_fail: | |
86e8aa0e FM |
9622 | /* |
9623 | * If we have pinned a log and an error happened, we unpin tasks | |
9624 | * trying to sync the log and force them to fallback to a transaction | |
9625 | * commit if the log currently contains any of the inodes involved in | |
9626 | * this rename operation (to ensure we do not persist a log with an | |
9627 | * inconsistent state for any of these inodes or leading to any | |
9628 | * inconsistencies when replayed). If the transaction was aborted, the | |
9629 | * abortion reason is propagated to userspace when attempting to commit | |
9630 | * the transaction. If the log does not contain any of these inodes, we | |
9631 | * allow the tasks to sync it. | |
9632 | */ | |
9633 | if (ret && (root_log_pinned || dest_log_pinned)) { | |
0f8939b8 NB |
9634 | if (btrfs_inode_in_log(BTRFS_I(old_dir), fs_info->generation) || |
9635 | btrfs_inode_in_log(BTRFS_I(new_dir), fs_info->generation) || | |
9636 | btrfs_inode_in_log(BTRFS_I(old_inode), fs_info->generation) || | |
86e8aa0e | 9637 | (new_inode && |
0f8939b8 | 9638 | btrfs_inode_in_log(BTRFS_I(new_inode), fs_info->generation))) |
0b246afa | 9639 | btrfs_set_log_full_commit(fs_info, trans); |
86e8aa0e FM |
9640 | |
9641 | if (root_log_pinned) { | |
9642 | btrfs_end_log_trans(root); | |
9643 | root_log_pinned = false; | |
9644 | } | |
9645 | if (dest_log_pinned) { | |
9646 | btrfs_end_log_trans(dest); | |
9647 | dest_log_pinned = false; | |
9648 | } | |
9649 | } | |
3a45bb20 | 9650 | ret = btrfs_end_transaction(trans); |
cdd1fedf DF |
9651 | out_notrans: |
9652 | if (new_ino == BTRFS_FIRST_FREE_OBJECTID) | |
0b246afa | 9653 | up_read(&fs_info->subvol_sem); |
cdd1fedf | 9654 | if (old_ino == BTRFS_FIRST_FREE_OBJECTID) |
0b246afa | 9655 | up_read(&fs_info->subvol_sem); |
cdd1fedf DF |
9656 | |
9657 | return ret; | |
9658 | } | |
9659 | ||
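/*
 * Create the whiteout inode used by RENAME_WHITEOUT: a character device
 * entry (WHITEOUT_DEV) that takes the place of the source name.
 */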
9660 | static int btrfs_whiteout_for_rename(struct btrfs_trans_handle *trans, | |
9661 | struct btrfs_root *root, | |
9662 | struct inode *dir, | |
9663 | struct dentry *dentry) | |
9664 | { | |
9665 | int ret; | |
9666 | struct inode *inode; | |
9667 | u64 objectid; | |
9668 | u64 index; | |
9669 | ||
9670 | ret = btrfs_find_free_ino(root, &objectid); | |
9671 | if (ret) | |
9672 | return ret; | |
9673 | ||
9674 | inode = btrfs_new_inode(trans, root, dir, | |
9675 | dentry->d_name.name, | |
9676 | dentry->d_name.len, | |
4a0cc7ca | 9677 | btrfs_ino(BTRFS_I(dir)), |
cdd1fedf DF |
9678 | objectid, |
9679 | S_IFCHR | WHITEOUT_MODE, | |
9680 | &index); | |
9681 | ||
9682 | if (IS_ERR(inode)) { | |
9683 | ret = PTR_ERR(inode); | |
9684 | return ret; | |
9685 | } | |
9686 | ||
9687 | inode->i_op = &btrfs_special_inode_operations; | |
9688 | init_special_inode(inode, inode->i_mode, | |
9689 | WHITEOUT_DEV); | |
9690 | ||
9691 | ret = btrfs_init_inode_security(trans, inode, dir, | |
9692 | &dentry->d_name); | |
9693 | if (ret) | |
c9901618 | 9694 | goto out; |
cdd1fedf | 9695 | |
cef415af NB |
9696 | ret = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, |
9697 | BTRFS_I(inode), 0, index); | |
cdd1fedf | 9698 | if (ret) |
c9901618 | 9699 | goto out; |
cdd1fedf DF |
9700 | |
9701 | ret = btrfs_update_inode(trans, root, inode); | |
c9901618 | 9702 | out: |
cdd1fedf | 9703 | unlock_new_inode(inode); |
c9901618 FM |
9704 | if (ret) |
9705 | inode_dec_link_count(inode); | |
cdd1fedf DF |
9706 | iput(inode); |
9707 | ||
c9901618 | 9708 | return ret; |
cdd1fedf DF |
9709 | } |
9710 | ||
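/*
 * Regular rename, with optional RENAME_WHITEOUT support. Unlinks the old
 * entry, links the new one, handles subvolume roots specially and pins
 * the tree-log (or forces a full commit) so the new name replays
 * consistently.
 */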
d397712b | 9711 | static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, |
cdd1fedf DF |
9712 | struct inode *new_dir, struct dentry *new_dentry, |
9713 | unsigned int flags) | |
39279cc3 | 9714 | { |
0b246afa | 9715 | struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb); |
39279cc3 | 9716 | struct btrfs_trans_handle *trans; |
5062af35 | 9717 | unsigned int trans_num_items; |
39279cc3 | 9718 | struct btrfs_root *root = BTRFS_I(old_dir)->root; |
4df27c4d | 9719 | struct btrfs_root *dest = BTRFS_I(new_dir)->root; |
2b0143b5 DH |
9720 | struct inode *new_inode = d_inode(new_dentry); |
9721 | struct inode *old_inode = d_inode(old_dentry); | |
00e4e6b3 | 9722 | u64 index = 0; |
4df27c4d | 9723 | u64 root_objectid; |
39279cc3 | 9724 | int ret; |
4a0cc7ca | 9725 | u64 old_ino = btrfs_ino(BTRFS_I(old_inode)); |
3dc9e8f7 | 9726 | bool log_pinned = false; |
39279cc3 | 9727 | |
4a0cc7ca | 9728 | if (btrfs_ino(BTRFS_I(new_dir)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) |
f679a840 YZ |
9729 | return -EPERM; |
9730 | ||
4df27c4d | 9731 | /* we only allow rename subvolume link between subvolumes */ |
33345d01 | 9732 | if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest) |
3394e160 CM |
9733 | return -EXDEV; |
9734 | ||
33345d01 | 9735 | if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID || |
4a0cc7ca | 9736 | (new_inode && btrfs_ino(BTRFS_I(new_inode)) == BTRFS_FIRST_FREE_OBJECTID)) |
39279cc3 | 9737 | return -ENOTEMPTY; |
5f39d397 | 9738 | |
4df27c4d YZ |
9739 | if (S_ISDIR(old_inode->i_mode) && new_inode && |
9740 | new_inode->i_size > BTRFS_EMPTY_DIR_SIZE) | |
9741 | return -ENOTEMPTY; | |
9c52057c CM |
9742 | |
9743 | ||
9744 | /* check for collisions, even if the name isn't there */ | |
4871c158 | 9745 | ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino, |
9c52057c CM |
9746 | new_dentry->d_name.name, |
9747 | new_dentry->d_name.len); | |
9748 | ||
9749 | if (ret) { | |
9750 | if (ret == -EEXIST) { | |
9751 | /* we shouldn't get |
9752 | * -EEXIST without a new_inode */ |
fae7f21c | 9753 | if (WARN_ON(!new_inode)) { |
9c52057c CM |
9754 | return ret; |
9755 | } | |
9756 | } else { | |
9757 | /* maybe -EOVERFLOW */ | |
9758 | return ret; | |
9759 | } | |
9760 | } | |
9761 | ret = 0; | |
9762 | ||
5a3f23d5 | 9763 | /* |
8d875f95 CM |
9764 | * we're using rename to replace one file with another. Start IO on it |
9765 | * now so we don't add too much work to the end of the transaction | |
5a3f23d5 | 9766 | */ |
8d875f95 | 9767 | if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size) |
5a3f23d5 CM |
9768 | filemap_flush(old_inode->i_mapping); |
9769 | ||
76dda93c | 9770 | /* close the race window with snapshot create/destroy ioctl */ |
33345d01 | 9771 | if (old_ino == BTRFS_FIRST_FREE_OBJECTID) |
0b246afa | 9772 | down_read(&fs_info->subvol_sem); |
a22285a6 YZ |
9773 | /* |
9774 | * We want to reserve the absolute worst case amount of items. So if | |
9775 | * both inodes are subvols and we need to unlink them then that would | |
9776 | * require 4 item modifications, but if they are both normal inodes it | |
cdd1fedf | 9777 | * would require 5 item modifications, so we'll assume they are normal |
a22285a6 YZ |
9778 | * inodes. So 5 * 2 is 10, plus 1 for the new link, so 11 total items |
9779 | * should cover the worst case number of items we'll modify. | |
5062af35 FM |
9780 | * If our rename has the whiteout flag, we need 5 more units for the |
9781 | * new inode (1 inode item, 1 inode ref, 2 dir items and 1 xattr item | |
9782 | * when selinux is enabled). | |
a22285a6 | 9783 | */ |
5062af35 FM |
9784 | trans_num_items = 11; |
9785 | if (flags & RENAME_WHITEOUT) | |
9786 | trans_num_items += 5; | |
9787 | trans = btrfs_start_transaction(root, trans_num_items); | |
b44c59a8 | 9788 | if (IS_ERR(trans)) { |
cdd1fedf DF |
9789 | ret = PTR_ERR(trans); |
9790 | goto out_notrans; | |
9791 | } | |
76dda93c | 9792 | |
4df27c4d YZ |
9793 | if (dest != root) |
9794 | btrfs_record_root_in_trans(trans, dest); | |
5f39d397 | 9795 | |
877574e2 | 9796 | ret = btrfs_set_inode_index(BTRFS_I(new_dir), &index); |
a5719521 YZ |
9797 | if (ret) |
9798 | goto out_fail; | |
5a3f23d5 | 9799 | |
67de1176 | 9800 | BTRFS_I(old_inode)->dir_index = 0ULL; |
33345d01 | 9801 | if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) { |
4df27c4d | 9802 | /* force full log commit if subvolume involved. */ |
0b246afa | 9803 | btrfs_set_log_full_commit(fs_info, trans); |
4df27c4d | 9804 | } else { |
c4aba954 FM |
9805 | btrfs_pin_log_trans(root); |
9806 | log_pinned = true; | |
a5719521 YZ |
9807 | ret = btrfs_insert_inode_ref(trans, dest, |
9808 | new_dentry->d_name.name, | |
9809 | new_dentry->d_name.len, | |
33345d01 | 9810 | old_ino, |
4a0cc7ca | 9811 | btrfs_ino(BTRFS_I(new_dir)), index); |
a5719521 YZ |
9812 | if (ret) |
9813 | goto out_fail; | |
4df27c4d | 9814 | } |
5a3f23d5 | 9815 | |
0c4d2d95 JB |
9816 | inode_inc_iversion(old_dir); |
9817 | inode_inc_iversion(new_dir); | |
9818 | inode_inc_iversion(old_inode); | |
04b285f3 DD |
9819 | old_dir->i_ctime = old_dir->i_mtime = |
9820 | new_dir->i_ctime = new_dir->i_mtime = | |
c2050a45 | 9821 | old_inode->i_ctime = current_time(old_dir); |
5f39d397 | 9822 | |
12fcfd22 | 9823 | if (old_dentry->d_parent != new_dentry->d_parent) |
f85b7379 DS |
9824 | btrfs_record_unlink_dir(trans, BTRFS_I(old_dir), |
9825 | BTRFS_I(old_inode), 1); | |
12fcfd22 | 9826 | |
33345d01 | 9827 | if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) { |
4df27c4d YZ |
9828 | root_objectid = BTRFS_I(old_inode)->root->root_key.objectid; |
9829 | ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid, | |
9830 | old_dentry->d_name.name, | |
9831 | old_dentry->d_name.len); | |
9832 | } else { | |
4ec5934e NB |
9833 | ret = __btrfs_unlink_inode(trans, root, BTRFS_I(old_dir), |
9834 | BTRFS_I(d_inode(old_dentry)), | |
92986796 AV |
9835 | old_dentry->d_name.name, |
9836 | old_dentry->d_name.len); | |
9837 | if (!ret) | |
9838 | ret = btrfs_update_inode(trans, root, old_inode); | |
4df27c4d | 9839 | } |
79787eaa | 9840 | if (ret) { |
66642832 | 9841 | btrfs_abort_transaction(trans, ret); |
79787eaa JM |
9842 | goto out_fail; |
9843 | } | |
39279cc3 CM |
9844 | |
9845 | if (new_inode) { | |
0c4d2d95 | 9846 | inode_inc_iversion(new_inode); |
c2050a45 | 9847 | new_inode->i_ctime = current_time(new_inode); |
4a0cc7ca | 9848 | if (unlikely(btrfs_ino(BTRFS_I(new_inode)) == |
4df27c4d YZ |
9849 | BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { |
9850 | root_objectid = BTRFS_I(new_inode)->location.objectid; | |
9851 | ret = btrfs_unlink_subvol(trans, dest, new_dir, | |
9852 | root_objectid, | |
9853 | new_dentry->d_name.name, | |
9854 | new_dentry->d_name.len); | |
9855 | BUG_ON(new_inode->i_nlink == 0); | |
9856 | } else { | |
4ec5934e NB |
9857 | ret = btrfs_unlink_inode(trans, dest, BTRFS_I(new_dir), |
9858 | BTRFS_I(d_inode(new_dentry)), | |
4df27c4d YZ |
9859 | new_dentry->d_name.name, |
9860 | new_dentry->d_name.len); | |
9861 | } | |
4ef31a45 | 9862 | if (!ret && new_inode->i_nlink == 0) |
73f2e545 NB |
9863 | ret = btrfs_orphan_add(trans, |
9864 | BTRFS_I(d_inode(new_dentry))); | |
79787eaa | 9865 | if (ret) { |
66642832 | 9866 | btrfs_abort_transaction(trans, ret); |
79787eaa JM |
9867 | goto out_fail; |
9868 | } | |
39279cc3 | 9869 | } |
aec7477b | 9870 | |
db0a669f | 9871 | ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode), |
4df27c4d | 9872 | new_dentry->d_name.name, |
a5719521 | 9873 | new_dentry->d_name.len, 0, index); |
79787eaa | 9874 | if (ret) { |
66642832 | 9875 | btrfs_abort_transaction(trans, ret); |
79787eaa JM |
9876 | goto out_fail; |
9877 | } | |
39279cc3 | 9878 | |
67de1176 MX |
9879 | if (old_inode->i_nlink == 1) |
9880 | BTRFS_I(old_inode)->dir_index = index; | |
9881 | ||
3dc9e8f7 | 9882 | if (log_pinned) { |
10d9f309 | 9883 | struct dentry *parent = new_dentry->d_parent; |
3dc9e8f7 | 9884 | |
f85b7379 DS |
9885 | btrfs_log_new_name(trans, BTRFS_I(old_inode), BTRFS_I(old_dir), |
9886 | parent); | |
4df27c4d | 9887 | btrfs_end_log_trans(root); |
3dc9e8f7 | 9888 | log_pinned = false; |
4df27c4d | 9889 | } |
cdd1fedf DF |
9890 | |
9891 | if (flags & RENAME_WHITEOUT) { | |
9892 | ret = btrfs_whiteout_for_rename(trans, root, old_dir, | |
9893 | old_dentry); | |
9894 | ||
9895 | if (ret) { | |
66642832 | 9896 | btrfs_abort_transaction(trans, ret); |
cdd1fedf DF |
9897 | goto out_fail; |
9898 | } | |
4df27c4d | 9899 | } |
39279cc3 | 9900 | out_fail: |
3dc9e8f7 FM |
9901 | /* |
9902 | * If we have pinned the log and an error happened, we unpin tasks | |
9903 | * trying to sync the log and force them to fall back to a transaction |
9904 | * commit if the log currently contains any of the inodes involved in |
9905 | * this rename operation (to ensure we do not persist a log with an |
9906 | * inconsistent state for any of these inodes or lead to any |
9907 | * inconsistencies when replayed). If the transaction was aborted, the |
9908 | * abort reason is propagated to userspace when attempting to commit |
9909 | * the transaction. If the log does not contain any of these inodes, we | |
9910 | * allow the tasks to sync it. | |
9911 | */ | |
9912 | if (ret && log_pinned) { | |
0f8939b8 NB |
9913 | if (btrfs_inode_in_log(BTRFS_I(old_dir), fs_info->generation) || |
9914 | btrfs_inode_in_log(BTRFS_I(new_dir), fs_info->generation) || | |
9915 | btrfs_inode_in_log(BTRFS_I(old_inode), fs_info->generation) || | |
3dc9e8f7 | 9916 | (new_inode && |
0f8939b8 | 9917 | btrfs_inode_in_log(BTRFS_I(new_inode), fs_info->generation))) |
0b246afa | 9918 | btrfs_set_log_full_commit(fs_info, trans); |
3dc9e8f7 FM |
9919 | |
9920 | btrfs_end_log_trans(root); | |
9921 | log_pinned = false; | |
9922 | } | |
3a45bb20 | 9923 | btrfs_end_transaction(trans); |
b44c59a8 | 9924 | out_notrans: |
33345d01 | 9925 | if (old_ino == BTRFS_FIRST_FREE_OBJECTID) |
0b246afa | 9926 | up_read(&fs_info->subvol_sem); |
9ed74f2d | 9927 | |
39279cc3 CM |
9928 | return ret; |
9929 | } | |
9930 | ||
80ace85c MS |
9931 | static int btrfs_rename2(struct inode *old_dir, struct dentry *old_dentry, |
9932 | struct inode *new_dir, struct dentry *new_dentry, | |
9933 | unsigned int flags) | |
9934 | { | |
cdd1fedf | 9935 | if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT)) |
80ace85c MS |
9936 | return -EINVAL; |
9937 | ||
cdd1fedf DF |
9938 | if (flags & RENAME_EXCHANGE) |
9939 | return btrfs_rename_exchange(old_dir, old_dentry, new_dir, | |
9940 | new_dentry); | |
9941 | ||
9942 | return btrfs_rename(old_dir, old_dentry, new_dir, new_dentry, flags); | |
80ace85c MS |
9943 | } |
9944 | ||
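/*
 * A minimal userspace sketch of how the flags handled above are exercised:
 * they are the renameat2(2) flags, and RENAME_EXCHANGE is routed to
 * btrfs_rename_exchange() by btrfs_rename2(). The helper name swap_paths()
 * and the raw syscall() usage are illustrative assumptions, not btrfs code.
 */
#if 0
#include <fcntl.h>		/* AT_FDCWD */
#include <linux/fs.h>		/* RENAME_EXCHANGE */
#include <sys/syscall.h>	/* SYS_renameat2 */
#include <unistd.h>		/* syscall() */

/* Atomically swap two existing paths; returns 0 or -1 with errno set. */
static int swap_paths(const char *a, const char *b)
{
	return syscall(SYS_renameat2, AT_FDCWD, a, AT_FDCWD, b,
		       RENAME_EXCHANGE);
}
#endif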
8ccf6f19 MX |
9945 | static void btrfs_run_delalloc_work(struct btrfs_work *work) |
9946 | { | |
9947 | struct btrfs_delalloc_work *delalloc_work; | |
9f23e289 | 9948 | struct inode *inode; |
8ccf6f19 MX |
9949 | |
9950 | delalloc_work = container_of(work, struct btrfs_delalloc_work, | |
9951 | work); | |
9f23e289 | 9952 | inode = delalloc_work->inode; |
30424601 DS |
9953 | filemap_flush(inode->i_mapping); |
9954 | if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, | |
9955 | &BTRFS_I(inode)->runtime_flags)) | |
9f23e289 | 9956 | filemap_flush(inode->i_mapping); |
8ccf6f19 MX |
9957 | |
9958 | if (delalloc_work->delay_iput) | |
9f23e289 | 9959 | btrfs_add_delayed_iput(inode); |
8ccf6f19 | 9960 | else |
9f23e289 | 9961 | iput(inode); |
8ccf6f19 MX |
9962 | complete(&delalloc_work->completion); |
9963 | } | |
9964 | ||
9965 | struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode, | |
651d494a | 9966 | int delay_iput) |
8ccf6f19 MX |
9967 | { |
9968 | struct btrfs_delalloc_work *work; | |
9969 | ||
100d5702 | 9970 | work = kmalloc(sizeof(*work), GFP_NOFS); |
8ccf6f19 MX |
9971 | if (!work) |
9972 | return NULL; | |
9973 | ||
9974 | init_completion(&work->completion); | |
9975 | INIT_LIST_HEAD(&work->list); | |
9976 | work->inode = inode; | |
8ccf6f19 | 9977 | work->delay_iput = delay_iput; |
9e0af237 LB |
9978 | WARN_ON_ONCE(!inode); |
9979 | btrfs_init_work(&work->work, btrfs_flush_delalloc_helper, | |
9980 | btrfs_run_delalloc_work, NULL, NULL); | |
8ccf6f19 MX |
9981 | |
9982 | return work; | |
9983 | } | |
9984 | ||
9985 | void btrfs_wait_and_free_delalloc_work(struct btrfs_delalloc_work *work) | |
9986 | { | |
9987 | wait_for_completion(&work->completion); | |
100d5702 | 9988 | kfree(work); |
8ccf6f19 MX |
9989 | } |
9990 | ||
d352ac68 CM |
9991 | /* |
9992 | * some fairly slow code that needs optimization. This walks the list | |
9993 | * of all the inodes with pending delalloc and forces them to disk. | |
9994 | */ | |
6c255e67 MX |
9995 | static int __start_delalloc_inodes(struct btrfs_root *root, int delay_iput, |
9996 | int nr) | |
ea8c2819 | 9997 | { |
ea8c2819 | 9998 | struct btrfs_inode *binode; |
5b21f2ed | 9999 | struct inode *inode; |
8ccf6f19 MX |
10000 | struct btrfs_delalloc_work *work, *next; |
10001 | struct list_head works; | |
1eafa6c7 | 10002 | struct list_head splice; |
8ccf6f19 | 10003 | int ret = 0; |
ea8c2819 | 10004 | |
8ccf6f19 | 10005 | INIT_LIST_HEAD(&works); |
1eafa6c7 | 10006 | INIT_LIST_HEAD(&splice); |
63607cc8 | 10007 | |
573bfb72 | 10008 | mutex_lock(&root->delalloc_mutex); |
eb73c1b7 MX |
10009 | spin_lock(&root->delalloc_lock); |
10010 | list_splice_init(&root->delalloc_inodes, &splice); | |
1eafa6c7 MX |
10011 | while (!list_empty(&splice)) { |
10012 | binode = list_entry(splice.next, struct btrfs_inode, | |
ea8c2819 | 10013 | delalloc_inodes); |
1eafa6c7 | 10014 | |
eb73c1b7 MX |
10015 | list_move_tail(&binode->delalloc_inodes, |
10016 | &root->delalloc_inodes); | |
5b21f2ed | 10017 | inode = igrab(&binode->vfs_inode); |
df0af1a5 | 10018 | if (!inode) { |
eb73c1b7 | 10019 | cond_resched_lock(&root->delalloc_lock); |
1eafa6c7 | 10020 | continue; |
df0af1a5 | 10021 | } |
eb73c1b7 | 10022 | spin_unlock(&root->delalloc_lock); |
1eafa6c7 | 10023 | |
651d494a | 10024 | work = btrfs_alloc_delalloc_work(inode, delay_iput); |
5d99a998 | 10025 | if (!work) { |
f4ab9ea7 JB |
10026 | if (delay_iput) |
10027 | btrfs_add_delayed_iput(inode); | |
10028 | else | |
10029 | iput(inode); | |
1eafa6c7 | 10030 | ret = -ENOMEM; |
a1ecaabb | 10031 | goto out; |
5b21f2ed | 10032 | } |
1eafa6c7 | 10033 | list_add_tail(&work->list, &works); |
a44903ab QW |
10034 | btrfs_queue_work(root->fs_info->flush_workers, |
10035 | &work->work); | |
6c255e67 MX |
10036 | ret++; |
10037 | if (nr != -1 && ret >= nr) | |
a1ecaabb | 10038 | goto out; |
5b21f2ed | 10039 | cond_resched(); |
eb73c1b7 | 10040 | spin_lock(&root->delalloc_lock); |
ea8c2819 | 10041 | } |
eb73c1b7 | 10042 | spin_unlock(&root->delalloc_lock); |
8c8bee1d | 10043 | |
a1ecaabb | 10044 | out: |
eb73c1b7 MX |
10045 | list_for_each_entry_safe(work, next, &works, list) { |
10046 | list_del_init(&work->list); | |
10047 | btrfs_wait_and_free_delalloc_work(work); | |
10048 | } | |
10049 | ||
10050 | if (!list_empty_careful(&splice)) { | |
10051 | spin_lock(&root->delalloc_lock); | |
10052 | list_splice_tail(&splice, &root->delalloc_inodes); | |
10053 | spin_unlock(&root->delalloc_lock); | |
10054 | } | |
573bfb72 | 10055 | mutex_unlock(&root->delalloc_mutex); |
eb73c1b7 MX |
10056 | return ret; |
10057 | } | |
1eafa6c7 | 10058 | |
eb73c1b7 MX |
10059 | int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput) |
10060 | { | |
0b246afa | 10061 | struct btrfs_fs_info *fs_info = root->fs_info; |
eb73c1b7 | 10062 | int ret; |
1eafa6c7 | 10063 | |
0b246afa | 10064 | if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) |
eb73c1b7 MX |
10065 | return -EROFS; |
10066 | ||
6c255e67 MX |
10067 | ret = __start_delalloc_inodes(root, delay_iput, -1); |
10068 | if (ret > 0) | |
10069 | ret = 0; | |
eb73c1b7 MX |
10070 | /* |
10071 | * the filemap_flush will queue IO into the worker threads, but | |
8c8bee1d CM |
10072 | * we have to make sure the IO is actually started and that |
10073 | * ordered extents get created before we return | |
10074 | */ | |
0b246afa JM |
10075 | atomic_inc(&fs_info->async_submit_draining); |
10076 | while (atomic_read(&fs_info->nr_async_submits) || | |
10077 | atomic_read(&fs_info->async_delalloc_pages)) { | |
10078 | wait_event(fs_info->async_submit_wait, | |
10079 | (atomic_read(&fs_info->nr_async_submits) == 0 && | |
10080 | atomic_read(&fs_info->async_delalloc_pages) == 0)); | |
10081 | } | |
10082 | atomic_dec(&fs_info->async_submit_draining); | |
eb73c1b7 MX |
10083 | return ret; |
10084 | } | |
10085 | ||
6c255e67 MX |
10086 | int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int delay_iput, |
10087 | int nr) | |
eb73c1b7 MX |
10088 | { |
10089 | struct btrfs_root *root; | |
10090 | struct list_head splice; | |
10091 | int ret; | |
10092 | ||
2c21b4d7 | 10093 | if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) |
eb73c1b7 MX |
10094 | return -EROFS; |
10095 | ||
10096 | INIT_LIST_HEAD(&splice); | |
10097 | ||
573bfb72 | 10098 | mutex_lock(&fs_info->delalloc_root_mutex); |
eb73c1b7 MX |
10099 | spin_lock(&fs_info->delalloc_root_lock); |
10100 | list_splice_init(&fs_info->delalloc_roots, &splice); | |
6c255e67 | 10101 | while (!list_empty(&splice) && nr) { |
eb73c1b7 MX |
10102 | root = list_first_entry(&splice, struct btrfs_root, |
10103 | delalloc_root); | |
10104 | root = btrfs_grab_fs_root(root); | |
10105 | BUG_ON(!root); | |
10106 | list_move_tail(&root->delalloc_root, | |
10107 | &fs_info->delalloc_roots); | |
10108 | spin_unlock(&fs_info->delalloc_root_lock); | |
10109 | ||
6c255e67 | 10110 | ret = __start_delalloc_inodes(root, delay_iput, nr); |
eb73c1b7 | 10111 | btrfs_put_fs_root(root); |
6c255e67 | 10112 | if (ret < 0) |
eb73c1b7 MX |
10113 | goto out; |
10114 | ||
6c255e67 MX |
10115 | if (nr != -1) { |
10116 | nr -= ret; | |
10117 | WARN_ON(nr < 0); | |
10118 | } | |
eb73c1b7 | 10119 | spin_lock(&fs_info->delalloc_root_lock); |
8ccf6f19 | 10120 | } |
eb73c1b7 | 10121 | spin_unlock(&fs_info->delalloc_root_lock); |
1eafa6c7 | 10122 | |
6c255e67 | 10123 | ret = 0; |
eb73c1b7 MX |
10124 | atomic_inc(&fs_info->async_submit_draining); |
10125 | while (atomic_read(&fs_info->nr_async_submits) || | |
10126 | atomic_read(&fs_info->async_delalloc_pages)) { | |
10127 | wait_event(fs_info->async_submit_wait, | |
10128 | (atomic_read(&fs_info->nr_async_submits) == 0 && | |
10129 | atomic_read(&fs_info->async_delalloc_pages) == 0)); | |
10130 | } | |
10131 | atomic_dec(&fs_info->async_submit_draining); | |
eb73c1b7 | 10132 | out: |
1eafa6c7 | 10133 | if (!list_empty_careful(&splice)) { |
eb73c1b7 MX |
10134 | spin_lock(&fs_info->delalloc_root_lock); |
10135 | list_splice_tail(&splice, &fs_info->delalloc_roots); | |
10136 | spin_unlock(&fs_info->delalloc_root_lock); | |
1eafa6c7 | 10137 | } |
573bfb72 | 10138 | mutex_unlock(&fs_info->delalloc_root_mutex); |
8ccf6f19 | 10139 | return ret; |
ea8c2819 CM |
10140 | } |
10141 | ||
39279cc3 CM |
10142 | static int btrfs_symlink(struct inode *dir, struct dentry *dentry, |
10143 | const char *symname) | |
10144 | { | |
0b246afa | 10145 | struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); |
39279cc3 CM |
10146 | struct btrfs_trans_handle *trans; |
10147 | struct btrfs_root *root = BTRFS_I(dir)->root; | |
10148 | struct btrfs_path *path; | |
10149 | struct btrfs_key key; | |
1832a6d5 | 10150 | struct inode *inode = NULL; |
39279cc3 CM |
10151 | int err; |
10152 | int drop_inode = 0; | |
10153 | u64 objectid; | |
67871254 | 10154 | u64 index = 0; |
39279cc3 CM |
10155 | int name_len; |
10156 | int datasize; | |
5f39d397 | 10157 | unsigned long ptr; |
39279cc3 | 10158 | struct btrfs_file_extent_item *ei; |
5f39d397 | 10159 | struct extent_buffer *leaf; |
39279cc3 | 10160 | |
f06becc4 | 10161 | name_len = strlen(symname); |
0b246afa | 10162 | if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info)) |
39279cc3 | 10163 | return -ENAMETOOLONG; |
1832a6d5 | 10164 | |
9ed74f2d JB |
10165 | /* |
10166 | * 2 items for inode item and ref | |
10167 | * 2 items for dir items | |
9269d12b FM |
10168 | * 1 item for updating parent inode item |
10169 | * 1 item for the inline extent item | |
9ed74f2d JB |
10170 | * 1 item for xattr if selinux is on |
10171 | */ | |
9269d12b | 10172 | trans = btrfs_start_transaction(root, 7); |
a22285a6 YZ |
10173 | if (IS_ERR(trans)) |
10174 | return PTR_ERR(trans); | |
1832a6d5 | 10175 | |
581bb050 LZ |
10176 | err = btrfs_find_free_ino(root, &objectid); |
10177 | if (err) | |
10178 | goto out_unlock; | |
10179 | ||
aec7477b | 10180 | inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, |
f85b7379 DS |
10181 | dentry->d_name.len, btrfs_ino(BTRFS_I(dir)), |
10182 | objectid, S_IFLNK|S_IRWXUGO, &index); | |
7cf96da3 TI |
10183 | if (IS_ERR(inode)) { |
10184 | err = PTR_ERR(inode); | |
39279cc3 | 10185 | goto out_unlock; |
7cf96da3 | 10186 | } |
39279cc3 | 10187 | |
ad19db71 CS |
10188 | /* |
10189 | * If the active LSM wants to access the inode during | |
10190 | * d_instantiate it needs these. Smack checks to see | |
10191 | * if the filesystem supports xattrs by looking at the | |
10192 | * ops vector. | |
10193 | */ | |
10194 | inode->i_fop = &btrfs_file_operations; | |
10195 | inode->i_op = &btrfs_file_inode_operations; | |
b0d5d10f | 10196 | inode->i_mapping->a_ops = &btrfs_aops; |
b0d5d10f CM |
10197 | BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; |
10198 | ||
10199 | err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); | |
10200 | if (err) | |
10201 | goto out_unlock_inode; | |
ad19db71 | 10202 | |
39279cc3 | 10203 | path = btrfs_alloc_path(); |
d8926bb3 MF |
10204 | if (!path) { |
10205 | err = -ENOMEM; | |
b0d5d10f | 10206 | goto out_unlock_inode; |
d8926bb3 | 10207 | } |
4a0cc7ca | 10208 | key.objectid = btrfs_ino(BTRFS_I(inode)); |
39279cc3 | 10209 | key.offset = 0; |
962a298f | 10210 | key.type = BTRFS_EXTENT_DATA_KEY; |
39279cc3 CM |
10211 | datasize = btrfs_file_extent_calc_inline_size(name_len); |
10212 | err = btrfs_insert_empty_item(trans, root, path, &key, | |
10213 | datasize); | |
54aa1f4d | 10214 | if (err) { |
b0839166 | 10215 | btrfs_free_path(path); |
b0d5d10f | 10216 | goto out_unlock_inode; |
54aa1f4d | 10217 | } |
5f39d397 CM |
10218 | leaf = path->nodes[0]; |
10219 | ei = btrfs_item_ptr(leaf, path->slots[0], | |
10220 | struct btrfs_file_extent_item); | |
10221 | btrfs_set_file_extent_generation(leaf, ei, trans->transid); | |
10222 | btrfs_set_file_extent_type(leaf, ei, | |
39279cc3 | 10223 | BTRFS_FILE_EXTENT_INLINE); |
c8b97818 CM |
10224 | btrfs_set_file_extent_encryption(leaf, ei, 0); |
10225 | btrfs_set_file_extent_compression(leaf, ei, 0); | |
10226 | btrfs_set_file_extent_other_encoding(leaf, ei, 0); | |
10227 | btrfs_set_file_extent_ram_bytes(leaf, ei, name_len); | |
10228 | ||
39279cc3 | 10229 | ptr = btrfs_file_extent_inline_start(ei); |
5f39d397 CM |
10230 | write_extent_buffer(leaf, symname, ptr, name_len); |
10231 | btrfs_mark_buffer_dirty(leaf); | |
39279cc3 | 10232 | btrfs_free_path(path); |
5f39d397 | 10233 | |
39279cc3 | 10234 | inode->i_op = &btrfs_symlink_inode_operations; |
21fc61c7 | 10235 | inode_nohighmem(inode); |
39279cc3 | 10236 | inode->i_mapping->a_ops = &btrfs_symlink_aops; |
d899e052 | 10237 | inode_set_bytes(inode, name_len); |
6ef06d27 | 10238 | btrfs_i_size_write(BTRFS_I(inode), name_len); |
54aa1f4d | 10239 | err = btrfs_update_inode(trans, root, inode); |
d50866d0 FM |
10240 | /* |
10241 | * Last step, add directory indexes for our symlink inode. This is the | |
10242 | * last step to avoid extra cleanup of these indexes if an error happens | |
10243 | * elsewhere above. | |
10244 | */ | |
10245 | if (!err) | |
cef415af NB |
10246 | err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, |
10247 | BTRFS_I(inode), 0, index); | |
b0d5d10f | 10248 | if (err) { |
54aa1f4d | 10249 | drop_inode = 1; |
b0d5d10f CM |
10250 | goto out_unlock_inode; |
10251 | } | |
10252 | ||
10253 | unlock_new_inode(inode); | |
10254 | d_instantiate(dentry, inode); | |
39279cc3 CM |
10255 | |
10256 | out_unlock: | |
3a45bb20 | 10257 | btrfs_end_transaction(trans); |
39279cc3 CM |
10258 | if (drop_inode) { |
10259 | inode_dec_link_count(inode); | |
10260 | iput(inode); | |
10261 | } | |
2ff7e61e | 10262 | btrfs_btree_balance_dirty(fs_info); |
39279cc3 | 10263 | return err; |
b0d5d10f CM |
10264 | |
10265 | out_unlock_inode: | |
10266 | drop_inode = 1; | |
10267 | unlock_new_inode(inode); | |
10268 | goto out_unlock; | |
39279cc3 | 10269 | } |
16432985 | 10270 | |
0af3d00b JB |
10271 | static int __btrfs_prealloc_file_range(struct inode *inode, int mode, |
10272 | u64 start, u64 num_bytes, u64 min_size, | |
10273 | loff_t actual_len, u64 *alloc_hint, | |
10274 | struct btrfs_trans_handle *trans) | |
d899e052 | 10275 | { |
0b246afa | 10276 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
5dc562c5 JB |
10277 | struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; |
10278 | struct extent_map *em; | |
d899e052 YZ |
10279 | struct btrfs_root *root = BTRFS_I(inode)->root; |
10280 | struct btrfs_key ins; | |
d899e052 | 10281 | u64 cur_offset = start; |
55a61d1d | 10282 | u64 i_size; |
154ea289 | 10283 | u64 cur_bytes; |
0b670dc4 | 10284 | u64 last_alloc = (u64)-1; |
d899e052 | 10285 | int ret = 0; |
0af3d00b | 10286 | bool own_trans = true; |
18513091 | 10287 | u64 end = start + num_bytes - 1; |
d899e052 | 10288 | |
0af3d00b JB |
10289 | if (trans) |
10290 | own_trans = false; | |
d899e052 | 10291 | while (num_bytes > 0) { |
0af3d00b JB |
10292 | if (own_trans) { |
10293 | trans = btrfs_start_transaction(root, 3); | |
10294 | if (IS_ERR(trans)) { | |
10295 | ret = PTR_ERR(trans); | |
10296 | break; | |
10297 | } | |
5a303d5d YZ |
10298 | } |
10299 | ||
ee22184b | 10300 | cur_bytes = min_t(u64, num_bytes, SZ_256M); |
154ea289 | 10301 | cur_bytes = max(cur_bytes, min_size); |
0b670dc4 JB |
10302 | /* |
10303 | * If we are severely fragmented we could end up with really | |
10304 | * small allocations, so if the allocator is returning small | |
10305 | * chunks, let's make its job easier by only searching for those |
10306 | * sized chunks. | |
10307 | */ | |
10308 | cur_bytes = min(cur_bytes, last_alloc); | |
18513091 WX |
10309 | ret = btrfs_reserve_extent(root, cur_bytes, cur_bytes, |
10310 | min_size, 0, *alloc_hint, &ins, 1, 0); | |
5a303d5d | 10311 | if (ret) { |
0af3d00b | 10312 | if (own_trans) |
3a45bb20 | 10313 | btrfs_end_transaction(trans); |
a22285a6 | 10314 | break; |
d899e052 | 10315 | } |
0b246afa | 10316 | btrfs_dec_block_group_reservations(fs_info, ins.objectid); |
5a303d5d | 10317 | |
0b670dc4 | 10318 | last_alloc = ins.offset; |
d899e052 YZ |
10319 | ret = insert_reserved_file_extent(trans, inode, |
10320 | cur_offset, ins.objectid, | |
10321 | ins.offset, ins.offset, | |
920bbbfb | 10322 | ins.offset, 0, 0, 0, |
d899e052 | 10323 | BTRFS_FILE_EXTENT_PREALLOC); |
79787eaa | 10324 | if (ret) { |
2ff7e61e | 10325 | btrfs_free_reserved_extent(fs_info, ins.objectid, |
e570fd27 | 10326 | ins.offset, 0); |
66642832 | 10327 | btrfs_abort_transaction(trans, ret); |
79787eaa | 10328 | if (own_trans) |
3a45bb20 | 10329 | btrfs_end_transaction(trans); |
79787eaa JM |
10330 | break; |
10331 | } | |
31193213 | 10332 | |
dcdbc059 | 10333 | btrfs_drop_extent_cache(BTRFS_I(inode), cur_offset, |
a1ed835e | 10334 | cur_offset + ins.offset - 1, 0); |
5a303d5d | 10335 | |
5dc562c5 JB |
10336 | em = alloc_extent_map(); |
10337 | if (!em) { | |
10338 | set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, | |
10339 | &BTRFS_I(inode)->runtime_flags); | |
10340 | goto next; | |
10341 | } | |
10342 | ||
10343 | em->start = cur_offset; | |
10344 | em->orig_start = cur_offset; | |
10345 | em->len = ins.offset; | |
10346 | em->block_start = ins.objectid; | |
10347 | em->block_len = ins.offset; | |
b4939680 | 10348 | em->orig_block_len = ins.offset; |
cc95bef6 | 10349 | em->ram_bytes = ins.offset; |
0b246afa | 10350 | em->bdev = fs_info->fs_devices->latest_bdev; |
5dc562c5 JB |
10351 | set_bit(EXTENT_FLAG_PREALLOC, &em->flags); |
10352 | em->generation = trans->transid; | |
10353 | ||
10354 | while (1) { | |
10355 | write_lock(&em_tree->lock); | |
09a2a8f9 | 10356 | ret = add_extent_mapping(em_tree, em, 1); |
5dc562c5 JB |
10357 | write_unlock(&em_tree->lock); |
10358 | if (ret != -EEXIST) | |
10359 | break; | |
dcdbc059 | 10360 | btrfs_drop_extent_cache(BTRFS_I(inode), cur_offset, |
5dc562c5 JB |
10361 | cur_offset + ins.offset - 1, |
10362 | 0); | |
10363 | } | |
10364 | free_extent_map(em); | |
10365 | next: | |
d899e052 YZ |
10366 | num_bytes -= ins.offset; |
10367 | cur_offset += ins.offset; | |
efa56464 | 10368 | *alloc_hint = ins.objectid + ins.offset; |
5a303d5d | 10369 | |
0c4d2d95 | 10370 | inode_inc_iversion(inode); |
c2050a45 | 10371 | inode->i_ctime = current_time(inode); |
6cbff00f | 10372 | BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC; |
d899e052 | 10373 | if (!(mode & FALLOC_FL_KEEP_SIZE) && |
efa56464 YZ |
10374 | (actual_len > inode->i_size) && |
10375 | (cur_offset > inode->i_size)) { | |
d1ea6a61 | 10376 | if (cur_offset > actual_len) |
55a61d1d | 10377 | i_size = actual_len; |
d1ea6a61 | 10378 | else |
55a61d1d JB |
10379 | i_size = cur_offset; |
10380 | i_size_write(inode, i_size); | |
10381 | btrfs_ordered_update_i_size(inode, i_size, NULL); | |
5a303d5d YZ |
10382 | } |
10383 | ||
d899e052 | 10384 | ret = btrfs_update_inode(trans, root, inode); |
79787eaa JM |
10385 | |
10386 | if (ret) { | |
66642832 | 10387 | btrfs_abort_transaction(trans, ret); |
79787eaa | 10388 | if (own_trans) |
3a45bb20 | 10389 | btrfs_end_transaction(trans); |
79787eaa JM |
10390 | break; |
10391 | } | |
d899e052 | 10392 | |
0af3d00b | 10393 | if (own_trans) |
3a45bb20 | 10394 | btrfs_end_transaction(trans); |
5a303d5d | 10395 | } |
18513091 WX |
10396 | if (cur_offset < end) |
10397 | btrfs_free_reserved_data_space(inode, cur_offset, | |
10398 | end - cur_offset + 1); | |
d899e052 YZ |
10399 | return ret; |
10400 | } | |
10401 | ||
0af3d00b JB |
10402 | int btrfs_prealloc_file_range(struct inode *inode, int mode, |
10403 | u64 start, u64 num_bytes, u64 min_size, | |
10404 | loff_t actual_len, u64 *alloc_hint) | |
10405 | { | |
10406 | return __btrfs_prealloc_file_range(inode, mode, start, num_bytes, | |
10407 | min_size, actual_len, alloc_hint, | |
10408 | NULL); | |
10409 | } | |
10410 | ||
10411 | int btrfs_prealloc_file_range_trans(struct inode *inode, | |
10412 | struct btrfs_trans_handle *trans, int mode, | |
10413 | u64 start, u64 num_bytes, u64 min_size, | |
10414 | loff_t actual_len, u64 *alloc_hint) | |
10415 | { | |
10416 | return __btrfs_prealloc_file_range(inode, mode, start, num_bytes, | |
10417 | min_size, actual_len, alloc_hint, trans); | |
10418 | } | |
10419 | ||
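/*
 * A minimal userspace sketch of the usual route into
 * btrfs_prealloc_file_range(): the fallocate(2) path. With
 * FALLOC_FL_KEEP_SIZE the preallocation loop above leaves i_size untouched,
 * which is what the (mode & FALLOC_FL_KEEP_SIZE) check implements. The
 * helper name prealloc_tail() is an illustrative assumption, not btrfs code.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>		/* fallocate(), FALLOC_FL_KEEP_SIZE (glibc, Linux-specific) */

/* Reserve len bytes at offset without changing the visible file size. */
static int prealloc_tail(int fd, off_t offset, off_t len)
{
	return fallocate(fd, FALLOC_FL_KEEP_SIZE, offset, len);
}
#endif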
e6dcd2dc CM |
10420 | static int btrfs_set_page_dirty(struct page *page) |
10421 | { | |
e6dcd2dc CM |
10422 | return __set_page_dirty_nobuffers(page); |
10423 | } | |
10424 | ||
10556cb2 | 10425 | static int btrfs_permission(struct inode *inode, int mask) |
fdebe2bd | 10426 | { |
b83cc969 | 10427 | struct btrfs_root *root = BTRFS_I(inode)->root; |
cb6db4e5 | 10428 | umode_t mode = inode->i_mode; |
b83cc969 | 10429 | |
cb6db4e5 JM |
10430 | if (mask & MAY_WRITE && |
10431 | (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) { | |
10432 | if (btrfs_root_readonly(root)) | |
10433 | return -EROFS; | |
10434 | if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY) | |
10435 | return -EACCES; | |
10436 | } | |
2830ba7f | 10437 | return generic_permission(inode, mask); |
fdebe2bd | 10438 | } |
39279cc3 | 10439 | |
ef3b9af5 FM |
10440 | static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode) |
10441 | { | |
2ff7e61e | 10442 | struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); |
ef3b9af5 FM |
10443 | struct btrfs_trans_handle *trans; |
10444 | struct btrfs_root *root = BTRFS_I(dir)->root; | |
10445 | struct inode *inode = NULL; | |
10446 | u64 objectid; | |
10447 | u64 index; | |
10448 | int ret = 0; | |
10449 | ||
10450 | /* | |
10451 | * 5 units required for adding orphan entry | |
10452 | */ | |
10453 | trans = btrfs_start_transaction(root, 5); | |
10454 | if (IS_ERR(trans)) | |
10455 | return PTR_ERR(trans); | |
10456 | ||
10457 | ret = btrfs_find_free_ino(root, &objectid); | |
10458 | if (ret) | |
10459 | goto out; | |
10460 | ||
10461 | inode = btrfs_new_inode(trans, root, dir, NULL, 0, | |
f85b7379 | 10462 | btrfs_ino(BTRFS_I(dir)), objectid, mode, &index); |
ef3b9af5 FM |
10463 | if (IS_ERR(inode)) { |
10464 | ret = PTR_ERR(inode); | |
10465 | inode = NULL; | |
10466 | goto out; | |
10467 | } | |
10468 | ||
ef3b9af5 FM |
10469 | inode->i_fop = &btrfs_file_operations; |
10470 | inode->i_op = &btrfs_file_inode_operations; | |
10471 | ||
10472 | inode->i_mapping->a_ops = &btrfs_aops; | |
ef3b9af5 FM |
10473 | BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; |
10474 | ||
b0d5d10f CM |
10475 | ret = btrfs_init_inode_security(trans, inode, dir, NULL); |
10476 | if (ret) | |
10477 | goto out_inode; | |
10478 | ||
10479 | ret = btrfs_update_inode(trans, root, inode); | |
10480 | if (ret) | |
10481 | goto out_inode; | |
73f2e545 | 10482 | ret = btrfs_orphan_add(trans, BTRFS_I(inode)); |
ef3b9af5 | 10483 | if (ret) |
b0d5d10f | 10484 | goto out_inode; |
ef3b9af5 | 10485 | |
5762b5c9 FM |
10486 | /* |
10487 | * We set the number of links to 0 in btrfs_new_inode(), and here we set |
10488 | * it to 1 because d_tmpfile() will issue a warning if the count is 0, | |
10489 | * through: | |
10490 | * | |
10491 | * d_tmpfile() -> inode_dec_link_count() -> drop_nlink() | |
10492 | */ | |
10493 | set_nlink(inode, 1); | |
b0d5d10f | 10494 | unlock_new_inode(inode); |
ef3b9af5 FM |
10495 | d_tmpfile(dentry, inode); |
10496 | mark_inode_dirty(inode); | |
10497 | ||
10498 | out: | |
3a45bb20 | 10499 | btrfs_end_transaction(trans); |
ef3b9af5 FM |
10500 | if (ret) |
10501 | iput(inode); | |
2ff7e61e JM |
10502 | btrfs_balance_delayed_items(fs_info); |
10503 | btrfs_btree_balance_dirty(fs_info); | |
ef3b9af5 | 10504 | return ret; |
b0d5d10f CM |
10505 | |
10506 | out_inode: | |
10507 | unlock_new_inode(inode); | |
10508 | goto out; | |
10509 | ||
ef3b9af5 FM |
10510 | } |
10511 | ||
6e1d5dcc | 10512 | static const struct inode_operations btrfs_dir_inode_operations = { |
3394e160 | 10513 | .getattr = btrfs_getattr, |
39279cc3 CM |
10514 | .lookup = btrfs_lookup, |
10515 | .create = btrfs_create, | |
10516 | .unlink = btrfs_unlink, | |
10517 | .link = btrfs_link, | |
10518 | .mkdir = btrfs_mkdir, | |
10519 | .rmdir = btrfs_rmdir, | |
2773bf00 | 10520 | .rename = btrfs_rename2, |
39279cc3 CM |
10521 | .symlink = btrfs_symlink, |
10522 | .setattr = btrfs_setattr, | |
618e21d5 | 10523 | .mknod = btrfs_mknod, |
5103e947 | 10524 | .listxattr = btrfs_listxattr, |
fdebe2bd | 10525 | .permission = btrfs_permission, |
4e34e719 | 10526 | .get_acl = btrfs_get_acl, |
996a710d | 10527 | .set_acl = btrfs_set_acl, |
93fd63c2 | 10528 | .update_time = btrfs_update_time, |
ef3b9af5 | 10529 | .tmpfile = btrfs_tmpfile, |
39279cc3 | 10530 | }; |
6e1d5dcc | 10531 | static const struct inode_operations btrfs_dir_ro_inode_operations = { |
39279cc3 | 10532 | .lookup = btrfs_lookup, |
fdebe2bd | 10533 | .permission = btrfs_permission, |
93fd63c2 | 10534 | .update_time = btrfs_update_time, |
39279cc3 | 10535 | }; |
76dda93c | 10536 | |
828c0950 | 10537 | static const struct file_operations btrfs_dir_file_operations = { |
39279cc3 CM |
10538 | .llseek = generic_file_llseek, |
10539 | .read = generic_read_dir, | |
02dbfc99 | 10540 | .iterate_shared = btrfs_real_readdir, |
34287aa3 | 10541 | .unlocked_ioctl = btrfs_ioctl, |
39279cc3 | 10542 | #ifdef CONFIG_COMPAT |
4c63c245 | 10543 | .compat_ioctl = btrfs_compat_ioctl, |
39279cc3 | 10544 | #endif |
6bf13c0c | 10545 | .release = btrfs_release_file, |
e02119d5 | 10546 | .fsync = btrfs_sync_file, |
39279cc3 CM |
10547 | }; |
10548 | ||
20e5506b | 10549 | static const struct extent_io_ops btrfs_extent_io_ops = { |
4d53dddb | 10550 | /* mandatory callbacks */ |
065631f6 | 10551 | .submit_bio_hook = btrfs_submit_bio_hook, |
07157aac | 10552 | .readpage_end_io_hook = btrfs_readpage_end_io_hook, |
4d53dddb DS |
10553 | .merge_bio_hook = btrfs_merge_bio_hook, |
10554 | ||
10555 | /* optional callbacks */ | |
10556 | .fill_delalloc = run_delalloc_range, | |
e6dcd2dc | 10557 | .writepage_end_io_hook = btrfs_writepage_end_io_hook, |
247e743c | 10558 | .writepage_start_hook = btrfs_writepage_start_hook, |
b0c68f8b CM |
10559 | .set_bit_hook = btrfs_set_bit_hook, |
10560 | .clear_bit_hook = btrfs_clear_bit_hook, | |
9ed74f2d JB |
10561 | .merge_extent_hook = btrfs_merge_extent_hook, |
10562 | .split_extent_hook = btrfs_split_extent_hook, | |
07157aac CM |
10563 | }; |
10564 | ||
35054394 CM |
10565 | /* |
10566 | * btrfs doesn't support the bmap operation because swapfiles | |
10567 | * use bmap to make a mapping of extents in the file. They assume | |
10568 | * these extents won't change over the life of the file and they | |
10569 | * use the bmap result to do IO directly to the drive. | |
10570 | * | |
10571 | * the btrfs bmap call would return logical addresses that aren't | |
10572 | * suitable for IO and they will also change frequently as COW |
10573 | * operations happen. So, swapfile + btrfs == corruption. | |
10574 | * | |
10575 | * For now we're avoiding this by dropping bmap. | |
10576 | */ | |
7f09410b | 10577 | static const struct address_space_operations btrfs_aops = { |
39279cc3 CM |
10578 | .readpage = btrfs_readpage, |
10579 | .writepage = btrfs_writepage, | |
b293f02e | 10580 | .writepages = btrfs_writepages, |
3ab2fb5a | 10581 | .readpages = btrfs_readpages, |
16432985 | 10582 | .direct_IO = btrfs_direct_IO, |
a52d9a80 CM |
10583 | .invalidatepage = btrfs_invalidatepage, |
10584 | .releasepage = btrfs_releasepage, | |
e6dcd2dc | 10585 | .set_page_dirty = btrfs_set_page_dirty, |
465fdd97 | 10586 | .error_remove_page = generic_error_remove_page, |
39279cc3 CM |
10587 | }; |
10588 | ||
7f09410b | 10589 | static const struct address_space_operations btrfs_symlink_aops = { |
39279cc3 CM |
10590 | .readpage = btrfs_readpage, |
10591 | .writepage = btrfs_writepage, | |
2bf5a725 CM |
10592 | .invalidatepage = btrfs_invalidatepage, |
10593 | .releasepage = btrfs_releasepage, | |
39279cc3 CM |
10594 | }; |
10595 | ||
6e1d5dcc | 10596 | static const struct inode_operations btrfs_file_inode_operations = { |
39279cc3 CM |
10597 | .getattr = btrfs_getattr, |
10598 | .setattr = btrfs_setattr, | |
5103e947 | 10599 | .listxattr = btrfs_listxattr, |
fdebe2bd | 10600 | .permission = btrfs_permission, |
1506fcc8 | 10601 | .fiemap = btrfs_fiemap, |
4e34e719 | 10602 | .get_acl = btrfs_get_acl, |
996a710d | 10603 | .set_acl = btrfs_set_acl, |
e41f941a | 10604 | .update_time = btrfs_update_time, |
39279cc3 | 10605 | }; |
6e1d5dcc | 10606 | static const struct inode_operations btrfs_special_inode_operations = { |
618e21d5 JB |
10607 | .getattr = btrfs_getattr, |
10608 | .setattr = btrfs_setattr, | |
fdebe2bd | 10609 | .permission = btrfs_permission, |
33268eaf | 10610 | .listxattr = btrfs_listxattr, |
4e34e719 | 10611 | .get_acl = btrfs_get_acl, |
996a710d | 10612 | .set_acl = btrfs_set_acl, |
e41f941a | 10613 | .update_time = btrfs_update_time, |
618e21d5 | 10614 | }; |
6e1d5dcc | 10615 | static const struct inode_operations btrfs_symlink_inode_operations = { |
6b255391 | 10616 | .get_link = page_get_link, |
f209561a | 10617 | .getattr = btrfs_getattr, |
22c44fe6 | 10618 | .setattr = btrfs_setattr, |
fdebe2bd | 10619 | .permission = btrfs_permission, |
0279b4cd | 10620 | .listxattr = btrfs_listxattr, |
e41f941a | 10621 | .update_time = btrfs_update_time, |
39279cc3 | 10622 | }; |
76dda93c | 10623 | |
82d339d9 | 10624 | const struct dentry_operations btrfs_dentry_operations = { |
76dda93c | 10625 | .d_delete = btrfs_dentry_delete, |
b4aff1f8 | 10626 | .d_release = btrfs_dentry_release, |
76dda93c | 10627 | }; |