/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/aio.h>
#include <linux/bit_spinlock.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/mount.h>
#include <linux/btrfs.h>
#include <linux/blkdev.h>
#include <linux/posix_acl_xattr.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "ordered-data.h"
#include "xattr.h"
#include "tree-log.h"
#include "volumes.h"
#include "compression.h"
#include "locking.h"
#include "free-space-cache.h"
#include "inode-map.h"
#include "backref.h"
#include "hash.h"

struct btrfs_iget_args {
	u64 ino;
	struct btrfs_root *root;
};

static const struct inode_operations btrfs_dir_inode_operations;
static const struct inode_operations btrfs_symlink_inode_operations;
static const struct inode_operations btrfs_dir_ro_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
static const struct inode_operations btrfs_file_inode_operations;
static const struct address_space_operations btrfs_aops;
static const struct address_space_operations btrfs_symlink_aops;
static const struct file_operations btrfs_dir_file_operations;
static struct extent_io_ops btrfs_extent_io_ops;

static struct kmem_cache *btrfs_inode_cachep;
static struct kmem_cache *btrfs_delalloc_work_cachep;
struct kmem_cache *btrfs_trans_handle_cachep;
struct kmem_cache *btrfs_transaction_cachep;
struct kmem_cache *btrfs_path_cachep;
struct kmem_cache *btrfs_free_space_cachep;

#define S_SHIFT 12
static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
	[S_IFREG >> S_SHIFT]	= BTRFS_FT_REG_FILE,
	[S_IFDIR >> S_SHIFT]	= BTRFS_FT_DIR,
	[S_IFCHR >> S_SHIFT]	= BTRFS_FT_CHRDEV,
	[S_IFBLK >> S_SHIFT]	= BTRFS_FT_BLKDEV,
	[S_IFIFO >> S_SHIFT]	= BTRFS_FT_FIFO,
	[S_IFSOCK >> S_SHIFT]	= BTRFS_FT_SOCK,
	[S_IFLNK >> S_SHIFT]	= BTRFS_FT_SYMLINK,
};
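
/*
 * The table above is indexed by (mode & S_IFMT) >> S_SHIFT, translating a
 * VFS inode mode into the BTRFS_FT_* file type value that btrfs stores in
 * its directory items.
 */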

static int btrfs_setsize(struct inode *inode, struct iattr *attr);
static int btrfs_truncate(struct inode *inode);
static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
static noinline int cow_file_range(struct inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written, int unlock);
static struct extent_map *create_pinned_em(struct inode *inode, u64 start,
					   u64 len, u64 orig_start,
					   u64 block_start, u64 block_len,
					   u64 orig_block_len, u64 ram_bytes,
					   int type);

static int btrfs_dirty_inode(struct inode *inode);

static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
				     struct inode *inode, struct inode *dir,
				     const struct qstr *qstr)
{
	int err;

	err = btrfs_init_acl(trans, inode, dir);
	if (!err)
		err = btrfs_xattr_security_init(trans, inode, dir, qstr);
	return err;
}

/*
 * this does all the hard work for inserting an inline extent into
 * the btree.  The caller should have done a btrfs_drop_extents so that
 * no overlapping inline items exist in the btree
 */
static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct inode *inode,
				u64 start, size_t size, size_t compressed_size,
				int compress_type,
				struct page **compressed_pages)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct page *page = NULL;
	char *kaddr;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	int err = 0;
	int ret;
	size_t cur_size = size;
	size_t datasize;
	unsigned long offset;

	if (compressed_size && compressed_pages)
		cur_size = compressed_size;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;

	key.objectid = btrfs_ino(inode);
	key.offset = start;
	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
	datasize = btrfs_file_extent_calc_inline_size(cur_size);

	inode_add_bytes(inode, size);
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      datasize);
	if (ret) {
		err = ret;
		goto fail;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
	ptr = btrfs_file_extent_inline_start(ei);

	if (compress_type != BTRFS_COMPRESS_NONE) {
		struct page *cpage;
		int i = 0;
		while (compressed_size > 0) {
			cpage = compressed_pages[i];
			cur_size = min_t(unsigned long, compressed_size,
					 PAGE_CACHE_SIZE);

			kaddr = kmap_atomic(cpage);
			write_extent_buffer(leaf, kaddr, ptr, cur_size);
			kunmap_atomic(kaddr);

			i++;
			ptr += cur_size;
			compressed_size -= cur_size;
		}
		btrfs_set_file_extent_compression(leaf, ei,
						  compress_type);
	} else {
		page = find_get_page(inode->i_mapping,
				     start >> PAGE_CACHE_SHIFT);
		btrfs_set_file_extent_compression(leaf, ei, 0);
		kaddr = kmap_atomic(page);
		offset = start & (PAGE_CACHE_SIZE - 1);
		write_extent_buffer(leaf, kaddr + offset, ptr, size);
		kunmap_atomic(kaddr);
		page_cache_release(page);
	}
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

	/*
	 * we're an inline extent, so nobody can
	 * extend the file past i_size without locking
	 * a page we already have locked.
	 *
	 * We must do any isize and inode updates
	 * before we unlock the pages.  Otherwise we
	 * could end up racing with unlink.
	 */
	BTRFS_I(inode)->disk_i_size = inode->i_size;
	ret = btrfs_update_inode(trans, root, inode);

	return ret;
fail:
	btrfs_free_path(path);
	return err;
}
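
/*
 * Note: on success the file data now lives directly inside the btree leaf
 * (copied in above via write_extent_buffer()); no separate data extent is
 * allocated for an inline extent.
 */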

/*
 * conditionally insert an inline extent into the file.  This
 * does the checks required to make sure the data is small enough
 * to fit as an inline extent.
 */
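/*
 * Returns 0 if an inline extent was created, 1 if the range does not
 * qualify (or insert_inline_extent() hit -ENOSPC) and the caller should
 * fall back to a regular COW allocation, or a negative errno on other
 * failures.
 */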
static noinline int cow_file_range_inline(struct btrfs_root *root,
					  struct inode *inode, u64 start,
					  u64 end, size_t compressed_size,
					  int compress_type,
					  struct page **compressed_pages)
{
	struct btrfs_trans_handle *trans;
	u64 isize = i_size_read(inode);
	u64 actual_end = min(end + 1, isize);
	u64 inline_len = actual_end - start;
	u64 aligned_end = ALIGN(end, root->sectorsize);
	u64 data_len = inline_len;
	int ret;

	if (compressed_size)
		data_len = compressed_size;

	if (start > 0 ||
	    actual_end >= PAGE_CACHE_SIZE ||
	    data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
	    (!compressed_size &&
	    (actual_end & (root->sectorsize - 1)) == 0) ||
	    end + 1 < isize ||
	    data_len > root->fs_info->max_inline) {
		return 1;
	}

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);
	trans->block_rsv = &root->fs_info->delalloc_block_rsv;

	ret = btrfs_drop_extents(trans, root, inode, start, aligned_end, 1);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto out;
	}

	if (isize > actual_end)
		inline_len = min_t(u64, isize, actual_end);
	ret = insert_inline_extent(trans, root, inode, start,
				   inline_len, compressed_size,
				   compress_type, compressed_pages);
	if (ret && ret != -ENOSPC) {
		btrfs_abort_transaction(trans, root, ret);
		goto out;
	} else if (ret == -ENOSPC) {
		ret = 1;
		goto out;
	}

	set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
	btrfs_delalloc_release_metadata(inode, end + 1 - start);
	btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
out:
	btrfs_end_transaction(trans, root);
	return ret;
}

struct async_extent {
	u64 start;
	u64 ram_size;
	u64 compressed_size;
	struct page **pages;
	unsigned long nr_pages;
	int compress_type;
	struct list_head list;
};

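/*
 * One unit of delalloc work handed to the async workers: the [start, end]
 * range of @inode is first compressed (filling the list of async_extents)
 * and then submitted to disk in queue order.
 */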
struct async_cow {
	struct inode *inode;
	struct btrfs_root *root;
	struct page *locked_page;
	u64 start;
	u64 end;
	struct list_head extents;
	struct btrfs_work work;
};

static noinline int add_async_extent(struct async_cow *cow,
				     u64 start, u64 ram_size,
				     u64 compressed_size,
				     struct page **pages,
				     unsigned long nr_pages,
				     int compress_type)
{
	struct async_extent *async_extent;

	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
	BUG_ON(!async_extent); /* -ENOMEM */
	async_extent->start = start;
	async_extent->ram_size = ram_size;
	async_extent->compressed_size = compressed_size;
	async_extent->pages = pages;
	async_extent->nr_pages = nr_pages;
	async_extent->compress_type = compress_type;
	list_add_tail(&async_extent->list, &cow->extents);
	return 0;
}

/*
 * we create compressed extents in two phases.  The first
 * phase compresses a range of pages that have already been
 * locked (both pages and state bits are locked).
 *
 * This is done inside an ordered work queue, and the compression
 * is spread across many cpus.  The actual IO submission is step
 * two, and the ordered work queue takes care of making sure that
 * happens in the same order things were put onto the queue by
 * writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an
 * entry onto the work queue to write the uncompressed bytes.  This
 * makes sure that both compressed inodes and uncompressed inodes
 * are written in the same order that the flusher thread sent them
 * down.
 */
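/*
 * Phase one is compress_file_range() below, run from async_cow_start();
 * phase two is submit_compressed_extents(), run from async_cow_submit()
 * in the order the work items were queued.
 */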
static noinline int compress_file_range(struct inode *inode,
					struct page *locked_page,
					u64 start, u64 end,
					struct async_cow *async_cow,
					int *num_added)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 num_bytes;
	u64 blocksize = root->sectorsize;
	u64 actual_end;
	u64 isize = i_size_read(inode);
	int ret = 0;
	struct page **pages = NULL;
	unsigned long nr_pages;
	unsigned long nr_pages_ret = 0;
	unsigned long total_compressed = 0;
	unsigned long total_in = 0;
	unsigned long max_compressed = 128 * 1024;
	unsigned long max_uncompressed = 128 * 1024;
	int i;
	int will_compress;
	int compress_type = root->fs_info->compress_type;
	int redirty = 0;

	/* if this is a small write inside eof, kick off a defrag */
	if ((end - start + 1) < 16 * 1024 &&
	    (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
		btrfs_add_inode_defrag(NULL, inode);

	actual_end = min_t(u64, isize, end + 1);
again:
	will_compress = 0;
	nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
	nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);

	/*
	 * we don't want to send crud past the end of i_size through
	 * compression, that's just a waste of CPU time.  So, if the
	 * end of the file is before the start of our current
	 * requested range of bytes, we bail out to the uncompressed
	 * cleanup code that can deal with all of this.
	 *
	 * It isn't really the fastest way to fix things, but this is a
	 * very uncommon corner.
	 */
	if (actual_end <= start)
		goto cleanup_and_bail_uncompressed;

	total_compressed = actual_end - start;

	/* we want to make sure that amount of ram required to uncompress
	 * an extent is reasonable, so we limit the total size in ram
	 * of a compressed extent to 128k.  This is a crucial number
	 * because it also controls how easily we can spread reads across
	 * cpus for decompression.
	 *
	 * We also want to make sure the amount of IO required to do
	 * a random read is reasonably small, so we limit the size of
	 * a compressed extent to 128k.
	 */
	total_compressed = min(total_compressed, max_uncompressed);
	num_bytes = ALIGN(end - start + 1, blocksize);
	num_bytes = max(blocksize, num_bytes);
	total_in = 0;
	ret = 0;

	/*
	 * we do compression for mount -o compress and when the
	 * inode has not been flagged as nocompress.  This flag can
	 * change at any time if we discover bad compression ratios.
	 */
	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) &&
	    (btrfs_test_opt(root, COMPRESS) ||
	     (BTRFS_I(inode)->force_compress) ||
	     (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS))) {
		WARN_ON(pages);
		pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
		if (!pages) {
			/* just bail out to the uncompressed code */
			goto cont;
		}

		if (BTRFS_I(inode)->force_compress)
			compress_type = BTRFS_I(inode)->force_compress;

		/*
		 * we need to call clear_page_dirty_for_io on each
		 * page in the range.  Otherwise applications with the file
		 * mmap'd can wander in and change the page contents while
		 * we are compressing them.
		 *
		 * If the compression fails for any reason, we set the pages
		 * dirty again later on.
		 */
		extent_range_clear_dirty_for_io(inode, start, end);
		redirty = 1;
		ret = btrfs_compress_pages(compress_type,
					   inode->i_mapping, start,
					   total_compressed, pages,
					   nr_pages, &nr_pages_ret,
					   &total_in,
					   &total_compressed,
					   max_compressed);

		if (!ret) {
			unsigned long offset = total_compressed &
				(PAGE_CACHE_SIZE - 1);
			struct page *page = pages[nr_pages_ret - 1];
			char *kaddr;

			/* zero the tail end of the last page, we might be
			 * sending it down to disk
			 */
			if (offset) {
				kaddr = kmap_atomic(page);
				memset(kaddr + offset, 0,
				       PAGE_CACHE_SIZE - offset);
				kunmap_atomic(kaddr);
			}
			will_compress = 1;
		}
	}
cont:
	if (start == 0) {
		/* let's try to make an inline extent */
		if (ret || total_in < (actual_end - start)) {
			/* we didn't compress the entire range, try
			 * to make an uncompressed inline extent.
			 */
			ret = cow_file_range_inline(root, inode, start, end,
						    0, 0, NULL);
		} else {
			/* try making a compressed inline extent */
			ret = cow_file_range_inline(root, inode, start, end,
						    total_compressed,
						    compress_type, pages);
		}
		if (ret <= 0) {
			unsigned long clear_flags = EXTENT_DELALLOC |
				EXTENT_DEFRAG;
			clear_flags |= (ret < 0) ? EXTENT_DO_ACCOUNTING : 0;

			/*
			 * inline extent creation worked or returned error,
			 * we don't need to create any more async work items.
			 * Unlock and free up our temp pages.
			 */
			extent_clear_unlock_delalloc(inode, start, end, NULL,
						     clear_flags, PAGE_UNLOCK |
						     PAGE_CLEAR_DIRTY |
						     PAGE_SET_WRITEBACK |
						     PAGE_END_WRITEBACK);
			goto free_pages_out;
		}
	}

	if (will_compress) {
		/*
		 * we aren't doing an inline extent, so round the compressed
		 * size up to a block size boundary so the allocator does
		 * sane things
		 */
		total_compressed = ALIGN(total_compressed, blocksize);

		/*
		 * one last check to make sure the compression is really a
		 * win, compare the page count read with the blocks on disk
		 */
		total_in = ALIGN(total_in, PAGE_CACHE_SIZE);
		if (total_compressed >= total_in) {
			will_compress = 0;
		} else {
			num_bytes = total_in;
		}
	}
	if (!will_compress && pages) {
		/*
		 * the compression code ran but failed to make things smaller,
		 * free any pages it allocated and our page pointer array
		 */
		for (i = 0; i < nr_pages_ret; i++) {
			WARN_ON(pages[i]->mapping);
			page_cache_release(pages[i]);
		}
		kfree(pages);
		pages = NULL;
		total_compressed = 0;
		nr_pages_ret = 0;

		/* flag the file so we don't compress in the future */
		if (!btrfs_test_opt(root, FORCE_COMPRESS) &&
		    !(BTRFS_I(inode)->force_compress)) {
			BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
		}
	}
	if (will_compress) {
		*num_added += 1;

		/* the async work queues will take care of doing actual
		 * allocation on disk for these compressed pages,
		 * and will submit them to the elevator.
		 */
		add_async_extent(async_cow, start, num_bytes,
				 total_compressed, pages, nr_pages_ret,
				 compress_type);

		if (start + num_bytes < end) {
			start += num_bytes;
			pages = NULL;
			cond_resched();
			goto again;
		}
	} else {
cleanup_and_bail_uncompressed:
		/*
		 * No compression, but we still need to write the pages in
		 * the file we've been given so far.  redirty the locked
		 * page if it corresponds to our extent and set things up
		 * for the async work queue to run cow_file_range to do
		 * the normal delalloc dance
		 */
		if (page_offset(locked_page) >= start &&
		    page_offset(locked_page) <= end) {
			__set_page_dirty_nobuffers(locked_page);
			/* unlocked later on in the async handlers */
		}
		if (redirty)
			extent_range_redirty_for_io(inode, start, end);
		add_async_extent(async_cow, start, end - start + 1,
				 0, NULL, 0, BTRFS_COMPRESS_NONE);
		*num_added += 1;
	}

out:
	return ret;

free_pages_out:
	for (i = 0; i < nr_pages_ret; i++) {
		WARN_ON(pages[i]->mapping);
		page_cache_release(pages[i]);
	}
	kfree(pages);

	goto out;
}

/*
 * phase two of compressed writeback.  This is the ordered portion
 * of the code, which only gets called in the order the work was
 * queued.  We walk all the async extents created by compress_file_range
 * and send them down to the disk.
 */
static noinline int submit_compressed_extents(struct inode *inode,
					      struct async_cow *async_cow)
{
	struct async_extent *async_extent;
	u64 alloc_hint = 0;
	struct btrfs_key ins;
	struct extent_map *em;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_io_tree *io_tree;
	int ret = 0;

	if (list_empty(&async_cow->extents))
		return 0;

again:
	while (!list_empty(&async_cow->extents)) {
		async_extent = list_entry(async_cow->extents.next,
					  struct async_extent, list);
		list_del(&async_extent->list);

		io_tree = &BTRFS_I(inode)->io_tree;

retry:
		/* did the compression code fall back to uncompressed IO? */
		if (!async_extent->pages) {
			int page_started = 0;
			unsigned long nr_written = 0;

			lock_extent(io_tree, async_extent->start,
				    async_extent->start +
				    async_extent->ram_size - 1);

			/* allocate blocks */
			ret = cow_file_range(inode, async_cow->locked_page,
					     async_extent->start,
					     async_extent->start +
					     async_extent->ram_size - 1,
					     &page_started, &nr_written, 0);

			/* JDM XXX */

			/*
			 * if page_started, cow_file_range inserted an
			 * inline extent and took care of all the unlocking
			 * and IO for us.  Otherwise, we need to submit
			 * all those pages down to the drive.
			 */
			if (!page_started && !ret)
				extent_write_locked_range(io_tree,
						  inode, async_extent->start,
						  async_extent->start +
						  async_extent->ram_size - 1,
						  btrfs_get_extent,
						  WB_SYNC_ALL);
			else if (ret)
				unlock_page(async_cow->locked_page);
			kfree(async_extent);
			cond_resched();
			continue;
		}

		lock_extent(io_tree, async_extent->start,
			    async_extent->start + async_extent->ram_size - 1);

		ret = btrfs_reserve_extent(root,
					   async_extent->compressed_size,
					   async_extent->compressed_size,
					   0, alloc_hint, &ins, 1);
		if (ret) {
			int i;

			for (i = 0; i < async_extent->nr_pages; i++) {
				WARN_ON(async_extent->pages[i]->mapping);
				page_cache_release(async_extent->pages[i]);
			}
			kfree(async_extent->pages);
			async_extent->nr_pages = 0;
			async_extent->pages = NULL;

			if (ret == -ENOSPC) {
				unlock_extent(io_tree, async_extent->start,
					      async_extent->start +
					      async_extent->ram_size - 1);
				goto retry;
			}
			goto out_free;
		}

		/*
		 * here we're doing allocation and writeback of the
		 * compressed pages
		 */
		btrfs_drop_extent_cache(inode, async_extent->start,
					async_extent->start +
					async_extent->ram_size - 1, 0);

		em = alloc_extent_map();
		if (!em) {
			ret = -ENOMEM;
			goto out_free_reserve;
		}
		em->start = async_extent->start;
		em->len = async_extent->ram_size;
		em->orig_start = em->start;
		em->mod_start = em->start;
		em->mod_len = em->len;

		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->orig_block_len = ins.offset;
		em->ram_bytes = async_extent->ram_size;
		em->bdev = root->fs_info->fs_devices->latest_bdev;
		em->compress_type = async_extent->compress_type;
		set_bit(EXTENT_FLAG_PINNED, &em->flags);
		set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		em->generation = -1;

		while (1) {
			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, em, 1);
			write_unlock(&em_tree->lock);
			if (ret != -EEXIST) {
				free_extent_map(em);
				break;
			}
			btrfs_drop_extent_cache(inode, async_extent->start,
						async_extent->start +
						async_extent->ram_size - 1, 0);
		}

		if (ret)
			goto out_free_reserve;

		ret = btrfs_add_ordered_extent_compress(inode,
						async_extent->start,
						ins.objectid,
						async_extent->ram_size,
						ins.offset,
						BTRFS_ORDERED_COMPRESSED,
						async_extent->compress_type);
		if (ret)
			goto out_free_reserve;

		/*
		 * clear dirty, set writeback and unlock the pages.
		 */
		extent_clear_unlock_delalloc(inode, async_extent->start,
				async_extent->start +
				async_extent->ram_size - 1,
				NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
				PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
				PAGE_SET_WRITEBACK);
		ret = btrfs_submit_compressed_write(inode,
				    async_extent->start,
				    async_extent->ram_size,
				    ins.objectid,
				    ins.offset, async_extent->pages,
				    async_extent->nr_pages);
		alloc_hint = ins.objectid + ins.offset;
		kfree(async_extent);
		if (ret)
			goto out;
		cond_resched();
	}
	ret = 0;
out:
	return ret;
out_free_reserve:
	btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
out_free:
	extent_clear_unlock_delalloc(inode, async_extent->start,
				     async_extent->start +
				     async_extent->ram_size - 1,
				     NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
				     EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
				     PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
				     PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK);
	kfree(async_extent);
	goto again;
}

static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
				      u64 num_bytes)
{
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_map *em;
	u64 alloc_hint = 0;

	read_lock(&em_tree->lock);
	em = search_extent_mapping(em_tree, start, num_bytes);
	if (em) {
		/*
		 * if block start isn't an actual block number then find the
		 * first block in this inode and use that as a hint.  If that
		 * block is also bogus then just don't worry about it.
		 */
		if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
			free_extent_map(em);
			em = search_extent_mapping(em_tree, 0, 0);
			if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
				alloc_hint = em->block_start;
			if (em)
				free_extent_map(em);
		} else {
			alloc_hint = em->block_start;
			free_extent_map(em);
		}
	}
	read_unlock(&em_tree->lock);

	return alloc_hint;
}

/*
 * when extent_io.c finds a delayed allocation range in the file,
 * the callbacks end up in this code.  The basic idea is to
 * allocate extents on disk for the range, and create ordered data structs
 * in ram to track those extents.
 *
 * locked_page is the page that writepage had locked already.  We use
 * it to make sure we don't do extra locks or unlocks.
 *
 * *page_started is set to one if we unlock locked_page and do everything
 * required to start IO on it.  It may be clean and already done with
 * IO when we return.
 */
static noinline int cow_file_range(struct inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written,
				   int unlock)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 alloc_hint = 0;
	u64 num_bytes;
	unsigned long ram_size;
	u64 disk_num_bytes;
	u64 cur_alloc_size;
	u64 blocksize = root->sectorsize;
	struct btrfs_key ins;
	struct extent_map *em;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	int ret = 0;

	if (btrfs_is_free_space_inode(inode)) {
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	num_bytes = ALIGN(end - start + 1, blocksize);
	num_bytes = max(blocksize, num_bytes);
	disk_num_bytes = num_bytes;

	/* if this is a small write inside eof, kick off defrag */
	if (num_bytes < 64 * 1024 &&
	    (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
		btrfs_add_inode_defrag(NULL, inode);

	if (start == 0) {
		/* let's try to make an inline extent */
		ret = cow_file_range_inline(root, inode, start, end, 0, 0,
					    NULL);
		if (ret == 0) {
			extent_clear_unlock_delalloc(inode, start, end, NULL,
				     EXTENT_LOCKED | EXTENT_DELALLOC |
				     EXTENT_DEFRAG, PAGE_UNLOCK |
				     PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK |
				     PAGE_END_WRITEBACK);

			*nr_written = *nr_written +
			     (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
			*page_started = 1;
			goto out;
		} else if (ret < 0) {
			goto out_unlock;
		}
	}

	BUG_ON(disk_num_bytes >
	       btrfs_super_total_bytes(root->fs_info->super_copy));

	alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
	btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);

	while (disk_num_bytes > 0) {
		unsigned long op;

		cur_alloc_size = disk_num_bytes;
		ret = btrfs_reserve_extent(root, cur_alloc_size,
					   root->sectorsize, 0, alloc_hint,
					   &ins, 1);
		if (ret < 0)
			goto out_unlock;

		em = alloc_extent_map();
		if (!em) {
			ret = -ENOMEM;
			goto out_reserve;
		}
		em->start = start;
		em->orig_start = em->start;
		ram_size = ins.offset;
		em->len = ins.offset;
		em->mod_start = em->start;
		em->mod_len = em->len;

		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->orig_block_len = ins.offset;
		em->ram_bytes = ram_size;
		em->bdev = root->fs_info->fs_devices->latest_bdev;
		set_bit(EXTENT_FLAG_PINNED, &em->flags);
		em->generation = -1;

		while (1) {
			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, em, 1);
			write_unlock(&em_tree->lock);
			if (ret != -EEXIST) {
				free_extent_map(em);
				break;
			}
			btrfs_drop_extent_cache(inode, start,
						start + ram_size - 1, 0);
		}
		if (ret)
			goto out_reserve;

		cur_alloc_size = ins.offset;
		ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
					       ram_size, cur_alloc_size, 0);
		if (ret)
			goto out_reserve;

		if (root->root_key.objectid ==
		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
			ret = btrfs_reloc_clone_csums(inode, start,
						      cur_alloc_size);
			if (ret)
				goto out_reserve;
		}

		if (disk_num_bytes < cur_alloc_size)
			break;

		/* we're not doing compressed IO, don't unlock the first
		 * page (which the caller expects to stay locked), don't
		 * clear any dirty bits and don't set any writeback bits
		 *
		 * Do set the Private2 bit so we know this page was properly
		 * setup for writepage
		 */
		op = unlock ? PAGE_UNLOCK : 0;
		op |= PAGE_SET_PRIVATE2;

		extent_clear_unlock_delalloc(inode, start,
					     start + ram_size - 1, locked_page,
					     EXTENT_LOCKED | EXTENT_DELALLOC,
					     op);
		disk_num_bytes -= cur_alloc_size;
		num_bytes -= cur_alloc_size;
		alloc_hint = ins.objectid + ins.offset;
		start += cur_alloc_size;
	}
out:
	return ret;

out_reserve:
	btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
out_unlock:
	extent_clear_unlock_delalloc(inode, start, end, locked_page,
				     EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
				     EXTENT_DELALLOC | EXTENT_DEFRAG,
				     PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
				     PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK);
	goto out;
}

/*
 * work queue callback to start compression on a file and pages
 */
static noinline void async_cow_start(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	int num_added = 0;
	async_cow = container_of(work, struct async_cow, work);

	compress_file_range(async_cow->inode, async_cow->locked_page,
			    async_cow->start, async_cow->end, async_cow,
			    &num_added);
	if (num_added == 0) {
		btrfs_add_delayed_iput(async_cow->inode);
		async_cow->inode = NULL;
	}
}

/*
 * work queue callback to submit previously compressed pages
 */
static noinline void async_cow_submit(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	struct btrfs_root *root;
	unsigned long nr_pages;

	async_cow = container_of(work, struct async_cow, work);

	root = async_cow->root;
	nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
		PAGE_CACHE_SHIFT;

	if (atomic_sub_return(nr_pages, &root->fs_info->async_delalloc_pages) <
	    5 * 1024 * 1024 &&
	    waitqueue_active(&root->fs_info->async_submit_wait))
		wake_up(&root->fs_info->async_submit_wait);

	if (async_cow->inode)
		submit_compressed_extents(async_cow->inode, async_cow);
}

static noinline void async_cow_free(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	async_cow = container_of(work, struct async_cow, work);
	if (async_cow->inode)
		btrfs_add_delayed_iput(async_cow->inode);
	kfree(async_cow);
}

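/*
 * Split [start, end] into chunks (up to 512K each, or the whole range for
 * NOCOMPRESS inodes), queue one async_cow work item per chunk on the
 * delalloc workers, and throttle against fs_info->async_delalloc_pages.
 */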
static int cow_file_range_async(struct inode *inode, struct page *locked_page,
				u64 start, u64 end, int *page_started,
				unsigned long *nr_written)
{
	struct async_cow *async_cow;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	unsigned long nr_pages;
	u64 cur_end;
	int limit = 10 * 1024 * 1024;

	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
			 1, 0, NULL, GFP_NOFS);
	while (start < end) {
		async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
		BUG_ON(!async_cow); /* -ENOMEM */
		async_cow->inode = igrab(inode);
		async_cow->root = root;
		async_cow->locked_page = locked_page;
		async_cow->start = start;

		if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
			cur_end = end;
		else
			cur_end = min(end, start + 512 * 1024 - 1);

		async_cow->end = cur_end;
		INIT_LIST_HEAD(&async_cow->extents);

		async_cow->work.func = async_cow_start;
		async_cow->work.ordered_func = async_cow_submit;
		async_cow->work.ordered_free = async_cow_free;
		async_cow->work.flags = 0;

		nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
			PAGE_CACHE_SHIFT;
		atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);

		btrfs_queue_worker(&root->fs_info->delalloc_workers,
				   &async_cow->work);

		if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
			wait_event(root->fs_info->async_submit_wait,
			   (atomic_read(&root->fs_info->async_delalloc_pages) <
			    limit));
		}

		while (atomic_read(&root->fs_info->async_submit_draining) &&
		      atomic_read(&root->fs_info->async_delalloc_pages)) {
			wait_event(root->fs_info->async_submit_wait,
			  (atomic_read(&root->fs_info->async_delalloc_pages) ==
			   0));
		}

		*nr_written += nr_pages;
		start = cur_end + 1;
	}
	*page_started = 1;
	return 0;
}

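/*
 * Returns 1 if any checksum items exist for [bytenr, bytenr + num_bytes),
 * 0 otherwise.  run_delalloc_nocow() uses this to force COW when csums
 * already cover an extent.
 */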
d397712b | 1091 | static noinline int csum_exist_in_range(struct btrfs_root *root, |
17d217fe YZ |
1092 | u64 bytenr, u64 num_bytes) |
1093 | { | |
1094 | int ret; | |
1095 | struct btrfs_ordered_sum *sums; | |
1096 | LIST_HEAD(list); | |
1097 | ||
07d400a6 | 1098 | ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr, |
a2de733c | 1099 | bytenr + num_bytes - 1, &list, 0); |
17d217fe YZ |
1100 | if (ret == 0 && list_empty(&list)) |
1101 | return 0; | |
1102 | ||
1103 | while (!list_empty(&list)) { | |
1104 | sums = list_entry(list.next, struct btrfs_ordered_sum, list); | |
1105 | list_del(&sums->list); | |
1106 | kfree(sums); | |
1107 | } | |
1108 | return 1; | |
1109 | } | |
1110 | ||
d352ac68 CM |
1111 | /* |
1112 | * when nowcow writeback call back. This checks for snapshots or COW copies | |
1113 | * of the extents that exist in the file, and COWs the file as required. | |
1114 | * | |
1115 | * If no cow copies or snapshots exist, we write directly to the existing | |
1116 | * blocks on disk | |
1117 | */ | |
7f366cfe CM |
1118 | static noinline int run_delalloc_nocow(struct inode *inode, |
1119 | struct page *locked_page, | |
771ed689 CM |
1120 | u64 start, u64 end, int *page_started, int force, |
1121 | unsigned long *nr_written) | |
be20aa9d | 1122 | { |
be20aa9d | 1123 | struct btrfs_root *root = BTRFS_I(inode)->root; |
7ea394f1 | 1124 | struct btrfs_trans_handle *trans; |
be20aa9d | 1125 | struct extent_buffer *leaf; |
be20aa9d | 1126 | struct btrfs_path *path; |
80ff3856 | 1127 | struct btrfs_file_extent_item *fi; |
be20aa9d | 1128 | struct btrfs_key found_key; |
80ff3856 YZ |
1129 | u64 cow_start; |
1130 | u64 cur_offset; | |
1131 | u64 extent_end; | |
5d4f98a2 | 1132 | u64 extent_offset; |
80ff3856 YZ |
1133 | u64 disk_bytenr; |
1134 | u64 num_bytes; | |
b4939680 | 1135 | u64 disk_num_bytes; |
cc95bef6 | 1136 | u64 ram_bytes; |
80ff3856 | 1137 | int extent_type; |
79787eaa | 1138 | int ret, err; |
d899e052 | 1139 | int type; |
80ff3856 YZ |
1140 | int nocow; |
1141 | int check_prev = 1; | |
82d5902d | 1142 | bool nolock; |
33345d01 | 1143 | u64 ino = btrfs_ino(inode); |
be20aa9d CM |
1144 | |
1145 | path = btrfs_alloc_path(); | |
17ca04af | 1146 | if (!path) { |
c2790a2e JB |
1147 | extent_clear_unlock_delalloc(inode, start, end, locked_page, |
1148 | EXTENT_LOCKED | EXTENT_DELALLOC | | |
151a41bc JB |
1149 | EXTENT_DO_ACCOUNTING | |
1150 | EXTENT_DEFRAG, PAGE_UNLOCK | | |
c2790a2e JB |
1151 | PAGE_CLEAR_DIRTY | |
1152 | PAGE_SET_WRITEBACK | | |
1153 | PAGE_END_WRITEBACK); | |
d8926bb3 | 1154 | return -ENOMEM; |
17ca04af | 1155 | } |
82d5902d | 1156 | |
83eea1f1 | 1157 | nolock = btrfs_is_free_space_inode(inode); |
82d5902d LZ |
1158 | |
1159 | if (nolock) | |
7a7eaa40 | 1160 | trans = btrfs_join_transaction_nolock(root); |
82d5902d | 1161 | else |
7a7eaa40 | 1162 | trans = btrfs_join_transaction(root); |
ff5714cc | 1163 | |
79787eaa | 1164 | if (IS_ERR(trans)) { |
c2790a2e JB |
1165 | extent_clear_unlock_delalloc(inode, start, end, locked_page, |
1166 | EXTENT_LOCKED | EXTENT_DELALLOC | | |
151a41bc JB |
1167 | EXTENT_DO_ACCOUNTING | |
1168 | EXTENT_DEFRAG, PAGE_UNLOCK | | |
c2790a2e JB |
1169 | PAGE_CLEAR_DIRTY | |
1170 | PAGE_SET_WRITEBACK | | |
1171 | PAGE_END_WRITEBACK); | |
79787eaa JM |
1172 | btrfs_free_path(path); |
1173 | return PTR_ERR(trans); | |
1174 | } | |
1175 | ||
74b21075 | 1176 | trans->block_rsv = &root->fs_info->delalloc_block_rsv; |
be20aa9d | 1177 | |
80ff3856 YZ |
1178 | cow_start = (u64)-1; |
1179 | cur_offset = start; | |
1180 | while (1) { | |
33345d01 | 1181 | ret = btrfs_lookup_file_extent(trans, root, path, ino, |
80ff3856 | 1182 | cur_offset, 0); |
d788a349 | 1183 | if (ret < 0) |
79787eaa | 1184 | goto error; |
80ff3856 YZ |
1185 | if (ret > 0 && path->slots[0] > 0 && check_prev) { |
1186 | leaf = path->nodes[0]; | |
1187 | btrfs_item_key_to_cpu(leaf, &found_key, | |
1188 | path->slots[0] - 1); | |
33345d01 | 1189 | if (found_key.objectid == ino && |
80ff3856 YZ |
1190 | found_key.type == BTRFS_EXTENT_DATA_KEY) |
1191 | path->slots[0]--; | |
1192 | } | |
1193 | check_prev = 0; | |
1194 | next_slot: | |
1195 | leaf = path->nodes[0]; | |
1196 | if (path->slots[0] >= btrfs_header_nritems(leaf)) { | |
1197 | ret = btrfs_next_leaf(root, path); | |
d788a349 | 1198 | if (ret < 0) |
79787eaa | 1199 | goto error; |
80ff3856 YZ |
1200 | if (ret > 0) |
1201 | break; | |
1202 | leaf = path->nodes[0]; | |
1203 | } | |
be20aa9d | 1204 | |
80ff3856 YZ |
1205 | nocow = 0; |
1206 | disk_bytenr = 0; | |
17d217fe | 1207 | num_bytes = 0; |
80ff3856 YZ |
1208 | btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); |
1209 | ||
33345d01 | 1210 | if (found_key.objectid > ino || |
80ff3856 YZ |
1211 | found_key.type > BTRFS_EXTENT_DATA_KEY || |
1212 | found_key.offset > end) | |
1213 | break; | |
1214 | ||
1215 | if (found_key.offset > cur_offset) { | |
1216 | extent_end = found_key.offset; | |
e9061e21 | 1217 | extent_type = 0; |
80ff3856 YZ |
1218 | goto out_check; |
1219 | } | |
1220 | ||
1221 | fi = btrfs_item_ptr(leaf, path->slots[0], | |
1222 | struct btrfs_file_extent_item); | |
1223 | extent_type = btrfs_file_extent_type(leaf, fi); | |
1224 | ||
cc95bef6 | 1225 | ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi); |
d899e052 YZ |
1226 | if (extent_type == BTRFS_FILE_EXTENT_REG || |
1227 | extent_type == BTRFS_FILE_EXTENT_PREALLOC) { | |
80ff3856 | 1228 | disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); |
5d4f98a2 | 1229 | extent_offset = btrfs_file_extent_offset(leaf, fi); |
80ff3856 YZ |
1230 | extent_end = found_key.offset + |
1231 | btrfs_file_extent_num_bytes(leaf, fi); | |
b4939680 JB |
1232 | disk_num_bytes = |
1233 | btrfs_file_extent_disk_num_bytes(leaf, fi); | |
80ff3856 YZ |
1234 | if (extent_end <= start) { |
1235 | path->slots[0]++; | |
1236 | goto next_slot; | |
1237 | } | |
17d217fe YZ |
1238 | if (disk_bytenr == 0) |
1239 | goto out_check; | |
80ff3856 YZ |
1240 | if (btrfs_file_extent_compression(leaf, fi) || |
1241 | btrfs_file_extent_encryption(leaf, fi) || | |
1242 | btrfs_file_extent_other_encoding(leaf, fi)) | |
1243 | goto out_check; | |
d899e052 YZ |
1244 | if (extent_type == BTRFS_FILE_EXTENT_REG && !force) |
1245 | goto out_check; | |
d2fb3437 | 1246 | if (btrfs_extent_readonly(root, disk_bytenr)) |
80ff3856 | 1247 | goto out_check; |
33345d01 | 1248 | if (btrfs_cross_ref_exist(trans, root, ino, |
5d4f98a2 YZ |
1249 | found_key.offset - |
1250 | extent_offset, disk_bytenr)) | |
17d217fe | 1251 | goto out_check; |
5d4f98a2 | 1252 | disk_bytenr += extent_offset; |
17d217fe YZ |
1253 | disk_bytenr += cur_offset - found_key.offset; |
1254 | num_bytes = min(end + 1, extent_end) - cur_offset; | |
1255 | /* | |
1256 | * force cow if csum exists in the range. | |
1257 | * this ensure that csum for a given extent are | |
1258 | * either valid or do not exist. | |
1259 | */ | |
1260 | if (csum_exist_in_range(root, disk_bytenr, num_bytes)) | |
1261 | goto out_check; | |
80ff3856 YZ |
1262 | nocow = 1; |
1263 | } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { | |
1264 | extent_end = found_key.offset + | |
1265 | btrfs_file_extent_inline_len(leaf, fi); | |
1266 | extent_end = ALIGN(extent_end, root->sectorsize); | |
1267 | } else { | |
1268 | BUG_ON(1); | |
1269 | } | |
1270 | out_check: | |
1271 | if (extent_end <= start) { | |
1272 | path->slots[0]++; | |
1273 | goto next_slot; | |
1274 | } | |
1275 | if (!nocow) { | |
1276 | if (cow_start == (u64)-1) | |
1277 | cow_start = cur_offset; | |
1278 | cur_offset = extent_end; | |
1279 | if (cur_offset > end) | |
1280 | break; | |
1281 | path->slots[0]++; | |
1282 | goto next_slot; | |
7ea394f1 YZ |
1283 | } |
1284 | ||
b3b4aa74 | 1285 | btrfs_release_path(path); |
80ff3856 | 1286 | if (cow_start != (u64)-1) { |
00361589 JB |
1287 | ret = cow_file_range(inode, locked_page, |
1288 | cow_start, found_key.offset - 1, | |
1289 | page_started, nr_written, 1); | |
d788a349 | 1290 | if (ret) |
79787eaa | 1291 | goto error; |
80ff3856 | 1292 | cow_start = (u64)-1; |
7ea394f1 | 1293 | } |
80ff3856 | 1294 | |
d899e052 YZ |
1295 | if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) { |
1296 | struct extent_map *em; | |
1297 | struct extent_map_tree *em_tree; | |
1298 | em_tree = &BTRFS_I(inode)->extent_tree; | |
172ddd60 | 1299 | em = alloc_extent_map(); |
79787eaa | 1300 | BUG_ON(!em); /* -ENOMEM */ |
d899e052 | 1301 | em->start = cur_offset; |
70c8a91c | 1302 | em->orig_start = found_key.offset - extent_offset; |
d899e052 YZ |
1303 | em->len = num_bytes; |
1304 | em->block_len = num_bytes; | |
1305 | em->block_start = disk_bytenr; | |
b4939680 | 1306 | em->orig_block_len = disk_num_bytes; |
cc95bef6 | 1307 | em->ram_bytes = ram_bytes; |
d899e052 | 1308 | em->bdev = root->fs_info->fs_devices->latest_bdev; |
2ab28f32 JB |
1309 | em->mod_start = em->start; |
1310 | em->mod_len = em->len; | |
d899e052 | 1311 | set_bit(EXTENT_FLAG_PINNED, &em->flags); |
b11e234d | 1312 | set_bit(EXTENT_FLAG_FILLING, &em->flags); |
70c8a91c | 1313 | em->generation = -1; |
d899e052 | 1314 | while (1) { |
890871be | 1315 | write_lock(&em_tree->lock); |
09a2a8f9 | 1316 | ret = add_extent_mapping(em_tree, em, 1); |
890871be | 1317 | write_unlock(&em_tree->lock); |
d899e052 YZ |
1318 | if (ret != -EEXIST) { |
1319 | free_extent_map(em); | |
1320 | break; | |
1321 | } | |
1322 | btrfs_drop_extent_cache(inode, em->start, | |
1323 | em->start + em->len - 1, 0); | |
1324 | } | |
1325 | type = BTRFS_ORDERED_PREALLOC; | |
1326 | } else { | |
1327 | type = BTRFS_ORDERED_NOCOW; | |
1328 | } | |
80ff3856 YZ |
1329 | |
1330 | ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr, | |
d899e052 | 1331 | num_bytes, num_bytes, type); |
79787eaa | 1332 | BUG_ON(ret); /* -ENOMEM */ |
771ed689 | 1333 | |
efa56464 YZ |
1334 | if (root->root_key.objectid == |
1335 | BTRFS_DATA_RELOC_TREE_OBJECTID) { | |
1336 | ret = btrfs_reloc_clone_csums(inode, cur_offset, | |
1337 | num_bytes); | |
d788a349 | 1338 | if (ret) |
79787eaa | 1339 | goto error; |
efa56464 YZ |
1340 | } |
1341 | ||
c2790a2e JB |
1342 | extent_clear_unlock_delalloc(inode, cur_offset, |
1343 | cur_offset + num_bytes - 1, | |
1344 | locked_page, EXTENT_LOCKED | | |
1345 | EXTENT_DELALLOC, PAGE_UNLOCK | | |
1346 | PAGE_SET_PRIVATE2); | |
80ff3856 YZ |
1347 | cur_offset = extent_end; |
1348 | if (cur_offset > end) | |
1349 | break; | |
be20aa9d | 1350 | } |
b3b4aa74 | 1351 | btrfs_release_path(path); |
80ff3856 | 1352 | |
17ca04af | 1353 | if (cur_offset <= end && cow_start == (u64)-1) { |
80ff3856 | 1354 | cow_start = cur_offset; |
17ca04af JB |
1355 | cur_offset = end; |
1356 | } | |
1357 | ||
80ff3856 | 1358 | if (cow_start != (u64)-1) { |
00361589 JB |
1359 | ret = cow_file_range(inode, locked_page, cow_start, end, |
1360 | page_started, nr_written, 1); | |
d788a349 | 1361 | if (ret) |
79787eaa | 1362 | goto error; |
80ff3856 YZ |
1363 | } |
1364 | ||
79787eaa | 1365 | error: |
a698d075 | 1366 | err = btrfs_end_transaction(trans, root); |
79787eaa JM |
1367 | if (!ret) |
1368 | ret = err; | |
1369 | ||
17ca04af | 1370 | if (ret && cur_offset < end) |
c2790a2e JB |
1371 | extent_clear_unlock_delalloc(inode, cur_offset, end, |
1372 | locked_page, EXTENT_LOCKED | | |
151a41bc JB |
1373 | EXTENT_DELALLOC | EXTENT_DEFRAG | |
1374 | EXTENT_DO_ACCOUNTING, PAGE_UNLOCK | | |
1375 | PAGE_CLEAR_DIRTY | | |
c2790a2e JB |
1376 | PAGE_SET_WRITEBACK | |
1377 | PAGE_END_WRITEBACK); | |
7ea394f1 | 1378 | btrfs_free_path(path); |
79787eaa | 1379 | return ret; |
be20aa9d CM |
1380 | } |
1381 | ||
d352ac68 CM |
1382 | /* |
1383 | * extent_io.c call back to do delayed allocation processing | |
1384 | */ | |
c8b97818 | 1385 | static int run_delalloc_range(struct inode *inode, struct page *locked_page, |
771ed689 CM |
1386 | u64 start, u64 end, int *page_started, |
1387 | unsigned long *nr_written) | |
be20aa9d | 1388 | { |
be20aa9d | 1389 | int ret; |
7f366cfe | 1390 | struct btrfs_root *root = BTRFS_I(inode)->root; |
a2135011 | 1391 | |
7ddf5a42 | 1392 | if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) { |
c8b97818 | 1393 | ret = run_delalloc_nocow(inode, locked_page, start, end, |
d397712b | 1394 | page_started, 1, nr_written); |
7ddf5a42 | 1395 | } else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC) { |
d899e052 | 1396 | ret = run_delalloc_nocow(inode, locked_page, start, end, |
d397712b | 1397 | page_started, 0, nr_written); |
7ddf5a42 JB |
1398 | } else if (!btrfs_test_opt(root, COMPRESS) && |
1399 | !(BTRFS_I(inode)->force_compress) && | |
1400 | !(BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS)) { | |
7f366cfe CM |
1401 | ret = cow_file_range(inode, locked_page, start, end, |
1402 | page_started, nr_written, 1); | |
7ddf5a42 JB |
1403 | } else { |
1404 | set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, | |
1405 | &BTRFS_I(inode)->runtime_flags); | |
771ed689 | 1406 | ret = cow_file_range_async(inode, locked_page, start, end, |
d397712b | 1407 | page_started, nr_written); |
7ddf5a42 | 1408 | } |
b888db2b CM |
1409 | return ret; |
1410 | } | |
1411 | ||
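run_delalloc_range() above selects one of four write-back strategies from the inode flags and the compression settings. A minimal user-space sketch of that decision order follows; the flag constants and choose_path() are hypothetical stand-ins, not kernel API.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the BTRFS_INODE_* flags; only the decision
 * order from run_delalloc_range() is modelled here. */
#define INODE_NODATACOW 0x1
#define INODE_PREALLOC  0x2
#define INODE_COMPRESS  0x4

enum delalloc_path {
        PATH_NOCOW,          /* run_delalloc_nocow(..., force = 1) */
        PATH_PREALLOC_NOCOW, /* run_delalloc_nocow(..., force = 0) */
        PATH_PLAIN_COW,      /* cow_file_range() */
        PATH_ASYNC_COW,      /* cow_file_range_async(), may compress */
};

static enum delalloc_path choose_path(unsigned int flags,
                                      bool mount_compress, bool force_compress)
{
        if (flags & INODE_NODATACOW)
                return PATH_NOCOW;
        if (flags & INODE_PREALLOC)
                return PATH_PREALLOC_NOCOW;
        if (!mount_compress && !force_compress && !(flags & INODE_COMPRESS))
                return PATH_PLAIN_COW;
        return PATH_ASYNC_COW;
}

int main(void)
{
        printf("%d\n", choose_path(INODE_NODATACOW, true, false)); /* 0: nocow wins */
        printf("%d\n", choose_path(0, false, false));              /* 2: plain cow */
        printf("%d\n", choose_path(0, true, false));               /* 3: async cow */
        return 0;
}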
1bf85046 JM |
1412 | static void btrfs_split_extent_hook(struct inode *inode, |
1413 | struct extent_state *orig, u64 split) | |
9ed74f2d | 1414 | { |
0ca1f7ce | 1415 | /* not delalloc, ignore it */ |
9ed74f2d | 1416 | if (!(orig->state & EXTENT_DELALLOC)) |
1bf85046 | 1417 | return; |
9ed74f2d | 1418 | |
9e0baf60 JB |
1419 | spin_lock(&BTRFS_I(inode)->lock); |
1420 | BTRFS_I(inode)->outstanding_extents++; | |
1421 | spin_unlock(&BTRFS_I(inode)->lock); | |
9ed74f2d JB |
1422 | } |
1423 | ||
1424 | /* | |
1425 | * extent_io.c merge_extent_hook, used to track merged delayed allocation | |
1426 | * extents so we can keep track of new extents that are just merged onto old | |
1427 | * extents, such as when we are doing sequential writes, so we can properly | |
1428 | * account for the metadata space we'll need. | |
1429 | */ | |
1bf85046 JM |
1430 | static void btrfs_merge_extent_hook(struct inode *inode, |
1431 | struct extent_state *new, | |
1432 | struct extent_state *other) | |
9ed74f2d | 1433 | { |
9ed74f2d JB |
1434 | /* not delalloc, ignore it */ |
1435 | if (!(other->state & EXTENT_DELALLOC)) | |
1bf85046 | 1436 | return; |
9ed74f2d | 1437 | |
9e0baf60 JB |
1438 | spin_lock(&BTRFS_I(inode)->lock); |
1439 | BTRFS_I(inode)->outstanding_extents--; | |
1440 | spin_unlock(&BTRFS_I(inode)->lock); | |
9ed74f2d JB |
1441 | } |
1442 | ||
eb73c1b7 MX |
1443 | static void btrfs_add_delalloc_inodes(struct btrfs_root *root, |
1444 | struct inode *inode) | |
1445 | { | |
1446 | spin_lock(&root->delalloc_lock); | |
1447 | if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) { | |
1448 | list_add_tail(&BTRFS_I(inode)->delalloc_inodes, | |
1449 | &root->delalloc_inodes); | |
1450 | set_bit(BTRFS_INODE_IN_DELALLOC_LIST, | |
1451 | &BTRFS_I(inode)->runtime_flags); | |
1452 | root->nr_delalloc_inodes++; | |
1453 | if (root->nr_delalloc_inodes == 1) { | |
1454 | spin_lock(&root->fs_info->delalloc_root_lock); | |
1455 | BUG_ON(!list_empty(&root->delalloc_root)); | |
1456 | list_add_tail(&root->delalloc_root, | |
1457 | &root->fs_info->delalloc_roots); | |
1458 | spin_unlock(&root->fs_info->delalloc_root_lock); | |
1459 | } | |
1460 | } | |
1461 | spin_unlock(&root->delalloc_lock); | |
1462 | } | |
1463 | ||
1464 | static void btrfs_del_delalloc_inode(struct btrfs_root *root, | |
1465 | struct inode *inode) | |
1466 | { | |
1467 | spin_lock(&root->delalloc_lock); | |
1468 | if (!list_empty(&BTRFS_I(inode)->delalloc_inodes)) { | |
1469 | list_del_init(&BTRFS_I(inode)->delalloc_inodes); | |
1470 | clear_bit(BTRFS_INODE_IN_DELALLOC_LIST, | |
1471 | &BTRFS_I(inode)->runtime_flags); | |
1472 | root->nr_delalloc_inodes--; | |
1473 | if (!root->nr_delalloc_inodes) { | |
1474 | spin_lock(&root->fs_info->delalloc_root_lock); | |
1475 | BUG_ON(list_empty(&root->delalloc_root)); | |
1476 | list_del_init(&root->delalloc_root); | |
1477 | spin_unlock(&root->fs_info->delalloc_root_lock); | |
1478 | } | |
1479 | } | |
1480 | spin_unlock(&root->delalloc_lock); | |
1481 | } | |
1482 | ||
d352ac68 CM |
1483 | /* |
1484 | * extent_io.c set_bit_hook, used to track delayed allocation | |
1485 | * bytes in this file, and to maintain the list of inodes that | |
1486 | * have pending delalloc work to be done. | |
1487 | */ | |
1bf85046 | 1488 | static void btrfs_set_bit_hook(struct inode *inode, |
41074888 | 1489 | struct extent_state *state, unsigned long *bits) |
291d673e | 1490 | { |
9ed74f2d | 1491 | |
75eff68e CM |
1492 | /* |
1493 | * set_bit and clear_bit hooks normally require _irqsave/restore | |
27160b6b | 1494 | * but in this case, we are only testing for the DELALLOC |
75eff68e CM |
1495 | * bit, which is only set or cleared with irqs on |
1496 | */ | |
0ca1f7ce | 1497 | if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) { |
291d673e | 1498 | struct btrfs_root *root = BTRFS_I(inode)->root; |
0ca1f7ce | 1499 | u64 len = state->end + 1 - state->start; |
83eea1f1 | 1500 | bool do_list = !btrfs_is_free_space_inode(inode); |
9ed74f2d | 1501 | |
9e0baf60 | 1502 | if (*bits & EXTENT_FIRST_DELALLOC) { |
0ca1f7ce | 1503 | *bits &= ~EXTENT_FIRST_DELALLOC; |
9e0baf60 JB |
1504 | } else { |
1505 | spin_lock(&BTRFS_I(inode)->lock); | |
1506 | BTRFS_I(inode)->outstanding_extents++; | |
1507 | spin_unlock(&BTRFS_I(inode)->lock); | |
1508 | } | |
287a0ab9 | 1509 | |
963d678b MX |
1510 | __percpu_counter_add(&root->fs_info->delalloc_bytes, len, |
1511 | root->fs_info->delalloc_batch); | |
df0af1a5 | 1512 | spin_lock(&BTRFS_I(inode)->lock); |
0ca1f7ce | 1513 | BTRFS_I(inode)->delalloc_bytes += len; |
df0af1a5 | 1514 | if (do_list && !test_bit(BTRFS_INODE_IN_DELALLOC_LIST, |
eb73c1b7 MX |
1515 | &BTRFS_I(inode)->runtime_flags)) |
1516 | btrfs_add_delalloc_inodes(root, inode); | |
df0af1a5 | 1517 | spin_unlock(&BTRFS_I(inode)->lock); |
291d673e | 1518 | } |
291d673e CM |
1519 | } |
1520 | ||
d352ac68 CM |
1521 | /* |
1522 | * extent_io.c clear_bit_hook, see set_bit_hook for why | |
1523 | */ | |
1bf85046 | 1524 | static void btrfs_clear_bit_hook(struct inode *inode, |
41074888 DS |
1525 | struct extent_state *state, |
1526 | unsigned long *bits) | |
291d673e | 1527 | { |
75eff68e CM |
1528 | /* |
1529 | * set_bit and clear_bit hooks normally require _irqsave/restore | |
27160b6b | 1530 | * but in this case, we are only testing for the DELALLOC |
75eff68e CM |
1531 | * bit, which is only set or cleared with irqs on |
1532 | */ | |
0ca1f7ce | 1533 | if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) { |
291d673e | 1534 | struct btrfs_root *root = BTRFS_I(inode)->root; |
0ca1f7ce | 1535 | u64 len = state->end + 1 - state->start; |
83eea1f1 | 1536 | bool do_list = !btrfs_is_free_space_inode(inode); |
bcbfce8a | 1537 | |
9e0baf60 | 1538 | if (*bits & EXTENT_FIRST_DELALLOC) { |
0ca1f7ce | 1539 | *bits &= ~EXTENT_FIRST_DELALLOC; |
9e0baf60 JB |
1540 | } else if (!(*bits & EXTENT_DO_ACCOUNTING)) { |
1541 | spin_lock(&BTRFS_I(inode)->lock); | |
1542 | BTRFS_I(inode)->outstanding_extents--; | |
1543 | spin_unlock(&BTRFS_I(inode)->lock); | |
1544 | } | |
0ca1f7ce | 1545 | |
b6d08f06 JB |
1546 | /* |
1547 | * We don't reserve metadata space for space cache inodes so we | |
1548 | * don't need to call delalloc_release_metadata if there is an | |
1549 | * error. | |
1550 | */ | |
1551 | if (*bits & EXTENT_DO_ACCOUNTING && | |
1552 | root != root->fs_info->tree_root) | |
0ca1f7ce YZ |
1553 | btrfs_delalloc_release_metadata(inode, len); |
1554 | ||
0cb59c99 | 1555 | if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID |
7ee9e440 | 1556 | && do_list && !(state->state & EXTENT_NORESERVE)) |
0ca1f7ce | 1557 | btrfs_free_reserved_data_space(inode, len); |
9ed74f2d | 1558 | |
963d678b MX |
1559 | __percpu_counter_add(&root->fs_info->delalloc_bytes, -len, |
1560 | root->fs_info->delalloc_batch); | |
df0af1a5 | 1561 | spin_lock(&BTRFS_I(inode)->lock); |
0ca1f7ce | 1562 | BTRFS_I(inode)->delalloc_bytes -= len; |
0cb59c99 | 1563 | if (do_list && BTRFS_I(inode)->delalloc_bytes == 0 && |
df0af1a5 | 1564 | test_bit(BTRFS_INODE_IN_DELALLOC_LIST, |
eb73c1b7 MX |
1565 | &BTRFS_I(inode)->runtime_flags)) |
1566 | btrfs_del_delalloc_inode(root, inode); | |
df0af1a5 | 1567 | spin_unlock(&BTRFS_I(inode)->lock); |
291d673e | 1568 | } |
291d673e CM |
1569 | } |
1570 | ||
d352ac68 CM |
1571 | /* |
1572 | * extent_io.c merge_bio_hook, this must check the chunk tree to make sure | |
1573 | * we don't create bios that span stripes or chunks | |
1574 | */ | |
64a16701 | 1575 | int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset, |
c8b97818 CM |
1576 | size_t size, struct bio *bio, |
1577 | unsigned long bio_flags) | |
239b14b3 CM |
1578 | { |
1579 | struct btrfs_root *root = BTRFS_I(page->mapping->host)->root; | |
a62b9401 | 1580 | u64 logical = (u64)bio->bi_sector << 9; |
239b14b3 CM |
1581 | u64 length = 0; |
1582 | u64 map_length; | |
239b14b3 CM |
1583 | int ret; |
1584 | ||
771ed689 CM |
1585 | if (bio_flags & EXTENT_BIO_COMPRESSED) |
1586 | return 0; | |
1587 | ||
f2d8d74d | 1588 | length = bio->bi_size; |
239b14b3 | 1589 | map_length = length; |
64a16701 | 1590 | ret = btrfs_map_block(root->fs_info, rw, logical, |
f188591e | 1591 | &map_length, NULL, 0); |
3ec706c8 | 1592 | /* Will always return 0 with map_multi == NULL */ |
3444a972 | 1593 | BUG_ON(ret < 0); |
d397712b | 1594 | if (map_length < length + size) |
239b14b3 | 1595 | return 1; |
3444a972 | 1596 | return 0; |
239b14b3 CM |
1597 | } |
1598 | ||
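btrfs_merge_bio_hook() asks btrfs_map_block() how far the current mapping extends and refuses to grow a bio past that point. A standalone sketch of the same boundary test, assuming a fixed 64 KiB stripe length instead of the chunk-tree lookup the kernel performs:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical fixed stripe length; the kernel derives the real limit from
 * btrfs_map_block(), which consults the chunk tree. */
#define STRIPE_LEN (64 * 1024)

/* Return true if appending 'size' bytes to a bio that already covers
 * 'length' bytes starting at byte 'logical' would cross a stripe boundary,
 * i.e. the caller should submit the current bio and start a new one. */
static bool would_span_stripe(uint64_t logical, uint64_t length, uint64_t size)
{
        uint64_t map_length = STRIPE_LEN - logical % STRIPE_LEN; /* bytes left */

        return map_length < length + size;
}

int main(void)
{
        printf("%d\n", would_span_stripe(60 * 1024, 0, 8 * 1024)); /* 1: split  */
        printf("%d\n", would_span_stripe(0, 4096, 4096));          /* 0: fits   */
        return 0;
}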
d352ac68 CM |
1599 | /* |
1600 | * in order to insert checksums into the metadata in large chunks, | |
1601 | * we wait until bio submission time. All the pages in the bio are | |
1602 | * checksummed and sums are attached onto the ordered extent record. | |
1603 | * | |
1604 | * At IO completion time the csums attached to the ordered extent record | |
1605 | * are inserted into the btree | |
1606 | */ | |
d397712b CM |
1607 | static int __btrfs_submit_bio_start(struct inode *inode, int rw, |
1608 | struct bio *bio, int mirror_num, | |
eaf25d93 CM |
1609 | unsigned long bio_flags, |
1610 | u64 bio_offset) | |
065631f6 | 1611 | { |
065631f6 | 1612 | struct btrfs_root *root = BTRFS_I(inode)->root; |
065631f6 | 1613 | int ret = 0; |
e015640f | 1614 | |
d20f7043 | 1615 | ret = btrfs_csum_one_bio(root, inode, bio, 0, 0); |
79787eaa | 1616 | BUG_ON(ret); /* -ENOMEM */ |
4a69a410 CM |
1617 | return 0; |
1618 | } | |
e015640f | 1619 | |
4a69a410 CM |
1620 | /* |
1621 | * in order to insert checksums into the metadata in large chunks, | |
1622 | * we wait until bio submission time. All the pages in the bio are | |
1623 | * checksummed and sums are attached onto the ordered extent record. | |
1624 | * | |
1625 | * At IO completion time the csums attached to the ordered extent record | |
1626 | * are inserted into the btree | |
1627 | */ | |
b2950863 | 1628 | static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio, |
eaf25d93 CM |
1629 | int mirror_num, unsigned long bio_flags, |
1630 | u64 bio_offset) | |
4a69a410 CM |
1631 | { |
1632 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
61891923 SB |
1633 | int ret; |
1634 | ||
1635 | ret = btrfs_map_bio(root, rw, bio, mirror_num, 1); | |
1636 | if (ret) | |
1637 | bio_endio(bio, ret); | |
1638 | return ret; | |
44b8bd7e CM |
1639 | } |
1640 | ||
d352ac68 | 1641 | /* |
cad321ad CM |
1642 | * extent_io.c submission hook. This does the right thing for csum calculation |
1643 | * on write, or reading the csums from the tree before a read | |
d352ac68 | 1644 | */ |
b2950863 | 1645 | static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, |
eaf25d93 CM |
1646 | int mirror_num, unsigned long bio_flags, |
1647 | u64 bio_offset) | |
44b8bd7e CM |
1648 | { |
1649 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
1650 | int ret = 0; | |
19b9bdb0 | 1651 | int skip_sum; |
0417341e | 1652 | int metadata = 0; |
b812ce28 | 1653 | int async = !atomic_read(&BTRFS_I(inode)->sync_writers); |
44b8bd7e | 1654 | |
6cbff00f | 1655 | skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; |
cad321ad | 1656 | |
83eea1f1 | 1657 | if (btrfs_is_free_space_inode(inode)) |
0417341e JM |
1658 | metadata = 2; |
1659 | ||
7b6d91da | 1660 | if (!(rw & REQ_WRITE)) { |
5fd02043 JB |
1661 | ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata); |
1662 | if (ret) | |
61891923 | 1663 | goto out; |
5fd02043 | 1664 | |
d20f7043 | 1665 | if (bio_flags & EXTENT_BIO_COMPRESSED) { |
61891923 SB |
1666 | ret = btrfs_submit_compressed_read(inode, bio, |
1667 | mirror_num, | |
1668 | bio_flags); | |
1669 | goto out; | |
c2db1073 TI |
1670 | } else if (!skip_sum) { |
1671 | ret = btrfs_lookup_bio_sums(root, inode, bio, NULL); | |
1672 | if (ret) | |
61891923 | 1673 | goto out; |
c2db1073 | 1674 | } |
4d1b5fb4 | 1675 | goto mapit; |
b812ce28 | 1676 | } else if (async && !skip_sum) { |
17d217fe YZ |
1677 | /* csum items have already been cloned */ |
1678 | if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID) | |
1679 | goto mapit; | |
19b9bdb0 | 1680 | /* we're doing a write, do the async checksumming */ |
61891923 | 1681 | ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info, |
44b8bd7e | 1682 | inode, rw, bio, mirror_num, |
eaf25d93 CM |
1683 | bio_flags, bio_offset, |
1684 | __btrfs_submit_bio_start, | |
4a69a410 | 1685 | __btrfs_submit_bio_done); |
61891923 | 1686 | goto out; |
b812ce28 JB |
1687 | } else if (!skip_sum) { |
1688 | ret = btrfs_csum_one_bio(root, inode, bio, 0, 0); | |
1689 | if (ret) | |
1690 | goto out; | |
19b9bdb0 CM |
1691 | } |
1692 | ||
0b86a832 | 1693 | mapit: |
61891923 SB |
1694 | ret = btrfs_map_bio(root, rw, bio, mirror_num, 0); |
1695 | ||
1696 | out: | |
1697 | if (ret < 0) | |
1698 | bio_endio(bio, ret); | |
1699 | return ret; | |
065631f6 | 1700 | } |
6885f308 | 1701 | |
d352ac68 CM |
1702 | /* |
1703 | * given a list of ordered sums, record them in the inode. This happens | |
1704 | * at IO completion time based on sums calculated at bio submission time. | |
1705 | */ | |
ba1da2f4 | 1706 | static noinline int add_pending_csums(struct btrfs_trans_handle *trans, |
e6dcd2dc CM |
1707 | struct inode *inode, u64 file_offset, |
1708 | struct list_head *list) | |
1709 | { | |
e6dcd2dc CM |
1710 | struct btrfs_ordered_sum *sum; |
1711 | ||
c6e30871 | 1712 | list_for_each_entry(sum, list, list) { |
39847c4d | 1713 | trans->adding_csums = 1; |
d20f7043 CM |
1714 | btrfs_csum_file_blocks(trans, |
1715 | BTRFS_I(inode)->root->fs_info->csum_root, sum); | |
39847c4d | 1716 | trans->adding_csums = 0; |
e6dcd2dc CM |
1717 | } |
1718 | return 0; | |
1719 | } | |
1720 | ||
2ac55d41 JB |
1721 | int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end, |
1722 | struct extent_state **cached_state) | |
ea8c2819 | 1723 | { |
6c1500f2 | 1724 | WARN_ON((end & (PAGE_CACHE_SIZE - 1)) == 0); |
ea8c2819 | 1725 | return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end, |
2ac55d41 | 1726 | cached_state, GFP_NOFS); |
ea8c2819 CM |
1727 | } |
1728 | ||
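The WARN_ON in btrfs_set_extent_delalloc() catches a common off-by-one: 'end' is an inclusive byte offset, so for page-granular ranges it should land on the last byte of a page, never on a page boundary. A quick illustration, assuming a 4 KiB page size:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u /* hypothetical; the kernel uses PAGE_CACHE_SIZE */

int main(void)
{
        uint64_t start = 0;
        uint64_t good_end = start + PAGE_SIZE - 1; /* inclusive end: 4095 */
        uint64_t bad_end = start + PAGE_SIZE;      /* exclusive end passed by mistake */

        /* Mirrors WARN_ON((end & (PAGE_CACHE_SIZE - 1)) == 0): an inclusive end
         * that is page aligned almost certainly came from an exclusive end or
         * a zero-length range. */
        printf("good_end suspicious? %d\n", (good_end & (PAGE_SIZE - 1)) == 0); /* 0 */
        printf("bad_end suspicious?  %d\n", (bad_end & (PAGE_SIZE - 1)) == 0);  /* 1 */
        return 0;
}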
d352ac68 | 1729 | /* see btrfs_writepage_start_hook for details on why this is required */ |
247e743c CM |
1730 | struct btrfs_writepage_fixup { |
1731 | struct page *page; | |
1732 | struct btrfs_work work; | |
1733 | }; | |
1734 | ||
b2950863 | 1735 | static void btrfs_writepage_fixup_worker(struct btrfs_work *work) |
247e743c CM |
1736 | { |
1737 | struct btrfs_writepage_fixup *fixup; | |
1738 | struct btrfs_ordered_extent *ordered; | |
2ac55d41 | 1739 | struct extent_state *cached_state = NULL; |
247e743c CM |
1740 | struct page *page; |
1741 | struct inode *inode; | |
1742 | u64 page_start; | |
1743 | u64 page_end; | |
87826df0 | 1744 | int ret; |
247e743c CM |
1745 | |
1746 | fixup = container_of(work, struct btrfs_writepage_fixup, work); | |
1747 | page = fixup->page; | |
4a096752 | 1748 | again: |
247e743c CM |
1749 | lock_page(page); |
1750 | if (!page->mapping || !PageDirty(page) || !PageChecked(page)) { | |
1751 | ClearPageChecked(page); | |
1752 | goto out_page; | |
1753 | } | |
1754 | ||
1755 | inode = page->mapping->host; | |
1756 | page_start = page_offset(page); | |
1757 | page_end = page_offset(page) + PAGE_CACHE_SIZE - 1; | |
1758 | ||
2ac55d41 | 1759 | lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0, |
d0082371 | 1760 | &cached_state); |
4a096752 CM |
1761 | |
1762 | /* already ordered? We're done */ | |
8b62b72b | 1763 | if (PagePrivate2(page)) |
247e743c | 1764 | goto out; |
4a096752 CM |
1765 | |
1766 | ordered = btrfs_lookup_ordered_extent(inode, page_start); | |
1767 | if (ordered) { | |
2ac55d41 JB |
1768 | unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, |
1769 | page_end, &cached_state, GFP_NOFS); | |
4a096752 CM |
1770 | unlock_page(page); |
1771 | btrfs_start_ordered_extent(inode, ordered, 1); | |
87826df0 | 1772 | btrfs_put_ordered_extent(ordered); |
4a096752 CM |
1773 | goto again; |
1774 | } | |
247e743c | 1775 | |
87826df0 JM |
1776 | ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE); |
1777 | if (ret) { | |
1778 | mapping_set_error(page->mapping, ret); | |
1779 | end_extent_writepage(page, ret, page_start, page_end); | |
1780 | ClearPageChecked(page); | |
1781 | goto out; | |
1782 | } | |
1783 | ||
2ac55d41 | 1784 | btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state); |
247e743c | 1785 | ClearPageChecked(page); |
87826df0 | 1786 | set_page_dirty(page); |
247e743c | 1787 | out: |
2ac55d41 JB |
1788 | unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end, |
1789 | &cached_state, GFP_NOFS); | |
247e743c CM |
1790 | out_page: |
1791 | unlock_page(page); | |
1792 | page_cache_release(page); | |
b897abec | 1793 | kfree(fixup); |
247e743c CM |
1794 | } |
1795 | ||
1796 | /* | |
1797 | * There are a few paths in the higher layers of the kernel that directly | |
1798 | * set the page dirty bit without asking the filesystem if it is a | |
1799 | * good idea. This causes problems because we want to make sure COW | |
1800 | * properly happens and the data=ordered rules are followed. | |
1801 | * | |
c8b97818 | 1802 | * In our case any range that doesn't have the ORDERED bit set |
247e743c CM |
1803 | * hasn't been properly set up for IO. We kick off an async process | |
1804 | * to fix it up. The async helper will wait for ordered extents, set | |
1805 | * the delalloc bit and make it safe to write the page. | |
1806 | */ | |
b2950863 | 1807 | static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end) |
247e743c CM |
1808 | { |
1809 | struct inode *inode = page->mapping->host; | |
1810 | struct btrfs_writepage_fixup *fixup; | |
1811 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
247e743c | 1812 | |
8b62b72b CM |
1813 | /* this page is properly in the ordered list */ |
1814 | if (TestClearPagePrivate2(page)) | |
247e743c CM |
1815 | return 0; |
1816 | ||
1817 | if (PageChecked(page)) | |
1818 | return -EAGAIN; | |
1819 | ||
1820 | fixup = kzalloc(sizeof(*fixup), GFP_NOFS); | |
1821 | if (!fixup) | |
1822 | return -EAGAIN; | |
f421950f | 1823 | |
247e743c CM |
1824 | SetPageChecked(page); |
1825 | page_cache_get(page); | |
1826 | fixup->work.func = btrfs_writepage_fixup_worker; | |
1827 | fixup->page = page; | |
1828 | btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work); | |
87826df0 | 1829 | return -EBUSY; |
247e743c CM |
1830 | } |
1831 | ||
d899e052 YZ |
1832 | static int insert_reserved_file_extent(struct btrfs_trans_handle *trans, |
1833 | struct inode *inode, u64 file_pos, | |
1834 | u64 disk_bytenr, u64 disk_num_bytes, | |
1835 | u64 num_bytes, u64 ram_bytes, | |
1836 | u8 compression, u8 encryption, | |
1837 | u16 other_encoding, int extent_type) | |
1838 | { | |
1839 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
1840 | struct btrfs_file_extent_item *fi; | |
1841 | struct btrfs_path *path; | |
1842 | struct extent_buffer *leaf; | |
1843 | struct btrfs_key ins; | |
d899e052 YZ |
1844 | int ret; |
1845 | ||
1846 | path = btrfs_alloc_path(); | |
d8926bb3 MF |
1847 | if (!path) |
1848 | return -ENOMEM; | |
d899e052 | 1849 | |
b9473439 | 1850 | path->leave_spinning = 1; |
a1ed835e CM |
1851 | |
1852 | /* | |
1853 | * we may be replacing one extent in the tree with another. | |
1854 | * The new extent is pinned in the extent map, and we don't want | |
1855 | * to drop it from the cache until it is completely in the btree. | |
1856 | * | |
1857 | * So, tell btrfs_drop_extents to leave this extent in the cache. | |
1858 | * The caller is expected to unpin it and allow it to be merged | |
1859 | * with the others. | |
1860 | */ | |
5dc562c5 | 1861 | ret = btrfs_drop_extents(trans, root, inode, file_pos, |
2671485d | 1862 | file_pos + num_bytes, 0); |
79787eaa JM |
1863 | if (ret) |
1864 | goto out; | |
d899e052 | 1865 | |
33345d01 | 1866 | ins.objectid = btrfs_ino(inode); |
d899e052 YZ |
1867 | ins.offset = file_pos; |
1868 | ins.type = BTRFS_EXTENT_DATA_KEY; | |
1869 | ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi)); | |
79787eaa JM |
1870 | if (ret) |
1871 | goto out; | |
d899e052 YZ |
1872 | leaf = path->nodes[0]; |
1873 | fi = btrfs_item_ptr(leaf, path->slots[0], | |
1874 | struct btrfs_file_extent_item); | |
1875 | btrfs_set_file_extent_generation(leaf, fi, trans->transid); | |
1876 | btrfs_set_file_extent_type(leaf, fi, extent_type); | |
1877 | btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr); | |
1878 | btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes); | |
1879 | btrfs_set_file_extent_offset(leaf, fi, 0); | |
1880 | btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes); | |
1881 | btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes); | |
1882 | btrfs_set_file_extent_compression(leaf, fi, compression); | |
1883 | btrfs_set_file_extent_encryption(leaf, fi, encryption); | |
1884 | btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding); | |
b9473439 | 1885 | |
d899e052 | 1886 | btrfs_mark_buffer_dirty(leaf); |
ce195332 | 1887 | btrfs_release_path(path); |
d899e052 YZ |
1888 | |
1889 | inode_add_bytes(inode, num_bytes); | |
d899e052 YZ |
1890 | |
1891 | ins.objectid = disk_bytenr; | |
1892 | ins.offset = disk_num_bytes; | |
1893 | ins.type = BTRFS_EXTENT_ITEM_KEY; | |
5d4f98a2 YZ |
1894 | ret = btrfs_alloc_reserved_file_extent(trans, root, |
1895 | root->root_key.objectid, | |
33345d01 | 1896 | btrfs_ino(inode), file_pos, &ins); |
79787eaa | 1897 | out: |
d899e052 | 1898 | btrfs_free_path(path); |
b9473439 | 1899 | |
79787eaa | 1900 | return ret; |
d899e052 YZ |
1901 | } |
1902 | ||
38c227d8 LB |
1903 | /* snapshot-aware defrag */ |
1904 | struct sa_defrag_extent_backref { | |
1905 | struct rb_node node; | |
1906 | struct old_sa_defrag_extent *old; | |
1907 | u64 root_id; | |
1908 | u64 inum; | |
1909 | u64 file_pos; | |
1910 | u64 extent_offset; | |
1911 | u64 num_bytes; | |
1912 | u64 generation; | |
1913 | }; | |
1914 | ||
1915 | struct old_sa_defrag_extent { | |
1916 | struct list_head list; | |
1917 | struct new_sa_defrag_extent *new; | |
1918 | ||
1919 | u64 extent_offset; | |
1920 | u64 bytenr; | |
1921 | u64 offset; | |
1922 | u64 len; | |
1923 | int count; | |
1924 | }; | |
1925 | ||
1926 | struct new_sa_defrag_extent { | |
1927 | struct rb_root root; | |
1928 | struct list_head head; | |
1929 | struct btrfs_path *path; | |
1930 | struct inode *inode; | |
1931 | u64 file_pos; | |
1932 | u64 len; | |
1933 | u64 bytenr; | |
1934 | u64 disk_len; | |
1935 | u8 compress_type; | |
1936 | }; | |
1937 | ||
1938 | static int backref_comp(struct sa_defrag_extent_backref *b1, | |
1939 | struct sa_defrag_extent_backref *b2) | |
1940 | { | |
1941 | if (b1->root_id < b2->root_id) | |
1942 | return -1; | |
1943 | else if (b1->root_id > b2->root_id) | |
1944 | return 1; | |
1945 | ||
1946 | if (b1->inum < b2->inum) | |
1947 | return -1; | |
1948 | else if (b1->inum > b2->inum) | |
1949 | return 1; | |
1950 | ||
1951 | if (b1->file_pos < b2->file_pos) | |
1952 | return -1; | |
1953 | else if (b1->file_pos > b2->file_pos) | |
1954 | return 1; | |
1955 | ||
1956 | /* | |
1957 | * [------------------------------] ===> (a range of space) | |
1958 | * |<--->| |<---->| =============> (fs/file tree A) | |
1959 | * |<---------------------------->| ===> (fs/file tree B) | |
1960 | * | |
1961 | * A range of space can refer to two file extents in one tree while | |
1962 | * referring to only one file extent in another tree. | |
1963 | * | |
1964 | * So we may process a disk offset more than once (two extents in A) | |
1965 | * that maps to the same extent (one extent in B), and then insert two | |
1966 | * identical backrefs (both referring to the extent in B). | |
1967 | */ | |
1968 | return 0; | |
1969 | } | |
1970 | ||
1971 | static void backref_insert(struct rb_root *root, | |
1972 | struct sa_defrag_extent_backref *backref) | |
1973 | { | |
1974 | struct rb_node **p = &root->rb_node; | |
1975 | struct rb_node *parent = NULL; | |
1976 | struct sa_defrag_extent_backref *entry; | |
1977 | int ret; | |
1978 | ||
1979 | while (*p) { | |
1980 | parent = *p; | |
1981 | entry = rb_entry(parent, struct sa_defrag_extent_backref, node); | |
1982 | ||
1983 | ret = backref_comp(backref, entry); | |
1984 | if (ret < 0) | |
1985 | p = &(*p)->rb_left; | |
1986 | else | |
1987 | p = &(*p)->rb_right; | |
1988 | } | |
1989 | ||
1990 | rb_link_node(&backref->node, parent, p); | |
1991 | rb_insert_color(&backref->node, root); | |
1992 | } | |
1993 | ||
1994 | /* | |
1995 | * Note the backref might have changed; in that case we just return 0. | |
1996 | */ | |
1997 | static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id, | |
1998 | void *ctx) | |
1999 | { | |
2000 | struct btrfs_file_extent_item *extent; | |
2001 | struct btrfs_fs_info *fs_info; | |
2002 | struct old_sa_defrag_extent *old = ctx; | |
2003 | struct new_sa_defrag_extent *new = old->new; | |
2004 | struct btrfs_path *path = new->path; | |
2005 | struct btrfs_key key; | |
2006 | struct btrfs_root *root; | |
2007 | struct sa_defrag_extent_backref *backref; | |
2008 | struct extent_buffer *leaf; | |
2009 | struct inode *inode = new->inode; | |
2010 | int slot; | |
2011 | int ret; | |
2012 | u64 extent_offset; | |
2013 | u64 num_bytes; | |
2014 | ||
2015 | if (BTRFS_I(inode)->root->root_key.objectid == root_id && | |
2016 | inum == btrfs_ino(inode)) | |
2017 | return 0; | |
2018 | ||
2019 | key.objectid = root_id; | |
2020 | key.type = BTRFS_ROOT_ITEM_KEY; | |
2021 | key.offset = (u64)-1; | |
2022 | ||
2023 | fs_info = BTRFS_I(inode)->root->fs_info; | |
2024 | root = btrfs_read_fs_root_no_name(fs_info, &key); | |
2025 | if (IS_ERR(root)) { | |
2026 | if (PTR_ERR(root) == -ENOENT) | |
2027 | return 0; | |
2028 | WARN_ON(1); | |
2029 | pr_debug("inum=%llu, offset=%llu, root_id=%llu\n", | |
2030 | inum, offset, root_id); | |
2031 | return PTR_ERR(root); | |
2032 | } | |
2033 | ||
2034 | key.objectid = inum; | |
2035 | key.type = BTRFS_EXTENT_DATA_KEY; | |
2036 | if (offset > (u64)-1 << 32) | |
2037 | key.offset = 0; | |
2038 | else | |
2039 | key.offset = offset; | |
2040 | ||
2041 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | |
fae7f21c | 2042 | if (WARN_ON(ret < 0)) |
38c227d8 | 2043 | return ret; |
50f1319c | 2044 | ret = 0; |
38c227d8 LB |
2045 | |
2046 | while (1) { | |
2047 | cond_resched(); | |
2048 | ||
2049 | leaf = path->nodes[0]; | |
2050 | slot = path->slots[0]; | |
2051 | ||
2052 | if (slot >= btrfs_header_nritems(leaf)) { | |
2053 | ret = btrfs_next_leaf(root, path); | |
2054 | if (ret < 0) { | |
2055 | goto out; | |
2056 | } else if (ret > 0) { | |
2057 | ret = 0; | |
2058 | goto out; | |
2059 | } | |
2060 | continue; | |
2061 | } | |
2062 | ||
2063 | path->slots[0]++; | |
2064 | ||
2065 | btrfs_item_key_to_cpu(leaf, &key, slot); | |
2066 | ||
2067 | if (key.objectid > inum) | |
2068 | goto out; | |
2069 | ||
2070 | if (key.objectid < inum || key.type != BTRFS_EXTENT_DATA_KEY) | |
2071 | continue; | |
2072 | ||
2073 | extent = btrfs_item_ptr(leaf, slot, | |
2074 | struct btrfs_file_extent_item); | |
2075 | ||
2076 | if (btrfs_file_extent_disk_bytenr(leaf, extent) != old->bytenr) | |
2077 | continue; | |
2078 | ||
e68afa49 LB |
2079 | /* |
2080 | * 'offset' refers to the exact key.offset, | |
2081 | * NOT the 'offset' field in btrfs_extent_data_ref, i.e. | |
2082 | * (key.offset - extent_offset). | |
2083 | */ | |
2084 | if (key.offset != offset) | |
38c227d8 LB |
2085 | continue; |
2086 | ||
e68afa49 | 2087 | extent_offset = btrfs_file_extent_offset(leaf, extent); |
38c227d8 | 2088 | num_bytes = btrfs_file_extent_num_bytes(leaf, extent); |
e68afa49 | 2089 | |
38c227d8 LB |
2090 | if (extent_offset >= old->extent_offset + old->offset + |
2091 | old->len || extent_offset + num_bytes <= | |
2092 | old->extent_offset + old->offset) | |
2093 | continue; | |
38c227d8 LB |
2094 | break; |
2095 | } | |
2096 | ||
2097 | backref = kmalloc(sizeof(*backref), GFP_NOFS); | |
2098 | if (!backref) { | |
2099 | ret = -ENOENT; | |
2100 | goto out; | |
2101 | } | |
2102 | ||
2103 | backref->root_id = root_id; | |
2104 | backref->inum = inum; | |
e68afa49 | 2105 | backref->file_pos = offset; |
38c227d8 LB |
2106 | backref->num_bytes = num_bytes; |
2107 | backref->extent_offset = extent_offset; | |
2108 | backref->generation = btrfs_file_extent_generation(leaf, extent); | |
2109 | backref->old = old; | |
2110 | backref_insert(&new->root, backref); | |
2111 | old->count++; | |
2112 | out: | |
2113 | btrfs_release_path(path); | |
2114 | WARN_ON(ret); | |
2115 | return ret; | |
2116 | } | |
2117 | ||
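The final continue test in record_one_backref() is a half-open interval overlap check: the candidate file extent's slice of the disk extent, [extent_offset, extent_offset + num_bytes), must intersect the old extent's referenced slice starting at old->extent_offset + old->offset. Restated as a standalone predicate (names are illustrative):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Two half-open byte ranges [a, a + a_len) and [b, b + b_len) overlap
 * iff each one starts before the other ends. */
static bool ranges_overlap(uint64_t a, uint64_t a_len, uint64_t b, uint64_t b_len)
{
        return a < b + b_len && b < a + a_len;
}

int main(void)
{
        /* record_one_backref() skips the candidate when there is no overlap. */
        assert(ranges_overlap(0, 4096, 4095, 1));     /* touch at the last byte */
        assert(!ranges_overlap(0, 4096, 4096, 4096)); /* adjacent, no overlap   */
        return 0;
}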
2118 | static noinline bool record_extent_backrefs(struct btrfs_path *path, | |
2119 | struct new_sa_defrag_extent *new) | |
2120 | { | |
2121 | struct btrfs_fs_info *fs_info = BTRFS_I(new->inode)->root->fs_info; | |
2122 | struct old_sa_defrag_extent *old, *tmp; | |
2123 | int ret; | |
2124 | ||
2125 | new->path = path; | |
2126 | ||
2127 | list_for_each_entry_safe(old, tmp, &new->head, list) { | |
e68afa49 LB |
2128 | ret = iterate_inodes_from_logical(old->bytenr + |
2129 | old->extent_offset, fs_info, | |
38c227d8 LB |
2130 | path, record_one_backref, |
2131 | old); | |
2132 | BUG_ON(ret < 0 && ret != -ENOENT); | |
2133 | ||
2134 | /* no backref to be processed for this extent */ | |
2135 | if (!old->count) { | |
2136 | list_del(&old->list); | |
2137 | kfree(old); | |
2138 | } | |
2139 | } | |
2140 | ||
2141 | if (list_empty(&new->head)) | |
2142 | return false; | |
2143 | ||
2144 | return true; | |
2145 | } | |
2146 | ||
2147 | static int relink_is_mergable(struct extent_buffer *leaf, | |
2148 | struct btrfs_file_extent_item *fi, | |
116e0024 | 2149 | struct new_sa_defrag_extent *new) |
38c227d8 | 2150 | { |
116e0024 | 2151 | if (btrfs_file_extent_disk_bytenr(leaf, fi) != new->bytenr) |
38c227d8 LB |
2152 | return 0; |
2153 | ||
2154 | if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG) | |
2155 | return 0; | |
2156 | ||
116e0024 LB |
2157 | if (btrfs_file_extent_compression(leaf, fi) != new->compress_type) |
2158 | return 0; | |
2159 | ||
2160 | if (btrfs_file_extent_encryption(leaf, fi) || | |
38c227d8 LB |
2161 | btrfs_file_extent_other_encoding(leaf, fi)) |
2162 | return 0; | |
2163 | ||
2164 | return 1; | |
2165 | } | |
2166 | ||
2167 | /* | |
2168 | * Note the backref might have changed; in that case we just return 0. | |
2169 | */ | |
2170 | static noinline int relink_extent_backref(struct btrfs_path *path, | |
2171 | struct sa_defrag_extent_backref *prev, | |
2172 | struct sa_defrag_extent_backref *backref) | |
2173 | { | |
2174 | struct btrfs_file_extent_item *extent; | |
2175 | struct btrfs_file_extent_item *item; | |
2176 | struct btrfs_ordered_extent *ordered; | |
2177 | struct btrfs_trans_handle *trans; | |
2178 | struct btrfs_fs_info *fs_info; | |
2179 | struct btrfs_root *root; | |
2180 | struct btrfs_key key; | |
2181 | struct extent_buffer *leaf; | |
2182 | struct old_sa_defrag_extent *old = backref->old; | |
2183 | struct new_sa_defrag_extent *new = old->new; | |
2184 | struct inode *src_inode = new->inode; | |
2185 | struct inode *inode; | |
2186 | struct extent_state *cached = NULL; | |
2187 | int ret = 0; | |
2188 | u64 start; | |
2189 | u64 len; | |
2190 | u64 lock_start; | |
2191 | u64 lock_end; | |
2192 | bool merge = false; | |
2193 | int index; | |
2194 | ||
2195 | if (prev && prev->root_id == backref->root_id && | |
2196 | prev->inum == backref->inum && | |
2197 | prev->file_pos + prev->num_bytes == backref->file_pos) | |
2198 | merge = true; | |
2199 | ||
2200 | /* step 1: get root */ | |
2201 | key.objectid = backref->root_id; | |
2202 | key.type = BTRFS_ROOT_ITEM_KEY; | |
2203 | key.offset = (u64)-1; | |
2204 | ||
2205 | fs_info = BTRFS_I(src_inode)->root->fs_info; | |
2206 | index = srcu_read_lock(&fs_info->subvol_srcu); | |
2207 | ||
2208 | root = btrfs_read_fs_root_no_name(fs_info, &key); | |
2209 | if (IS_ERR(root)) { | |
2210 | srcu_read_unlock(&fs_info->subvol_srcu, index); | |
2211 | if (PTR_ERR(root) == -ENOENT) | |
2212 | return 0; | |
2213 | return PTR_ERR(root); | |
2214 | } | |
38c227d8 LB |
2215 | |
2216 | /* step 2: get inode */ | |
2217 | key.objectid = backref->inum; | |
2218 | key.type = BTRFS_INODE_ITEM_KEY; | |
2219 | key.offset = 0; | |
2220 | ||
2221 | inode = btrfs_iget(fs_info->sb, &key, root, NULL); | |
2222 | if (IS_ERR(inode)) { | |
2223 | srcu_read_unlock(&fs_info->subvol_srcu, index); | |
2224 | return 0; | |
2225 | } | |
2226 | ||
2227 | srcu_read_unlock(&fs_info->subvol_srcu, index); | |
2228 | ||
2229 | /* step 3: relink backref */ | |
2230 | lock_start = backref->file_pos; | |
2231 | lock_end = backref->file_pos + backref->num_bytes - 1; | |
2232 | lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_start, lock_end, | |
2233 | 0, &cached); | |
2234 | ||
2235 | ordered = btrfs_lookup_first_ordered_extent(inode, lock_end); | |
2236 | if (ordered) { | |
2237 | btrfs_put_ordered_extent(ordered); | |
2238 | goto out_unlock; | |
2239 | } | |
2240 | ||
2241 | trans = btrfs_join_transaction(root); | |
2242 | if (IS_ERR(trans)) { | |
2243 | ret = PTR_ERR(trans); | |
2244 | goto out_unlock; | |
2245 | } | |
2246 | ||
2247 | key.objectid = backref->inum; | |
2248 | key.type = BTRFS_EXTENT_DATA_KEY; | |
2249 | key.offset = backref->file_pos; | |
2250 | ||
2251 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | |
2252 | if (ret < 0) { | |
2253 | goto out_free_path; | |
2254 | } else if (ret > 0) { | |
2255 | ret = 0; | |
2256 | goto out_free_path; | |
2257 | } | |
2258 | ||
2259 | extent = btrfs_item_ptr(path->nodes[0], path->slots[0], | |
2260 | struct btrfs_file_extent_item); | |
2261 | ||
2262 | if (btrfs_file_extent_generation(path->nodes[0], extent) != | |
2263 | backref->generation) | |
2264 | goto out_free_path; | |
2265 | ||
2266 | btrfs_release_path(path); | |
2267 | ||
2268 | start = backref->file_pos; | |
2269 | if (backref->extent_offset < old->extent_offset + old->offset) | |
2270 | start += old->extent_offset + old->offset - | |
2271 | backref->extent_offset; | |
2272 | ||
2273 | len = min(backref->extent_offset + backref->num_bytes, | |
2274 | old->extent_offset + old->offset + old->len); | |
2275 | len -= max(backref->extent_offset, old->extent_offset + old->offset); | |
2276 | ||
2277 | ret = btrfs_drop_extents(trans, root, inode, start, | |
2278 | start + len, 1); | |
2279 | if (ret) | |
2280 | goto out_free_path; | |
2281 | again: | |
2282 | key.objectid = btrfs_ino(inode); | |
2283 | key.type = BTRFS_EXTENT_DATA_KEY; | |
2284 | key.offset = start; | |
2285 | ||
a09a0a70 | 2286 | path->leave_spinning = 1; |
38c227d8 LB |
2287 | if (merge) { |
2288 | struct btrfs_file_extent_item *fi; | |
2289 | u64 extent_len; | |
2290 | struct btrfs_key found_key; | |
2291 | ||
2292 | ret = btrfs_search_slot(trans, root, &key, path, 1, 1); | |
2293 | if (ret < 0) | |
2294 | goto out_free_path; | |
2295 | ||
2296 | path->slots[0]--; | |
2297 | leaf = path->nodes[0]; | |
2298 | btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); | |
2299 | ||
2300 | fi = btrfs_item_ptr(leaf, path->slots[0], | |
2301 | struct btrfs_file_extent_item); | |
2302 | extent_len = btrfs_file_extent_num_bytes(leaf, fi); | |
2303 | ||
116e0024 LB |
2304 | if (extent_len + found_key.offset == start && |
2305 | relink_is_mergable(leaf, fi, new)) { | |
38c227d8 LB |
2306 | btrfs_set_file_extent_num_bytes(leaf, fi, |
2307 | extent_len + len); | |
2308 | btrfs_mark_buffer_dirty(leaf); | |
2309 | inode_add_bytes(inode, len); | |
2310 | ||
2311 | ret = 1; | |
2312 | goto out_free_path; | |
2313 | } else { | |
2314 | merge = false; | |
2315 | btrfs_release_path(path); | |
2316 | goto again; | |
2317 | } | |
2318 | } | |
2319 | ||
2320 | ret = btrfs_insert_empty_item(trans, root, path, &key, | |
2321 | sizeof(*extent)); | |
2322 | if (ret) { | |
2323 | btrfs_abort_transaction(trans, root, ret); | |
2324 | goto out_free_path; | |
2325 | } | |
2326 | ||
2327 | leaf = path->nodes[0]; | |
2328 | item = btrfs_item_ptr(leaf, path->slots[0], | |
2329 | struct btrfs_file_extent_item); | |
2330 | btrfs_set_file_extent_disk_bytenr(leaf, item, new->bytenr); | |
2331 | btrfs_set_file_extent_disk_num_bytes(leaf, item, new->disk_len); | |
2332 | btrfs_set_file_extent_offset(leaf, item, start - new->file_pos); | |
2333 | btrfs_set_file_extent_num_bytes(leaf, item, len); | |
2334 | btrfs_set_file_extent_ram_bytes(leaf, item, new->len); | |
2335 | btrfs_set_file_extent_generation(leaf, item, trans->transid); | |
2336 | btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG); | |
2337 | btrfs_set_file_extent_compression(leaf, item, new->compress_type); | |
2338 | btrfs_set_file_extent_encryption(leaf, item, 0); | |
2339 | btrfs_set_file_extent_other_encoding(leaf, item, 0); | |
2340 | ||
2341 | btrfs_mark_buffer_dirty(leaf); | |
2342 | inode_add_bytes(inode, len); | |
a09a0a70 | 2343 | btrfs_release_path(path); |
38c227d8 LB |
2344 | |
2345 | ret = btrfs_inc_extent_ref(trans, root, new->bytenr, | |
2346 | new->disk_len, 0, | |
2347 | backref->root_id, backref->inum, | |
2348 | new->file_pos, 0); /* start - extent_offset */ | |
2349 | if (ret) { | |
2350 | btrfs_abort_transaction(trans, root, ret); | |
2351 | goto out_free_path; | |
2352 | } | |
2353 | ||
2354 | ret = 1; | |
2355 | out_free_path: | |
2356 | btrfs_release_path(path); | |
a09a0a70 | 2357 | path->leave_spinning = 0; |
38c227d8 LB |
2358 | btrfs_end_transaction(trans, root); |
2359 | out_unlock: | |
2360 | unlock_extent_cached(&BTRFS_I(inode)->io_tree, lock_start, lock_end, | |
2361 | &cached, GFP_NOFS); | |
2362 | iput(inode); | |
2363 | return ret; | |
2364 | } | |
2365 | ||
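The start/len arithmetic in relink_extent_backref() clamps the relinked file range to the intersection of two windows into the same on-disk extent: the backref's window and the old (defragged) extent's window. A small numeric sketch of that min/max clamping, with made-up offsets:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }
static uint64_t max_u64(uint64_t a, uint64_t b) { return a > b ? a : b; }

int main(void)
{
        /* Hypothetical offsets, all in bytes within one on-disk extent. */
        uint64_t file_pos = 1 << 20;      /* where the backref maps the extent */
        uint64_t extent_offset = 0;       /* backref's offset into the extent  */
        uint64_t num_bytes = 256 * 1024;  /* backref's length                  */
        uint64_t old_start = 128 * 1024;  /* old->extent_offset + old->offset  */
        uint64_t old_len = 256 * 1024;    /* old->len                          */

        /* Same computation as relink_extent_backref(). */
        uint64_t start = file_pos;
        if (extent_offset < old_start)
                start += old_start - extent_offset;

        uint64_t len = min_u64(extent_offset + num_bytes, old_start + old_len) -
                       max_u64(extent_offset, old_start);

        /* start = 1 MiB + 128 KiB, len = 128 KiB: only the overlapping part of
         * the two windows is dropped and re-inserted. */
        printf("start=%" PRIu64 " len=%" PRIu64 "\n", start, len);
        return 0;
}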
6f519564 LB |
2366 | static void free_sa_defrag_extent(struct new_sa_defrag_extent *new) |
2367 | { | |
2368 | struct old_sa_defrag_extent *old, *tmp; | |
2369 | ||
2370 | if (!new) | |
2371 | return; | |
2372 | ||
2373 | list_for_each_entry_safe(old, tmp, &new->head, list) { | |
2374 | list_del(&old->list); | |
2375 | kfree(old); | |
2376 | } | |
2377 | kfree(new); | |
2378 | } | |
2379 | ||
38c227d8 LB |
2380 | static void relink_file_extents(struct new_sa_defrag_extent *new) |
2381 | { | |
2382 | struct btrfs_path *path; | |
38c227d8 LB |
2383 | struct sa_defrag_extent_backref *backref; |
2384 | struct sa_defrag_extent_backref *prev = NULL; | |
2385 | struct inode *inode; | |
2386 | struct btrfs_root *root; | |
2387 | struct rb_node *node; | |
2388 | int ret; | |
2389 | ||
2390 | inode = new->inode; | |
2391 | root = BTRFS_I(inode)->root; | |
2392 | ||
2393 | path = btrfs_alloc_path(); | |
2394 | if (!path) | |
2395 | return; | |
2396 | ||
2397 | if (!record_extent_backrefs(path, new)) { | |
2398 | btrfs_free_path(path); | |
2399 | goto out; | |
2400 | } | |
2401 | btrfs_release_path(path); | |
2402 | ||
2403 | while (1) { | |
2404 | node = rb_first(&new->root); | |
2405 | if (!node) | |
2406 | break; | |
2407 | rb_erase(node, &new->root); | |
2408 | ||
2409 | backref = rb_entry(node, struct sa_defrag_extent_backref, node); | |
2410 | ||
2411 | ret = relink_extent_backref(path, prev, backref); | |
2412 | WARN_ON(ret < 0); | |
2413 | ||
2414 | kfree(prev); | |
2415 | ||
2416 | if (ret == 1) | |
2417 | prev = backref; | |
2418 | else | |
2419 | prev = NULL; | |
2420 | cond_resched(); | |
2421 | } | |
2422 | kfree(prev); | |
2423 | ||
2424 | btrfs_free_path(path); | |
38c227d8 | 2425 | out: |
6f519564 LB |
2426 | free_sa_defrag_extent(new); |
2427 | ||
38c227d8 LB |
2428 | atomic_dec(&root->fs_info->defrag_running); |
2429 | wake_up(&root->fs_info->transaction_wait); | |
38c227d8 LB |
2430 | } |
2431 | ||
2432 | static struct new_sa_defrag_extent * | |
2433 | record_old_file_extents(struct inode *inode, | |
2434 | struct btrfs_ordered_extent *ordered) | |
2435 | { | |
2436 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
2437 | struct btrfs_path *path; | |
2438 | struct btrfs_key key; | |
6f519564 | 2439 | struct old_sa_defrag_extent *old; |
38c227d8 LB |
2440 | struct new_sa_defrag_extent *new; |
2441 | int ret; | |
2442 | ||
2443 | new = kmalloc(sizeof(*new), GFP_NOFS); | |
2444 | if (!new) | |
2445 | return NULL; | |
2446 | ||
2447 | new->inode = inode; | |
2448 | new->file_pos = ordered->file_offset; | |
2449 | new->len = ordered->len; | |
2450 | new->bytenr = ordered->start; | |
2451 | new->disk_len = ordered->disk_len; | |
2452 | new->compress_type = ordered->compress_type; | |
2453 | new->root = RB_ROOT; | |
2454 | INIT_LIST_HEAD(&new->head); | |
2455 | ||
2456 | path = btrfs_alloc_path(); | |
2457 | if (!path) | |
2458 | goto out_kfree; | |
2459 | ||
2460 | key.objectid = btrfs_ino(inode); | |
2461 | key.type = BTRFS_EXTENT_DATA_KEY; | |
2462 | key.offset = new->file_pos; | |
2463 | ||
2464 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | |
2465 | if (ret < 0) | |
2466 | goto out_free_path; | |
2467 | if (ret > 0 && path->slots[0] > 0) | |
2468 | path->slots[0]--; | |
2469 | ||
2470 | /* find out all the old extents for the file range */ | |
2471 | while (1) { | |
2472 | struct btrfs_file_extent_item *extent; | |
2473 | struct extent_buffer *l; | |
2474 | int slot; | |
2475 | u64 num_bytes; | |
2476 | u64 offset; | |
2477 | u64 end; | |
2478 | u64 disk_bytenr; | |
2479 | u64 extent_offset; | |
2480 | ||
2481 | l = path->nodes[0]; | |
2482 | slot = path->slots[0]; | |
2483 | ||
2484 | if (slot >= btrfs_header_nritems(l)) { | |
2485 | ret = btrfs_next_leaf(root, path); | |
2486 | if (ret < 0) | |
6f519564 | 2487 | goto out_free_path; |
38c227d8 LB |
2488 | else if (ret > 0) |
2489 | break; | |
2490 | continue; | |
2491 | } | |
2492 | ||
2493 | btrfs_item_key_to_cpu(l, &key, slot); | |
2494 | ||
2495 | if (key.objectid != btrfs_ino(inode)) | |
2496 | break; | |
2497 | if (key.type != BTRFS_EXTENT_DATA_KEY) | |
2498 | break; | |
2499 | if (key.offset >= new->file_pos + new->len) | |
2500 | break; | |
2501 | ||
2502 | extent = btrfs_item_ptr(l, slot, struct btrfs_file_extent_item); | |
2503 | ||
2504 | num_bytes = btrfs_file_extent_num_bytes(l, extent); | |
2505 | if (key.offset + num_bytes < new->file_pos) | |
2506 | goto next; | |
2507 | ||
2508 | disk_bytenr = btrfs_file_extent_disk_bytenr(l, extent); | |
2509 | if (!disk_bytenr) | |
2510 | goto next; | |
2511 | ||
2512 | extent_offset = btrfs_file_extent_offset(l, extent); | |
2513 | ||
2514 | old = kmalloc(sizeof(*old), GFP_NOFS); | |
2515 | if (!old) | |
6f519564 | 2516 | goto out_free_path; |
38c227d8 LB |
2517 | |
2518 | offset = max(new->file_pos, key.offset); | |
2519 | end = min(new->file_pos + new->len, key.offset + num_bytes); | |
2520 | ||
2521 | old->bytenr = disk_bytenr; | |
2522 | old->extent_offset = extent_offset; | |
2523 | old->offset = offset - key.offset; | |
2524 | old->len = end - offset; | |
2525 | old->new = new; | |
2526 | old->count = 0; | |
2527 | list_add_tail(&old->list, &new->head); | |
2528 | next: | |
2529 | path->slots[0]++; | |
2530 | cond_resched(); | |
2531 | } | |
2532 | ||
2533 | btrfs_free_path(path); | |
2534 | atomic_inc(&root->fs_info->defrag_running); | |
2535 | ||
2536 | return new; | |
2537 | ||
38c227d8 LB |
2538 | out_free_path: |
2539 | btrfs_free_path(path); | |
2540 | out_kfree: | |
6f519564 | 2541 | free_sa_defrag_extent(new); |
38c227d8 LB |
2542 | return NULL; |
2543 | } | |
2544 | ||
5d13a98f CM |
2545 | /* |
2546 | * helper function for btrfs_finish_ordered_io, this | |
2547 | * just reads in some of the csum leaves to prime them into ram | |
2548 | * before we start the transaction. It limits the amount of btree | |
2549 | * reads required while inside the transaction. | |
2550 | */ | |
d352ac68 CM |
2551 | /* as ordered data IO finishes, this gets called so we can finish |
2552 | * an ordered extent if the range of bytes in the file it covers are | |
2553 | * fully written. | |
2554 | */ | |
5fd02043 | 2555 | static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent) |
e6dcd2dc | 2556 | { |
5fd02043 | 2557 | struct inode *inode = ordered_extent->inode; |
e6dcd2dc | 2558 | struct btrfs_root *root = BTRFS_I(inode)->root; |
0ca1f7ce | 2559 | struct btrfs_trans_handle *trans = NULL; |
e6dcd2dc | 2560 | struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; |
2ac55d41 | 2561 | struct extent_state *cached_state = NULL; |
38c227d8 | 2562 | struct new_sa_defrag_extent *new = NULL; |
261507a0 | 2563 | int compress_type = 0; |
77cef2ec JB |
2564 | int ret = 0; |
2565 | u64 logical_len = ordered_extent->len; | |
82d5902d | 2566 | bool nolock; |
77cef2ec | 2567 | bool truncated = false; |
e6dcd2dc | 2568 | |
83eea1f1 | 2569 | nolock = btrfs_is_free_space_inode(inode); |
0cb59c99 | 2570 | |
5fd02043 JB |
2571 | if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) { |
2572 | ret = -EIO; | |
2573 | goto out; | |
2574 | } | |
2575 | ||
77cef2ec JB |
2576 | if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) { |
2577 | truncated = true; | |
2578 | logical_len = ordered_extent->truncated_len; | |
2579 | /* Truncated the entire extent, don't bother adding */ | |
2580 | if (!logical_len) | |
2581 | goto out; | |
2582 | } | |
2583 | ||
c2167754 | 2584 | if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) { |
79787eaa | 2585 | BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */ |
6c760c07 JB |
2586 | btrfs_ordered_update_i_size(inode, 0, ordered_extent); |
2587 | if (nolock) | |
2588 | trans = btrfs_join_transaction_nolock(root); | |
2589 | else | |
2590 | trans = btrfs_join_transaction(root); | |
2591 | if (IS_ERR(trans)) { | |
2592 | ret = PTR_ERR(trans); | |
2593 | trans = NULL; | |
2594 | goto out; | |
c2167754 | 2595 | } |
6c760c07 JB |
2596 | trans->block_rsv = &root->fs_info->delalloc_block_rsv; |
2597 | ret = btrfs_update_inode_fallback(trans, root, inode); | |
2598 | if (ret) /* -ENOMEM or corruption */ | |
2599 | btrfs_abort_transaction(trans, root, ret); | |
c2167754 YZ |
2600 | goto out; |
2601 | } | |
e6dcd2dc | 2602 | |
2ac55d41 JB |
2603 | lock_extent_bits(io_tree, ordered_extent->file_offset, |
2604 | ordered_extent->file_offset + ordered_extent->len - 1, | |
d0082371 | 2605 | 0, &cached_state); |
e6dcd2dc | 2606 | |
38c227d8 LB |
2607 | ret = test_range_bit(io_tree, ordered_extent->file_offset, |
2608 | ordered_extent->file_offset + ordered_extent->len - 1, | |
2609 | EXTENT_DEFRAG, 1, cached_state); | |
2610 | if (ret) { | |
2611 | u64 last_snapshot = btrfs_root_last_snapshot(&root->root_item); | |
2612 | if (last_snapshot >= BTRFS_I(inode)->generation) | |
2613 | /* the inode is shared */ | |
2614 | new = record_old_file_extents(inode, ordered_extent); | |
2615 | ||
2616 | clear_extent_bit(io_tree, ordered_extent->file_offset, | |
2617 | ordered_extent->file_offset + ordered_extent->len - 1, | |
2618 | EXTENT_DEFRAG, 0, 0, &cached_state, GFP_NOFS); | |
2619 | } | |
2620 | ||
0cb59c99 | 2621 | if (nolock) |
7a7eaa40 | 2622 | trans = btrfs_join_transaction_nolock(root); |
0cb59c99 | 2623 | else |
7a7eaa40 | 2624 | trans = btrfs_join_transaction(root); |
79787eaa JM |
2625 | if (IS_ERR(trans)) { |
2626 | ret = PTR_ERR(trans); | |
2627 | trans = NULL; | |
2628 | goto out_unlock; | |
2629 | } | |
0ca1f7ce | 2630 | trans->block_rsv = &root->fs_info->delalloc_block_rsv; |
c2167754 | 2631 | |
c8b97818 | 2632 | if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags)) |
261507a0 | 2633 | compress_type = ordered_extent->compress_type; |
d899e052 | 2634 | if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { |
261507a0 | 2635 | BUG_ON(compress_type); |
920bbbfb | 2636 | ret = btrfs_mark_extent_written(trans, inode, |
d899e052 YZ |
2637 | ordered_extent->file_offset, |
2638 | ordered_extent->file_offset + | |
77cef2ec | 2639 | logical_len); |
d899e052 | 2640 | } else { |
0af3d00b | 2641 | BUG_ON(root == root->fs_info->tree_root); |
d899e052 YZ |
2642 | ret = insert_reserved_file_extent(trans, inode, |
2643 | ordered_extent->file_offset, | |
2644 | ordered_extent->start, | |
2645 | ordered_extent->disk_len, | |
77cef2ec | 2646 | logical_len, logical_len, |
261507a0 | 2647 | compress_type, 0, 0, |
d899e052 | 2648 | BTRFS_FILE_EXTENT_REG); |
d899e052 | 2649 | } |
5dc562c5 JB |
2650 | unpin_extent_cache(&BTRFS_I(inode)->extent_tree, |
2651 | ordered_extent->file_offset, ordered_extent->len, | |
2652 | trans->transid); | |
79787eaa JM |
2653 | if (ret < 0) { |
2654 | btrfs_abort_transaction(trans, root, ret); | |
5fd02043 | 2655 | goto out_unlock; |
79787eaa | 2656 | } |
2ac55d41 | 2657 | |
e6dcd2dc CM |
2658 | add_pending_csums(trans, inode, ordered_extent->file_offset, |
2659 | &ordered_extent->list); | |
2660 | ||
6c760c07 JB |
2661 | btrfs_ordered_update_i_size(inode, 0, ordered_extent); |
2662 | ret = btrfs_update_inode_fallback(trans, root, inode); | |
2663 | if (ret) { /* -ENOMEM or corruption */ | |
2664 | btrfs_abort_transaction(trans, root, ret); | |
2665 | goto out_unlock; | |
1ef30be1 JB |
2666 | } |
2667 | ret = 0; | |
5fd02043 JB |
2668 | out_unlock: |
2669 | unlock_extent_cached(io_tree, ordered_extent->file_offset, | |
2670 | ordered_extent->file_offset + | |
2671 | ordered_extent->len - 1, &cached_state, GFP_NOFS); | |
c2167754 | 2672 | out: |
5b0e95bf | 2673 | if (root != root->fs_info->tree_root) |
0cb59c99 | 2674 | btrfs_delalloc_release_metadata(inode, ordered_extent->len); |
a698d075 MX |
2675 | if (trans) |
2676 | btrfs_end_transaction(trans, root); | |
0cb59c99 | 2677 | |
77cef2ec JB |
2678 | if (ret || truncated) { |
2679 | u64 start, end; | |
2680 | ||
2681 | if (truncated) | |
2682 | start = ordered_extent->file_offset + logical_len; | |
2683 | else | |
2684 | start = ordered_extent->file_offset; | |
2685 | end = ordered_extent->file_offset + ordered_extent->len - 1; | |
2686 | clear_extent_uptodate(io_tree, start, end, NULL, GFP_NOFS); | |
2687 | ||
2688 | /* Drop the cache for the part of the extent we didn't write. */ | |
2689 | btrfs_drop_extent_cache(inode, start, end, 0); | |
5fd02043 | 2690 | |
0bec9ef5 JB |
2691 | /* |
2692 | * If the ordered extent had an IOERR or something else went | |
2693 | * wrong we need to return the space for this ordered extent | |
77cef2ec JB |
2694 | * back to the allocator. We only free the extent in the |
2695 | * truncated case if we didn't write out the extent at all. | |
0bec9ef5 | 2696 | */ |
77cef2ec JB |
2697 | if ((ret || !logical_len) && |
2698 | !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) && | |
0bec9ef5 JB |
2699 | !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) |
2700 | btrfs_free_reserved_extent(root, ordered_extent->start, | |
2701 | ordered_extent->disk_len); | |
2702 | } | |
2703 | ||
2704 | ||
5fd02043 | 2705 | /* |
8bad3c02 LB |
2706 | * This needs to be done to make sure anybody waiting knows we are done |
2707 | * updating everything for this ordered extent. | |
5fd02043 JB |
2708 | */ |
2709 | btrfs_remove_ordered_extent(inode, ordered_extent); | |
2710 | ||
38c227d8 | 2711 | /* for snapshot-aware defrag */ |
6f519564 LB |
2712 | if (new) { |
2713 | if (ret) { | |
2714 | free_sa_defrag_extent(new); | |
2715 | atomic_dec(&root->fs_info->defrag_running); | |
2716 | } else { | |
2717 | relink_file_extents(new); | |
2718 | } | |
2719 | } | |
38c227d8 | 2720 | |
e6dcd2dc CM |
2721 | /* once for us */ |
2722 | btrfs_put_ordered_extent(ordered_extent); | |
2723 | /* once for the tree */ | |
2724 | btrfs_put_ordered_extent(ordered_extent); | |
2725 | ||
5fd02043 JB |
2726 | return ret; |
2727 | } | |
2728 | ||
2729 | static void finish_ordered_fn(struct btrfs_work *work) | |
2730 | { | |
2731 | struct btrfs_ordered_extent *ordered_extent; | |
2732 | ordered_extent = container_of(work, struct btrfs_ordered_extent, work); | |
2733 | btrfs_finish_ordered_io(ordered_extent); | |
e6dcd2dc CM |
2734 | } |
2735 | ||
b2950863 | 2736 | static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end, |
211f90e6 CM |
2737 | struct extent_state *state, int uptodate) |
2738 | { | |
5fd02043 JB |
2739 | struct inode *inode = page->mapping->host; |
2740 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
2741 | struct btrfs_ordered_extent *ordered_extent = NULL; | |
2742 | struct btrfs_workers *workers; | |
2743 | ||
1abe9b8a | 2744 | trace_btrfs_writepage_end_io_hook(page, start, end, uptodate); |
2745 | ||
8b62b72b | 2746 | ClearPagePrivate2(page); |
5fd02043 JB |
2747 | if (!btrfs_dec_test_ordered_pending(inode, &ordered_extent, start, |
2748 | end - start + 1, uptodate)) | |
2749 | return 0; | |
2750 | ||
2751 | ordered_extent->work.func = finish_ordered_fn; | |
2752 | ordered_extent->work.flags = 0; | |
2753 | ||
83eea1f1 | 2754 | if (btrfs_is_free_space_inode(inode)) |
5fd02043 JB |
2755 | workers = &root->fs_info->endio_freespace_worker; |
2756 | else | |
2757 | workers = &root->fs_info->endio_write_workers; | |
2758 | btrfs_queue_worker(workers, &ordered_extent->work); | |
2759 | ||
2760 | return 0; | |
211f90e6 CM |
2761 | } |
2762 | ||
d352ac68 CM |
2763 | /* |
2764 | * when reads are done, we need to check csums to verify the data is correct. | |
4a54c8c1 JS |
2765 | * If there's a match, we allow the bio to finish. If not, the code in | |
2766 | * extent_io.c will try to find good copies for us. | |
d352ac68 | 2767 | */ |
facc8a22 MX |
2768 | static int btrfs_readpage_end_io_hook(struct btrfs_io_bio *io_bio, |
2769 | u64 phy_offset, struct page *page, | |
2770 | u64 start, u64 end, int mirror) | |
07157aac | 2771 | { |
4eee4fa4 | 2772 | size_t offset = start - page_offset(page); |
07157aac | 2773 | struct inode *inode = page->mapping->host; |
d1310b2e | 2774 | struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; |
07157aac | 2775 | char *kaddr; |
ff79f819 | 2776 | struct btrfs_root *root = BTRFS_I(inode)->root; |
facc8a22 | 2777 | u32 csum_expected; |
ff79f819 | 2778 | u32 csum = ~(u32)0; |
c2cf52eb SK |
2779 | static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL, |
2780 | DEFAULT_RATELIMIT_BURST); | |
d1310b2e | 2781 | |
d20f7043 CM |
2782 | if (PageChecked(page)) { |
2783 | ClearPageChecked(page); | |
2784 | goto good; | |
2785 | } | |
6cbff00f CH |
2786 | |
2787 | if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) | |
08d2f347 | 2788 | goto good; |
17d217fe YZ |
2789 | |
2790 | if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID && | |
9655d298 | 2791 | test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) { |
17d217fe YZ |
2792 | clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM, |
2793 | GFP_NOFS); | |
b6cda9bc | 2794 | return 0; |
17d217fe | 2795 | } |
d20f7043 | 2796 | |
facc8a22 MX |
2797 | phy_offset >>= inode->i_sb->s_blocksize_bits; |
2798 | csum_expected = *(((u32 *)io_bio->csum) + phy_offset); | |
d397712b | 2799 | |
facc8a22 | 2800 | kaddr = kmap_atomic(page); |
b0496686 | 2801 | csum = btrfs_csum_data(kaddr + offset, csum, end - start + 1); |
ff79f819 | 2802 | btrfs_csum_final(csum, (char *)&csum); |
facc8a22 | 2803 | if (csum != csum_expected) |
07157aac | 2804 | goto zeroit; |
d397712b | 2805 | |
7ac687d9 | 2806 | kunmap_atomic(kaddr); |
d20f7043 | 2807 | good: |
07157aac CM |
2808 | return 0; |
2809 | ||
2810 | zeroit: | |
c2cf52eb | 2811 | if (__ratelimit(&_rs)) |
facc8a22 | 2812 | btrfs_info(root->fs_info, "csum failed ino %llu off %llu csum %u expected csum %u", |
c1c9ff7c | 2813 | btrfs_ino(page->mapping->host), start, csum, csum_expected); |
db94535d CM |
2814 | memset(kaddr + offset, 1, end - start + 1); |
2815 | flush_dcache_page(page); | |
7ac687d9 | 2816 | kunmap_atomic(kaddr); |
facc8a22 | 2817 | if (csum_expected == 0) |
3b951516 | 2818 | return 0; |
7e38326f | 2819 | return -EIO; |
07157aac | 2820 | } |
b888db2b | 2821 | |
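The read-completion hook recomputes the data checksum and compares it with the value carried in io_bio->csum. btrfs uses CRC-32C seeded with ~0 and stores the bit-inverted result (see btrfs_csum_data() and btrfs_csum_final() above); a slow, bitwise user-space sketch of that checksum, for illustration only:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Bitwise CRC-32C (Castagnoli, reflected polynomial 0x82F63B78).  The kernel
 * uses an accelerated library implementation; this is just for illustration. */
static uint32_t crc32c(uint32_t crc, const void *buf, size_t len)
{
        const uint8_t *p = buf;

        while (len--) {
                crc ^= *p++;
                for (int k = 0; k < 8; k++)
                        crc = (crc >> 1) ^ (crc & 1 ? 0x82F63B78 : 0);
        }
        return crc;
}

int main(void)
{
        const char data[] = "123456789";
        /* Seed with ~0 and invert the result, mirroring the
         * btrfs_csum_data()/btrfs_csum_final() pair. */
        uint32_t csum = ~crc32c(~0u, data, strlen(data));

        printf("crc32c(\"123456789\") = 0x%08" PRIx32 "\n", csum); /* 0xe3069283 */
        return 0;
}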
24bbcf04 YZ |
2822 | struct delayed_iput { |
2823 | struct list_head list; | |
2824 | struct inode *inode; | |
2825 | }; | |
2826 | ||
79787eaa JM |
2827 | /* JDM: If this is fs-wide, why can't we add a pointer to |
2828 | * btrfs_inode instead and avoid the allocation? */ | |
24bbcf04 YZ |
2829 | void btrfs_add_delayed_iput(struct inode *inode) |
2830 | { | |
2831 | struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; | |
2832 | struct delayed_iput *delayed; | |
2833 | ||
2834 | if (atomic_add_unless(&inode->i_count, -1, 1)) | |
2835 | return; | |
2836 | ||
2837 | delayed = kmalloc(sizeof(*delayed), GFP_NOFS | __GFP_NOFAIL); | |
2838 | delayed->inode = inode; | |
2839 | ||
2840 | spin_lock(&fs_info->delayed_iput_lock); | |
2841 | list_add_tail(&delayed->list, &fs_info->delayed_iputs); | |
2842 | spin_unlock(&fs_info->delayed_iput_lock); | |
2843 | } | |
2844 | ||
2845 | void btrfs_run_delayed_iputs(struct btrfs_root *root) | |
2846 | { | |
2847 | LIST_HEAD(list); | |
2848 | struct btrfs_fs_info *fs_info = root->fs_info; | |
2849 | struct delayed_iput *delayed; | |
2850 | int empty; | |
2851 | ||
2852 | spin_lock(&fs_info->delayed_iput_lock); | |
2853 | empty = list_empty(&fs_info->delayed_iputs); | |
2854 | spin_unlock(&fs_info->delayed_iput_lock); | |
2855 | if (empty) | |
2856 | return; | |
2857 | ||
24bbcf04 YZ |
2858 | spin_lock(&fs_info->delayed_iput_lock); |
2859 | list_splice_init(&fs_info->delayed_iputs, &list); | |
2860 | spin_unlock(&fs_info->delayed_iput_lock); | |
2861 | ||
2862 | while (!list_empty(&list)) { | |
2863 | delayed = list_entry(list.next, struct delayed_iput, list); | |
2864 | list_del(&delayed->list); | |
2865 | iput(delayed->inode); | |
2866 | kfree(delayed); | |
2867 | } | |
24bbcf04 YZ |
2868 | } |
2869 | ||
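
btrfs_add_delayed_iput() defers the final iput() to a later, safer point: the inode is parked on a spinlock-protected list that btrfs_run_delayed_iputs() later splices off and drains. A small user-space sketch of the same defer-then-drain pattern follows, with a pthread mutex standing in for the spinlock; the work item and its release() callback are invented for illustration, and the sketch drains in LIFO order rather than the list_add_tail() FIFO order used above.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct deferred {
        struct deferred *next;
        void (*release)(int id);
        int id;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct deferred *pending;      /* protected by lock */

static void defer_release(void (*release)(int), int id)
{
        struct deferred *d = malloc(sizeof(*d));

        d->release = release;
        d->id = id;
        pthread_mutex_lock(&lock);
        d->next = pending;            /* park it on the pending list */
        pending = d;
        pthread_mutex_unlock(&lock);
}

static void run_deferred(void)
{
        struct deferred *list;

        pthread_mutex_lock(&lock);
        list = pending;               /* splice the whole list off ... */
        pending = NULL;
        pthread_mutex_unlock(&lock);

        while (list) {                /* ... and drain it outside the lock */
                struct deferred *d = list;

                list = d->next;
                d->release(d->id);
                free(d);
        }
}

static void drop_ref(int id) { printf("released %d\n", id); }

int main(void)
{
        defer_release(drop_ref, 1);
        defer_release(drop_ref, 2);
        run_deferred();
        return 0;
}
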
d68fc57b | 2870 | /* |
42b2aa86 | 2871 | * This is called at transaction commit time. If there are no orphan |
d68fc57b YZ |
2872 | * files in the subvolume, it removes the orphan item and frees the |
2873 | * block_rsv structure. |
2874 | */ | |
2875 | void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans, | |
2876 | struct btrfs_root *root) | |
2877 | { | |
90290e19 | 2878 | struct btrfs_block_rsv *block_rsv; |
d68fc57b YZ |
2879 | int ret; |
2880 | ||
8a35d95f | 2881 | if (atomic_read(&root->orphan_inodes) || |
d68fc57b YZ |
2882 | root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE) |
2883 | return; | |
2884 | ||
90290e19 | 2885 | spin_lock(&root->orphan_lock); |
8a35d95f | 2886 | if (atomic_read(&root->orphan_inodes)) { |
90290e19 JB |
2887 | spin_unlock(&root->orphan_lock); |
2888 | return; | |
2889 | } | |
2890 | ||
2891 | if (root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE) { | |
2892 | spin_unlock(&root->orphan_lock); | |
2893 | return; | |
2894 | } | |
2895 | ||
2896 | block_rsv = root->orphan_block_rsv; | |
2897 | root->orphan_block_rsv = NULL; | |
2898 | spin_unlock(&root->orphan_lock); | |
2899 | ||
d68fc57b YZ |
2900 | if (root->orphan_item_inserted && |
2901 | btrfs_root_refs(&root->root_item) > 0) { | |
2902 | ret = btrfs_del_orphan_item(trans, root->fs_info->tree_root, | |
2903 | root->root_key.objectid); | |
4ef31a45 JB |
2904 | if (ret) |
2905 | btrfs_abort_transaction(trans, root, ret); | |
2906 | else | |
2907 | root->orphan_item_inserted = 0; | |
d68fc57b YZ |
2908 | } |
2909 | ||
90290e19 JB |
2910 | if (block_rsv) { |
2911 | WARN_ON(block_rsv->size > 0); | |
2912 | btrfs_free_block_rsv(root, block_rsv); | |
d68fc57b YZ |
2913 | } |
2914 | } | |
2915 | ||
7b128766 JB |
2916 | /* |
2917 | * This creates an orphan entry for the given inode in case something goes | |
2918 | * wrong in the middle of an unlink/truncate. | |
d68fc57b YZ |
2919 | * |
2920 | * NOTE: the caller of this function should reserve 5 units of metadata |
2921 | * for this call. |
7b128766 JB |
2922 | */ |
2923 | int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode) | |
2924 | { | |
2925 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
d68fc57b YZ |
2926 | struct btrfs_block_rsv *block_rsv = NULL; |
2927 | int reserve = 0; | |
2928 | int insert = 0; | |
2929 | int ret; | |
7b128766 | 2930 | |
d68fc57b | 2931 | if (!root->orphan_block_rsv) { |
66d8f3dd | 2932 | block_rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP); |
b532402e TI |
2933 | if (!block_rsv) |
2934 | return -ENOMEM; | |
d68fc57b | 2935 | } |
7b128766 | 2936 | |
d68fc57b YZ |
2937 | spin_lock(&root->orphan_lock); |
2938 | if (!root->orphan_block_rsv) { | |
2939 | root->orphan_block_rsv = block_rsv; | |
2940 | } else if (block_rsv) { | |
2941 | btrfs_free_block_rsv(root, block_rsv); | |
2942 | block_rsv = NULL; | |
7b128766 | 2943 | } |
7b128766 | 2944 | |
8a35d95f JB |
2945 | if (!test_and_set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, |
2946 | &BTRFS_I(inode)->runtime_flags)) { | |
d68fc57b YZ |
2947 | #if 0 |
2948 | /* | |
2949 | * For proper ENOSPC handling, we should do orphan | |
2950 | * cleanup when mounting. But this introduces a backward |
2951 | * compatibility issue. | |
2952 | */ | |
2953 | if (!xchg(&root->orphan_item_inserted, 1)) | |
2954 | insert = 2; | |
2955 | else | |
2956 | insert = 1; | |
2957 | #endif | |
2958 | insert = 1; | |
321f0e70 | 2959 | atomic_inc(&root->orphan_inodes); |
7b128766 JB |
2960 | } |
2961 | ||
72ac3c0d JB |
2962 | if (!test_and_set_bit(BTRFS_INODE_ORPHAN_META_RESERVED, |
2963 | &BTRFS_I(inode)->runtime_flags)) | |
d68fc57b | 2964 | reserve = 1; |
d68fc57b | 2965 | spin_unlock(&root->orphan_lock); |
7b128766 | 2966 | |
d68fc57b YZ |
2967 | /* grab metadata reservation from transaction handle */ |
2968 | if (reserve) { | |
2969 | ret = btrfs_orphan_reserve_metadata(trans, inode); | |
79787eaa | 2970 | BUG_ON(ret); /* -ENOSPC in reservation; Logic error? JDM */ |
d68fc57b | 2971 | } |
7b128766 | 2972 | |
d68fc57b YZ |
2973 | /* insert an orphan item to track this unlinked/truncated file */ |
2974 | if (insert >= 1) { | |
33345d01 | 2975 | ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode)); |
4ef31a45 | 2976 | if (ret) { |
703c88e0 | 2977 | atomic_dec(&root->orphan_inodes); |
4ef31a45 JB |
2978 | if (reserve) { |
2979 | clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED, | |
2980 | &BTRFS_I(inode)->runtime_flags); | |
2981 | btrfs_orphan_release_metadata(inode); | |
2982 | } | |
2983 | if (ret != -EEXIST) { | |
e8e7cff6 JB |
2984 | clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, |
2985 | &BTRFS_I(inode)->runtime_flags); | |
4ef31a45 JB |
2986 | btrfs_abort_transaction(trans, root, ret); |
2987 | return ret; | |
2988 | } | |
79787eaa JM |
2989 | } |
2990 | ret = 0; | |
d68fc57b YZ |
2991 | } |
2992 | ||
2993 | /* insert an orphan item to track that the subvolume contains orphan files */ |
2994 | if (insert >= 2) { | |
2995 | ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root, | |
2996 | root->root_key.objectid); | |
79787eaa JM |
2997 | if (ret && ret != -EEXIST) { |
2998 | btrfs_abort_transaction(trans, root, ret); | |
2999 | return ret; | |
3000 | } | |
d68fc57b YZ |
3001 | } |
3002 | return 0; | |
7b128766 JB |
3003 | } |
3004 | ||
3005 | /* | |
3006 | * We have done the truncate/delete so we can go ahead and remove the orphan | |
3007 | * item for this particular inode. | |
3008 | */ | |
48a3b636 ES |
3009 | static int btrfs_orphan_del(struct btrfs_trans_handle *trans, |
3010 | struct inode *inode) | |
7b128766 JB |
3011 | { |
3012 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
d68fc57b YZ |
3013 | int delete_item = 0; |
3014 | int release_rsv = 0; | |
7b128766 JB |
3015 | int ret = 0; |
3016 | ||
d68fc57b | 3017 | spin_lock(&root->orphan_lock); |
8a35d95f JB |
3018 | if (test_and_clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, |
3019 | &BTRFS_I(inode)->runtime_flags)) | |
d68fc57b | 3020 | delete_item = 1; |
7b128766 | 3021 | |
72ac3c0d JB |
3022 | if (test_and_clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED, |
3023 | &BTRFS_I(inode)->runtime_flags)) | |
d68fc57b | 3024 | release_rsv = 1; |
d68fc57b | 3025 | spin_unlock(&root->orphan_lock); |
7b128766 | 3026 | |
703c88e0 | 3027 | if (delete_item) { |
8a35d95f | 3028 | atomic_dec(&root->orphan_inodes); |
703c88e0 FDBM |
3029 | if (trans) |
3030 | ret = btrfs_del_orphan_item(trans, root, | |
3031 | btrfs_ino(inode)); | |
8a35d95f | 3032 | } |
7b128766 | 3033 | |
703c88e0 FDBM |
3034 | if (release_rsv) |
3035 | btrfs_orphan_release_metadata(inode); | |
3036 | ||
4ef31a45 | 3037 | return ret; |
7b128766 JB |
3038 | } |
3039 | ||
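
btrfs_orphan_add() and btrfs_orphan_del() bracket an unlink or truncate, and the test_and_set_bit()/test_and_clear_bit() pair on the inode's runtime flags keeps the orphan item and its metadata reservation accounted exactly once even if the helpers run more than once. A user-space sketch of that pairing built on the GCC/Clang __atomic builtins follows; the flag bits, the counter and the printouts are hypothetical stand-ins for the real orphan item and reservation.

#include <stdio.h>

#define FLAG_HAS_ORPHAN_ITEM   0
#define FLAG_META_RESERVED     1

static unsigned long runtime_flags;
static int orphan_count;

/* returns the previous value of the bit, like test_and_set_bit() */
static int test_and_set_flag(int bit)
{
        return (__atomic_fetch_or(&runtime_flags, 1UL << bit,
                                  __ATOMIC_SEQ_CST) >> bit) & 1;
}

static int test_and_clear_flag(int bit)
{
        return (__atomic_fetch_and(&runtime_flags, ~(1UL << bit),
                                   __ATOMIC_SEQ_CST) >> bit) & 1;
}

static void orphan_add(void)
{
        if (!test_and_set_flag(FLAG_HAS_ORPHAN_ITEM))
                orphan_count++;          /* insert the orphan item exactly once */
        if (!test_and_set_flag(FLAG_META_RESERVED))
                printf("reserved metadata\n");
}

static void orphan_del(void)
{
        if (test_and_clear_flag(FLAG_HAS_ORPHAN_ITEM))
                orphan_count--;          /* delete only if it was inserted */
        if (test_and_clear_flag(FLAG_META_RESERVED))
                printf("released metadata\n");
}

int main(void)
{
        orphan_add();
        orphan_add();                    /* second call is a no-op */
        orphan_del();
        printf("orphan_count = %d\n", orphan_count);
        return 0;
}
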
3040 | /* | |
3041 | * this cleans up any orphans that may be left on the list from the last use | |
3042 | * of this root. | |
3043 | */ | |
66b4ffd1 | 3044 | int btrfs_orphan_cleanup(struct btrfs_root *root) |
7b128766 JB |
3045 | { |
3046 | struct btrfs_path *path; | |
3047 | struct extent_buffer *leaf; | |
7b128766 JB |
3048 | struct btrfs_key key, found_key; |
3049 | struct btrfs_trans_handle *trans; | |
3050 | struct inode *inode; | |
8f6d7f4f | 3051 | u64 last_objectid = 0; |
7b128766 JB |
3052 | int ret = 0, nr_unlink = 0, nr_truncate = 0; |
3053 | ||
d68fc57b | 3054 | if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED)) |
66b4ffd1 | 3055 | return 0; |
c71bf099 YZ |
3056 | |
3057 | path = btrfs_alloc_path(); | |
66b4ffd1 JB |
3058 | if (!path) { |
3059 | ret = -ENOMEM; | |
3060 | goto out; | |
3061 | } | |
7b128766 JB |
3062 | path->reada = -1; |
3063 | ||
3064 | key.objectid = BTRFS_ORPHAN_OBJECTID; | |
3065 | btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY); | |
3066 | key.offset = (u64)-1; | |
3067 | ||
7b128766 JB |
3068 | while (1) { |
3069 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | |
66b4ffd1 JB |
3070 | if (ret < 0) |
3071 | goto out; | |
7b128766 JB |
3072 | |
3073 | /* | |
3074 | * ret == 0 means we found what we were searching for, which |
25985edc | 3075 | * is weird, but possible, so only screw with the path if we didn't |
7b128766 JB |
3076 | * find the key and see if we have stuff that matches |
3077 | */ | |
3078 | if (ret > 0) { | |
66b4ffd1 | 3079 | ret = 0; |
7b128766 JB |
3080 | if (path->slots[0] == 0) |
3081 | break; | |
3082 | path->slots[0]--; | |
3083 | } | |
3084 | ||
3085 | /* pull out the item */ | |
3086 | leaf = path->nodes[0]; | |
7b128766 JB |
3087 | btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); |
3088 | ||
3089 | /* make sure the item matches what we want */ | |
3090 | if (found_key.objectid != BTRFS_ORPHAN_OBJECTID) | |
3091 | break; | |
3092 | if (btrfs_key_type(&found_key) != BTRFS_ORPHAN_ITEM_KEY) | |
3093 | break; | |
3094 | ||
3095 | /* release the path since we're done with it */ | |
b3b4aa74 | 3096 | btrfs_release_path(path); |
7b128766 JB |
3097 | |
3098 | /* | |
3099 | * this is where we are basically btrfs_lookup, without the | |
3100 | * crossing root thing. we store the inode number in the | |
3101 | * offset of the orphan item. | |
3102 | */ | |
8f6d7f4f JB |
3103 | |
3104 | if (found_key.offset == last_objectid) { | |
c2cf52eb SK |
3105 | btrfs_err(root->fs_info, |
3106 | "Error removing orphan entry, stopping orphan cleanup"); | |
8f6d7f4f JB |
3107 | ret = -EINVAL; |
3108 | goto out; | |
3109 | } | |
3110 | ||
3111 | last_objectid = found_key.offset; | |
3112 | ||
5d4f98a2 YZ |
3113 | found_key.objectid = found_key.offset; |
3114 | found_key.type = BTRFS_INODE_ITEM_KEY; | |
3115 | found_key.offset = 0; | |
73f73415 | 3116 | inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL); |
8c6ffba0 | 3117 | ret = PTR_ERR_OR_ZERO(inode); |
a8c9e576 | 3118 | if (ret && ret != -ESTALE) |
66b4ffd1 | 3119 | goto out; |
7b128766 | 3120 | |
f8e9e0b0 AJ |
3121 | if (ret == -ESTALE && root == root->fs_info->tree_root) { |
3122 | struct btrfs_root *dead_root; | |
3123 | struct btrfs_fs_info *fs_info = root->fs_info; | |
3124 | int is_dead_root = 0; | |
3125 | ||
3126 | /* | |
3127 | * this is an orphan in the tree root. Currently these | |
3128 | * could come from 2 sources: | |
3129 | * a) a snapshot deletion in progress | |
3130 | * b) a free space cache inode | |
3131 | * We need to distinguish those two, as the snapshot | |
3132 | * orphan must not get deleted. | |
3133 | * find_dead_roots already ran before us, so if this | |
3134 | * is a snapshot deletion, we should find the root | |
3135 | * in the dead_roots list | |
3136 | */ | |
3137 | spin_lock(&fs_info->trans_lock); | |
3138 | list_for_each_entry(dead_root, &fs_info->dead_roots, | |
3139 | root_list) { | |
3140 | if (dead_root->root_key.objectid == | |
3141 | found_key.objectid) { | |
3142 | is_dead_root = 1; | |
3143 | break; | |
3144 | } | |
3145 | } | |
3146 | spin_unlock(&fs_info->trans_lock); | |
3147 | if (is_dead_root) { | |
3148 | /* prevent this orphan from being found again */ | |
3149 | key.offset = found_key.objectid - 1; | |
3150 | continue; | |
3151 | } | |
3152 | } | |
7b128766 | 3153 | /* |
a8c9e576 JB |
3154 | * Inode is already gone but the orphan item is still there, |
3155 | * kill the orphan item. | |
7b128766 | 3156 | */ |
a8c9e576 JB |
3157 | if (ret == -ESTALE) { |
3158 | trans = btrfs_start_transaction(root, 1); | |
66b4ffd1 JB |
3159 | if (IS_ERR(trans)) { |
3160 | ret = PTR_ERR(trans); | |
3161 | goto out; | |
3162 | } | |
c2cf52eb SK |
3163 | btrfs_debug(root->fs_info, "auto deleting %Lu", |
3164 | found_key.objectid); | |
a8c9e576 JB |
3165 | ret = btrfs_del_orphan_item(trans, root, |
3166 | found_key.objectid); | |
5b21f2ed | 3167 | btrfs_end_transaction(trans, root); |
4ef31a45 JB |
3168 | if (ret) |
3169 | goto out; | |
7b128766 JB |
3170 | continue; |
3171 | } | |
3172 | ||
a8c9e576 JB |
3173 | /* |
3174 | * add this inode to the orphan list so btrfs_orphan_del does | |
3175 | * the proper thing when we hit it | |
3176 | */ | |
8a35d95f JB |
3177 | set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, |
3178 | &BTRFS_I(inode)->runtime_flags); | |
925396ec | 3179 | atomic_inc(&root->orphan_inodes); |
a8c9e576 | 3180 | |
7b128766 JB |
3181 | /* if we have links, this was a truncate, lets do that */ |
3182 | if (inode->i_nlink) { | |
fae7f21c | 3183 | if (WARN_ON(!S_ISREG(inode->i_mode))) { |
a41ad394 JB |
3184 | iput(inode); |
3185 | continue; | |
3186 | } | |
7b128766 | 3187 | nr_truncate++; |
f3fe820c JB |
3188 | |
3189 | /* 1 for the orphan item deletion. */ | |
3190 | trans = btrfs_start_transaction(root, 1); | |
3191 | if (IS_ERR(trans)) { | |
c69b26b0 | 3192 | iput(inode); |
f3fe820c JB |
3193 | ret = PTR_ERR(trans); |
3194 | goto out; | |
3195 | } | |
3196 | ret = btrfs_orphan_add(trans, inode); | |
3197 | btrfs_end_transaction(trans, root); | |
c69b26b0 JB |
3198 | if (ret) { |
3199 | iput(inode); | |
f3fe820c | 3200 | goto out; |
c69b26b0 | 3201 | } |
f3fe820c | 3202 | |
66b4ffd1 | 3203 | ret = btrfs_truncate(inode); |
4a7d0f68 JB |
3204 | if (ret) |
3205 | btrfs_orphan_del(NULL, inode); | |
7b128766 JB |
3206 | } else { |
3207 | nr_unlink++; | |
3208 | } | |
3209 | ||
3210 | /* this will do delete_inode and everything for us */ | |
3211 | iput(inode); | |
66b4ffd1 JB |
3212 | if (ret) |
3213 | goto out; | |
7b128766 | 3214 | } |
3254c876 MX |
3215 | /* release the path since we're done with it */ |
3216 | btrfs_release_path(path); | |
3217 | ||
d68fc57b YZ |
3218 | root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE; |
3219 | ||
3220 | if (root->orphan_block_rsv) | |
3221 | btrfs_block_rsv_release(root, root->orphan_block_rsv, | |
3222 | (u64)-1); | |
3223 | ||
3224 | if (root->orphan_block_rsv || root->orphan_item_inserted) { | |
7a7eaa40 | 3225 | trans = btrfs_join_transaction(root); |
66b4ffd1 JB |
3226 | if (!IS_ERR(trans)) |
3227 | btrfs_end_transaction(trans, root); | |
d68fc57b | 3228 | } |
7b128766 JB |
3229 | |
3230 | if (nr_unlink) | |
4884b476 | 3231 | btrfs_debug(root->fs_info, "unlinked %d orphans", nr_unlink); |
7b128766 | 3232 | if (nr_truncate) |
4884b476 | 3233 | btrfs_debug(root->fs_info, "truncated %d orphans", nr_truncate); |
66b4ffd1 JB |
3234 | |
3235 | out: | |
3236 | if (ret) | |
c2cf52eb SK |
3237 | btrfs_crit(root->fs_info, |
3238 | "could not do orphan cleanup %d", ret); | |
66b4ffd1 JB |
3239 | btrfs_free_path(path); |
3240 | return ret; | |
7b128766 JB |
3241 | } |
3242 | ||
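
Because the cleanup loop above restarts its search from the top after every orphan it handles, it remembers last_objectid and bails out with -EINVAL if the very same orphan key shows up again, i.e. if no forward progress was made. A tiny sketch of that progress guard around an ordinary retry loop; next_pending() and the item ids are made up.

#include <stdio.h>

/* returns the id of the next unfinished item, or 0 when none are left */
static unsigned long next_pending(void)
{
        static unsigned long fake[] = { 42, 42, 0 };  /* 42 never completes */
        static int i;

        return fake[i++];
}

int main(void)
{
        unsigned long last = 0, id;

        while ((id = next_pending()) != 0) {
                if (id == last) {
                        fprintf(stderr, "no progress on %lu, stopping\n", id);
                        return 1;               /* the -EINVAL case above */
                }
                last = id;
                printf("processing %lu\n", id); /* may or may not succeed */
        }
        return 0;
}
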
46a53cca CM |
3243 | /* |
3244 | * very simple check to peek ahead in the leaf looking for xattrs. If we | |
3245 | * don't find any xattrs, we know there can't be any acls. | |
3246 | * | |
3247 | * slot is the slot the inode is in, objectid is the objectid of the inode | |
3248 | */ | |
3249 | static noinline int acls_after_inode_item(struct extent_buffer *leaf, | |
3250 | int slot, u64 objectid) | |
3251 | { | |
3252 | u32 nritems = btrfs_header_nritems(leaf); | |
3253 | struct btrfs_key found_key; | |
f23b5a59 JB |
3254 | static u64 xattr_access = 0; |
3255 | static u64 xattr_default = 0; | |
46a53cca CM |
3256 | int scanned = 0; |
3257 | ||
f23b5a59 JB |
3258 | if (!xattr_access) { |
3259 | xattr_access = btrfs_name_hash(POSIX_ACL_XATTR_ACCESS, | |
3260 | strlen(POSIX_ACL_XATTR_ACCESS)); | |
3261 | xattr_default = btrfs_name_hash(POSIX_ACL_XATTR_DEFAULT, | |
3262 | strlen(POSIX_ACL_XATTR_DEFAULT)); | |
3263 | } | |
3264 | ||
46a53cca CM |
3265 | slot++; |
3266 | while (slot < nritems) { | |
3267 | btrfs_item_key_to_cpu(leaf, &found_key, slot); | |
3268 | ||
3269 | /* we found a different objectid, there must not be acls */ | |
3270 | if (found_key.objectid != objectid) | |
3271 | return 0; | |
3272 | ||
3273 | /* we found an xattr, assume we've got an acl */ | |
f23b5a59 JB |
3274 | if (found_key.type == BTRFS_XATTR_ITEM_KEY) { |
3275 | if (found_key.offset == xattr_access || | |
3276 | found_key.offset == xattr_default) | |
3277 | return 1; | |
3278 | } | |
46a53cca CM |
3279 | |
3280 | /* | |
3281 | * we found a key greater than an xattr key, there can't | |
3282 | * be any acls later on | |
3283 | */ | |
3284 | if (found_key.type > BTRFS_XATTR_ITEM_KEY) | |
3285 | return 0; | |
3286 | ||
3287 | slot++; | |
3288 | scanned++; | |
3289 | ||
3290 | /* | |
3291 | * it goes inode, inode backrefs, xattrs, extents, | |
3292 | * so if there are a ton of hard links to an inode there can | |
3293 | * be a lot of backrefs. Don't waste time searching too hard, | |
3294 | * this is just an optimization | |
3295 | */ | |
3296 | if (scanned >= 8) | |
3297 | break; | |
3298 | } | |
3299 | /* we hit the end of the leaf before we found an xattr or | |
3300 | * something larger than an xattr. We have to assume the inode | |
3301 | * has acls | |
3302 | */ | |
3303 | return 1; | |
3304 | } | |
3305 | ||
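
acls_after_inode_item() leans on the fact that items in a leaf are sorted by (objectid, type, offset): starting just past the inode item, it only needs to look at a few following slots to decide whether the POSIX ACL xattrs can possibly exist. A user-space sketch of the same bounded peek-ahead over a sorted key array follows; the key types and name-hash values are invented stand-ins for the btrfs ones.

#include <stdio.h>

struct key { unsigned long objectid; int type; unsigned long offset; };

#define INODE_ITEM  1
#define XATTR_ITEM  24
#define EXTENT_DATA 108

#define HASH_ACL_ACCESS  0x1111UL   /* stand-ins for the ACL name hashes */
#define HASH_ACL_DEFAULT 0x2222UL

/* returns 1 if the inode may have ACLs, 0 if it definitely does not */
static int acls_after_inode(const struct key *leaf, int nritems,
                            int slot, unsigned long objectid)
{
        int scanned = 0;

        for (slot++; slot < nritems; slot++) {
                if (leaf[slot].objectid != objectid)
                        return 0;            /* ran past this inode's items */
                if (leaf[slot].type == XATTR_ITEM &&
                    (leaf[slot].offset == HASH_ACL_ACCESS ||
                     leaf[slot].offset == HASH_ACL_DEFAULT))
                        return 1;            /* found an ACL xattr */
                if (leaf[slot].type > XATTR_ITEM)
                        return 0;            /* keys are sorted: no xattrs left */
                if (++scanned >= 8)
                        break;               /* bounded effort, assume the worst */
        }
        return 1;                            /* could not rule ACLs out */
}

int main(void)
{
        struct key leaf[] = {
                { 257, INODE_ITEM, 0 },
                { 257, XATTR_ITEM, HASH_ACL_ACCESS },
                { 257, EXTENT_DATA, 0 },
        };

        printf("maybe_acls = %d\n", acls_after_inode(leaf, 3, 0, 257));
        return 0;
}
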
d352ac68 CM |
3306 | /* |
3307 | * read an inode from the btree into the in-memory inode | |
3308 | */ | |
5d4f98a2 | 3309 | static void btrfs_read_locked_inode(struct inode *inode) |
39279cc3 CM |
3310 | { |
3311 | struct btrfs_path *path; | |
5f39d397 | 3312 | struct extent_buffer *leaf; |
39279cc3 | 3313 | struct btrfs_inode_item *inode_item; |
0b86a832 | 3314 | struct btrfs_timespec *tspec; |
39279cc3 CM |
3315 | struct btrfs_root *root = BTRFS_I(inode)->root; |
3316 | struct btrfs_key location; | |
46a53cca | 3317 | int maybe_acls; |
618e21d5 | 3318 | u32 rdev; |
39279cc3 | 3319 | int ret; |
2f7e33d4 MX |
3320 | bool filled = false; |
3321 | ||
3322 | ret = btrfs_fill_inode(inode, &rdev); | |
3323 | if (!ret) | |
3324 | filled = true; | |
39279cc3 CM |
3325 | |
3326 | path = btrfs_alloc_path(); | |
1748f843 MF |
3327 | if (!path) |
3328 | goto make_bad; | |
3329 | ||
d90c7321 | 3330 | path->leave_spinning = 1; |
39279cc3 | 3331 | memcpy(&location, &BTRFS_I(inode)->location, sizeof(location)); |
dc17ff8f | 3332 | |
39279cc3 | 3333 | ret = btrfs_lookup_inode(NULL, root, path, &location, 0); |
5f39d397 | 3334 | if (ret) |
39279cc3 | 3335 | goto make_bad; |
39279cc3 | 3336 | |
5f39d397 | 3337 | leaf = path->nodes[0]; |
2f7e33d4 MX |
3338 | |
3339 | if (filled) | |
3340 | goto cache_acl; | |
3341 | ||
5f39d397 CM |
3342 | inode_item = btrfs_item_ptr(leaf, path->slots[0], |
3343 | struct btrfs_inode_item); | |
5f39d397 | 3344 | inode->i_mode = btrfs_inode_mode(leaf, inode_item); |
bfe86848 | 3345 | set_nlink(inode, btrfs_inode_nlink(leaf, inode_item)); |
2f2f43d3 EB |
3346 | i_uid_write(inode, btrfs_inode_uid(leaf, inode_item)); |
3347 | i_gid_write(inode, btrfs_inode_gid(leaf, inode_item)); | |
dbe674a9 | 3348 | btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item)); |
5f39d397 CM |
3349 | |
3350 | tspec = btrfs_inode_atime(inode_item); | |
3351 | inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec); | |
3352 | inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec); | |
3353 | ||
3354 | tspec = btrfs_inode_mtime(inode_item); | |
3355 | inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec); | |
3356 | inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec); | |
3357 | ||
3358 | tspec = btrfs_inode_ctime(inode_item); | |
3359 | inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec); | |
3360 | inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec); | |
3361 | ||
a76a3cd4 | 3362 | inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item)); |
e02119d5 | 3363 | BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item); |
5dc562c5 JB |
3364 | BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item); |
3365 | ||
3366 | /* | |
3367 | * If we were modified in the current generation and evicted from memory | |
3368 | * and then re-read we need to do a full sync since we don't have any | |
3369 | * idea about which extents were modified before we were evicted from | |
3370 | * cache. | |
3371 | */ | |
3372 | if (BTRFS_I(inode)->last_trans == root->fs_info->generation) | |
3373 | set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, | |
3374 | &BTRFS_I(inode)->runtime_flags); | |
3375 | ||
0c4d2d95 | 3376 | inode->i_version = btrfs_inode_sequence(leaf, inode_item); |
e02119d5 | 3377 | inode->i_generation = BTRFS_I(inode)->generation; |
618e21d5 | 3378 | inode->i_rdev = 0; |
5f39d397 CM |
3379 | rdev = btrfs_inode_rdev(leaf, inode_item); |
3380 | ||
aec7477b | 3381 | BTRFS_I(inode)->index_cnt = (u64)-1; |
d2fb3437 | 3382 | BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item); |
2f7e33d4 | 3383 | cache_acl: |
46a53cca CM |
3384 | /* |
3385 | * try to precache a NULL acl entry for files that don't have | |
3386 | * any xattrs or acls | |
3387 | */ | |
33345d01 LZ |
3388 | maybe_acls = acls_after_inode_item(leaf, path->slots[0], |
3389 | btrfs_ino(inode)); | |
72c04902 AV |
3390 | if (!maybe_acls) |
3391 | cache_no_acl(inode); | |
46a53cca | 3392 | |
39279cc3 | 3393 | btrfs_free_path(path); |
39279cc3 | 3394 | |
39279cc3 | 3395 | switch (inode->i_mode & S_IFMT) { |
39279cc3 CM |
3396 | case S_IFREG: |
3397 | inode->i_mapping->a_ops = &btrfs_aops; | |
04160088 | 3398 | inode->i_mapping->backing_dev_info = &root->fs_info->bdi; |
d1310b2e | 3399 | BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; |
39279cc3 CM |
3400 | inode->i_fop = &btrfs_file_operations; |
3401 | inode->i_op = &btrfs_file_inode_operations; | |
3402 | break; | |
3403 | case S_IFDIR: | |
3404 | inode->i_fop = &btrfs_dir_file_operations; | |
3405 | if (root == root->fs_info->tree_root) | |
3406 | inode->i_op = &btrfs_dir_ro_inode_operations; | |
3407 | else | |
3408 | inode->i_op = &btrfs_dir_inode_operations; | |
3409 | break; | |
3410 | case S_IFLNK: | |
3411 | inode->i_op = &btrfs_symlink_inode_operations; | |
3412 | inode->i_mapping->a_ops = &btrfs_symlink_aops; | |
04160088 | 3413 | inode->i_mapping->backing_dev_info = &root->fs_info->bdi; |
39279cc3 | 3414 | break; |
618e21d5 | 3415 | default: |
0279b4cd | 3416 | inode->i_op = &btrfs_special_inode_operations; |
618e21d5 JB |
3417 | init_special_inode(inode, inode->i_mode, rdev); |
3418 | break; | |
39279cc3 | 3419 | } |
6cbff00f CH |
3420 | |
3421 | btrfs_update_iflags(inode); | |
39279cc3 CM |
3422 | return; |
3423 | ||
3424 | make_bad: | |
39279cc3 | 3425 | btrfs_free_path(path); |
39279cc3 CM |
3426 | make_bad_inode(inode); |
3427 | } | |
3428 | ||
d352ac68 CM |
3429 | /* |
3430 | * given a leaf and an inode, copy the inode fields into the leaf | |
3431 | */ | |
e02119d5 CM |
3432 | static void fill_inode_item(struct btrfs_trans_handle *trans, |
3433 | struct extent_buffer *leaf, | |
5f39d397 | 3434 | struct btrfs_inode_item *item, |
39279cc3 CM |
3435 | struct inode *inode) |
3436 | { | |
51fab693 LB |
3437 | struct btrfs_map_token token; |
3438 | ||
3439 | btrfs_init_map_token(&token); | |
5f39d397 | 3440 | |
51fab693 LB |
3441 | btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token); |
3442 | btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token); | |
3443 | btrfs_set_token_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size, | |
3444 | &token); | |
3445 | btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token); | |
3446 | btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token); | |
5f39d397 | 3447 | |
51fab693 LB |
3448 | btrfs_set_token_timespec_sec(leaf, btrfs_inode_atime(item), |
3449 | inode->i_atime.tv_sec, &token); | |
3450 | btrfs_set_token_timespec_nsec(leaf, btrfs_inode_atime(item), | |
3451 | inode->i_atime.tv_nsec, &token); | |
5f39d397 | 3452 | |
51fab693 LB |
3453 | btrfs_set_token_timespec_sec(leaf, btrfs_inode_mtime(item), |
3454 | inode->i_mtime.tv_sec, &token); | |
3455 | btrfs_set_token_timespec_nsec(leaf, btrfs_inode_mtime(item), | |
3456 | inode->i_mtime.tv_nsec, &token); | |
5f39d397 | 3457 | |
51fab693 LB |
3458 | btrfs_set_token_timespec_sec(leaf, btrfs_inode_ctime(item), |
3459 | inode->i_ctime.tv_sec, &token); | |
3460 | btrfs_set_token_timespec_nsec(leaf, btrfs_inode_ctime(item), | |
3461 | inode->i_ctime.tv_nsec, &token); | |
5f39d397 | 3462 | |
51fab693 LB |
3463 | btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode), |
3464 | &token); | |
3465 | btrfs_set_token_inode_generation(leaf, item, BTRFS_I(inode)->generation, | |
3466 | &token); | |
3467 | btrfs_set_token_inode_sequence(leaf, item, inode->i_version, &token); | |
3468 | btrfs_set_token_inode_transid(leaf, item, trans->transid, &token); | |
3469 | btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token); | |
3470 | btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token); | |
3471 | btrfs_set_token_inode_block_group(leaf, item, 0, &token); | |
39279cc3 CM |
3472 | } |
3473 | ||
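
fill_inode_item() is a straight serialization step: every field of the in-memory inode is copied into the corresponding little-endian field of the on-disk item (the map token is just an optimization for repeated accesses to the same leaf page). A user-space sketch of that struct-to-item copy with explicit little-endian stores follows; the field layout here is a made-up miniature, not the real btrfs_inode_item.

#include <stdint.h>
#include <stdio.h>

/* a tiny stand-in for the on-disk inode item: all fields little-endian */
struct disk_inode_item {
        uint8_t uid[4];
        uint8_t gid[4];
        uint8_t size[8];
        uint8_t mode[4];
        uint8_t nlink[4];
};

struct mem_inode {
        uint32_t uid, gid, mode, nlink;
        uint64_t size;
};

static void put_le32(uint8_t *p, uint32_t v)
{
        for (int i = 0; i < 4; i++)
                p[i] = v >> (8 * i);
}

static void put_le64(uint8_t *p, uint64_t v)
{
        for (int i = 0; i < 8; i++)
                p[i] = v >> (8 * i);
}

/* the fill_inode_item() idea: copy every in-memory field into the item */
static void fill_item(struct disk_inode_item *item, const struct mem_inode *inode)
{
        put_le32(item->uid, inode->uid);
        put_le32(item->gid, inode->gid);
        put_le64(item->size, inode->size);
        put_le32(item->mode, inode->mode);
        put_le32(item->nlink, inode->nlink);
}

int main(void)
{
        struct mem_inode inode = { 1000, 1000, 0100644, 1, 4096 };
        struct disk_inode_item item;

        fill_item(&item, &inode);
        printf("first byte of uid field: %u\n", item.uid[0]);
        return 0;
}
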
d352ac68 CM |
3474 | /* |
3475 | * copy everything in the in-memory inode into the btree. | |
3476 | */ | |
2115133f | 3477 | static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans, |
d397712b | 3478 | struct btrfs_root *root, struct inode *inode) |
39279cc3 CM |
3479 | { |
3480 | struct btrfs_inode_item *inode_item; | |
3481 | struct btrfs_path *path; | |
5f39d397 | 3482 | struct extent_buffer *leaf; |
39279cc3 CM |
3483 | int ret; |
3484 | ||
3485 | path = btrfs_alloc_path(); | |
16cdcec7 MX |
3486 | if (!path) |
3487 | return -ENOMEM; | |
3488 | ||
b9473439 | 3489 | path->leave_spinning = 1; |
16cdcec7 MX |
3490 | ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location, |
3491 | 1); | |
39279cc3 CM |
3492 | if (ret) { |
3493 | if (ret > 0) | |
3494 | ret = -ENOENT; | |
3495 | goto failed; | |
3496 | } | |
3497 | ||
b4ce94de | 3498 | btrfs_unlock_up_safe(path, 1); |
5f39d397 CM |
3499 | leaf = path->nodes[0]; |
3500 | inode_item = btrfs_item_ptr(leaf, path->slots[0], | |
16cdcec7 | 3501 | struct btrfs_inode_item); |
39279cc3 | 3502 | |
e02119d5 | 3503 | fill_inode_item(trans, leaf, inode_item, inode); |
5f39d397 | 3504 | btrfs_mark_buffer_dirty(leaf); |
15ee9bc7 | 3505 | btrfs_set_inode_last_trans(trans, inode); |
39279cc3 CM |
3506 | ret = 0; |
3507 | failed: | |
39279cc3 CM |
3508 | btrfs_free_path(path); |
3509 | return ret; | |
3510 | } | |
3511 | ||
2115133f CM |
3512 | /* |
3513 | * copy everything in the in-memory inode into the btree. | |
3514 | */ | |
3515 | noinline int btrfs_update_inode(struct btrfs_trans_handle *trans, | |
3516 | struct btrfs_root *root, struct inode *inode) | |
3517 | { | |
3518 | int ret; | |
3519 | ||
3520 | /* | |
3521 | * If the inode is a free space inode, we can deadlock during commit | |
3522 | * if we put it into the delayed code. | |
3523 | * | |
3524 | * The data relocation inode should also be directly updated | |
3525 | * without delay | |
3526 | */ | |
83eea1f1 | 3527 | if (!btrfs_is_free_space_inode(inode) |
2115133f | 3528 | && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) { |
8ea05e3a AB |
3529 | btrfs_update_root_times(trans, root); |
3530 | ||
2115133f CM |
3531 | ret = btrfs_delayed_update_inode(trans, root, inode); |
3532 | if (!ret) | |
3533 | btrfs_set_inode_last_trans(trans, inode); | |
3534 | return ret; | |
3535 | } | |
3536 | ||
3537 | return btrfs_update_inode_item(trans, root, inode); | |
3538 | } | |
3539 | ||
be6aef60 JB |
3540 | noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans, |
3541 | struct btrfs_root *root, | |
3542 | struct inode *inode) | |
2115133f CM |
3543 | { |
3544 | int ret; | |
3545 | ||
3546 | ret = btrfs_update_inode(trans, root, inode); | |
3547 | if (ret == -ENOSPC) | |
3548 | return btrfs_update_inode_item(trans, root, inode); | |
3549 | return ret; | |
3550 | } | |
3551 | ||
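
btrfs_update_inode_fallback() is a small try-then-fall-back wrapper: attempt the preferred (delayed) update and, only when it fails with -ENOSPC, write the inode item directly. A generic sketch of that shape follows; both update functions below are placeholders.

#include <errno.h>
#include <stdio.h>

static int update_delayed(int id)
{
        (void)id;
        return -ENOSPC;          /* pretend the reservation failed */
}

static int update_item_directly(int id)
{
        printf("updated %d via the direct path\n", id);
        return 0;
}

static int update_with_fallback(int id)
{
        int ret = update_delayed(id);

        if (ret == -ENOSPC)      /* only this error triggers the fallback */
                return update_item_directly(id);
        return ret;
}

int main(void)
{
        return update_with_fallback(257);
}
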
d352ac68 CM |
3552 | /* |
3553 | * unlink helper that gets used here in inode.c and in the tree logging | |
3554 | * recovery code. It removes a link in a directory with a given name, and |
3555 | * also drops the back refs in the inode to the directory | |
3556 | */ | |
92986796 AV |
3557 | static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans, |
3558 | struct btrfs_root *root, | |
3559 | struct inode *dir, struct inode *inode, | |
3560 | const char *name, int name_len) | |
39279cc3 CM |
3561 | { |
3562 | struct btrfs_path *path; | |
39279cc3 | 3563 | int ret = 0; |
5f39d397 | 3564 | struct extent_buffer *leaf; |
39279cc3 | 3565 | struct btrfs_dir_item *di; |
5f39d397 | 3566 | struct btrfs_key key; |
aec7477b | 3567 | u64 index; |
33345d01 LZ |
3568 | u64 ino = btrfs_ino(inode); |
3569 | u64 dir_ino = btrfs_ino(dir); | |
39279cc3 CM |
3570 | |
3571 | path = btrfs_alloc_path(); | |
54aa1f4d CM |
3572 | if (!path) { |
3573 | ret = -ENOMEM; | |
554233a6 | 3574 | goto out; |
54aa1f4d CM |
3575 | } |
3576 | ||
b9473439 | 3577 | path->leave_spinning = 1; |
33345d01 | 3578 | di = btrfs_lookup_dir_item(trans, root, path, dir_ino, |
39279cc3 CM |
3579 | name, name_len, -1); |
3580 | if (IS_ERR(di)) { | |
3581 | ret = PTR_ERR(di); | |
3582 | goto err; | |
3583 | } | |
3584 | if (!di) { | |
3585 | ret = -ENOENT; | |
3586 | goto err; | |
3587 | } | |
5f39d397 CM |
3588 | leaf = path->nodes[0]; |
3589 | btrfs_dir_item_key_to_cpu(leaf, di, &key); | |
39279cc3 | 3590 | ret = btrfs_delete_one_dir_name(trans, root, path, di); |
54aa1f4d CM |
3591 | if (ret) |
3592 | goto err; | |
b3b4aa74 | 3593 | btrfs_release_path(path); |
39279cc3 | 3594 | |
33345d01 LZ |
3595 | ret = btrfs_del_inode_ref(trans, root, name, name_len, ino, |
3596 | dir_ino, &index); | |
aec7477b | 3597 | if (ret) { |
c2cf52eb SK |
3598 | btrfs_info(root->fs_info, |
3599 | "failed to delete reference to %.*s, inode %llu parent %llu", | |
c1c9ff7c | 3600 | name_len, name, ino, dir_ino); |
79787eaa | 3601 | btrfs_abort_transaction(trans, root, ret); |
aec7477b JB |
3602 | goto err; |
3603 | } | |
3604 | ||
16cdcec7 | 3605 | ret = btrfs_delete_delayed_dir_index(trans, root, dir, index); |
79787eaa JM |
3606 | if (ret) { |
3607 | btrfs_abort_transaction(trans, root, ret); | |
39279cc3 | 3608 | goto err; |
79787eaa | 3609 | } |
39279cc3 | 3610 | |
e02119d5 | 3611 | ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len, |
33345d01 | 3612 | inode, dir_ino); |
79787eaa JM |
3613 | if (ret != 0 && ret != -ENOENT) { |
3614 | btrfs_abort_transaction(trans, root, ret); | |
3615 | goto err; | |
3616 | } | |
e02119d5 CM |
3617 | |
3618 | ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len, | |
3619 | dir, index); | |
6418c961 CM |
3620 | if (ret == -ENOENT) |
3621 | ret = 0; | |
d4e3991b ZB |
3622 | else if (ret) |
3623 | btrfs_abort_transaction(trans, root, ret); | |
39279cc3 CM |
3624 | err: |
3625 | btrfs_free_path(path); | |
e02119d5 CM |
3626 | if (ret) |
3627 | goto out; | |
3628 | ||
3629 | btrfs_i_size_write(dir, dir->i_size - name_len * 2); | |
0c4d2d95 JB |
3630 | inode_inc_iversion(inode); |
3631 | inode_inc_iversion(dir); | |
e02119d5 | 3632 | inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME; |
b9959295 | 3633 | ret = btrfs_update_inode(trans, root, dir); |
e02119d5 | 3634 | out: |
39279cc3 CM |
3635 | return ret; |
3636 | } | |
3637 | ||
92986796 AV |
3638 | int btrfs_unlink_inode(struct btrfs_trans_handle *trans, |
3639 | struct btrfs_root *root, | |
3640 | struct inode *dir, struct inode *inode, | |
3641 | const char *name, int name_len) | |
3642 | { | |
3643 | int ret; | |
3644 | ret = __btrfs_unlink_inode(trans, root, dir, inode, name, name_len); | |
3645 | if (!ret) { | |
8b558c5f | 3646 | drop_nlink(inode); |
92986796 AV |
3647 | ret = btrfs_update_inode(trans, root, inode); |
3648 | } | |
3649 | return ret; | |
3650 | } | |
39279cc3 | 3651 | |
a22285a6 YZ |
3652 | /* |
3653 | * helper to start transaction for unlink and rmdir. | |
3654 | * | |
d52be818 JB |
3655 | * unlink and rmdir are special in btrfs, they do not always free space, so |
3656 | * if we cannot make our reservations the normal way, try and see if there is |
3657 | * plenty of slack room in the global reserve to migrate; otherwise we cannot |
3658 | * allow the unlink to occur. | |
a22285a6 | 3659 | */ |
d52be818 | 3660 | static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir) |
4df27c4d | 3661 | { |
39279cc3 | 3662 | struct btrfs_trans_handle *trans; |
a22285a6 | 3663 | struct btrfs_root *root = BTRFS_I(dir)->root; |
4df27c4d YZ |
3664 | int ret; |
3665 | ||
e70bea5f JB |
3666 | /* |
3667 | * 1 for the possible orphan item | |
3668 | * 1 for the dir item | |
3669 | * 1 for the dir index | |
3670 | * 1 for the inode ref | |
e70bea5f JB |
3671 | * 1 for the inode |
3672 | */ | |
6e137ed3 | 3673 | trans = btrfs_start_transaction(root, 5); |
a22285a6 YZ |
3674 | if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC) |
3675 | return trans; | |
4df27c4d | 3676 | |
d52be818 JB |
3677 | if (PTR_ERR(trans) == -ENOSPC) { |
3678 | u64 num_bytes = btrfs_calc_trans_metadata_size(root, 5); | |
4df27c4d | 3679 | |
d52be818 JB |
3680 | trans = btrfs_start_transaction(root, 0); |
3681 | if (IS_ERR(trans)) | |
3682 | return trans; | |
3683 | ret = btrfs_cond_migrate_bytes(root->fs_info, | |
3684 | &root->fs_info->trans_block_rsv, | |
3685 | num_bytes, 5); | |
3686 | if (ret) { | |
3687 | btrfs_end_transaction(trans, root); | |
3688 | return ERR_PTR(ret); | |
a22285a6 | 3689 | } |
5a77d76c | 3690 | trans->block_rsv = &root->fs_info->trans_block_rsv; |
d52be818 | 3691 | trans->bytes_reserved = num_bytes; |
a22285a6 | 3692 | } |
d52be818 | 3693 | return trans; |
a22285a6 YZ |
3694 | } |
3695 | ||
3696 | static int btrfs_unlink(struct inode *dir, struct dentry *dentry) | |
3697 | { | |
3698 | struct btrfs_root *root = BTRFS_I(dir)->root; | |
3699 | struct btrfs_trans_handle *trans; | |
3700 | struct inode *inode = dentry->d_inode; | |
3701 | int ret; | |
a22285a6 | 3702 | |
d52be818 | 3703 | trans = __unlink_start_trans(dir); |
a22285a6 YZ |
3704 | if (IS_ERR(trans)) |
3705 | return PTR_ERR(trans); | |
5f39d397 | 3706 | |
12fcfd22 CM |
3707 | btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0); |
3708 | ||
e02119d5 CM |
3709 | ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode, |
3710 | dentry->d_name.name, dentry->d_name.len); | |
b532402e TI |
3711 | if (ret) |
3712 | goto out; | |
7b128766 | 3713 | |
a22285a6 | 3714 | if (inode->i_nlink == 0) { |
7b128766 | 3715 | ret = btrfs_orphan_add(trans, inode); |
b532402e TI |
3716 | if (ret) |
3717 | goto out; | |
a22285a6 | 3718 | } |
7b128766 | 3719 | |
b532402e | 3720 | out: |
d52be818 | 3721 | btrfs_end_transaction(trans, root); |
b53d3f5d | 3722 | btrfs_btree_balance_dirty(root); |
39279cc3 CM |
3723 | return ret; |
3724 | } | |
3725 | ||
4df27c4d YZ |
3726 | int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, |
3727 | struct btrfs_root *root, | |
3728 | struct inode *dir, u64 objectid, | |
3729 | const char *name, int name_len) | |
3730 | { | |
3731 | struct btrfs_path *path; | |
3732 | struct extent_buffer *leaf; | |
3733 | struct btrfs_dir_item *di; | |
3734 | struct btrfs_key key; | |
3735 | u64 index; | |
3736 | int ret; | |
33345d01 | 3737 | u64 dir_ino = btrfs_ino(dir); |
4df27c4d YZ |
3738 | |
3739 | path = btrfs_alloc_path(); | |
3740 | if (!path) | |
3741 | return -ENOMEM; | |
3742 | ||
33345d01 | 3743 | di = btrfs_lookup_dir_item(trans, root, path, dir_ino, |
4df27c4d | 3744 | name, name_len, -1); |
79787eaa JM |
3745 | if (IS_ERR_OR_NULL(di)) { |
3746 | if (!di) | |
3747 | ret = -ENOENT; | |
3748 | else | |
3749 | ret = PTR_ERR(di); | |
3750 | goto out; | |
3751 | } | |
4df27c4d YZ |
3752 | |
3753 | leaf = path->nodes[0]; | |
3754 | btrfs_dir_item_key_to_cpu(leaf, di, &key); | |
3755 | WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid); | |
3756 | ret = btrfs_delete_one_dir_name(trans, root, path, di); | |
79787eaa JM |
3757 | if (ret) { |
3758 | btrfs_abort_transaction(trans, root, ret); | |
3759 | goto out; | |
3760 | } | |
b3b4aa74 | 3761 | btrfs_release_path(path); |
4df27c4d YZ |
3762 | |
3763 | ret = btrfs_del_root_ref(trans, root->fs_info->tree_root, | |
3764 | objectid, root->root_key.objectid, | |
33345d01 | 3765 | dir_ino, &index, name, name_len); |
4df27c4d | 3766 | if (ret < 0) { |
79787eaa JM |
3767 | if (ret != -ENOENT) { |
3768 | btrfs_abort_transaction(trans, root, ret); | |
3769 | goto out; | |
3770 | } | |
33345d01 | 3771 | di = btrfs_search_dir_index_item(root, path, dir_ino, |
4df27c4d | 3772 | name, name_len); |
79787eaa JM |
3773 | if (IS_ERR_OR_NULL(di)) { |
3774 | if (!di) | |
3775 | ret = -ENOENT; | |
3776 | else | |
3777 | ret = PTR_ERR(di); | |
3778 | btrfs_abort_transaction(trans, root, ret); | |
3779 | goto out; | |
3780 | } | |
4df27c4d YZ |
3781 | |
3782 | leaf = path->nodes[0]; | |
3783 | btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); | |
b3b4aa74 | 3784 | btrfs_release_path(path); |
4df27c4d YZ |
3785 | index = key.offset; |
3786 | } | |
945d8962 | 3787 | btrfs_release_path(path); |
4df27c4d | 3788 | |
16cdcec7 | 3789 | ret = btrfs_delete_delayed_dir_index(trans, root, dir, index); |
79787eaa JM |
3790 | if (ret) { |
3791 | btrfs_abort_transaction(trans, root, ret); | |
3792 | goto out; | |
3793 | } | |
4df27c4d YZ |
3794 | |
3795 | btrfs_i_size_write(dir, dir->i_size - name_len * 2); | |
0c4d2d95 | 3796 | inode_inc_iversion(dir); |
4df27c4d | 3797 | dir->i_mtime = dir->i_ctime = CURRENT_TIME; |
5a24e84c | 3798 | ret = btrfs_update_inode_fallback(trans, root, dir); |
79787eaa JM |
3799 | if (ret) |
3800 | btrfs_abort_transaction(trans, root, ret); | |
3801 | out: | |
71d7aed0 | 3802 | btrfs_free_path(path); |
79787eaa | 3803 | return ret; |
4df27c4d YZ |
3804 | } |
3805 | ||
39279cc3 CM |
3806 | static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) |
3807 | { | |
3808 | struct inode *inode = dentry->d_inode; | |
1832a6d5 | 3809 | int err = 0; |
39279cc3 | 3810 | struct btrfs_root *root = BTRFS_I(dir)->root; |
39279cc3 | 3811 | struct btrfs_trans_handle *trans; |
39279cc3 | 3812 | |
b3ae244e | 3813 | if (inode->i_size > BTRFS_EMPTY_DIR_SIZE) |
134d4512 | 3814 | return -ENOTEMPTY; |
b3ae244e DS |
3815 | if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) |
3816 | return -EPERM; | |
134d4512 | 3817 | |
d52be818 | 3818 | trans = __unlink_start_trans(dir); |
a22285a6 | 3819 | if (IS_ERR(trans)) |
5df6a9f6 | 3820 | return PTR_ERR(trans); |
5df6a9f6 | 3821 | |
33345d01 | 3822 | if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { |
4df27c4d YZ |
3823 | err = btrfs_unlink_subvol(trans, root, dir, |
3824 | BTRFS_I(inode)->location.objectid, | |
3825 | dentry->d_name.name, | |
3826 | dentry->d_name.len); | |
3827 | goto out; | |
3828 | } | |
3829 | ||
7b128766 JB |
3830 | err = btrfs_orphan_add(trans, inode); |
3831 | if (err) | |
4df27c4d | 3832 | goto out; |
7b128766 | 3833 | |
39279cc3 | 3834 | /* now the directory is empty */ |
e02119d5 CM |
3835 | err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode, |
3836 | dentry->d_name.name, dentry->d_name.len); | |
d397712b | 3837 | if (!err) |
dbe674a9 | 3838 | btrfs_i_size_write(inode, 0); |
4df27c4d | 3839 | out: |
d52be818 | 3840 | btrfs_end_transaction(trans, root); |
b53d3f5d | 3841 | btrfs_btree_balance_dirty(root); |
3954401f | 3842 | |
39279cc3 CM |
3843 | return err; |
3844 | } | |
3845 | ||
39279cc3 CM |
3846 | /* |
3847 | * this can truncate away extent items, csum items and directory items. | |
3848 | * It starts at a high offset and removes keys until it can't find | |
d352ac68 | 3849 | * any higher than new_size |
39279cc3 CM |
3850 | * |
3851 | * csum items that cross the new i_size are truncated to the new size | |
3852 | * as well. | |
7b128766 JB |
3853 | * |
3854 | * min_type is the minimum key type to truncate down to. If set to 0, this | |
3855 | * will kill all the items on this inode, including the INODE_ITEM_KEY. | |
39279cc3 | 3856 | */ |
8082510e YZ |
3857 | int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, |
3858 | struct btrfs_root *root, | |
3859 | struct inode *inode, | |
3860 | u64 new_size, u32 min_type) | |
39279cc3 | 3861 | { |
39279cc3 | 3862 | struct btrfs_path *path; |
5f39d397 | 3863 | struct extent_buffer *leaf; |
39279cc3 | 3864 | struct btrfs_file_extent_item *fi; |
8082510e YZ |
3865 | struct btrfs_key key; |
3866 | struct btrfs_key found_key; | |
39279cc3 | 3867 | u64 extent_start = 0; |
db94535d | 3868 | u64 extent_num_bytes = 0; |
5d4f98a2 | 3869 | u64 extent_offset = 0; |
39279cc3 | 3870 | u64 item_end = 0; |
7f4f6e0a | 3871 | u64 last_size = (u64)-1; |
8082510e | 3872 | u32 found_type = (u8)-1; |
39279cc3 CM |
3873 | int found_extent; |
3874 | int del_item; | |
85e21bac CM |
3875 | int pending_del_nr = 0; |
3876 | int pending_del_slot = 0; | |
179e29e4 | 3877 | int extent_type = -1; |
8082510e YZ |
3878 | int ret; |
3879 | int err = 0; | |
33345d01 | 3880 | u64 ino = btrfs_ino(inode); |
8082510e YZ |
3881 | |
3882 | BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY); | |
39279cc3 | 3883 | |
0eb0e19c MF |
3884 | path = btrfs_alloc_path(); |
3885 | if (!path) | |
3886 | return -ENOMEM; | |
3887 | path->reada = -1; | |
3888 | ||
5dc562c5 JB |
3889 | /* |
3890 | * We want to drop from the next block forward in case this new size is | |
3891 | * not block aligned since we will be keeping the last block of the | |
3892 | * extent just the way it is. | |
3893 | */ | |
0af3d00b | 3894 | if (root->ref_cows || root == root->fs_info->tree_root) |
fda2832f QW |
3895 | btrfs_drop_extent_cache(inode, ALIGN(new_size, |
3896 | root->sectorsize), (u64)-1, 0); | |
8082510e | 3897 | |
16cdcec7 MX |
3898 | /* |
3899 | * This function is also used to drop the items in the log tree before | |
3900 | * we relog the inode, so if root != BTRFS_I(inode)->root, it means | |
3901 | * it is used to drop the logged items. So we shouldn't kill the delayed |
3902 | * items. | |
3903 | */ | |
3904 | if (min_type == 0 && root == BTRFS_I(inode)->root) | |
3905 | btrfs_kill_delayed_inode_items(inode); | |
3906 | ||
33345d01 | 3907 | key.objectid = ino; |
39279cc3 | 3908 | key.offset = (u64)-1; |
5f39d397 CM |
3909 | key.type = (u8)-1; |
3910 | ||
85e21bac | 3911 | search_again: |
b9473439 | 3912 | path->leave_spinning = 1; |
85e21bac | 3913 | ret = btrfs_search_slot(trans, root, &key, path, -1, 1); |
8082510e YZ |
3914 | if (ret < 0) { |
3915 | err = ret; | |
3916 | goto out; | |
3917 | } | |
d397712b | 3918 | |
85e21bac | 3919 | if (ret > 0) { |
e02119d5 CM |
3920 | /* there are no items in the tree for us to truncate, we're |
3921 | * done | |
3922 | */ | |
8082510e YZ |
3923 | if (path->slots[0] == 0) |
3924 | goto out; | |
85e21bac CM |
3925 | path->slots[0]--; |
3926 | } | |
3927 | ||
d397712b | 3928 | while (1) { |
39279cc3 | 3929 | fi = NULL; |
5f39d397 CM |
3930 | leaf = path->nodes[0]; |
3931 | btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); | |
3932 | found_type = btrfs_key_type(&found_key); | |
39279cc3 | 3933 | |
33345d01 | 3934 | if (found_key.objectid != ino) |
39279cc3 | 3935 | break; |
5f39d397 | 3936 | |
85e21bac | 3937 | if (found_type < min_type) |
39279cc3 CM |
3938 | break; |
3939 | ||
5f39d397 | 3940 | item_end = found_key.offset; |
39279cc3 | 3941 | if (found_type == BTRFS_EXTENT_DATA_KEY) { |
5f39d397 | 3942 | fi = btrfs_item_ptr(leaf, path->slots[0], |
39279cc3 | 3943 | struct btrfs_file_extent_item); |
179e29e4 CM |
3944 | extent_type = btrfs_file_extent_type(leaf, fi); |
3945 | if (extent_type != BTRFS_FILE_EXTENT_INLINE) { | |
5f39d397 | 3946 | item_end += |
db94535d | 3947 | btrfs_file_extent_num_bytes(leaf, fi); |
179e29e4 | 3948 | } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { |
179e29e4 | 3949 | item_end += btrfs_file_extent_inline_len(leaf, |
c8b97818 | 3950 | fi); |
39279cc3 | 3951 | } |
008630c1 | 3952 | item_end--; |
39279cc3 | 3953 | } |
8082510e YZ |
3954 | if (found_type > min_type) { |
3955 | del_item = 1; | |
3956 | } else { | |
3957 | if (item_end < new_size) | |
b888db2b | 3958 | break; |
8082510e YZ |
3959 | if (found_key.offset >= new_size) |
3960 | del_item = 1; | |
3961 | else | |
3962 | del_item = 0; | |
39279cc3 | 3963 | } |
39279cc3 | 3964 | found_extent = 0; |
39279cc3 | 3965 | /* FIXME, shrink the extent if the ref count is only 1 */ |
179e29e4 CM |
3966 | if (found_type != BTRFS_EXTENT_DATA_KEY) |
3967 | goto delete; | |
3968 | ||
7f4f6e0a JB |
3969 | if (del_item) |
3970 | last_size = found_key.offset; | |
3971 | else | |
3972 | last_size = new_size; | |
3973 | ||
179e29e4 | 3974 | if (extent_type != BTRFS_FILE_EXTENT_INLINE) { |
39279cc3 | 3975 | u64 num_dec; |
db94535d | 3976 | extent_start = btrfs_file_extent_disk_bytenr(leaf, fi); |
f70a9a6b | 3977 | if (!del_item) { |
db94535d CM |
3978 | u64 orig_num_bytes = |
3979 | btrfs_file_extent_num_bytes(leaf, fi); | |
fda2832f QW |
3980 | extent_num_bytes = ALIGN(new_size - |
3981 | found_key.offset, | |
3982 | root->sectorsize); | |
db94535d CM |
3983 | btrfs_set_file_extent_num_bytes(leaf, fi, |
3984 | extent_num_bytes); | |
3985 | num_dec = (orig_num_bytes - | |
9069218d | 3986 | extent_num_bytes); |
e02119d5 | 3987 | if (root->ref_cows && extent_start != 0) |
a76a3cd4 | 3988 | inode_sub_bytes(inode, num_dec); |
5f39d397 | 3989 | btrfs_mark_buffer_dirty(leaf); |
39279cc3 | 3990 | } else { |
db94535d CM |
3991 | extent_num_bytes = |
3992 | btrfs_file_extent_disk_num_bytes(leaf, | |
3993 | fi); | |
5d4f98a2 YZ |
3994 | extent_offset = found_key.offset - |
3995 | btrfs_file_extent_offset(leaf, fi); | |
3996 | ||
39279cc3 | 3997 | /* FIXME blocksize != 4096 */ |
9069218d | 3998 | num_dec = btrfs_file_extent_num_bytes(leaf, fi); |
39279cc3 CM |
3999 | if (extent_start != 0) { |
4000 | found_extent = 1; | |
e02119d5 | 4001 | if (root->ref_cows) |
a76a3cd4 | 4002 | inode_sub_bytes(inode, num_dec); |
e02119d5 | 4003 | } |
39279cc3 | 4004 | } |
9069218d | 4005 | } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { |
c8b97818 CM |
4006 | /* |
4007 | * we can't truncate inline items that have had | |
4008 | * special encodings | |
4009 | */ | |
4010 | if (!del_item && | |
4011 | btrfs_file_extent_compression(leaf, fi) == 0 && | |
4012 | btrfs_file_extent_encryption(leaf, fi) == 0 && | |
4013 | btrfs_file_extent_other_encoding(leaf, fi) == 0) { | |
e02119d5 CM |
4014 | u32 size = new_size - found_key.offset; |
4015 | ||
4016 | if (root->ref_cows) { | |
a76a3cd4 YZ |
4017 | inode_sub_bytes(inode, item_end + 1 - |
4018 | new_size); | |
e02119d5 CM |
4019 | } |
4020 | size = | |
4021 | btrfs_file_extent_calc_inline_size(size); | |
afe5fea7 | 4022 | btrfs_truncate_item(root, path, size, 1); |
e02119d5 | 4023 | } else if (root->ref_cows) { |
a76a3cd4 YZ |
4024 | inode_sub_bytes(inode, item_end + 1 - |
4025 | found_key.offset); | |
9069218d | 4026 | } |
39279cc3 | 4027 | } |
179e29e4 | 4028 | delete: |
39279cc3 | 4029 | if (del_item) { |
85e21bac CM |
4030 | if (!pending_del_nr) { |
4031 | /* no pending yet, add ourselves */ | |
4032 | pending_del_slot = path->slots[0]; | |
4033 | pending_del_nr = 1; | |
4034 | } else if (pending_del_nr && | |
4035 | path->slots[0] + 1 == pending_del_slot) { | |
4036 | /* hop on the pending chunk */ | |
4037 | pending_del_nr++; | |
4038 | pending_del_slot = path->slots[0]; | |
4039 | } else { | |
d397712b | 4040 | BUG(); |
85e21bac | 4041 | } |
39279cc3 CM |
4042 | } else { |
4043 | break; | |
4044 | } | |
0af3d00b JB |
4045 | if (found_extent && (root->ref_cows || |
4046 | root == root->fs_info->tree_root)) { | |
b9473439 | 4047 | btrfs_set_path_blocking(path); |
39279cc3 | 4048 | ret = btrfs_free_extent(trans, root, extent_start, |
5d4f98a2 YZ |
4049 | extent_num_bytes, 0, |
4050 | btrfs_header_owner(leaf), | |
66d7e7f0 | 4051 | ino, extent_offset, 0); |
39279cc3 CM |
4052 | BUG_ON(ret); |
4053 | } | |
85e21bac | 4054 | |
8082510e YZ |
4055 | if (found_type == BTRFS_INODE_ITEM_KEY) |
4056 | break; | |
4057 | ||
4058 | if (path->slots[0] == 0 || | |
4059 | path->slots[0] != pending_del_slot) { | |
8082510e YZ |
4060 | if (pending_del_nr) { |
4061 | ret = btrfs_del_items(trans, root, path, | |
4062 | pending_del_slot, | |
4063 | pending_del_nr); | |
79787eaa JM |
4064 | if (ret) { |
4065 | btrfs_abort_transaction(trans, | |
4066 | root, ret); | |
4067 | goto error; | |
4068 | } | |
8082510e YZ |
4069 | pending_del_nr = 0; |
4070 | } | |
b3b4aa74 | 4071 | btrfs_release_path(path); |
85e21bac | 4072 | goto search_again; |
8082510e YZ |
4073 | } else { |
4074 | path->slots[0]--; | |
85e21bac | 4075 | } |
39279cc3 | 4076 | } |
8082510e | 4077 | out: |
85e21bac CM |
4078 | if (pending_del_nr) { |
4079 | ret = btrfs_del_items(trans, root, path, pending_del_slot, | |
4080 | pending_del_nr); | |
79787eaa JM |
4081 | if (ret) |
4082 | btrfs_abort_transaction(trans, root, ret); | |
85e21bac | 4083 | } |
79787eaa | 4084 | error: |
7f4f6e0a JB |
4085 | if (last_size != (u64)-1) |
4086 | btrfs_ordered_update_i_size(inode, last_size, NULL); | |
39279cc3 | 4087 | btrfs_free_path(path); |
8082510e | 4088 | return err; |
39279cc3 CM |
4089 | } |
4090 | ||
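
The truncate loop above does not delete leaf items one by one; it collects a run of adjacent slots in pending_del_slot/pending_del_nr and removes the whole run with a single btrfs_del_items() call. A user-space sketch of the same idea of batching contiguous slots into one ranged delete follows, with the flush policy simplified; delete_range() stands in for the batched delete.

#include <stdio.h>

static void delete_range(int first_slot, int nr)
{
        printf("delete %d item(s) starting at slot %d\n", nr, first_slot);
}

int main(void)
{
        /* slots visited from high to low, the way the truncate loop walks */
        int slots[] = { 9, 8, 7, 4, 3, 1 };
        int n = sizeof(slots) / sizeof(slots[0]);
        int pending_slot = 0, pending_nr = 0;

        for (int i = 0; i < n; i++) {
                int slot = slots[i];

                if (!pending_nr) {
                        pending_slot = slot;          /* start a new run */
                        pending_nr = 1;
                } else if (slot + 1 == pending_slot) {
                        pending_slot = slot;          /* extend the run down */
                        pending_nr++;
                } else {
                        delete_range(pending_slot, pending_nr);
                        pending_slot = slot;          /* flush, then restart */
                        pending_nr = 1;
                }
        }
        if (pending_nr)
                delete_range(pending_slot, pending_nr);
        return 0;
}
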
4091 | /* | |
2aaa6655 JB |
4092 | * btrfs_truncate_page - read, zero a chunk and write a page |
4093 | * @inode - inode that we're zeroing | |
4094 | * @from - the offset to start zeroing | |
4095 | * @len - the length to zero, 0 to zero the entire range relative to the |
4096 | * offset | |
4097 | * @front - zero up to the offset instead of from the offset on | |
4098 | * | |
4099 | * This will find the page for the "from" offset and cow the page and zero the | |
4100 | * part we want to zero. This is used with truncate and hole punching. | |
39279cc3 | 4101 | */ |
2aaa6655 JB |
4102 | int btrfs_truncate_page(struct inode *inode, loff_t from, loff_t len, |
4103 | int front) | |
39279cc3 | 4104 | { |
2aaa6655 | 4105 | struct address_space *mapping = inode->i_mapping; |
db94535d | 4106 | struct btrfs_root *root = BTRFS_I(inode)->root; |
e6dcd2dc CM |
4107 | struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; |
4108 | struct btrfs_ordered_extent *ordered; | |
2ac55d41 | 4109 | struct extent_state *cached_state = NULL; |
e6dcd2dc | 4110 | char *kaddr; |
db94535d | 4111 | u32 blocksize = root->sectorsize; |
39279cc3 CM |
4112 | pgoff_t index = from >> PAGE_CACHE_SHIFT; |
4113 | unsigned offset = from & (PAGE_CACHE_SIZE-1); | |
4114 | struct page *page; | |
3b16a4e3 | 4115 | gfp_t mask = btrfs_alloc_write_mask(mapping); |
39279cc3 | 4116 | int ret = 0; |
a52d9a80 | 4117 | u64 page_start; |
e6dcd2dc | 4118 | u64 page_end; |
39279cc3 | 4119 | |
2aaa6655 JB |
4120 | if ((offset & (blocksize - 1)) == 0 && |
4121 | (!len || ((len & (blocksize - 1)) == 0))) | |
39279cc3 | 4122 | goto out; |
0ca1f7ce | 4123 | ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE); |
5d5e103a JB |
4124 | if (ret) |
4125 | goto out; | |
39279cc3 | 4126 | |
211c17f5 | 4127 | again: |
3b16a4e3 | 4128 | page = find_or_create_page(mapping, index, mask); |
5d5e103a | 4129 | if (!page) { |
0ca1f7ce | 4130 | btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE); |
ac6a2b36 | 4131 | ret = -ENOMEM; |
39279cc3 | 4132 | goto out; |
5d5e103a | 4133 | } |
e6dcd2dc CM |
4134 | |
4135 | page_start = page_offset(page); | |
4136 | page_end = page_start + PAGE_CACHE_SIZE - 1; | |
4137 | ||
39279cc3 | 4138 | if (!PageUptodate(page)) { |
9ebefb18 | 4139 | ret = btrfs_readpage(NULL, page); |
39279cc3 | 4140 | lock_page(page); |
211c17f5 CM |
4141 | if (page->mapping != mapping) { |
4142 | unlock_page(page); | |
4143 | page_cache_release(page); | |
4144 | goto again; | |
4145 | } | |
39279cc3 CM |
4146 | if (!PageUptodate(page)) { |
4147 | ret = -EIO; | |
89642229 | 4148 | goto out_unlock; |
39279cc3 CM |
4149 | } |
4150 | } | |
211c17f5 | 4151 | wait_on_page_writeback(page); |
e6dcd2dc | 4152 | |
d0082371 | 4153 | lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state); |
e6dcd2dc CM |
4154 | set_page_extent_mapped(page); |
4155 | ||
4156 | ordered = btrfs_lookup_ordered_extent(inode, page_start); | |
4157 | if (ordered) { | |
2ac55d41 JB |
4158 | unlock_extent_cached(io_tree, page_start, page_end, |
4159 | &cached_state, GFP_NOFS); | |
e6dcd2dc CM |
4160 | unlock_page(page); |
4161 | page_cache_release(page); | |
eb84ae03 | 4162 | btrfs_start_ordered_extent(inode, ordered, 1); |
e6dcd2dc CM |
4163 | btrfs_put_ordered_extent(ordered); |
4164 | goto again; | |
4165 | } | |
4166 | ||
2ac55d41 | 4167 | clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end, |
9e8a4a8b LB |
4168 | EXTENT_DIRTY | EXTENT_DELALLOC | |
4169 | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, | |
2ac55d41 | 4170 | 0, 0, &cached_state, GFP_NOFS); |
5d5e103a | 4171 | |
2ac55d41 JB |
4172 | ret = btrfs_set_extent_delalloc(inode, page_start, page_end, |
4173 | &cached_state); | |
9ed74f2d | 4174 | if (ret) { |
2ac55d41 JB |
4175 | unlock_extent_cached(io_tree, page_start, page_end, |
4176 | &cached_state, GFP_NOFS); | |
9ed74f2d JB |
4177 | goto out_unlock; |
4178 | } | |
4179 | ||
e6dcd2dc | 4180 | if (offset != PAGE_CACHE_SIZE) { |
2aaa6655 JB |
4181 | if (!len) |
4182 | len = PAGE_CACHE_SIZE - offset; | |
e6dcd2dc | 4183 | kaddr = kmap(page); |
2aaa6655 JB |
4184 | if (front) |
4185 | memset(kaddr, 0, offset); | |
4186 | else | |
4187 | memset(kaddr + offset, 0, len); | |
e6dcd2dc CM |
4188 | flush_dcache_page(page); |
4189 | kunmap(page); | |
4190 | } | |
247e743c | 4191 | ClearPageChecked(page); |
e6dcd2dc | 4192 | set_page_dirty(page); |
2ac55d41 JB |
4193 | unlock_extent_cached(io_tree, page_start, page_end, &cached_state, |
4194 | GFP_NOFS); | |
39279cc3 | 4195 | |
89642229 | 4196 | out_unlock: |
5d5e103a | 4197 | if (ret) |
0ca1f7ce | 4198 | btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE); |
39279cc3 CM |
4199 | unlock_page(page); |
4200 | page_cache_release(page); | |
4201 | out: | |
4202 | return ret; | |
4203 | } | |
4204 | ||
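
btrfs_truncate_page() only has work to do when the requested range is not block aligned; it then zeroes either everything in the page before the offset (front != 0) or everything from the offset through the requested length. A short sketch of just that offset arithmetic on an in-memory buffer follows; the single 4096-byte block/page size is an assumption made for illustration, whereas the real code distinguishes the page size from the sector size.

#include <stdio.h>
#include <string.h>

#define PAGE_SZ 4096UL

/* zero the partial-page piece implied by (from, len, front) */
static void zero_partial(char *page, unsigned long from,
                         unsigned long len, int front)
{
        unsigned long offset = from & (PAGE_SZ - 1);

        if (offset == 0 && (len == 0 || (len & (PAGE_SZ - 1)) == 0))
                return;                        /* already block aligned */

        if (len == 0)
                len = PAGE_SZ - offset;        /* to the end of the page */

        if (front)
                memset(page, 0, offset);       /* zero up to the offset */
        else
                memset(page + offset, 0, len); /* zero from the offset on */
}

int main(void)
{
        char page[PAGE_SZ];

        memset(page, 'x', sizeof(page));
        zero_partial(page, 1000, 0, 0);        /* zero bytes 1000..4095 */
        printf("byte 999 = %c, byte 1000 = %d\n", page[999], page[1000]);
        return 0;
}
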
695a0d0d JB |
4205 | /* |
4206 | * This function puts in dummy file extents for the area we're creating a hole | |
4207 | * for. So if we are truncating this file to a larger size we need to insert | |
4208 | * these file extents so that btrfs_get_extent will return an EXTENT_MAP_HOLE for |
4209 | * the range between oldsize and size | |
4210 | */ | |
a41ad394 | 4211 | int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size) |
39279cc3 | 4212 | { |
9036c102 YZ |
4213 | struct btrfs_trans_handle *trans; |
4214 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
4215 | struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; | |
a22285a6 | 4216 | struct extent_map *em = NULL; |
2ac55d41 | 4217 | struct extent_state *cached_state = NULL; |
5dc562c5 | 4218 | struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; |
fda2832f QW |
4219 | u64 hole_start = ALIGN(oldsize, root->sectorsize); |
4220 | u64 block_end = ALIGN(size, root->sectorsize); | |
9036c102 YZ |
4221 | u64 last_byte; |
4222 | u64 cur_offset; | |
4223 | u64 hole_size; | |
9ed74f2d | 4224 | int err = 0; |
39279cc3 | 4225 | |
a71754fc JB |
4226 | /* |
4227 | * If our size started in the middle of a page we need to zero out the | |
4228 | * rest of the page before we expand the i_size, otherwise we could | |
4229 | * expose stale data. | |
4230 | */ | |
4231 | err = btrfs_truncate_page(inode, oldsize, 0, 0); | |
4232 | if (err) | |
4233 | return err; | |
4234 | ||
9036c102 YZ |
4235 | if (size <= hole_start) |
4236 | return 0; | |
4237 | ||
9036c102 YZ |
4238 | while (1) { |
4239 | struct btrfs_ordered_extent *ordered; | |
fa7c1494 | 4240 | |
2ac55d41 | 4241 | lock_extent_bits(io_tree, hole_start, block_end - 1, 0, |
d0082371 | 4242 | &cached_state); |
fa7c1494 MX |
4243 | ordered = btrfs_lookup_ordered_range(inode, hole_start, |
4244 | block_end - hole_start); | |
9036c102 YZ |
4245 | if (!ordered) |
4246 | break; | |
2ac55d41 JB |
4247 | unlock_extent_cached(io_tree, hole_start, block_end - 1, |
4248 | &cached_state, GFP_NOFS); | |
fa7c1494 | 4249 | btrfs_start_ordered_extent(inode, ordered, 1); |
9036c102 YZ |
4250 | btrfs_put_ordered_extent(ordered); |
4251 | } | |
39279cc3 | 4252 | |
9036c102 YZ |
4253 | cur_offset = hole_start; |
4254 | while (1) { | |
4255 | em = btrfs_get_extent(inode, NULL, 0, cur_offset, | |
4256 | block_end - cur_offset, 0); | |
79787eaa JM |
4257 | if (IS_ERR(em)) { |
4258 | err = PTR_ERR(em); | |
f2767956 | 4259 | em = NULL; |
79787eaa JM |
4260 | break; |
4261 | } | |
9036c102 | 4262 | last_byte = min(extent_map_end(em), block_end); |
fda2832f | 4263 | last_byte = ALIGN(last_byte, root->sectorsize); |
8082510e | 4264 | if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) { |
5dc562c5 | 4265 | struct extent_map *hole_em; |
9036c102 | 4266 | hole_size = last_byte - cur_offset; |
9ed74f2d | 4267 | |
3642320e | 4268 | trans = btrfs_start_transaction(root, 3); |
a22285a6 YZ |
4269 | if (IS_ERR(trans)) { |
4270 | err = PTR_ERR(trans); | |
9ed74f2d | 4271 | break; |
a22285a6 | 4272 | } |
8082510e | 4273 | |
5dc562c5 JB |
4274 | err = btrfs_drop_extents(trans, root, inode, |
4275 | cur_offset, | |
2671485d | 4276 | cur_offset + hole_size, 1); |
5b397377 | 4277 | if (err) { |
79787eaa | 4278 | btrfs_abort_transaction(trans, root, err); |
5b397377 | 4279 | btrfs_end_transaction(trans, root); |
3893e33b | 4280 | break; |
5b397377 | 4281 | } |
8082510e | 4282 | |
9036c102 | 4283 | err = btrfs_insert_file_extent(trans, root, |
33345d01 | 4284 | btrfs_ino(inode), cur_offset, 0, |
9036c102 YZ |
4285 | 0, hole_size, 0, hole_size, |
4286 | 0, 0, 0); | |
5b397377 | 4287 | if (err) { |
79787eaa | 4288 | btrfs_abort_transaction(trans, root, err); |
5b397377 | 4289 | btrfs_end_transaction(trans, root); |
3893e33b | 4290 | break; |
5b397377 | 4291 | } |
8082510e | 4292 | |
5dc562c5 JB |
4293 | btrfs_drop_extent_cache(inode, cur_offset, |
4294 | cur_offset + hole_size - 1, 0); | |
4295 | hole_em = alloc_extent_map(); | |
4296 | if (!hole_em) { | |
4297 | set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, | |
4298 | &BTRFS_I(inode)->runtime_flags); | |
4299 | goto next; | |
4300 | } | |
4301 | hole_em->start = cur_offset; | |
4302 | hole_em->len = hole_size; | |
4303 | hole_em->orig_start = cur_offset; | |
8082510e | 4304 | |
5dc562c5 JB |
4305 | hole_em->block_start = EXTENT_MAP_HOLE; |
4306 | hole_em->block_len = 0; | |
b4939680 | 4307 | hole_em->orig_block_len = 0; |
cc95bef6 | 4308 | hole_em->ram_bytes = hole_size; |
5dc562c5 JB |
4309 | hole_em->bdev = root->fs_info->fs_devices->latest_bdev; |
4310 | hole_em->compress_type = BTRFS_COMPRESS_NONE; | |
4311 | hole_em->generation = trans->transid; | |
8082510e | 4312 | |
5dc562c5 JB |
4313 | while (1) { |
4314 | write_lock(&em_tree->lock); | |
09a2a8f9 | 4315 | err = add_extent_mapping(em_tree, hole_em, 1); |
5dc562c5 JB |
4316 | write_unlock(&em_tree->lock); |
4317 | if (err != -EEXIST) | |
4318 | break; | |
4319 | btrfs_drop_extent_cache(inode, cur_offset, | |
4320 | cur_offset + | |
4321 | hole_size - 1, 0); | |
4322 | } | |
4323 | free_extent_map(hole_em); | |
4324 | next: | |
3642320e | 4325 | btrfs_update_inode(trans, root, inode); |
8082510e | 4326 | btrfs_end_transaction(trans, root); |
9036c102 YZ |
4327 | } |
4328 | free_extent_map(em); | |
a22285a6 | 4329 | em = NULL; |
9036c102 | 4330 | cur_offset = last_byte; |
8082510e | 4331 | if (cur_offset >= block_end) |
9036c102 YZ |
4332 | break; |
4333 | } | |
1832a6d5 | 4334 | |
a22285a6 | 4335 | free_extent_map(em); |
2ac55d41 JB |
4336 | unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state, |
4337 | GFP_NOFS); | |
9036c102 YZ |
4338 | return err; |
4339 | } | |
39279cc3 | 4340 | |
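/*
 * handle the ATTR_SIZE part of a setattr.  Growing the file goes through
 * btrfs_cont_expand() above and then bumps i_size in a small transaction;
 * shrinking adds an orphan item first so a crash in the middle of the
 * truncate can be cleaned up, then does the real work in btrfs_truncate().
 */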
3972f260 | 4341 | static int btrfs_setsize(struct inode *inode, struct iattr *attr) |
8082510e | 4342 | { |
f4a2f4c5 MX |
4343 | struct btrfs_root *root = BTRFS_I(inode)->root; |
4344 | struct btrfs_trans_handle *trans; | |
a41ad394 | 4345 | loff_t oldsize = i_size_read(inode); |
3972f260 ES |
4346 | loff_t newsize = attr->ia_size; |
4347 | int mask = attr->ia_valid; | |
8082510e YZ |
4348 | int ret; |
4349 | ||
3972f260 ES |
4350 | /* |
4351 | * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a | |
4352 | * special case where we need to update the times despite not having | |
4353 | * these flags set. For all other operations the VFS sets these flags | |
4354 | * explicitly if it wants a timestamp update. | |
4355 | */ | |
4356 | if (newsize != oldsize && (!(mask & (ATTR_CTIME | ATTR_MTIME)))) | |
4357 | inode->i_ctime = inode->i_mtime = current_fs_time(inode->i_sb); | |
4358 | ||
a41ad394 | 4359 | if (newsize > oldsize) { |
7caef267 | 4360 | truncate_pagecache(inode, newsize); |
a41ad394 | 4361 | ret = btrfs_cont_expand(inode, oldsize, newsize); |
f4a2f4c5 | 4362 | if (ret) |
8082510e | 4363 | return ret; |
8082510e | 4364 | |
f4a2f4c5 MX |
4365 | trans = btrfs_start_transaction(root, 1); |
4366 | if (IS_ERR(trans)) | |
4367 | return PTR_ERR(trans); | |
4368 | ||
4369 | i_size_write(inode, newsize); | |
4370 | btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL); | |
4371 | ret = btrfs_update_inode(trans, root, inode); | |
7ad85bb7 | 4372 | btrfs_end_transaction(trans, root); |
a41ad394 | 4373 | } else { |
8082510e | 4374 | |
a41ad394 JB |
4375 | /* |
4376 | * We're truncating a file that used to have good data down to | |
4377 | * zero. Make sure it gets into the ordered flush list so that | |
4378 | * any new writes get down to disk quickly. | |
4379 | */ | |
4380 | if (newsize == 0) | |
72ac3c0d JB |
4381 | set_bit(BTRFS_INODE_ORDERED_DATA_CLOSE, |
4382 | &BTRFS_I(inode)->runtime_flags); | |
8082510e | 4383 | |
f3fe820c JB |
4384 | /* |
4385 | * 1 for the orphan item we're going to add | |
4386 | * 1 for the orphan item deletion. | |
4387 | */ | |
4388 | trans = btrfs_start_transaction(root, 2); | |
4389 | if (IS_ERR(trans)) | |
4390 | return PTR_ERR(trans); | |
4391 | ||
4392 | /* | |
4393 | * We need to do this in case we fail at _any_ point during the | |
4394 | * actual truncate. Once we do the truncate_setsize we could | |
4395 | * invalidate pages which forces any outstanding ordered io to | |
4396 | * be instantly completed which will give us extents that need | |
4397 | * to be truncated. If we fail to get an orphan inode down we | |
4398 | * could have left over extents that were never meant to live, | |
4399 | * so we need to guarantee from this point on that everything | |
4400 | * will be consistent. | |
4401 | */ | |
4402 | ret = btrfs_orphan_add(trans, inode); | |
4403 | btrfs_end_transaction(trans, root); | |
4404 | if (ret) | |
4405 | return ret; | |
4406 | ||
a41ad394 JB |
4407 | /* we don't support swapfiles, so vmtruncate shouldn't fail */ |
4408 | truncate_setsize(inode, newsize); | |
2e60a51e MX |
4409 | |
4410 | /* Disable nonlocked read DIO to avoid the endless truncate */ | |
4411 | btrfs_inode_block_unlocked_dio(inode); | |
4412 | inode_dio_wait(inode); | |
4413 | btrfs_inode_resume_unlocked_dio(inode); | |
4414 | ||
a41ad394 | 4415 | ret = btrfs_truncate(inode); |
7f4f6e0a JB |
4416 | if (ret && inode->i_nlink) { |
4417 | int err; | |
4418 | ||
4419 | /* | |
4420 | * failed to truncate, disk_i_size is only adjusted down | |
4421 | * as we remove extents, so it should represent the true | |
4422 | * size of the inode, so reset the in memory size and | |
4423 | * delete our orphan entry. | |
4424 | */ | |
4425 | trans = btrfs_join_transaction(root); | |
4426 | if (IS_ERR(trans)) { | |
4427 | btrfs_orphan_del(NULL, inode); | |
4428 | return ret; | |
4429 | } | |
4430 | i_size_write(inode, BTRFS_I(inode)->disk_i_size); | |
4431 | err = btrfs_orphan_del(trans, inode); | |
4432 | if (err) | |
4433 | btrfs_abort_transaction(trans, root, err); | |
4434 | btrfs_end_transaction(trans, root); | |
4435 | } | |
8082510e YZ |
4436 | } |
4437 | ||
a41ad394 | 4438 | return ret; |
8082510e YZ |
4439 | } |
4440 | ||
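/*
 * our ->setattr hook: refuse changes on read-only roots, let the VFS
 * validate the new attributes, hand ATTR_SIZE off to btrfs_setsize() and
 * then copy the remaining attributes and dirty the inode.
 */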
9036c102 YZ |
4441 | static int btrfs_setattr(struct dentry *dentry, struct iattr *attr) |
4442 | { | |
4443 | struct inode *inode = dentry->d_inode; | |
b83cc969 | 4444 | struct btrfs_root *root = BTRFS_I(inode)->root; |
9036c102 | 4445 | int err; |
39279cc3 | 4446 | |
b83cc969 LZ |
4447 | if (btrfs_root_readonly(root)) |
4448 | return -EROFS; | |
4449 | ||
9036c102 YZ |
4450 | err = inode_change_ok(inode, attr); |
4451 | if (err) | |
4452 | return err; | |
2bf5a725 | 4453 | |
5a3f23d5 | 4454 | if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { |
3972f260 | 4455 | err = btrfs_setsize(inode, attr); |
8082510e YZ |
4456 | if (err) |
4457 | return err; | |
39279cc3 | 4458 | } |
9036c102 | 4459 | |
1025774c CH |
4460 | if (attr->ia_valid) { |
4461 | setattr_copy(inode, attr); | |
0c4d2d95 | 4462 | inode_inc_iversion(inode); |
22c44fe6 | 4463 | err = btrfs_dirty_inode(inode); |
1025774c | 4464 | |
22c44fe6 | 4465 | if (!err && attr->ia_valid & ATTR_MODE) |
1025774c CH |
4466 | err = btrfs_acl_chmod(inode); |
4467 | } | |
33268eaf | 4468 | |
39279cc3 CM |
4469 | return err; |
4470 | } | |
61295eb8 | 4471 | |
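/*
 * final iput of an unlinked inode ends up here: reserve enough metadata
 * space for the truncate (stealing from the global reserve if needed),
 * drop all of the inode's items with btrfs_truncate_inode_items() and
 * finally remove the orphan item and recycle the inode number.
 */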
bd555975 | 4472 | void btrfs_evict_inode(struct inode *inode) |
39279cc3 CM |
4473 | { |
4474 | struct btrfs_trans_handle *trans; | |
4475 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
726c35fa | 4476 | struct btrfs_block_rsv *rsv, *global_rsv; |
07127184 | 4477 | u64 min_size = btrfs_calc_trunc_metadata_size(root, 1); |
39279cc3 CM |
4478 | int ret; |
4479 | ||
1abe9b8a | 4480 | trace_btrfs_inode_evict(inode); |
4481 | ||
39279cc3 | 4482 | truncate_inode_pages(&inode->i_data, 0); |
69e9c6c6 SB |
4483 | if (inode->i_nlink && |
4484 | ((btrfs_root_refs(&root->root_item) != 0 && | |
4485 | root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID) || | |
4486 | btrfs_is_free_space_inode(inode))) | |
bd555975 AV |
4487 | goto no_delete; |
4488 | ||
39279cc3 | 4489 | if (is_bad_inode(inode)) { |
7b128766 | 4490 | btrfs_orphan_del(NULL, inode); |
39279cc3 CM |
4491 | goto no_delete; |
4492 | } | |
bd555975 | 4493 | /* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */ |
4a096752 | 4494 | btrfs_wait_ordered_range(inode, 0, (u64)-1); |
5f39d397 | 4495 | |
c71bf099 | 4496 | if (root->fs_info->log_root_recovering) { |
6bf02314 | 4497 | BUG_ON(test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, |
8a35d95f | 4498 | &BTRFS_I(inode)->runtime_flags)); |
c71bf099 YZ |
4499 | goto no_delete; |
4500 | } | |
4501 | ||
76dda93c | 4502 | if (inode->i_nlink > 0) { |
69e9c6c6 SB |
4503 | BUG_ON(btrfs_root_refs(&root->root_item) != 0 && |
4504 | root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID); | |
76dda93c YZ |
4505 | goto no_delete; |
4506 | } | |
4507 | ||
0e8c36a9 MX |
4508 | ret = btrfs_commit_inode_delayed_inode(inode); |
4509 | if (ret) { | |
4510 | btrfs_orphan_del(NULL, inode); | |
4511 | goto no_delete; | |
4512 | } | |
4513 | ||
66d8f3dd | 4514 | rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP); |
4289a667 JB |
4515 | if (!rsv) { |
4516 | btrfs_orphan_del(NULL, inode); | |
4517 | goto no_delete; | |
4518 | } | |
4a338542 | 4519 | rsv->size = min_size; |
ca7e70f5 | 4520 | rsv->failfast = 1; |
726c35fa | 4521 | global_rsv = &root->fs_info->global_block_rsv; |
4289a667 | 4522 | |
dbe674a9 | 4523 | btrfs_i_size_write(inode, 0); |
5f39d397 | 4524 | |
4289a667 | 4525 | /* |
8407aa46 MX |
4526 | * This is a bit simpler than btrfs_truncate since we've already |
4527 | * reserved our space for our orphan item in the unlink, so we just | |
4528 | * need to reserve some slack space in case we add bytes and update | |
4529 | * the inode item when doing the truncate. | |
4289a667 | 4530 | */ |
8082510e | 4531 | while (1) { |
08e007d2 MX |
4532 | ret = btrfs_block_rsv_refill(root, rsv, min_size, |
4533 | BTRFS_RESERVE_FLUSH_LIMIT); | |
726c35fa JB |
4534 | |
4535 | /* | |
4536 | * Try and steal from the global reserve since we will | |
4537 | * likely not use this space anyway, we want to try as | |
4538 | * hard as possible to get this to work. | |
4539 | */ | |
4540 | if (ret) | |
4541 | ret = btrfs_block_rsv_migrate(global_rsv, rsv, min_size); | |
d68fc57b | 4542 | |
d68fc57b | 4543 | if (ret) { |
c2cf52eb SK |
4544 | btrfs_warn(root->fs_info, |
4545 | "Could not get space for a delete, will truncate on mount %d", | |
4546 | ret); | |
4289a667 JB |
4547 | btrfs_orphan_del(NULL, inode); |
4548 | btrfs_free_block_rsv(root, rsv); | |
4549 | goto no_delete; | |
d68fc57b | 4550 | } |
7b128766 | 4551 | |
0e8c36a9 | 4552 | trans = btrfs_join_transaction(root); |
4289a667 JB |
4553 | if (IS_ERR(trans)) { |
4554 | btrfs_orphan_del(NULL, inode); | |
4555 | btrfs_free_block_rsv(root, rsv); | |
4556 | goto no_delete; | |
d68fc57b | 4557 | } |
7b128766 | 4558 | |
4289a667 JB |
4559 | trans->block_rsv = rsv; |
4560 | ||
d68fc57b | 4561 | ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0); |
ca7e70f5 | 4562 | if (ret != -ENOSPC) |
8082510e | 4563 | break; |
85e21bac | 4564 | |
8407aa46 | 4565 | trans->block_rsv = &root->fs_info->trans_block_rsv; |
8082510e YZ |
4566 | btrfs_end_transaction(trans, root); |
4567 | trans = NULL; | |
b53d3f5d | 4568 | btrfs_btree_balance_dirty(root); |
8082510e | 4569 | } |
5f39d397 | 4570 | |
4289a667 JB |
4571 | btrfs_free_block_rsv(root, rsv); |
4572 | ||
4ef31a45 JB |
4573 | /* |
4574 | * Errors here aren't a big deal, it just means we leave orphan items | |
4575 | * in the tree. They will be cleaned up on the next mount. | |
4576 | */ | |
8082510e | 4577 | if (ret == 0) { |
4289a667 | 4578 | trans->block_rsv = root->orphan_block_rsv; |
4ef31a45 JB |
4579 | btrfs_orphan_del(trans, inode); |
4580 | } else { | |
4581 | btrfs_orphan_del(NULL, inode); | |
8082510e | 4582 | } |
54aa1f4d | 4583 | |
4289a667 | 4584 | trans->block_rsv = &root->fs_info->trans_block_rsv; |
581bb050 LZ |
4585 | if (!(root == root->fs_info->tree_root || |
4586 | root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)) | |
33345d01 | 4587 | btrfs_return_ino(root, btrfs_ino(inode)); |
581bb050 | 4588 | |
54aa1f4d | 4589 | btrfs_end_transaction(trans, root); |
b53d3f5d | 4590 | btrfs_btree_balance_dirty(root); |
39279cc3 | 4591 | no_delete: |
89042e5a | 4592 | btrfs_remove_delayed_node(inode); |
dbd5768f | 4593 | clear_inode(inode); |
8082510e | 4594 | return; |
39279cc3 CM |
4595 | } |
4596 | ||
4597 | /* | |
4598 | * this returns the key found in the dir entry in the location pointer. | |
4599 | * If no dir entries were found, location->objectid is 0. | |
4600 | */ | |
4601 | static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry, | |
4602 | struct btrfs_key *location) | |
4603 | { | |
4604 | const char *name = dentry->d_name.name; | |
4605 | int namelen = dentry->d_name.len; | |
4606 | struct btrfs_dir_item *di; | |
4607 | struct btrfs_path *path; | |
4608 | struct btrfs_root *root = BTRFS_I(dir)->root; | |
0d9f7f3e | 4609 | int ret = 0; |
39279cc3 CM |
4610 | |
4611 | path = btrfs_alloc_path(); | |
d8926bb3 MF |
4612 | if (!path) |
4613 | return -ENOMEM; | |
3954401f | 4614 | |
33345d01 | 4615 | di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), name, |
39279cc3 | 4616 | namelen, 0); |
0d9f7f3e Y |
4617 | if (IS_ERR(di)) |
4618 | ret = PTR_ERR(di); | |
d397712b | 4619 | |
c704005d | 4620 | if (IS_ERR_OR_NULL(di)) |
3954401f | 4621 | goto out_err; |
d397712b | 4622 | |
5f39d397 | 4623 | btrfs_dir_item_key_to_cpu(path->nodes[0], di, location); |
39279cc3 | 4624 | out: |
39279cc3 CM |
4625 | btrfs_free_path(path); |
4626 | return ret; | |
3954401f CM |
4627 | out_err: |
4628 | location->objectid = 0; | |
4629 | goto out; | |
39279cc3 CM |
4630 | } |
4631 | ||
4632 | /* | |
4633 | * when we hit a tree root in a directory, the btrfs part of the inode | |
4634 | * needs to be changed to reflect the root directory of the tree root. This | |
4635 | * is kind of like crossing a mount point. | |
4636 | */ | |
4637 | static int fixup_tree_root_location(struct btrfs_root *root, | |
4df27c4d YZ |
4638 | struct inode *dir, |
4639 | struct dentry *dentry, | |
4640 | struct btrfs_key *location, | |
4641 | struct btrfs_root **sub_root) | |
39279cc3 | 4642 | { |
4df27c4d YZ |
4643 | struct btrfs_path *path; |
4644 | struct btrfs_root *new_root; | |
4645 | struct btrfs_root_ref *ref; | |
4646 | struct extent_buffer *leaf; | |
4647 | int ret; | |
4648 | int err = 0; | |
39279cc3 | 4649 | |
4df27c4d YZ |
4650 | path = btrfs_alloc_path(); |
4651 | if (!path) { | |
4652 | err = -ENOMEM; | |
4653 | goto out; | |
4654 | } | |
39279cc3 | 4655 | |
4df27c4d YZ |
4656 | err = -ENOENT; |
4657 | ret = btrfs_find_root_ref(root->fs_info->tree_root, path, | |
4658 | BTRFS_I(dir)->root->root_key.objectid, | |
4659 | location->objectid); | |
4660 | if (ret) { | |
4661 | if (ret < 0) | |
4662 | err = ret; | |
4663 | goto out; | |
4664 | } | |
39279cc3 | 4665 | |
4df27c4d YZ |
4666 | leaf = path->nodes[0]; |
4667 | ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); | |
33345d01 | 4668 | if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) || |
4df27c4d YZ |
4669 | btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len) |
4670 | goto out; | |
39279cc3 | 4671 | |
4df27c4d YZ |
4672 | ret = memcmp_extent_buffer(leaf, dentry->d_name.name, |
4673 | (unsigned long)(ref + 1), | |
4674 | dentry->d_name.len); | |
4675 | if (ret) | |
4676 | goto out; | |
4677 | ||
b3b4aa74 | 4678 | btrfs_release_path(path); |
4df27c4d YZ |
4679 | |
4680 | new_root = btrfs_read_fs_root_no_name(root->fs_info, location); | |
4681 | if (IS_ERR(new_root)) { | |
4682 | err = PTR_ERR(new_root); | |
4683 | goto out; | |
4684 | } | |
4685 | ||
4df27c4d YZ |
4686 | *sub_root = new_root; |
4687 | location->objectid = btrfs_root_dirid(&new_root->root_item); | |
4688 | location->type = BTRFS_INODE_ITEM_KEY; | |
4689 | location->offset = 0; | |
4690 | err = 0; | |
4691 | out: | |
4692 | btrfs_free_path(path); | |
4693 | return err; | |
39279cc3 CM |
4694 | } |
4695 | ||
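/*
 * track the inode in the per-root red-black tree, keyed by inode number,
 * so btrfs_invalidate_inodes() can find every cached inode of a root.
 */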
5d4f98a2 YZ |
4696 | static void inode_tree_add(struct inode *inode) |
4697 | { | |
4698 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
4699 | struct btrfs_inode *entry; | |
03e860bd NP |
4700 | struct rb_node **p; |
4701 | struct rb_node *parent; | |
cef21937 | 4702 | struct rb_node *new = &BTRFS_I(inode)->rb_node; |
33345d01 | 4703 | u64 ino = btrfs_ino(inode); |
5d4f98a2 | 4704 | |
1d3382cb | 4705 | if (inode_unhashed(inode)) |
76dda93c | 4706 | return; |
e1409cef | 4707 | parent = NULL; |
5d4f98a2 | 4708 | spin_lock(&root->inode_lock); |
e1409cef | 4709 | p = &root->inode_tree.rb_node; |
5d4f98a2 YZ |
4710 | while (*p) { |
4711 | parent = *p; | |
4712 | entry = rb_entry(parent, struct btrfs_inode, rb_node); | |
4713 | ||
33345d01 | 4714 | if (ino < btrfs_ino(&entry->vfs_inode)) |
03e860bd | 4715 | p = &parent->rb_left; |
33345d01 | 4716 | else if (ino > btrfs_ino(&entry->vfs_inode)) |
03e860bd | 4717 | p = &parent->rb_right; |
5d4f98a2 YZ |
4718 | else { |
4719 | WARN_ON(!(entry->vfs_inode.i_state & | |
a4ffdde6 | 4720 | (I_WILL_FREE | I_FREEING))); |
cef21937 | 4721 | rb_replace_node(parent, new, &root->inode_tree); |
03e860bd NP |
4722 | RB_CLEAR_NODE(parent); |
4723 | spin_unlock(&root->inode_lock); | |
cef21937 | 4724 | return; |
5d4f98a2 YZ |
4725 | } |
4726 | } | |
cef21937 FDBM |
4727 | rb_link_node(new, parent, p); |
4728 | rb_insert_color(new, &root->inode_tree); | |
5d4f98a2 YZ |
4729 | spin_unlock(&root->inode_lock); |
4730 | } | |
4731 | ||
4732 | static void inode_tree_del(struct inode *inode) | |
4733 | { | |
4734 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
76dda93c | 4735 | int empty = 0; |
5d4f98a2 | 4736 | |
03e860bd | 4737 | spin_lock(&root->inode_lock); |
5d4f98a2 | 4738 | if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) { |
5d4f98a2 | 4739 | rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree); |
5d4f98a2 | 4740 | RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node); |
76dda93c | 4741 | empty = RB_EMPTY_ROOT(&root->inode_tree); |
5d4f98a2 | 4742 | } |
03e860bd | 4743 | spin_unlock(&root->inode_lock); |
76dda93c | 4744 | |
69e9c6c6 | 4745 | if (empty && btrfs_root_refs(&root->root_item) == 0) { |
76dda93c YZ |
4746 | synchronize_srcu(&root->fs_info->subvol_srcu); |
4747 | spin_lock(&root->inode_lock); | |
4748 | empty = RB_EMPTY_ROOT(&root->inode_tree); | |
4749 | spin_unlock(&root->inode_lock); | |
4750 | if (empty) | |
4751 | btrfs_add_dead_root(root); | |
4752 | } | |
4753 | } | |
4754 | ||
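/*
 * walk the inode tree of a root that has no references left and drop
 * every cached inode so the root itself can be cleaned up.
 */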
143bede5 | 4755 | void btrfs_invalidate_inodes(struct btrfs_root *root) |
76dda93c YZ |
4756 | { |
4757 | struct rb_node *node; | |
4758 | struct rb_node *prev; | |
4759 | struct btrfs_inode *entry; | |
4760 | struct inode *inode; | |
4761 | u64 objectid = 0; | |
4762 | ||
4763 | WARN_ON(btrfs_root_refs(&root->root_item) != 0); | |
4764 | ||
4765 | spin_lock(&root->inode_lock); | |
4766 | again: | |
4767 | node = root->inode_tree.rb_node; | |
4768 | prev = NULL; | |
4769 | while (node) { | |
4770 | prev = node; | |
4771 | entry = rb_entry(node, struct btrfs_inode, rb_node); | |
4772 | ||
33345d01 | 4773 | if (objectid < btrfs_ino(&entry->vfs_inode)) |
76dda93c | 4774 | node = node->rb_left; |
33345d01 | 4775 | else if (objectid > btrfs_ino(&entry->vfs_inode)) |
76dda93c YZ |
4776 | node = node->rb_right; |
4777 | else | |
4778 | break; | |
4779 | } | |
4780 | if (!node) { | |
4781 | while (prev) { | |
4782 | entry = rb_entry(prev, struct btrfs_inode, rb_node); | |
33345d01 | 4783 | if (objectid <= btrfs_ino(&entry->vfs_inode)) { |
76dda93c YZ |
4784 | node = prev; |
4785 | break; | |
4786 | } | |
4787 | prev = rb_next(prev); | |
4788 | } | |
4789 | } | |
4790 | while (node) { | |
4791 | entry = rb_entry(node, struct btrfs_inode, rb_node); | |
33345d01 | 4792 | objectid = btrfs_ino(&entry->vfs_inode) + 1; |
76dda93c YZ |
4793 | inode = igrab(&entry->vfs_inode); |
4794 | if (inode) { | |
4795 | spin_unlock(&root->inode_lock); | |
4796 | if (atomic_read(&inode->i_count) > 1) | |
4797 | d_prune_aliases(inode); | |
4798 | /* | |
45321ac5 | 4799 | * btrfs_drop_inode will have it removed from |
76dda93c YZ |
4800 | * the inode cache when its usage count |
4801 | * hits zero. | |
4802 | */ | |
4803 | iput(inode); | |
4804 | cond_resched(); | |
4805 | spin_lock(&root->inode_lock); | |
4806 | goto again; | |
4807 | } | |
4808 | ||
4809 | if (cond_resched_lock(&root->inode_lock)) | |
4810 | goto again; | |
4811 | ||
4812 | node = rb_next(node); | |
4813 | } | |
4814 | spin_unlock(&root->inode_lock); | |
5d4f98a2 YZ |
4815 | } |
4816 | ||
e02119d5 CM |
4817 | static int btrfs_init_locked_inode(struct inode *inode, void *p) |
4818 | { | |
4819 | struct btrfs_iget_args *args = p; | |
4820 | inode->i_ino = args->ino; | |
e02119d5 | 4821 | BTRFS_I(inode)->root = args->root; |
39279cc3 CM |
4822 | return 0; |
4823 | } | |
4824 | ||
4825 | static int btrfs_find_actor(struct inode *inode, void *opaque) | |
4826 | { | |
4827 | struct btrfs_iget_args *args = opaque; | |
33345d01 | 4828 | return args->ino == btrfs_ino(inode) && |
d397712b | 4829 | args->root == BTRFS_I(inode)->root; |
39279cc3 CM |
4830 | } |
4831 | ||
5d4f98a2 YZ |
4832 | static struct inode *btrfs_iget_locked(struct super_block *s, |
4833 | u64 objectid, | |
4834 | struct btrfs_root *root) | |
39279cc3 CM |
4835 | { |
4836 | struct inode *inode; | |
4837 | struct btrfs_iget_args args; | |
778ba82b FDBM |
4838 | unsigned long hashval = btrfs_inode_hash(objectid, root); |
4839 | ||
39279cc3 CM |
4840 | args.ino = objectid; |
4841 | args.root = root; | |
4842 | ||
778ba82b | 4843 | inode = iget5_locked(s, hashval, btrfs_find_actor, |
39279cc3 CM |
4844 | btrfs_init_locked_inode, |
4845 | (void *)&args); | |
4846 | return inode; | |
4847 | } | |
4848 | ||
1a54ef8c BR |
4849 | /* Get an inode object given its location and corresponding root. |
4850 | * Returns in *is_new if the inode was read from disk | |
4851 | */ | |
4852 | struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, | |
73f73415 | 4853 | struct btrfs_root *root, int *new) |
1a54ef8c BR |
4854 | { |
4855 | struct inode *inode; | |
4856 | ||
4857 | inode = btrfs_iget_locked(s, location->objectid, root); | |
4858 | if (!inode) | |
5d4f98a2 | 4859 | return ERR_PTR(-ENOMEM); |
1a54ef8c BR |
4860 | |
4861 | if (inode->i_state & I_NEW) { | |
4862 | BTRFS_I(inode)->root = root; | |
4863 | memcpy(&BTRFS_I(inode)->location, location, sizeof(*location)); | |
4864 | btrfs_read_locked_inode(inode); | |
1748f843 MF |
4865 | if (!is_bad_inode(inode)) { |
4866 | inode_tree_add(inode); | |
4867 | unlock_new_inode(inode); | |
4868 | if (new) | |
4869 | *new = 1; | |
4870 | } else { | |
e0b6d65b ST |
4871 | unlock_new_inode(inode); |
4872 | iput(inode); | |
4873 | inode = ERR_PTR(-ESTALE); | |
1748f843 MF |
4874 | } |
4875 | } | |
4876 | ||
1a54ef8c BR |
4877 | return inode; |
4878 | } | |
4879 | ||
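/*
 * build a minimal in-memory directory inode for a subvolume reference
 * whose root can't be resolved.  It is flagged BTRFS_INODE_DUMMY and
 * only supports read-only directory operations.
 */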
4df27c4d YZ |
4880 | static struct inode *new_simple_dir(struct super_block *s, |
4881 | struct btrfs_key *key, | |
4882 | struct btrfs_root *root) | |
4883 | { | |
4884 | struct inode *inode = new_inode(s); | |
4885 | ||
4886 | if (!inode) | |
4887 | return ERR_PTR(-ENOMEM); | |
4888 | ||
4df27c4d YZ |
4889 | BTRFS_I(inode)->root = root; |
4890 | memcpy(&BTRFS_I(inode)->location, key, sizeof(*key)); | |
72ac3c0d | 4891 | set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags); |
4df27c4d YZ |
4892 | |
4893 | inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID; | |
848cce0d | 4894 | inode->i_op = &btrfs_dir_ro_inode_operations; |
4df27c4d YZ |
4895 | inode->i_fop = &simple_dir_operations; |
4896 | inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO; | |
4897 | inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; | |
4898 | ||
4899 | return inode; | |
4900 | } | |
4901 | ||
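/*
 * core of ->lookup: resolve the name to a key with btrfs_inode_by_name()
 * and iget the result, crossing into the referenced subvolume root when
 * the dir entry points at a BTRFS_ROOT_ITEM_KEY.
 */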
3de4586c | 4902 | struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) |
39279cc3 | 4903 | { |
d397712b | 4904 | struct inode *inode; |
4df27c4d | 4905 | struct btrfs_root *root = BTRFS_I(dir)->root; |
39279cc3 CM |
4906 | struct btrfs_root *sub_root = root; |
4907 | struct btrfs_key location; | |
76dda93c | 4908 | int index; |
b4aff1f8 | 4909 | int ret = 0; |
39279cc3 CM |
4910 | |
4911 | if (dentry->d_name.len > BTRFS_NAME_LEN) | |
4912 | return ERR_PTR(-ENAMETOOLONG); | |
5f39d397 | 4913 | |
39e3c955 | 4914 | ret = btrfs_inode_by_name(dir, dentry, &location); |
39279cc3 CM |
4915 | if (ret < 0) |
4916 | return ERR_PTR(ret); | |
5f39d397 | 4917 | |
4df27c4d YZ |
4918 | if (location.objectid == 0) |
4919 | return NULL; | |
4920 | ||
4921 | if (location.type == BTRFS_INODE_ITEM_KEY) { | |
73f73415 | 4922 | inode = btrfs_iget(dir->i_sb, &location, root, NULL); |
4df27c4d YZ |
4923 | return inode; |
4924 | } | |
4925 | ||
4926 | BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY); | |
4927 | ||
76dda93c | 4928 | index = srcu_read_lock(&root->fs_info->subvol_srcu); |
4df27c4d YZ |
4929 | ret = fixup_tree_root_location(root, dir, dentry, |
4930 | &location, &sub_root); | |
4931 | if (ret < 0) { | |
4932 | if (ret != -ENOENT) | |
4933 | inode = ERR_PTR(ret); | |
4934 | else | |
4935 | inode = new_simple_dir(dir->i_sb, &location, sub_root); | |
4936 | } else { | |
73f73415 | 4937 | inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL); |
39279cc3 | 4938 | } |
76dda93c YZ |
4939 | srcu_read_unlock(&root->fs_info->subvol_srcu, index); |
4940 | ||
34d19bad | 4941 | if (!IS_ERR(inode) && root != sub_root) { |
c71bf099 YZ |
4942 | down_read(&root->fs_info->cleanup_work_sem); |
4943 | if (!(inode->i_sb->s_flags & MS_RDONLY)) | |
66b4ffd1 | 4944 | ret = btrfs_orphan_cleanup(sub_root); |
c71bf099 | 4945 | up_read(&root->fs_info->cleanup_work_sem); |
01cd3367 JB |
4946 | if (ret) { |
4947 | iput(inode); | |
66b4ffd1 | 4948 | inode = ERR_PTR(ret); |
01cd3367 | 4949 | } |
c71bf099 YZ |
4950 | } |
4951 | ||
3de4586c CM |
4952 | return inode; |
4953 | } | |
4954 | ||
fe15ce44 | 4955 | static int btrfs_dentry_delete(const struct dentry *dentry) |
76dda93c YZ |
4956 | { |
4957 | struct btrfs_root *root; | |
848cce0d | 4958 | struct inode *inode = dentry->d_inode; |
76dda93c | 4959 | |
848cce0d LZ |
4960 | if (!inode && !IS_ROOT(dentry)) |
4961 | inode = dentry->d_parent->d_inode; | |
76dda93c | 4962 | |
848cce0d LZ |
4963 | if (inode) { |
4964 | root = BTRFS_I(inode)->root; | |
efefb143 YZ |
4965 | if (btrfs_root_refs(&root->root_item) == 0) |
4966 | return 1; | |
848cce0d LZ |
4967 | |
4968 | if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) | |
4969 | return 1; | |
efefb143 | 4970 | } |
76dda93c YZ |
4971 | return 0; |
4972 | } | |
4973 | ||
b4aff1f8 JB |
4974 | static void btrfs_dentry_release(struct dentry *dentry) |
4975 | { | |
4976 | if (dentry->d_fsdata) | |
4977 | kfree(dentry->d_fsdata); | |
4978 | } | |
4979 | ||
3de4586c | 4980 | static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry, |
00cd8dd3 | 4981 | unsigned int flags) |
3de4586c | 4982 | { |
a66e7cc6 JB |
4983 | struct dentry *ret; |
4984 | ||
4985 | ret = d_splice_alias(btrfs_lookup_dentry(dir, dentry), dentry); | |
a66e7cc6 | 4986 | return ret; |
39279cc3 CM |
4987 | } |
4988 | ||
16cdcec7 | 4989 | unsigned char btrfs_filetype_table[] = { |
39279cc3 CM |
4990 | DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK |
4991 | }; | |
4992 | ||
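/*
 * readdir: walk the DIR_INDEX items (DIR_ITEM for the tree root), merge
 * in the delayed insertions and deletions that haven't hit the btree yet
 * and feed each entry to dir_emit().
 */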
9cdda8d3 | 4993 | static int btrfs_real_readdir(struct file *file, struct dir_context *ctx) |
39279cc3 | 4994 | { |
9cdda8d3 | 4995 | struct inode *inode = file_inode(file); |
39279cc3 CM |
4996 | struct btrfs_root *root = BTRFS_I(inode)->root; |
4997 | struct btrfs_item *item; | |
4998 | struct btrfs_dir_item *di; | |
4999 | struct btrfs_key key; | |
5f39d397 | 5000 | struct btrfs_key found_key; |
39279cc3 | 5001 | struct btrfs_path *path; |
16cdcec7 MX |
5002 | struct list_head ins_list; |
5003 | struct list_head del_list; | |
39279cc3 | 5004 | int ret; |
5f39d397 | 5005 | struct extent_buffer *leaf; |
39279cc3 | 5006 | int slot; |
39279cc3 CM |
5007 | unsigned char d_type; |
5008 | int over = 0; | |
5009 | u32 di_cur; | |
5010 | u32 di_total; | |
5011 | u32 di_len; | |
5012 | int key_type = BTRFS_DIR_INDEX_KEY; | |
5f39d397 CM |
5013 | char tmp_name[32]; |
5014 | char *name_ptr; | |
5015 | int name_len; | |
9cdda8d3 | 5016 | int is_curr = 0; /* ctx->pos points to the current index? */ |
39279cc3 CM |
5017 | |
5018 | /* FIXME, use a real flag for deciding about the key type */ | |
5019 | if (root->fs_info->tree_root == root) | |
5020 | key_type = BTRFS_DIR_ITEM_KEY; | |
5f39d397 | 5021 | |
9cdda8d3 AV |
5022 | if (!dir_emit_dots(file, ctx)) |
5023 | return 0; | |
5024 | ||
49593bfa | 5025 | path = btrfs_alloc_path(); |
16cdcec7 MX |
5026 | if (!path) |
5027 | return -ENOMEM; | |
ff5714cc | 5028 | |
026fd317 | 5029 | path->reada = 1; |
49593bfa | 5030 | |
16cdcec7 MX |
5031 | if (key_type == BTRFS_DIR_INDEX_KEY) { |
5032 | INIT_LIST_HEAD(&ins_list); | |
5033 | INIT_LIST_HEAD(&del_list); | |
5034 | btrfs_get_delayed_items(inode, &ins_list, &del_list); | |
5035 | } | |
5036 | ||
39279cc3 | 5037 | btrfs_set_key_type(&key, key_type); |
9cdda8d3 | 5038 | key.offset = ctx->pos; |
33345d01 | 5039 | key.objectid = btrfs_ino(inode); |
5f39d397 | 5040 | |
39279cc3 CM |
5041 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); |
5042 | if (ret < 0) | |
5043 | goto err; | |
49593bfa DW |
5044 | |
5045 | while (1) { | |
5f39d397 | 5046 | leaf = path->nodes[0]; |
39279cc3 | 5047 | slot = path->slots[0]; |
b9e03af0 LZ |
5048 | if (slot >= btrfs_header_nritems(leaf)) { |
5049 | ret = btrfs_next_leaf(root, path); | |
5050 | if (ret < 0) | |
5051 | goto err; | |
5052 | else if (ret > 0) | |
5053 | break; | |
5054 | continue; | |
39279cc3 | 5055 | } |
3de4586c | 5056 | |
dd3cc16b | 5057 | item = btrfs_item_nr(slot); |
5f39d397 CM |
5058 | btrfs_item_key_to_cpu(leaf, &found_key, slot); |
5059 | ||
5060 | if (found_key.objectid != key.objectid) | |
39279cc3 | 5061 | break; |
5f39d397 | 5062 | if (btrfs_key_type(&found_key) != key_type) |
39279cc3 | 5063 | break; |
9cdda8d3 | 5064 | if (found_key.offset < ctx->pos) |
b9e03af0 | 5065 | goto next; |
16cdcec7 MX |
5066 | if (key_type == BTRFS_DIR_INDEX_KEY && |
5067 | btrfs_should_delete_dir_index(&del_list, | |
5068 | found_key.offset)) | |
5069 | goto next; | |
5f39d397 | 5070 | |
9cdda8d3 | 5071 | ctx->pos = found_key.offset; |
16cdcec7 | 5072 | is_curr = 1; |
49593bfa | 5073 | |
39279cc3 CM |
5074 | di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item); |
5075 | di_cur = 0; | |
5f39d397 | 5076 | di_total = btrfs_item_size(leaf, item); |
49593bfa DW |
5077 | |
5078 | while (di_cur < di_total) { | |
5f39d397 CM |
5079 | struct btrfs_key location; |
5080 | ||
22a94d44 JB |
5081 | if (verify_dir_item(root, leaf, di)) |
5082 | break; | |
5083 | ||
5f39d397 | 5084 | name_len = btrfs_dir_name_len(leaf, di); |
49593bfa | 5085 | if (name_len <= sizeof(tmp_name)) { |
5f39d397 CM |
5086 | name_ptr = tmp_name; |
5087 | } else { | |
5088 | name_ptr = kmalloc(name_len, GFP_NOFS); | |
49593bfa DW |
5089 | if (!name_ptr) { |
5090 | ret = -ENOMEM; | |
5091 | goto err; | |
5092 | } | |
5f39d397 CM |
5093 | } |
5094 | read_extent_buffer(leaf, name_ptr, | |
5095 | (unsigned long)(di + 1), name_len); | |
5096 | ||
5097 | d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)]; | |
5098 | btrfs_dir_item_key_to_cpu(leaf, di, &location); | |
3de4586c | 5099 | |
fede766f | 5100 | |
3de4586c | 5101 | /* is this a reference to our own snapshot? If so |
8c9c2bf7 AJ |
5102 | * skip it. |
5103 | * | |
5104 | * In contrast to old kernels, we insert the snapshot's | |
5105 | * dir item and dir index after it has been created, so | |
5106 | * we won't find a reference to our own snapshot. We | |
5107 | * still keep the following code for backward | |
5108 | * compatibility. | |
3de4586c CM |
5109 | */ |
5110 | if (location.type == BTRFS_ROOT_ITEM_KEY && | |
5111 | location.objectid == root->root_key.objectid) { | |
5112 | over = 0; | |
5113 | goto skip; | |
5114 | } | |
9cdda8d3 AV |
5115 | over = !dir_emit(ctx, name_ptr, name_len, |
5116 | location.objectid, d_type); | |
5f39d397 | 5117 | |
3de4586c | 5118 | skip: |
5f39d397 CM |
5119 | if (name_ptr != tmp_name) |
5120 | kfree(name_ptr); | |
5121 | ||
39279cc3 CM |
5122 | if (over) |
5123 | goto nopos; | |
5103e947 | 5124 | di_len = btrfs_dir_name_len(leaf, di) + |
49593bfa | 5125 | btrfs_dir_data_len(leaf, di) + sizeof(*di); |
39279cc3 CM |
5126 | di_cur += di_len; |
5127 | di = (struct btrfs_dir_item *)((char *)di + di_len); | |
5128 | } | |
b9e03af0 LZ |
5129 | next: |
5130 | path->slots[0]++; | |
39279cc3 | 5131 | } |
49593bfa | 5132 | |
16cdcec7 MX |
5133 | if (key_type == BTRFS_DIR_INDEX_KEY) { |
5134 | if (is_curr) | |
9cdda8d3 AV |
5135 | ctx->pos++; |
5136 | ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list); | |
16cdcec7 MX |
5137 | if (ret) |
5138 | goto nopos; | |
5139 | } | |
5140 | ||
49593bfa | 5141 | /* Reached end of directory/root. Bump pos past the last item. */ |
db62efbb ZB |
5142 | ctx->pos++; |
5143 | ||
5144 | /* | |
5145 | * Stop new entries from being returned after we return the last | |
5146 | * entry. | |
5147 | * | |
5148 | * New directory entries are assigned a strictly increasing | |
5149 | * offset. This means that new entries created during readdir | |
5150 | * are *guaranteed* to be seen in the future by that readdir. | |
5151 | * This has broken buggy programs which operate on names as | |
5152 | * they're returned by readdir. Until we re-use freed offsets | |
5153 | * we have this hack to stop new entries from being returned | |
5154 | * under the assumption that they'll never reach this huge | |
5155 | * offset. | |
5156 | * | |
5157 | * This is being careful not to overflow 32bit loff_t unless the | |
5158 | * last entry requires it because doing so has broken 32bit apps | |
5159 | * in the past. | |
5160 | */ | |
5161 | if (key_type == BTRFS_DIR_INDEX_KEY) { | |
5162 | if (ctx->pos >= INT_MAX) | |
5163 | ctx->pos = LLONG_MAX; | |
5164 | else | |
5165 | ctx->pos = INT_MAX; | |
5166 | } | |
39279cc3 CM |
5167 | nopos: |
5168 | ret = 0; | |
5169 | err: | |
16cdcec7 MX |
5170 | if (key_type == BTRFS_DIR_INDEX_KEY) |
5171 | btrfs_put_delayed_items(&ins_list, &del_list); | |
39279cc3 | 5172 | btrfs_free_path(path); |
39279cc3 CM |
5173 | return ret; |
5174 | } | |
5175 | ||
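/*
 * ->write_inode: the inode item is kept up to date by our transactions,
 * so for WB_SYNC_ALL we just join and commit the running transaction to
 * get it on disk.  Background writeback has nothing to do here.
 */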
a9185b41 | 5176 | int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc) |
39279cc3 CM |
5177 | { |
5178 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
5179 | struct btrfs_trans_handle *trans; | |
5180 | int ret = 0; | |
0af3d00b | 5181 | bool nolock = false; |
39279cc3 | 5182 | |
72ac3c0d | 5183 | if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags)) |
4ca8b41e CM |
5184 | return 0; |
5185 | ||
83eea1f1 | 5186 | if (btrfs_fs_closing(root->fs_info) && btrfs_is_free_space_inode(inode)) |
82d5902d | 5187 | nolock = true; |
0af3d00b | 5188 | |
a9185b41 | 5189 | if (wbc->sync_mode == WB_SYNC_ALL) { |
0af3d00b | 5190 | if (nolock) |
7a7eaa40 | 5191 | trans = btrfs_join_transaction_nolock(root); |
0af3d00b | 5192 | else |
7a7eaa40 | 5193 | trans = btrfs_join_transaction(root); |
3612b495 TI |
5194 | if (IS_ERR(trans)) |
5195 | return PTR_ERR(trans); | |
a698d075 | 5196 | ret = btrfs_commit_transaction(trans, root); |
39279cc3 CM |
5197 | } |
5198 | return ret; | |
5199 | } | |
5200 | ||
5201 | /* | |
54aa1f4d | 5202 | * This is somewhat expensive, updating the tree every time the |
39279cc3 CM |
5203 | * inode changes. But, it is most likely to find the inode in cache. |
5204 | * FIXME, needs more benchmarking...there are no reasons other than performance | |
5205 | * to keep or drop this code. | |
5206 | */ | |
48a3b636 | 5207 | static int btrfs_dirty_inode(struct inode *inode) |
39279cc3 CM |
5208 | { |
5209 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
5210 | struct btrfs_trans_handle *trans; | |
8929ecfa YZ |
5211 | int ret; |
5212 | ||
72ac3c0d | 5213 | if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags)) |
22c44fe6 | 5214 | return 0; |
39279cc3 | 5215 | |
7a7eaa40 | 5216 | trans = btrfs_join_transaction(root); |
22c44fe6 JB |
5217 | if (IS_ERR(trans)) |
5218 | return PTR_ERR(trans); | |
8929ecfa YZ |
5219 | |
5220 | ret = btrfs_update_inode(trans, root, inode); | |
94b60442 CM |
5221 | if (ret && ret == -ENOSPC) { |
5222 | /* whoops, lets try again with the full transaction */ | |
5223 | btrfs_end_transaction(trans, root); | |
5224 | trans = btrfs_start_transaction(root, 1); | |
22c44fe6 JB |
5225 | if (IS_ERR(trans)) |
5226 | return PTR_ERR(trans); | |
8929ecfa | 5227 | |
94b60442 | 5228 | ret = btrfs_update_inode(trans, root, inode); |
94b60442 | 5229 | } |
39279cc3 | 5230 | btrfs_end_transaction(trans, root); |
16cdcec7 MX |
5231 | if (BTRFS_I(inode)->delayed_node) |
5232 | btrfs_balance_delayed_items(root); | |
22c44fe6 JB |
5233 | |
5234 | return ret; | |
5235 | } | |
5236 | ||
5237 | /* | |
5238 | * This is a copy of file_update_time. We need this so we can return an error on | |
5239 | * ENOSPC for updating the inode in the case of file write and mmap writes. | |
5240 | */ | |
e41f941a JB |
5241 | static int btrfs_update_time(struct inode *inode, struct timespec *now, |
5242 | int flags) | |
22c44fe6 | 5243 | { |
2bc55652 AB |
5244 | struct btrfs_root *root = BTRFS_I(inode)->root; |
5245 | ||
5246 | if (btrfs_root_readonly(root)) | |
5247 | return -EROFS; | |
5248 | ||
e41f941a | 5249 | if (flags & S_VERSION) |
22c44fe6 | 5250 | inode_inc_iversion(inode); |
e41f941a JB |
5251 | if (flags & S_CTIME) |
5252 | inode->i_ctime = *now; | |
5253 | if (flags & S_MTIME) | |
5254 | inode->i_mtime = *now; | |
5255 | if (flags & S_ATIME) | |
5256 | inode->i_atime = *now; | |
5257 | return btrfs_dirty_inode(inode); | |
39279cc3 CM |
5258 | } |
5259 | ||
d352ac68 CM |
5260 | /* |
5261 | * find the highest existing sequence number in a directory | |
5262 | * and then set the in-memory index_cnt variable to reflect | |
5263 | * free sequence numbers | |
5264 | */ | |
aec7477b JB |
5265 | static int btrfs_set_inode_index_count(struct inode *inode) |
5266 | { | |
5267 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
5268 | struct btrfs_key key, found_key; | |
5269 | struct btrfs_path *path; | |
5270 | struct extent_buffer *leaf; | |
5271 | int ret; | |
5272 | ||
33345d01 | 5273 | key.objectid = btrfs_ino(inode); |
aec7477b JB |
5274 | btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY); |
5275 | key.offset = (u64)-1; | |
5276 | ||
5277 | path = btrfs_alloc_path(); | |
5278 | if (!path) | |
5279 | return -ENOMEM; | |
5280 | ||
5281 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | |
5282 | if (ret < 0) | |
5283 | goto out; | |
5284 | /* FIXME: we should be able to handle this */ | |
5285 | if (ret == 0) | |
5286 | goto out; | |
5287 | ret = 0; | |
5288 | ||
5289 | /* | |
5290 | * MAGIC NUMBER EXPLANATION: | |
5291 | * we search a directory based on f_pos, and '.' and '..' take the | |
5292 | * f_pos values 0 and 1 respectively, so everybody else has to | |
5293 | * start at 2 | |
5294 | */ | |
5295 | if (path->slots[0] == 0) { | |
5296 | BTRFS_I(inode)->index_cnt = 2; | |
5297 | goto out; | |
5298 | } | |
5299 | ||
5300 | path->slots[0]--; | |
5301 | ||
5302 | leaf = path->nodes[0]; | |
5303 | btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); | |
5304 | ||
33345d01 | 5305 | if (found_key.objectid != btrfs_ino(inode) || |
aec7477b JB |
5306 | btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) { |
5307 | BTRFS_I(inode)->index_cnt = 2; | |
5308 | goto out; | |
5309 | } | |
5310 | ||
5311 | BTRFS_I(inode)->index_cnt = found_key.offset + 1; | |
5312 | out: | |
5313 | btrfs_free_path(path); | |
5314 | return ret; | |
5315 | } | |
5316 | ||
d352ac68 CM |
5317 | /* |
5318 | * helper to find a free sequence number in a given directory. The current | |
5319 | * code is very simple; later versions will do smarter things in the btree | |
5320 | */ | |
3de4586c | 5321 | int btrfs_set_inode_index(struct inode *dir, u64 *index) |
aec7477b JB |
5322 | { |
5323 | int ret = 0; | |
5324 | ||
5325 | if (BTRFS_I(dir)->index_cnt == (u64)-1) { | |
16cdcec7 MX |
5326 | ret = btrfs_inode_delayed_dir_index_count(dir); |
5327 | if (ret) { | |
5328 | ret = btrfs_set_inode_index_count(dir); | |
5329 | if (ret) | |
5330 | return ret; | |
5331 | } | |
aec7477b JB |
5332 | } |
5333 | ||
00e4e6b3 | 5334 | *index = BTRFS_I(dir)->index_cnt; |
aec7477b JB |
5335 | BTRFS_I(dir)->index_cnt++; |
5336 | ||
5337 | return ret; | |
5338 | } | |
5339 | ||
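/*
 * allocate a new in-memory inode and insert both its inode item and the
 * first inode ref in a single btrfs_insert_empty_items() call, then add
 * it to the inode hash and the per-root inode tree.
 */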
39279cc3 CM |
5340 | static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, |
5341 | struct btrfs_root *root, | |
aec7477b | 5342 | struct inode *dir, |
9c58309d | 5343 | const char *name, int name_len, |
175a4eb7 AV |
5344 | u64 ref_objectid, u64 objectid, |
5345 | umode_t mode, u64 *index) | |
39279cc3 CM |
5346 | { |
5347 | struct inode *inode; | |
5f39d397 | 5348 | struct btrfs_inode_item *inode_item; |
39279cc3 | 5349 | struct btrfs_key *location; |
5f39d397 | 5350 | struct btrfs_path *path; |
9c58309d CM |
5351 | struct btrfs_inode_ref *ref; |
5352 | struct btrfs_key key[2]; | |
5353 | u32 sizes[2]; | |
5354 | unsigned long ptr; | |
39279cc3 CM |
5355 | int ret; |
5356 | int owner; | |
5357 | ||
5f39d397 | 5358 | path = btrfs_alloc_path(); |
d8926bb3 MF |
5359 | if (!path) |
5360 | return ERR_PTR(-ENOMEM); | |
5f39d397 | 5361 | |
39279cc3 | 5362 | inode = new_inode(root->fs_info->sb); |
8fb27640 YS |
5363 | if (!inode) { |
5364 | btrfs_free_path(path); | |
39279cc3 | 5365 | return ERR_PTR(-ENOMEM); |
8fb27640 | 5366 | } |
39279cc3 | 5367 | |
581bb050 LZ |
5368 | /* |
5369 | * we have to initialize this early, so we can reclaim the inode | |
5370 | * number if we fail afterwards in this function. | |
5371 | */ | |
5372 | inode->i_ino = objectid; | |
5373 | ||
aec7477b | 5374 | if (dir) { |
1abe9b8a | 5375 | trace_btrfs_inode_request(dir); |
5376 | ||
3de4586c | 5377 | ret = btrfs_set_inode_index(dir, index); |
09771430 | 5378 | if (ret) { |
8fb27640 | 5379 | btrfs_free_path(path); |
09771430 | 5380 | iput(inode); |
aec7477b | 5381 | return ERR_PTR(ret); |
09771430 | 5382 | } |
aec7477b JB |
5383 | } |
5384 | /* | |
5385 | * index_cnt is ignored for everything but a dir, | |
5386 | * btrfs_set_inode_index_count has an explanation for the magic | |
5387 | * number | |
5388 | */ | |
5389 | BTRFS_I(inode)->index_cnt = 2; | |
39279cc3 | 5390 | BTRFS_I(inode)->root = root; |
e02119d5 | 5391 | BTRFS_I(inode)->generation = trans->transid; |
76195853 | 5392 | inode->i_generation = BTRFS_I(inode)->generation; |
b888db2b | 5393 | |
5dc562c5 JB |
5394 | /* |
5395 | * We could have gotten an inode number from somebody who was fsynced | |
5396 | * and then removed in this same transaction, so let's just set full | |
5397 | * sync since it will be a full sync anyway and this will blow away the | |
5398 | * old info in the log. | |
5399 | */ | |
5400 | set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags); | |
5401 | ||
569254b0 | 5402 | if (S_ISDIR(mode)) |
39279cc3 CM |
5403 | owner = 0; |
5404 | else | |
5405 | owner = 1; | |
9c58309d CM |
5406 | |
5407 | key[0].objectid = objectid; | |
5408 | btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY); | |
5409 | key[0].offset = 0; | |
5410 | ||
f186373f MF |
5411 | /* |
5412 | * Start new inodes with an inode_ref. This is slightly more | |
5413 | * efficient for small numbers of hard links since they will | |
5414 | * be packed into one item. Extended refs will kick in if we | |
5415 | * add more hard links than can fit in the ref item. | |
5416 | */ | |
9c58309d CM |
5417 | key[1].objectid = objectid; |
5418 | btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY); | |
5419 | key[1].offset = ref_objectid; | |
5420 | ||
5421 | sizes[0] = sizeof(struct btrfs_inode_item); | |
5422 | sizes[1] = name_len + sizeof(*ref); | |
5423 | ||
b9473439 | 5424 | path->leave_spinning = 1; |
9c58309d CM |
5425 | ret = btrfs_insert_empty_items(trans, root, path, key, sizes, 2); |
5426 | if (ret != 0) | |
5f39d397 CM |
5427 | goto fail; |
5428 | ||
ecc11fab | 5429 | inode_init_owner(inode, dir, mode); |
a76a3cd4 | 5430 | inode_set_bytes(inode, 0); |
39279cc3 | 5431 | inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; |
5f39d397 CM |
5432 | inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0], |
5433 | struct btrfs_inode_item); | |
293f7e07 LZ |
5434 | memset_extent_buffer(path->nodes[0], 0, (unsigned long)inode_item, |
5435 | sizeof(*inode_item)); | |
e02119d5 | 5436 | fill_inode_item(trans, path->nodes[0], inode_item, inode); |
9c58309d CM |
5437 | |
5438 | ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1, | |
5439 | struct btrfs_inode_ref); | |
5440 | btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len); | |
00e4e6b3 | 5441 | btrfs_set_inode_ref_index(path->nodes[0], ref, *index); |
9c58309d CM |
5442 | ptr = (unsigned long)(ref + 1); |
5443 | write_extent_buffer(path->nodes[0], name, ptr, name_len); | |
5444 | ||
5f39d397 CM |
5445 | btrfs_mark_buffer_dirty(path->nodes[0]); |
5446 | btrfs_free_path(path); | |
5447 | ||
39279cc3 CM |
5448 | location = &BTRFS_I(inode)->location; |
5449 | location->objectid = objectid; | |
39279cc3 CM |
5450 | location->offset = 0; |
5451 | btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY); | |
5452 | ||
6cbff00f CH |
5453 | btrfs_inherit_iflags(inode, dir); |
5454 | ||
569254b0 | 5455 | if (S_ISREG(mode)) { |
94272164 CM |
5456 | if (btrfs_test_opt(root, NODATASUM)) |
5457 | BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM; | |
213490b3 | 5458 | if (btrfs_test_opt(root, NODATACOW)) |
f2bdf9a8 JB |
5459 | BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW | |
5460 | BTRFS_INODE_NODATASUM; | |
94272164 CM |
5461 | } |
5462 | ||
778ba82b | 5463 | btrfs_insert_inode_hash(inode); |
5d4f98a2 | 5464 | inode_tree_add(inode); |
1abe9b8a | 5465 | |
5466 | trace_btrfs_inode_new(inode); | |
1973f0fa | 5467 | btrfs_set_inode_last_trans(trans, inode); |
1abe9b8a | 5468 | |
8ea05e3a AB |
5469 | btrfs_update_root_times(trans, root); |
5470 | ||
39279cc3 | 5471 | return inode; |
5f39d397 | 5472 | fail: |
aec7477b JB |
5473 | if (dir) |
5474 | BTRFS_I(dir)->index_cnt--; | |
5f39d397 | 5475 | btrfs_free_path(path); |
09771430 | 5476 | iput(inode); |
5f39d397 | 5477 | return ERR_PTR(ret); |
39279cc3 CM |
5478 | } |
5479 | ||
5480 | static inline u8 btrfs_inode_type(struct inode *inode) | |
5481 | { | |
5482 | return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT]; | |
5483 | } | |
5484 | ||
d352ac68 CM |
5485 | /* |
5486 | * utility function to add 'inode' into 'parent_inode' with | |
5487 | * a given name and a given sequence number. | |
5488 | * if 'add_backref' is true, also insert a backref from the | |
5489 | * inode to the parent directory. | |
5490 | */ | |
e02119d5 CM |
5491 | int btrfs_add_link(struct btrfs_trans_handle *trans, |
5492 | struct inode *parent_inode, struct inode *inode, | |
5493 | const char *name, int name_len, int add_backref, u64 index) | |
39279cc3 | 5494 | { |
4df27c4d | 5495 | int ret = 0; |
39279cc3 | 5496 | struct btrfs_key key; |
e02119d5 | 5497 | struct btrfs_root *root = BTRFS_I(parent_inode)->root; |
33345d01 LZ |
5498 | u64 ino = btrfs_ino(inode); |
5499 | u64 parent_ino = btrfs_ino(parent_inode); | |
5f39d397 | 5500 | |
33345d01 | 5501 | if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { |
4df27c4d YZ |
5502 | memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key)); |
5503 | } else { | |
33345d01 | 5504 | key.objectid = ino; |
4df27c4d YZ |
5505 | btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY); |
5506 | key.offset = 0; | |
5507 | } | |
5508 | ||
33345d01 | 5509 | if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { |
4df27c4d YZ |
5510 | ret = btrfs_add_root_ref(trans, root->fs_info->tree_root, |
5511 | key.objectid, root->root_key.objectid, | |
33345d01 | 5512 | parent_ino, index, name, name_len); |
4df27c4d | 5513 | } else if (add_backref) { |
33345d01 LZ |
5514 | ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino, |
5515 | parent_ino, index); | |
4df27c4d | 5516 | } |
39279cc3 | 5517 | |
79787eaa JM |
5518 | /* Nothing to clean up yet */ |
5519 | if (ret) | |
5520 | return ret; | |
4df27c4d | 5521 | |
79787eaa JM |
5522 | ret = btrfs_insert_dir_item(trans, root, name, name_len, |
5523 | parent_inode, &key, | |
5524 | btrfs_inode_type(inode), index); | |
9c52057c | 5525 | if (ret == -EEXIST || ret == -EOVERFLOW) |
79787eaa JM |
5526 | goto fail_dir_item; |
5527 | else if (ret) { | |
5528 | btrfs_abort_transaction(trans, root, ret); | |
5529 | return ret; | |
39279cc3 | 5530 | } |
79787eaa JM |
5531 | |
5532 | btrfs_i_size_write(parent_inode, parent_inode->i_size + | |
5533 | name_len * 2); | |
0c4d2d95 | 5534 | inode_inc_iversion(parent_inode); |
79787eaa JM |
5535 | parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME; |
5536 | ret = btrfs_update_inode(trans, root, parent_inode); | |
5537 | if (ret) | |
5538 | btrfs_abort_transaction(trans, root, ret); | |
39279cc3 | 5539 | return ret; |
fe66a05a CM |
5540 | |
5541 | fail_dir_item: | |
5542 | if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { | |
5543 | u64 local_index; | |
5544 | int err; | |
5545 | err = btrfs_del_root_ref(trans, root->fs_info->tree_root, | |
5546 | key.objectid, root->root_key.objectid, | |
5547 | parent_ino, &local_index, name, name_len); | |
5548 | ||
5549 | } else if (add_backref) { | |
5550 | u64 local_index; | |
5551 | int err; | |
5552 | ||
5553 | err = btrfs_del_inode_ref(trans, root, name, name_len, | |
5554 | ino, parent_ino, &local_index); | |
5555 | } | |
5556 | return ret; | |
39279cc3 CM |
5557 | } |
5558 | ||
5559 | static int btrfs_add_nondir(struct btrfs_trans_handle *trans, | |
a1b075d2 JB |
5560 | struct inode *dir, struct dentry *dentry, |
5561 | struct inode *inode, int backref, u64 index) | |
39279cc3 | 5562 | { |
a1b075d2 JB |
5563 | int err = btrfs_add_link(trans, dir, inode, |
5564 | dentry->d_name.name, dentry->d_name.len, | |
5565 | backref, index); | |
39279cc3 CM |
5566 | if (err > 0) |
5567 | err = -EEXIST; | |
5568 | return err; | |
5569 | } | |
5570 | ||
618e21d5 | 5571 | static int btrfs_mknod(struct inode *dir, struct dentry *dentry, |
1a67aafb | 5572 | umode_t mode, dev_t rdev) |
618e21d5 JB |
5573 | { |
5574 | struct btrfs_trans_handle *trans; | |
5575 | struct btrfs_root *root = BTRFS_I(dir)->root; | |
1832a6d5 | 5576 | struct inode *inode = NULL; |
618e21d5 JB |
5577 | int err; |
5578 | int drop_inode = 0; | |
5579 | u64 objectid; | |
00e4e6b3 | 5580 | u64 index = 0; |
618e21d5 JB |
5581 | |
5582 | if (!new_valid_dev(rdev)) | |
5583 | return -EINVAL; | |
5584 | ||
9ed74f2d JB |
5585 | /* |
5586 | * 2 for inode item and ref | |
5587 | * 2 for dir items | |
5588 | * 1 for xattr if selinux is on | |
5589 | */ | |
a22285a6 YZ |
5590 | trans = btrfs_start_transaction(root, 5); |
5591 | if (IS_ERR(trans)) | |
5592 | return PTR_ERR(trans); | |
1832a6d5 | 5593 | |
581bb050 LZ |
5594 | err = btrfs_find_free_ino(root, &objectid); |
5595 | if (err) | |
5596 | goto out_unlock; | |
5597 | ||
aec7477b | 5598 | inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, |
33345d01 | 5599 | dentry->d_name.len, btrfs_ino(dir), objectid, |
d82a6f1d | 5600 | mode, &index); |
7cf96da3 TI |
5601 | if (IS_ERR(inode)) { |
5602 | err = PTR_ERR(inode); | |
618e21d5 | 5603 | goto out_unlock; |
7cf96da3 | 5604 | } |
618e21d5 | 5605 | |
2a7dba39 | 5606 | err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); |
33268eaf JB |
5607 | if (err) { |
5608 | drop_inode = 1; | |
5609 | goto out_unlock; | |
5610 | } | |
5611 | ||
ad19db71 CS |
5612 | /* |
5613 | * If the active LSM wants to access the inode during | |
5614 | * d_instantiate it needs these. Smack checks to see | |
5615 | * if the filesystem supports xattrs by looking at the | |
5616 | * ops vector. | |
5617 | */ | |
5618 | ||
5619 | inode->i_op = &btrfs_special_inode_operations; | |
a1b075d2 | 5620 | err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); |
618e21d5 JB |
5621 | if (err) |
5622 | drop_inode = 1; | |
5623 | else { | |
618e21d5 | 5624 | init_special_inode(inode, inode->i_mode, rdev); |
1b4ab1bb | 5625 | btrfs_update_inode(trans, root, inode); |
08c422c2 | 5626 | d_instantiate(dentry, inode); |
618e21d5 | 5627 | } |
618e21d5 | 5628 | out_unlock: |
7ad85bb7 | 5629 | btrfs_end_transaction(trans, root); |
b53d3f5d | 5630 | btrfs_btree_balance_dirty(root); |
618e21d5 JB |
5631 | if (drop_inode) { |
5632 | inode_dec_link_count(inode); | |
5633 | iput(inode); | |
5634 | } | |
618e21d5 JB |
5635 | return err; |
5636 | } | |
5637 | ||
39279cc3 | 5638 | static int btrfs_create(struct inode *dir, struct dentry *dentry, |
ebfc3b49 | 5639 | umode_t mode, bool excl) |
39279cc3 CM |
5640 | { |
5641 | struct btrfs_trans_handle *trans; | |
5642 | struct btrfs_root *root = BTRFS_I(dir)->root; | |
1832a6d5 | 5643 | struct inode *inode = NULL; |
43baa579 | 5644 | int drop_inode_on_err = 0; |
a22285a6 | 5645 | int err; |
39279cc3 | 5646 | u64 objectid; |
00e4e6b3 | 5647 | u64 index = 0; |
39279cc3 | 5648 | |
9ed74f2d JB |
5649 | /* |
5650 | * 2 for inode item and ref | |
5651 | * 2 for dir items | |
5652 | * 1 for xattr if selinux is on | |
5653 | */ | |
a22285a6 YZ |
5654 | trans = btrfs_start_transaction(root, 5); |
5655 | if (IS_ERR(trans)) | |
5656 | return PTR_ERR(trans); | |
9ed74f2d | 5657 | |
581bb050 LZ |
5658 | err = btrfs_find_free_ino(root, &objectid); |
5659 | if (err) | |
5660 | goto out_unlock; | |
5661 | ||
aec7477b | 5662 | inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, |
33345d01 | 5663 | dentry->d_name.len, btrfs_ino(dir), objectid, |
d82a6f1d | 5664 | mode, &index); |
7cf96da3 TI |
5665 | if (IS_ERR(inode)) { |
5666 | err = PTR_ERR(inode); | |
39279cc3 | 5667 | goto out_unlock; |
7cf96da3 | 5668 | } |
43baa579 | 5669 | drop_inode_on_err = 1; |
39279cc3 | 5670 | |
2a7dba39 | 5671 | err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); |
43baa579 | 5672 | if (err) |
33268eaf | 5673 | goto out_unlock; |
33268eaf | 5674 | |
9185aa58 FB |
5675 | err = btrfs_update_inode(trans, root, inode); |
5676 | if (err) | |
5677 | goto out_unlock; | |
5678 | ||
ad19db71 CS |
5679 | /* |
5680 | * If the active LSM wants to access the inode during | |
5681 | * d_instantiate it needs these. Smack checks to see | |
5682 | * if the filesystem supports xattrs by looking at the | |
5683 | * ops vector. | |
5684 | */ | |
5685 | inode->i_fop = &btrfs_file_operations; | |
5686 | inode->i_op = &btrfs_file_inode_operations; | |
5687 | ||
a1b075d2 | 5688 | err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); |
39279cc3 | 5689 | if (err) |
43baa579 FB |
5690 | goto out_unlock; |
5691 | ||
5692 | inode->i_mapping->a_ops = &btrfs_aops; | |
5693 | inode->i_mapping->backing_dev_info = &root->fs_info->bdi; | |
5694 | BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; | |
5695 | d_instantiate(dentry, inode); | |
5696 | ||
39279cc3 | 5697 | out_unlock: |
7ad85bb7 | 5698 | btrfs_end_transaction(trans, root); |
43baa579 | 5699 | if (err && drop_inode_on_err) { |
39279cc3 CM |
5700 | inode_dec_link_count(inode); |
5701 | iput(inode); | |
5702 | } | |
b53d3f5d | 5703 | btrfs_btree_balance_dirty(root); |
39279cc3 CM |
5704 | return err; |
5705 | } | |
5706 | ||
5707 | static int btrfs_link(struct dentry *old_dentry, struct inode *dir, | |
5708 | struct dentry *dentry) | |
5709 | { | |
5710 | struct btrfs_trans_handle *trans; | |
5711 | struct btrfs_root *root = BTRFS_I(dir)->root; | |
5712 | struct inode *inode = old_dentry->d_inode; | |
00e4e6b3 | 5713 | u64 index; |
39279cc3 CM |
5714 | int err; |
5715 | int drop_inode = 0; | |
5716 | ||
4a8be425 TH |
5717 | /* do not allow sys_link's with other subvols of the same device */ |
5718 | if (root->objectid != BTRFS_I(inode)->root->objectid) | |
3ab3564f | 5719 | return -EXDEV; |
4a8be425 | 5720 | |
f186373f | 5721 | if (inode->i_nlink >= BTRFS_LINK_MAX) |
c055e99e | 5722 | return -EMLINK; |
4a8be425 | 5723 | |
3de4586c | 5724 | err = btrfs_set_inode_index(dir, &index); |
aec7477b JB |
5725 | if (err) |
5726 | goto fail; | |
5727 | ||
a22285a6 | 5728 | /* |
7e6b6465 | 5729 | * 2 items for inode and inode ref |
a22285a6 | 5730 | * 2 items for dir items |
7e6b6465 | 5731 | * 1 item for parent inode |
a22285a6 | 5732 | */ |
7e6b6465 | 5733 | trans = btrfs_start_transaction(root, 5); |
a22285a6 YZ |
5734 | if (IS_ERR(trans)) { |
5735 | err = PTR_ERR(trans); | |
5736 | goto fail; | |
5737 | } | |
5f39d397 | 5738 | |
8b558c5f | 5739 | inc_nlink(inode); |
0c4d2d95 | 5740 | inode_inc_iversion(inode); |
3153495d | 5741 | inode->i_ctime = CURRENT_TIME; |
7de9c6ee | 5742 | ihold(inode); |
e9976151 | 5743 | set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags); |
aec7477b | 5744 | |
a1b075d2 | 5745 | err = btrfs_add_nondir(trans, dir, dentry, inode, 1, index); |
5f39d397 | 5746 | |
a5719521 | 5747 | if (err) { |
54aa1f4d | 5748 | drop_inode = 1; |
a5719521 | 5749 | } else { |
10d9f309 | 5750 | struct dentry *parent = dentry->d_parent; |
a5719521 | 5751 | err = btrfs_update_inode(trans, root, inode); |
79787eaa JM |
5752 | if (err) |
5753 | goto fail; | |
08c422c2 | 5754 | d_instantiate(dentry, inode); |
6a912213 | 5755 | btrfs_log_new_name(trans, inode, NULL, parent); |
a5719521 | 5756 | } |
39279cc3 | 5757 | |
7ad85bb7 | 5758 | btrfs_end_transaction(trans, root); |
1832a6d5 | 5759 | fail: |
39279cc3 CM |
5760 | if (drop_inode) { |
5761 | inode_dec_link_count(inode); | |
5762 | iput(inode); | |
5763 | } | |
b53d3f5d | 5764 | btrfs_btree_balance_dirty(root); |
39279cc3 CM |
5765 | return err; |
5766 | } | |
5767 | ||
18bb1db3 | 5768 | static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) |
39279cc3 | 5769 | { |
b9d86667 | 5770 | struct inode *inode = NULL; |
39279cc3 CM |
5771 | struct btrfs_trans_handle *trans; |
5772 | struct btrfs_root *root = BTRFS_I(dir)->root; | |
5773 | int err = 0; | |
5774 | int drop_on_err = 0; | |
b9d86667 | 5775 | u64 objectid = 0; |
00e4e6b3 | 5776 | u64 index = 0; |
39279cc3 | 5777 | |
9ed74f2d JB |
5778 | /* |
5779 | * 2 items for inode and ref | |
5780 | * 2 items for dir items | |
5781 | * 1 for xattr if selinux is on | |
5782 | */ | |
a22285a6 YZ |
5783 | trans = btrfs_start_transaction(root, 5); |
5784 | if (IS_ERR(trans)) | |
5785 | return PTR_ERR(trans); | |
39279cc3 | 5786 | |
581bb050 LZ |
5787 | err = btrfs_find_free_ino(root, &objectid); |
5788 | if (err) | |
5789 | goto out_fail; | |
5790 | ||
aec7477b | 5791 | inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, |
33345d01 | 5792 | dentry->d_name.len, btrfs_ino(dir), objectid, |
d82a6f1d | 5793 | S_IFDIR | mode, &index); |
39279cc3 CM |
5794 | if (IS_ERR(inode)) { |
5795 | err = PTR_ERR(inode); | |
5796 | goto out_fail; | |
5797 | } | |
5f39d397 | 5798 | |
39279cc3 | 5799 | drop_on_err = 1; |
33268eaf | 5800 | |
2a7dba39 | 5801 | err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); |
33268eaf JB |
5802 | if (err) |
5803 | goto out_fail; | |
5804 | ||
39279cc3 CM |
5805 | inode->i_op = &btrfs_dir_inode_operations; |
5806 | inode->i_fop = &btrfs_dir_file_operations; | |
39279cc3 | 5807 | |
dbe674a9 | 5808 | btrfs_i_size_write(inode, 0); |
39279cc3 CM |
5809 | err = btrfs_update_inode(trans, root, inode); |
5810 | if (err) | |
5811 | goto out_fail; | |
5f39d397 | 5812 | |
a1b075d2 JB |
5813 | err = btrfs_add_link(trans, dir, inode, dentry->d_name.name, |
5814 | dentry->d_name.len, 0, index); | |
39279cc3 CM |
5815 | if (err) |
5816 | goto out_fail; | |
5f39d397 | 5817 | |
39279cc3 CM |
5818 | d_instantiate(dentry, inode); |
5819 | drop_on_err = 0; | |
39279cc3 CM |
5820 | |
5821 | out_fail: | |
7ad85bb7 | 5822 | btrfs_end_transaction(trans, root); |
39279cc3 CM |
5823 | if (drop_on_err) |
5824 | iput(inode); | |
b53d3f5d | 5825 | btrfs_btree_balance_dirty(root); |
39279cc3 CM |
5826 | return err; |
5827 | } | |
5828 | ||
d352ac68 CM |
5829 | /* helper for btrfs_get_extent. Given an existing extent in the tree,
5830 | * and an extent that you want to insert, deal with overlap and insert | |
5831 | * the new extent into the tree. | |
5832 | */ | |
3b951516 CM |
5833 | static int merge_extent_mapping(struct extent_map_tree *em_tree, |
5834 | struct extent_map *existing, | |
e6dcd2dc CM |
5835 | struct extent_map *em, |
5836 | u64 map_start, u64 map_len) | |
3b951516 CM |
5837 | { |
5838 | u64 start_diff; | |
3b951516 | 5839 | |
e6dcd2dc CM |
5840 | BUG_ON(map_start < em->start || map_start >= extent_map_end(em)); |
5841 | start_diff = map_start - em->start; | |
5842 | em->start = map_start; | |
5843 | em->len = map_len; | |
c8b97818 CM |
5844 | if (em->block_start < EXTENT_MAP_LAST_BYTE && |
5845 | !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) { | |
e6dcd2dc | 5846 | em->block_start += start_diff; |
c8b97818 CM |
5847 | em->block_len -= start_diff; |
5848 | } | |
09a2a8f9 | 5849 | return add_extent_mapping(em_tree, em, 0); |
3b951516 CM |
5850 | } |
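/*
 * Illustrative sketch, not part of the original file: the arithmetic that
 * merge_extent_mapping() above performs, with concrete (made-up) numbers.
 * The locals stand in for the struct extent_map fields of the same name.
 */
static void sketch_merge_extent_mapping(void)
{
	u64 em_start = 0, em_len = 262144;		/* em covers file range [0, 256K) */
	u64 em_block_start = 1048576;			/* and begins at disk byte 1M     */
	u64 map_start = 131072, map_len = 131072;	/* caller only wants [128K, 256K) */
	u64 start_diff = map_start - em_start;		/* 131072                         */

	em_start = map_start;				/* mapping now starts at 128K     */
	em_len = map_len;				/* and is 128K long               */
	em_block_start += start_diff;			/* disk start shifts to 1M + 128K */
	(void)em_start; (void)em_len; (void)em_block_start;
}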
5851 | ||
c8b97818 CM |
5852 | static noinline int uncompress_inline(struct btrfs_path *path, |
5853 | struct inode *inode, struct page *page, | |
5854 | size_t pg_offset, u64 extent_offset, | |
5855 | struct btrfs_file_extent_item *item) | |
5856 | { | |
5857 | int ret; | |
5858 | struct extent_buffer *leaf = path->nodes[0]; | |
5859 | char *tmp; | |
5860 | size_t max_size; | |
5861 | unsigned long inline_size; | |
5862 | unsigned long ptr; | |
261507a0 | 5863 | int compress_type; |
c8b97818 CM |
5864 | |
5865 | WARN_ON(pg_offset != 0); | |
261507a0 | 5866 | compress_type = btrfs_file_extent_compression(leaf, item); |
c8b97818 CM |
5867 | max_size = btrfs_file_extent_ram_bytes(leaf, item); |
5868 | inline_size = btrfs_file_extent_inline_item_len(leaf, | |
dd3cc16b | 5869 | btrfs_item_nr(path->slots[0])); |
c8b97818 | 5870 | tmp = kmalloc(inline_size, GFP_NOFS); |
8d413713 TI |
5871 | if (!tmp) |
5872 | return -ENOMEM; | |
c8b97818 CM |
5873 | ptr = btrfs_file_extent_inline_start(item); |
5874 | ||
5875 | read_extent_buffer(leaf, tmp, ptr, inline_size); | |
5876 | ||
5b050f04 | 5877 | max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size); |
261507a0 LZ |
5878 | ret = btrfs_decompress(compress_type, tmp, page, |
5879 | extent_offset, inline_size, max_size); | |
c8b97818 | 5880 | if (ret) { |
7ac687d9 | 5881 | char *kaddr = kmap_atomic(page); |
c8b97818 CM |
5882 | unsigned long copy_size = min_t(u64, |
5883 | PAGE_CACHE_SIZE - pg_offset, | |
5884 | max_size - extent_offset); | |
5885 | memset(kaddr + pg_offset, 0, copy_size); | |
7ac687d9 | 5886 | kunmap_atomic(kaddr); |
c8b97818 CM |
5887 | } |
5888 | kfree(tmp); | |
5889 | return 0; | |
5890 | } | |
5891 | ||
d352ac68 CM |
5892 | /* |
5893 | * a bit scary, this does extent mapping from logical file offset to the disk. | |
d397712b CM |
5894 | * the ugly parts come from merging extents from the disk with the in-ram |
5895 | * representation. This gets more complex because of the data=ordered code, | |
d352ac68 CM |
5896 | * where the in-ram extents might be locked pending data=ordered completion. |
5897 | * | |
5898 | * This also copies inline extents directly into the page. | |
5899 | */ | |
d397712b | 5900 | |
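/*
 * Illustrative sketch, not part of the original file: how a caller might use
 * btrfs_get_extent() below to translate a logical file offset into a disk
 * byte number.  The wrapper name and the 4K lookup length are hypothetical;
 * error handling is abbreviated.
 */
static u64 sketch_logical_to_disk(struct inode *inode, u64 file_offset)
{
	struct extent_map *em;
	u64 disk_bytenr = (u64)-1;

	em = btrfs_get_extent(inode, NULL, 0, file_offset, 4096, 0);
	if (IS_ERR(em))
		return disk_bytenr;
	/* holes, inline and delalloc extents use sentinel block_start values */
	if (em->block_start < EXTENT_MAP_LAST_BYTE)
		disk_bytenr = em->block_start + (file_offset - em->start);
	free_extent_map(em);
	return disk_bytenr;
}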
a52d9a80 | 5901 | struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, |
70dec807 | 5902 | size_t pg_offset, u64 start, u64 len, |
a52d9a80 CM |
5903 | int create) |
5904 | { | |
5905 | int ret; | |
5906 | int err = 0; | |
db94535d | 5907 | u64 bytenr; |
a52d9a80 CM |
5908 | u64 extent_start = 0; |
5909 | u64 extent_end = 0; | |
33345d01 | 5910 | u64 objectid = btrfs_ino(inode); |
a52d9a80 | 5911 | u32 found_type; |
f421950f | 5912 | struct btrfs_path *path = NULL; |
a52d9a80 CM |
5913 | struct btrfs_root *root = BTRFS_I(inode)->root; |
5914 | struct btrfs_file_extent_item *item; | |
5f39d397 CM |
5915 | struct extent_buffer *leaf; |
5916 | struct btrfs_key found_key; | |
a52d9a80 CM |
5917 | struct extent_map *em = NULL; |
5918 | struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; | |
d1310b2e | 5919 | struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; |
a52d9a80 | 5920 | struct btrfs_trans_handle *trans = NULL; |
261507a0 | 5921 | int compress_type; |
a52d9a80 | 5922 | |
a52d9a80 | 5923 | again: |
890871be | 5924 | read_lock(&em_tree->lock); |
d1310b2e | 5925 | em = lookup_extent_mapping(em_tree, start, len); |
a061fc8d CM |
5926 | if (em) |
5927 | em->bdev = root->fs_info->fs_devices->latest_bdev; | |
890871be | 5928 | read_unlock(&em_tree->lock); |
d1310b2e | 5929 | |
a52d9a80 | 5930 | if (em) { |
e1c4b745 CM |
5931 | if (em->start > start || em->start + em->len <= start) |
5932 | free_extent_map(em); | |
5933 | else if (em->block_start == EXTENT_MAP_INLINE && page) | |
70dec807 CM |
5934 | free_extent_map(em); |
5935 | else | |
5936 | goto out; | |
a52d9a80 | 5937 | } |
172ddd60 | 5938 | em = alloc_extent_map(); |
a52d9a80 | 5939 | if (!em) { |
d1310b2e CM |
5940 | err = -ENOMEM; |
5941 | goto out; | |
a52d9a80 | 5942 | } |
e6dcd2dc | 5943 | em->bdev = root->fs_info->fs_devices->latest_bdev; |
d1310b2e | 5944 | em->start = EXTENT_MAP_HOLE; |
445a6944 | 5945 | em->orig_start = EXTENT_MAP_HOLE; |
d1310b2e | 5946 | em->len = (u64)-1; |
c8b97818 | 5947 | em->block_len = (u64)-1; |
f421950f CM |
5948 | |
5949 | if (!path) { | |
5950 | path = btrfs_alloc_path(); | |
026fd317 JB |
5951 | if (!path) { |
5952 | err = -ENOMEM; | |
5953 | goto out; | |
5954 | } | |
5955 | /* | |
5956 | * Chances are we'll be called again, so go ahead and do | |
5957 | * readahead | |
5958 | */ | |
5959 | path->reada = 1; | |
f421950f CM |
5960 | } |
5961 | ||
179e29e4 CM |
5962 | ret = btrfs_lookup_file_extent(trans, root, path, |
5963 | objectid, start, trans != NULL); | |
a52d9a80 CM |
5964 | if (ret < 0) { |
5965 | err = ret; | |
5966 | goto out; | |
5967 | } | |
5968 | ||
5969 | if (ret != 0) { | |
5970 | if (path->slots[0] == 0) | |
5971 | goto not_found; | |
5972 | path->slots[0]--; | |
5973 | } | |
5974 | ||
5f39d397 CM |
5975 | leaf = path->nodes[0]; |
5976 | item = btrfs_item_ptr(leaf, path->slots[0], | |
a52d9a80 | 5977 | struct btrfs_file_extent_item); |
a52d9a80 | 5978 | /* are we inside the extent that was found? */ |
5f39d397 CM |
5979 | btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); |
5980 | found_type = btrfs_key_type(&found_key); | |
5981 | if (found_key.objectid != objectid || | |
a52d9a80 | 5982 | found_type != BTRFS_EXTENT_DATA_KEY) { |
25a50341 JB |
5983 | /* |
5984 | * If we backup past the first extent we want to move forward | |
5985 | * and see if there is an extent in front of us, otherwise we'll | |
5986 | * say there is a hole for our whole search range which can | |
5987 | * cause problems. | |
5988 | */ | |
5989 | extent_end = start; | |
5990 | goto next; | |
a52d9a80 CM |
5991 | } |
5992 | ||
5f39d397 CM |
5993 | found_type = btrfs_file_extent_type(leaf, item); |
5994 | extent_start = found_key.offset; | |
261507a0 | 5995 | compress_type = btrfs_file_extent_compression(leaf, item); |
d899e052 YZ |
5996 | if (found_type == BTRFS_FILE_EXTENT_REG || |
5997 | found_type == BTRFS_FILE_EXTENT_PREALLOC) { | |
a52d9a80 | 5998 | extent_end = extent_start + |
db94535d | 5999 | btrfs_file_extent_num_bytes(leaf, item); |
9036c102 YZ |
6000 | } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { |
6001 | size_t size; | |
6002 | size = btrfs_file_extent_inline_len(leaf, item); | |
fda2832f | 6003 | extent_end = ALIGN(extent_start + size, root->sectorsize); |
9036c102 | 6004 | } |
25a50341 | 6005 | next: |
9036c102 YZ |
6006 | if (start >= extent_end) { |
6007 | path->slots[0]++; | |
6008 | if (path->slots[0] >= btrfs_header_nritems(leaf)) { | |
6009 | ret = btrfs_next_leaf(root, path); | |
6010 | if (ret < 0) { | |
6011 | err = ret; | |
6012 | goto out; | |
a52d9a80 | 6013 | } |
9036c102 YZ |
6014 | if (ret > 0) |
6015 | goto not_found; | |
6016 | leaf = path->nodes[0]; | |
a52d9a80 | 6017 | } |
9036c102 YZ |
6018 | btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); |
6019 | if (found_key.objectid != objectid || | |
6020 | found_key.type != BTRFS_EXTENT_DATA_KEY) | |
6021 | goto not_found; | |
6022 | if (start + len <= found_key.offset) | |
6023 | goto not_found; | |
6024 | em->start = start; | |
70c8a91c | 6025 | em->orig_start = start; |
9036c102 YZ |
6026 | em->len = found_key.offset - start; |
6027 | goto not_found_em; | |
6028 | } | |
6029 | ||
cc95bef6 | 6030 | em->ram_bytes = btrfs_file_extent_ram_bytes(leaf, item); |
d899e052 YZ |
6031 | if (found_type == BTRFS_FILE_EXTENT_REG || |
6032 | found_type == BTRFS_FILE_EXTENT_PREALLOC) { | |
9036c102 YZ |
6033 | em->start = extent_start; |
6034 | em->len = extent_end - extent_start; | |
ff5b7ee3 YZ |
6035 | em->orig_start = extent_start - |
6036 | btrfs_file_extent_offset(leaf, item); | |
b4939680 JB |
6037 | em->orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, |
6038 | item); | |
db94535d CM |
6039 | bytenr = btrfs_file_extent_disk_bytenr(leaf, item); |
6040 | if (bytenr == 0) { | |
5f39d397 | 6041 | em->block_start = EXTENT_MAP_HOLE; |
a52d9a80 CM |
6042 | goto insert; |
6043 | } | |
261507a0 | 6044 | if (compress_type != BTRFS_COMPRESS_NONE) { |
c8b97818 | 6045 | set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); |
261507a0 | 6046 | em->compress_type = compress_type; |
c8b97818 | 6047 | em->block_start = bytenr; |
b4939680 | 6048 | em->block_len = em->orig_block_len; |
c8b97818 CM |
6049 | } else { |
6050 | bytenr += btrfs_file_extent_offset(leaf, item); | |
6051 | em->block_start = bytenr; | |
6052 | em->block_len = em->len; | |
d899e052 YZ |
6053 | if (found_type == BTRFS_FILE_EXTENT_PREALLOC) |
6054 | set_bit(EXTENT_FLAG_PREALLOC, &em->flags); | |
c8b97818 | 6055 | } |
a52d9a80 CM |
6056 | goto insert; |
6057 | } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { | |
5f39d397 | 6058 | unsigned long ptr; |
a52d9a80 | 6059 | char *map; |
3326d1b0 CM |
6060 | size_t size; |
6061 | size_t extent_offset; | |
6062 | size_t copy_size; | |
a52d9a80 | 6063 | |
689f9346 | 6064 | em->block_start = EXTENT_MAP_INLINE; |
c8b97818 | 6065 | if (!page || create) { |
689f9346 | 6066 | em->start = extent_start; |
9036c102 | 6067 | em->len = extent_end - extent_start; |
689f9346 Y |
6068 | goto out; |
6069 | } | |
5f39d397 | 6070 | |
9036c102 YZ |
6071 | size = btrfs_file_extent_inline_len(leaf, item); |
6072 | extent_offset = page_offset(page) + pg_offset - extent_start; | |
70dec807 | 6073 | copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset, |
3326d1b0 | 6074 | size - extent_offset); |
3326d1b0 | 6075 | em->start = extent_start + extent_offset; |
fda2832f | 6076 | em->len = ALIGN(copy_size, root->sectorsize); |
b4939680 | 6077 | em->orig_block_len = em->len; |
70c8a91c | 6078 | em->orig_start = em->start; |
261507a0 | 6079 | if (compress_type) { |
c8b97818 | 6080 | set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); |
261507a0 LZ |
6081 | em->compress_type = compress_type; |
6082 | } | |
689f9346 | 6083 | ptr = btrfs_file_extent_inline_start(item) + extent_offset; |
179e29e4 | 6084 | if (create == 0 && !PageUptodate(page)) { |
261507a0 LZ |
6085 | if (btrfs_file_extent_compression(leaf, item) != |
6086 | BTRFS_COMPRESS_NONE) { | |
c8b97818 CM |
6087 | ret = uncompress_inline(path, inode, page, |
6088 | pg_offset, | |
6089 | extent_offset, item); | |
79787eaa | 6090 | BUG_ON(ret); /* -ENOMEM */ |
c8b97818 CM |
6091 | } else { |
6092 | map = kmap(page); | |
6093 | read_extent_buffer(leaf, map + pg_offset, ptr, | |
6094 | copy_size); | |
93c82d57 CM |
6095 | if (pg_offset + copy_size < PAGE_CACHE_SIZE) { |
6096 | memset(map + pg_offset + copy_size, 0, | |
6097 | PAGE_CACHE_SIZE - pg_offset - | |
6098 | copy_size); | |
6099 | } | |
c8b97818 CM |
6100 | kunmap(page); |
6101 | } | |
179e29e4 CM |
6102 | flush_dcache_page(page); |
6103 | } else if (create && PageUptodate(page)) { | |
6bf7e080 | 6104 | BUG(); |
179e29e4 CM |
6105 | if (!trans) { |
6106 | kunmap(page); | |
6107 | free_extent_map(em); | |
6108 | em = NULL; | |
ff5714cc | 6109 | |
b3b4aa74 | 6110 | btrfs_release_path(path); |
7a7eaa40 | 6111 | trans = btrfs_join_transaction(root); |
ff5714cc | 6112 | |
3612b495 TI |
6113 | if (IS_ERR(trans)) |
6114 | return ERR_CAST(trans); | |
179e29e4 CM |
6115 | goto again; |
6116 | } | |
c8b97818 | 6117 | map = kmap(page); |
70dec807 | 6118 | write_extent_buffer(leaf, map + pg_offset, ptr, |
179e29e4 | 6119 | copy_size); |
c8b97818 | 6120 | kunmap(page); |
179e29e4 | 6121 | btrfs_mark_buffer_dirty(leaf); |
a52d9a80 | 6122 | } |
d1310b2e | 6123 | set_extent_uptodate(io_tree, em->start, |
507903b8 | 6124 | extent_map_end(em) - 1, NULL, GFP_NOFS); |
a52d9a80 CM |
6125 | goto insert; |
6126 | } else { | |
31b1a2bd | 6127 | WARN(1, KERN_ERR "btrfs unknown found_type %d\n", found_type); |
a52d9a80 CM |
6128 | } |
6129 | not_found: | |
6130 | em->start = start; | |
70c8a91c | 6131 | em->orig_start = start; |
d1310b2e | 6132 | em->len = len; |
a52d9a80 | 6133 | not_found_em: |
5f39d397 | 6134 | em->block_start = EXTENT_MAP_HOLE; |
9036c102 | 6135 | set_bit(EXTENT_FLAG_VACANCY, &em->flags); |
a52d9a80 | 6136 | insert: |
b3b4aa74 | 6137 | btrfs_release_path(path); |
d1310b2e | 6138 | if (em->start > start || extent_map_end(em) <= start) { |
c2cf52eb | 6139 | btrfs_err(root->fs_info, "bad extent! em: [%llu %llu] passed [%llu %llu]", |
c1c9ff7c | 6140 | em->start, em->len, start, len); |
a52d9a80 CM |
6141 | err = -EIO; |
6142 | goto out; | |
6143 | } | |
d1310b2e CM |
6144 | |
6145 | err = 0; | |
890871be | 6146 | write_lock(&em_tree->lock); |
09a2a8f9 | 6147 | ret = add_extent_mapping(em_tree, em, 0); |
3b951516 CM |
6148 | /* it is possible that someone inserted the extent into the tree |
6149 | * while we had the lock dropped. It is also possible that | |
6150 | * an overlapping map exists in the tree | |
6151 | */ | |
a52d9a80 | 6152 | if (ret == -EEXIST) { |
3b951516 | 6153 | struct extent_map *existing; |
e6dcd2dc CM |
6154 | |
6155 | ret = 0; | |
6156 | ||
3b951516 | 6157 | existing = lookup_extent_mapping(em_tree, start, len); |
e1c4b745 CM |
6158 | if (existing && (existing->start > start || |
6159 | existing->start + existing->len <= start)) { | |
6160 | free_extent_map(existing); | |
6161 | existing = NULL; | |
6162 | } | |
3b951516 CM |
6163 | if (!existing) { |
6164 | existing = lookup_extent_mapping(em_tree, em->start, | |
6165 | em->len); | |
6166 | if (existing) { | |
6167 | err = merge_extent_mapping(em_tree, existing, | |
e6dcd2dc CM |
6168 | em, start, |
6169 | root->sectorsize); | |
3b951516 CM |
6170 | free_extent_map(existing); |
6171 | if (err) { | |
6172 | free_extent_map(em); | |
6173 | em = NULL; | |
6174 | } | |
6175 | } else { | |
6176 | err = -EIO; | |
3b951516 CM |
6177 | free_extent_map(em); |
6178 | em = NULL; | |
6179 | } | |
6180 | } else { | |
6181 | free_extent_map(em); | |
6182 | em = existing; | |
e6dcd2dc | 6183 | err = 0; |
a52d9a80 | 6184 | } |
a52d9a80 | 6185 | } |
890871be | 6186 | write_unlock(&em_tree->lock); |
a52d9a80 | 6187 | out: |
1abe9b8a | 6188 | |
f0bd95ea TI |
6189 | if (em) |
6190 | trace_btrfs_get_extent(root, em); | |
1abe9b8a | 6191 | |
f421950f CM |
6192 | if (path) |
6193 | btrfs_free_path(path); | |
a52d9a80 CM |
6194 | if (trans) { |
6195 | ret = btrfs_end_transaction(trans, root); | |
d397712b | 6196 | if (!err) |
a52d9a80 CM |
6197 | err = ret; |
6198 | } | |
a52d9a80 CM |
6199 | if (err) { |
6200 | free_extent_map(em); | |
a52d9a80 CM |
6201 | return ERR_PTR(err); |
6202 | } | |
79787eaa | 6203 | BUG_ON(!em); /* Error is always set */ |
a52d9a80 CM |
6204 | return em; |
6205 | } | |
6206 | ||
ec29ed5b CM |
6207 | struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page, |
6208 | size_t pg_offset, u64 start, u64 len, | |
6209 | int create) | |
6210 | { | |
6211 | struct extent_map *em; | |
6212 | struct extent_map *hole_em = NULL; | |
6213 | u64 range_start = start; | |
6214 | u64 end; | |
6215 | u64 found; | |
6216 | u64 found_end; | |
6217 | int err = 0; | |
6218 | ||
6219 | em = btrfs_get_extent(inode, page, pg_offset, start, len, create); | |
6220 | if (IS_ERR(em)) | |
6221 | return em; | |
6222 | if (em) { | |
6223 | /* | |
f9e4fb53 LB |
6224 | * if our em maps to |
6225 | * - a hole or | |
6226 | * - a pre-alloc extent, | |
6227 | * there might actually be delalloc bytes behind it. | |
ec29ed5b | 6228 | */ |
f9e4fb53 LB |
6229 | if (em->block_start != EXTENT_MAP_HOLE && |
6230 | !test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) | |
ec29ed5b CM |
6231 | return em; |
6232 | else | |
6233 | hole_em = em; | |
6234 | } | |
6235 | ||
6236 | /* check to see if we've wrapped (len == -1 or similar) */ | |
6237 | end = start + len; | |
6238 | if (end < start) | |
6239 | end = (u64)-1; | |
6240 | else | |
6241 | end -= 1; | |
6242 | ||
6243 | em = NULL; | |
6244 | ||
6245 | /* ok, we didn't find anything, let's look for delalloc */ | |
6246 | found = count_range_bits(&BTRFS_I(inode)->io_tree, &range_start, | |
6247 | end, len, EXTENT_DELALLOC, 1); | |
6248 | found_end = range_start + found; | |
6249 | if (found_end < range_start) | |
6250 | found_end = (u64)-1; | |
6251 | ||
6252 | /* | |
6253 | * we didn't find anything useful, return | |
6254 | * the original results from get_extent() | |
6255 | */ | |
6256 | if (range_start > end || found_end <= start) { | |
6257 | em = hole_em; | |
6258 | hole_em = NULL; | |
6259 | goto out; | |
6260 | } | |
6261 | ||
6262 | /* adjust the range_start to make sure it doesn't | |
6263 | * go backwards from the start they passed in | |
6264 | */ | |
6265 | range_start = max(start, range_start); | |
6266 | found = found_end - range_start; | |
6267 | ||
6268 | if (found > 0) { | |
6269 | u64 hole_start = start; | |
6270 | u64 hole_len = len; | |
6271 | ||
172ddd60 | 6272 | em = alloc_extent_map(); |
ec29ed5b CM |
6273 | if (!em) { |
6274 | err = -ENOMEM; | |
6275 | goto out; | |
6276 | } | |
6277 | /* | |
6278 | * when btrfs_get_extent can't find anything it | |
6279 | * returns one huge hole | |
6280 | * | |
6281 | * make sure what it found really fits our range, and | |
6282 | * adjust to make sure it is based on the start from | |
6283 | * the caller | |
6284 | */ | |
6285 | if (hole_em) { | |
6286 | u64 calc_end = extent_map_end(hole_em); | |
6287 | ||
6288 | if (calc_end <= start || (hole_em->start > end)) { | |
6289 | free_extent_map(hole_em); | |
6290 | hole_em = NULL; | |
6291 | } else { | |
6292 | hole_start = max(hole_em->start, start); | |
6293 | hole_len = calc_end - hole_start; | |
6294 | } | |
6295 | } | |
6296 | em->bdev = NULL; | |
6297 | if (hole_em && range_start > hole_start) { | |
6298 | /* our hole starts before our delalloc, so we | |
6299 | * have to return just the parts of the hole | |
6300 | * that go until the delalloc starts | |
6301 | */ | |
6302 | em->len = min(hole_len, | |
6303 | range_start - hole_start); | |
6304 | em->start = hole_start; | |
6305 | em->orig_start = hole_start; | |
6306 | /* | |
6307 | * don't adjust block start at all, | |
6308 | * it is fixed at EXTENT_MAP_HOLE | |
6309 | */ | |
6310 | em->block_start = hole_em->block_start; | |
6311 | em->block_len = hole_len; | |
f9e4fb53 LB |
6312 | if (test_bit(EXTENT_FLAG_PREALLOC, &hole_em->flags)) |
6313 | set_bit(EXTENT_FLAG_PREALLOC, &em->flags); | |
ec29ed5b CM |
6314 | } else { |
6315 | em->start = range_start; | |
6316 | em->len = found; | |
6317 | em->orig_start = range_start; | |
6318 | em->block_start = EXTENT_MAP_DELALLOC; | |
6319 | em->block_len = found; | |
6320 | } | |
6321 | } else if (hole_em) { | |
6322 | return hole_em; | |
6323 | } | |
6324 | out: | |
6325 | ||
6326 | free_extent_map(hole_em); | |
6327 | if (err) { | |
6328 | free_extent_map(em); | |
6329 | return ERR_PTR(err); | |
6330 | } | |
6331 | return em; | |
6332 | } | |
6333 | ||
4b46fce2 JB |
6334 | static struct extent_map *btrfs_new_extent_direct(struct inode *inode, |
6335 | u64 start, u64 len) | |
6336 | { | |
6337 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
70c8a91c | 6338 | struct extent_map *em; |
4b46fce2 JB |
6339 | struct btrfs_key ins; |
6340 | u64 alloc_hint; | |
6341 | int ret; | |
4b46fce2 | 6342 | |
4b46fce2 | 6343 | alloc_hint = get_extent_allocation_hint(inode, start, len); |
00361589 | 6344 | ret = btrfs_reserve_extent(root, len, root->sectorsize, 0, |
81c9ad23 | 6345 | alloc_hint, &ins, 1); |
00361589 JB |
6346 | if (ret) |
6347 | return ERR_PTR(ret); | |
4b46fce2 | 6348 | |
70c8a91c | 6349 | em = create_pinned_em(inode, start, ins.offset, start, ins.objectid, |
cc95bef6 | 6350 | ins.offset, ins.offset, ins.offset, 0); |
00361589 JB |
6351 | if (IS_ERR(em)) { |
6352 | btrfs_free_reserved_extent(root, ins.objectid, ins.offset); | |
6353 | return em; | |
6354 | } | |
4b46fce2 JB |
6355 | |
6356 | ret = btrfs_add_ordered_extent_dio(inode, start, ins.objectid, | |
6357 | ins.offset, ins.offset, 0); | |
6358 | if (ret) { | |
6359 | btrfs_free_reserved_extent(root, ins.objectid, ins.offset); | |
00361589 JB |
6360 | free_extent_map(em); |
6361 | return ERR_PTR(ret); | |
4b46fce2 | 6362 | } |
00361589 | 6363 | |
4b46fce2 JB |
6364 | return em; |
6365 | } | |
6366 | ||
46bfbb5c CM |
6367 | /* |
6368 | * returns 1 when the nocow is safe, < 0 on error, 0 if the | |
6369 | * block must be cow'd | |
6370 | */ | |
00361589 | 6371 | noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len, |
7ee9e440 JB |
6372 | u64 *orig_start, u64 *orig_block_len, |
6373 | u64 *ram_bytes) | |
46bfbb5c | 6374 | { |
00361589 | 6375 | struct btrfs_trans_handle *trans; |
46bfbb5c CM |
6376 | struct btrfs_path *path; |
6377 | int ret; | |
6378 | struct extent_buffer *leaf; | |
6379 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
6380 | struct btrfs_file_extent_item *fi; | |
6381 | struct btrfs_key key; | |
6382 | u64 disk_bytenr; | |
6383 | u64 backref_offset; | |
6384 | u64 extent_end; | |
6385 | u64 num_bytes; | |
6386 | int slot; | |
6387 | int found_type; | |
7ee9e440 | 6388 | bool nocow = (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW); |
46bfbb5c CM |
6389 | path = btrfs_alloc_path(); |
6390 | if (!path) | |
6391 | return -ENOMEM; | |
6392 | ||
00361589 | 6393 | ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode), |
46bfbb5c CM |
6394 | offset, 0); |
6395 | if (ret < 0) | |
6396 | goto out; | |
6397 | ||
6398 | slot = path->slots[0]; | |
6399 | if (ret == 1) { | |
6400 | if (slot == 0) { | |
6401 | /* can't find the item, must cow */ | |
6402 | ret = 0; | |
6403 | goto out; | |
6404 | } | |
6405 | slot--; | |
6406 | } | |
6407 | ret = 0; | |
6408 | leaf = path->nodes[0]; | |
6409 | btrfs_item_key_to_cpu(leaf, &key, slot); | |
33345d01 | 6410 | if (key.objectid != btrfs_ino(inode) || |
46bfbb5c CM |
6411 | key.type != BTRFS_EXTENT_DATA_KEY) { |
6412 | /* not our file or wrong item type, must cow */ | |
6413 | goto out; | |
6414 | } | |
6415 | ||
6416 | if (key.offset > offset) { | |
6417 | /* Wrong offset, must cow */ | |
6418 | goto out; | |
6419 | } | |
6420 | ||
6421 | fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); | |
6422 | found_type = btrfs_file_extent_type(leaf, fi); | |
6423 | if (found_type != BTRFS_FILE_EXTENT_REG && | |
6424 | found_type != BTRFS_FILE_EXTENT_PREALLOC) { | |
6425 | /* not a regular extent, must cow */ | |
6426 | goto out; | |
6427 | } | |
7ee9e440 JB |
6428 | |
6429 | if (!nocow && found_type == BTRFS_FILE_EXTENT_REG) | |
6430 | goto out; | |
6431 | ||
46bfbb5c | 6432 | disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); |
7ee9e440 JB |
6433 | if (disk_bytenr == 0) |
6434 | goto out; | |
6435 | ||
6436 | if (btrfs_file_extent_compression(leaf, fi) || | |
6437 | btrfs_file_extent_encryption(leaf, fi) || | |
6438 | btrfs_file_extent_other_encoding(leaf, fi)) | |
6439 | goto out; | |
6440 | ||
46bfbb5c CM |
6441 | backref_offset = btrfs_file_extent_offset(leaf, fi); |
6442 | ||
7ee9e440 JB |
6443 | if (orig_start) { |
6444 | *orig_start = key.offset - backref_offset; | |
6445 | *orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi); | |
6446 | *ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi); | |
6447 | } | |
eb384b55 | 6448 | |
46bfbb5c | 6449 | extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi); |
46bfbb5c CM |
6450 | |
6451 | if (btrfs_extent_readonly(root, disk_bytenr)) | |
6452 | goto out; | |
1bda19eb | 6453 | btrfs_release_path(path); |
46bfbb5c CM |
6454 | |
6455 | /* | |
6456 | * look for other files referencing this extent, if we | |
6457 | * find any we must cow | |
6458 | */ | |
00361589 JB |
6459 | trans = btrfs_join_transaction(root); |
6460 | if (IS_ERR(trans)) { | |
6461 | ret = 0; | |
46bfbb5c | 6462 | goto out; |
00361589 JB |
6463 | } |
6464 | ||
6465 | ret = btrfs_cross_ref_exist(trans, root, btrfs_ino(inode), | |
6466 | key.offset - backref_offset, disk_bytenr); | |
6467 | btrfs_end_transaction(trans, root); | |
6468 | if (ret) { | |
6469 | ret = 0; | |
6470 | goto out; | |
6471 | } | |
46bfbb5c CM |
6472 | |
6473 | /* | |
6474 | * adjust disk_bytenr and num_bytes to cover just the bytes | |
6475 | * in this extent we are about to write. If there | |
6476 | * are any csums in that range we have to cow in order | |
6477 | * to keep the csums correct | |
6478 | */ | |
6479 | disk_bytenr += backref_offset; | |
6480 | disk_bytenr += offset - key.offset; | |
eb384b55 | 6481 | num_bytes = min(offset + *len, extent_end) - offset; |
46bfbb5c CM |
6482 | if (csum_exist_in_range(root, disk_bytenr, num_bytes)) |
6483 | goto out; | |
6484 | /* | |
6485 | * all of the above have passed, it is safe to overwrite this extent | |
6486 | * without cow | |
6487 | */ | |
eb384b55 | 6488 | *len = num_bytes; |
46bfbb5c CM |
6489 | ret = 1; |
6490 | out: | |
6491 | btrfs_free_path(path); | |
6492 | return ret; | |
6493 | } | |
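/*
 * Illustrative sketch, not part of the original file: consuming the
 * tri-state result of can_nocow_extent() above.  The wrapper name is
 * hypothetical; the real caller is btrfs_get_blocks_direct() further down.
 */
static bool sketch_may_write_in_place(struct inode *inode, u64 offset, u64 *len)
{
	u64 orig_start, orig_block_len, ram_bytes;
	int ret;

	ret = can_nocow_extent(inode, offset, len, &orig_start,
			       &orig_block_len, &ram_bytes);
	/* 1: nocow is safe; 0: must cow; < 0: error, treated as "must cow" here */
	return ret == 1;
}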
6494 | ||
eb838e73 JB |
6495 | static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend, |
6496 | struct extent_state **cached_state, int writing) | |
6497 | { | |
6498 | struct btrfs_ordered_extent *ordered; | |
6499 | int ret = 0; | |
6500 | ||
6501 | while (1) { | |
6502 | lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, | |
6503 | 0, cached_state); | |
6504 | /* | |
6505 | * We're concerned with the entire range that we're going to be | |
6506 | * doing DIO to, so we need to make sure there are no ordered | |
6507 | * extents in this range. | |
6508 | */ | |
6509 | ordered = btrfs_lookup_ordered_range(inode, lockstart, | |
6510 | lockend - lockstart + 1); | |
6511 | ||
6512 | /* | |
6513 | * We need to make sure there are no buffered pages in this | |
6514 | * range either; we could have raced between the invalidate in | |
6515 | * generic_file_direct_write and locking the extent. The | |
6516 | * invalidate needs to happen so that reads after a write do not | |
6517 | * get stale data. | |
6518 | */ | |
6519 | if (!ordered && (!writing || | |
6520 | !test_range_bit(&BTRFS_I(inode)->io_tree, | |
6521 | lockstart, lockend, EXTENT_UPTODATE, 0, | |
6522 | *cached_state))) | |
6523 | break; | |
6524 | ||
6525 | unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend, | |
6526 | cached_state, GFP_NOFS); | |
6527 | ||
6528 | if (ordered) { | |
6529 | btrfs_start_ordered_extent(inode, ordered, 1); | |
6530 | btrfs_put_ordered_extent(ordered); | |
6531 | } else { | |
6532 | /* Screw you mmap */ | |
6533 | ret = filemap_write_and_wait_range(inode->i_mapping, | |
6534 | lockstart, | |
6535 | lockend); | |
6536 | if (ret) | |
6537 | break; | |
6538 | ||
6539 | /* | |
6540 | * If we found a page that couldn't be invalidated just | |
6541 | * fall back to buffered. | |
6542 | */ | |
6543 | ret = invalidate_inode_pages2_range(inode->i_mapping, | |
6544 | lockstart >> PAGE_CACHE_SHIFT, | |
6545 | lockend >> PAGE_CACHE_SHIFT); | |
6546 | if (ret) | |
6547 | break; | |
6548 | } | |
6549 | ||
6550 | cond_resched(); | |
6551 | } | |
6552 | ||
6553 | return ret; | |
6554 | } | |
6555 | ||
69ffb543 JB |
6556 | static struct extent_map *create_pinned_em(struct inode *inode, u64 start, |
6557 | u64 len, u64 orig_start, | |
6558 | u64 block_start, u64 block_len, | |
cc95bef6 JB |
6559 | u64 orig_block_len, u64 ram_bytes, |
6560 | int type) | |
69ffb543 JB |
6561 | { |
6562 | struct extent_map_tree *em_tree; | |
6563 | struct extent_map *em; | |
6564 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
6565 | int ret; | |
6566 | ||
6567 | em_tree = &BTRFS_I(inode)->extent_tree; | |
6568 | em = alloc_extent_map(); | |
6569 | if (!em) | |
6570 | return ERR_PTR(-ENOMEM); | |
6571 | ||
6572 | em->start = start; | |
6573 | em->orig_start = orig_start; | |
2ab28f32 JB |
6574 | em->mod_start = start; |
6575 | em->mod_len = len; | |
69ffb543 JB |
6576 | em->len = len; |
6577 | em->block_len = block_len; | |
6578 | em->block_start = block_start; | |
6579 | em->bdev = root->fs_info->fs_devices->latest_bdev; | |
b4939680 | 6580 | em->orig_block_len = orig_block_len; |
cc95bef6 | 6581 | em->ram_bytes = ram_bytes; |
70c8a91c | 6582 | em->generation = -1; |
69ffb543 JB |
6583 | set_bit(EXTENT_FLAG_PINNED, &em->flags); |
6584 | if (type == BTRFS_ORDERED_PREALLOC) | |
b11e234d | 6585 | set_bit(EXTENT_FLAG_FILLING, &em->flags); |
69ffb543 JB |
6586 | |
6587 | do { | |
6588 | btrfs_drop_extent_cache(inode, em->start, | |
6589 | em->start + em->len - 1, 0); | |
6590 | write_lock(&em_tree->lock); | |
09a2a8f9 | 6591 | ret = add_extent_mapping(em_tree, em, 1); |
69ffb543 JB |
6592 | write_unlock(&em_tree->lock); |
6593 | } while (ret == -EEXIST); | |
6594 | ||
6595 | if (ret) { | |
6596 | free_extent_map(em); | |
6597 | return ERR_PTR(ret); | |
6598 | } | |
6599 | ||
6600 | return em; | |
6601 | } | |
6602 | ||
6603 | ||
4b46fce2 JB |
6604 | static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, |
6605 | struct buffer_head *bh_result, int create) | |
6606 | { | |
6607 | struct extent_map *em; | |
6608 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
eb838e73 | 6609 | struct extent_state *cached_state = NULL; |
4b46fce2 | 6610 | u64 start = iblock << inode->i_blkbits; |
eb838e73 | 6611 | u64 lockstart, lockend; |
4b46fce2 | 6612 | u64 len = bh_result->b_size; |
eb838e73 | 6613 | int unlock_bits = EXTENT_LOCKED; |
0934856d | 6614 | int ret = 0; |
eb838e73 | 6615 | |
172a5049 | 6616 | if (create) |
eb838e73 | 6617 | unlock_bits |= EXTENT_DELALLOC | EXTENT_DIRTY; |
172a5049 | 6618 | else |
c329861d | 6619 | len = min_t(u64, len, root->sectorsize); |
eb838e73 | 6620 | |
c329861d JB |
6621 | lockstart = start; |
6622 | lockend = start + len - 1; | |
6623 | ||
eb838e73 JB |
6624 | /* |
6625 | * If this errors out it's because we couldn't invalidate pagecache for | |
6626 | * this range and we need to fallback to buffered. | |
6627 | */ | |
6628 | if (lock_extent_direct(inode, lockstart, lockend, &cached_state, create)) | |
6629 | return -ENOTBLK; | |
6630 | ||
4b46fce2 | 6631 | em = btrfs_get_extent(inode, NULL, 0, start, len, 0); |
eb838e73 JB |
6632 | if (IS_ERR(em)) { |
6633 | ret = PTR_ERR(em); | |
6634 | goto unlock_err; | |
6635 | } | |
4b46fce2 JB |
6636 | |
6637 | /* | |
6638 | * Ok for INLINE and COMPRESSED extents we need to fall back to buffered | |
6639 | * io. INLINE is special, and we could probably kludge it in here, but | |
6640 | * it's still buffered so for safety let's just fall back to the generic | |
6641 | * buffered path. | |
6642 | * | |
6643 | * For COMPRESSED we _have_ to read the entire extent in so we can | |
6644 | * decompress it, so there will be buffering required no matter what we | |
6645 | * do, so go ahead and fallback to buffered. | |
6646 | * | |
6647 | * We return -ENOTBLK because that's what makes DIO go ahead and go back | |
6648 | * to buffered IO. Don't blame me, this is the price we pay for using | |
6649 | * the generic code. | |
6650 | */ | |
6651 | if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) || | |
6652 | em->block_start == EXTENT_MAP_INLINE) { | |
6653 | free_extent_map(em); | |
eb838e73 JB |
6654 | ret = -ENOTBLK; |
6655 | goto unlock_err; | |
4b46fce2 JB |
6656 | } |
6657 | ||
6658 | /* Just a good old fashioned hole, return */ | |
6659 | if (!create && (em->block_start == EXTENT_MAP_HOLE || | |
6660 | test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) { | |
6661 | free_extent_map(em); | |
eb838e73 | 6662 | goto unlock_err; |
4b46fce2 JB |
6663 | } |
6664 | ||
6665 | /* | |
6666 | * We don't allocate a new extent in the following cases | |
6667 | * | |
6668 | * 1) The inode is marked as NODATACOW. In this case we'll just use the | |
6669 | * existing extent. | |
6670 | * 2) The extent is marked as PREALLOC. We're good to go here and can | |
6671 | * just use the extent. | |
6672 | * | |
6673 | */ | |
46bfbb5c | 6674 | if (!create) { |
eb838e73 JB |
6675 | len = min(len, em->len - (start - em->start)); |
6676 | lockstart = start + len; | |
6677 | goto unlock; | |
46bfbb5c | 6678 | } |
4b46fce2 JB |
6679 | |
6680 | if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) || | |
6681 | ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) && | |
6682 | em->block_start != EXTENT_MAP_HOLE)) { | |
4b46fce2 JB |
6683 | int type; |
6684 | int ret; | |
eb384b55 | 6685 | u64 block_start, orig_start, orig_block_len, ram_bytes; |
4b46fce2 JB |
6686 | |
6687 | if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) | |
6688 | type = BTRFS_ORDERED_PREALLOC; | |
6689 | else | |
6690 | type = BTRFS_ORDERED_NOCOW; | |
46bfbb5c | 6691 | len = min(len, em->len - (start - em->start)); |
4b46fce2 | 6692 | block_start = em->block_start + (start - em->start); |
46bfbb5c | 6693 | |
00361589 | 6694 | if (can_nocow_extent(inode, start, &len, &orig_start, |
7ee9e440 | 6695 | &orig_block_len, &ram_bytes) == 1) { |
69ffb543 JB |
6696 | if (type == BTRFS_ORDERED_PREALLOC) { |
6697 | free_extent_map(em); | |
6698 | em = create_pinned_em(inode, start, len, | |
6699 | orig_start, | |
b4939680 | 6700 | block_start, len, |
cc95bef6 JB |
6701 | orig_block_len, |
6702 | ram_bytes, type); | |
00361589 | 6703 | if (IS_ERR(em)) |
69ffb543 | 6704 | goto unlock_err; |
69ffb543 JB |
6705 | } |
6706 | ||
46bfbb5c CM |
6707 | ret = btrfs_add_ordered_extent_dio(inode, start, |
6708 | block_start, len, len, type); | |
46bfbb5c CM |
6709 | if (ret) { |
6710 | free_extent_map(em); | |
eb838e73 | 6711 | goto unlock_err; |
46bfbb5c CM |
6712 | } |
6713 | goto unlock; | |
4b46fce2 | 6714 | } |
4b46fce2 | 6715 | } |
00361589 | 6716 | |
46bfbb5c CM |
6717 | /* |
6718 | * this will cow the extent, reset the len in case we changed | |
6719 | * it above | |
6720 | */ | |
6721 | len = bh_result->b_size; | |
70c8a91c JB |
6722 | free_extent_map(em); |
6723 | em = btrfs_new_extent_direct(inode, start, len); | |
eb838e73 JB |
6724 | if (IS_ERR(em)) { |
6725 | ret = PTR_ERR(em); | |
6726 | goto unlock_err; | |
6727 | } | |
46bfbb5c CM |
6728 | len = min(len, em->len - (start - em->start)); |
6729 | unlock: | |
4b46fce2 JB |
6730 | bh_result->b_blocknr = (em->block_start + (start - em->start)) >> |
6731 | inode->i_blkbits; | |
46bfbb5c | 6732 | bh_result->b_size = len; |
4b46fce2 JB |
6733 | bh_result->b_bdev = em->bdev; |
6734 | set_buffer_mapped(bh_result); | |
c3473e83 JB |
6735 | if (create) { |
6736 | if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) | |
6737 | set_buffer_new(bh_result); | |
6738 | ||
6739 | /* | |
6740 | * Need to update the i_size under the extent lock so buffered | |
6741 | * readers will get the updated i_size when we unlock. | |
6742 | */ | |
6743 | if (start + len > i_size_read(inode)) | |
6744 | i_size_write(inode, start + len); | |
0934856d | 6745 | |
172a5049 MX |
6746 | spin_lock(&BTRFS_I(inode)->lock); |
6747 | BTRFS_I(inode)->outstanding_extents++; | |
6748 | spin_unlock(&BTRFS_I(inode)->lock); | |
6749 | ||
0934856d MX |
6750 | ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, |
6751 | lockstart + len - 1, EXTENT_DELALLOC, NULL, | |
6752 | &cached_state, GFP_NOFS); | |
6753 | BUG_ON(ret); | |
c3473e83 | 6754 | } |
4b46fce2 | 6755 | |
eb838e73 JB |
6756 | /* |
6757 | * In the case of write we need to clear and unlock the entire range, | |
6758 | * in the case of read we need to unlock only the end area that we | |
6759 | * aren't using if there is any left over space. | |
6760 | */ | |
24c03fa5 | 6761 | if (lockstart < lockend) { |
0934856d MX |
6762 | clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, |
6763 | lockend, unlock_bits, 1, 0, | |
6764 | &cached_state, GFP_NOFS); | |
24c03fa5 | 6765 | } else { |
eb838e73 | 6766 | free_extent_state(cached_state); |
24c03fa5 | 6767 | } |
eb838e73 | 6768 | |
4b46fce2 JB |
6769 | free_extent_map(em); |
6770 | ||
6771 | return 0; | |
eb838e73 JB |
6772 | |
6773 | unlock_err: | |
eb838e73 JB |
6774 | clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend, |
6775 | unlock_bits, 1, 0, &cached_state, GFP_NOFS); | |
6776 | return ret; | |
4b46fce2 JB |
6777 | } |
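/*
 * Illustrative sketch, not part of the original file: the minimal contract a
 * get_blocks callback such as btrfs_get_blocks_direct() above fulfills for
 * __blockdev_direct_IO().  The disk address is a made-up value; the real
 * code also sets bh_result->b_bdev to the device holding the extent.
 */
static int sketch_get_block_contract(struct inode *inode, sector_t iblock,
				     struct buffer_head *bh_result, int create)
{
	u64 start = (u64)iblock << inode->i_blkbits;	/* byte offset requested          */
	u64 em_start = start;				/* pretend the extent starts here */
	u64 em_block_start = 8ULL * 1024 * 1024;	/* hypothetical disk byte address */
	u64 len = bh_result->b_size;			/* the fs may trim this length    */

	bh_result->b_blocknr = (em_block_start + (start - em_start)) >>
			       inode->i_blkbits;	/* block number, not bytes        */
	bh_result->b_size = len;			/* how much was actually mapped   */
	set_buffer_mapped(bh_result);
	if (create)					/* real code skips this for prealloc */
		set_buffer_new(bh_result);
	return 0;
}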
6778 | ||
4b46fce2 JB |
6779 | static void btrfs_endio_direct_read(struct bio *bio, int err) |
6780 | { | |
e65e1535 | 6781 | struct btrfs_dio_private *dip = bio->bi_private; |
4b46fce2 JB |
6782 | struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1; |
6783 | struct bio_vec *bvec = bio->bi_io_vec; | |
4b46fce2 JB |
6784 | struct inode *inode = dip->inode; |
6785 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
9be3395b | 6786 | struct bio *dio_bio; |
facc8a22 MX |
6787 | u32 *csums = (u32 *)dip->csum; |
6788 | int index = 0; | |
4b46fce2 | 6789 | u64 start; |
4b46fce2 JB |
6790 | |
6791 | start = dip->logical_offset; | |
6792 | do { | |
6793 | if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) { | |
6794 | struct page *page = bvec->bv_page; | |
6795 | char *kaddr; | |
6796 | u32 csum = ~(u32)0; | |
6797 | unsigned long flags; | |
6798 | ||
6799 | local_irq_save(flags); | |
7ac687d9 | 6800 | kaddr = kmap_atomic(page); |
b0496686 | 6801 | csum = btrfs_csum_data(kaddr + bvec->bv_offset, |
4b46fce2 JB |
6802 | csum, bvec->bv_len); |
6803 | btrfs_csum_final(csum, (char *)&csum); | |
7ac687d9 | 6804 | kunmap_atomic(kaddr); |
4b46fce2 JB |
6805 | local_irq_restore(flags); |
6806 | ||
6807 | flush_dcache_page(bvec->bv_page); | |
facc8a22 MX |
6808 | if (csum != csums[index]) { |
6809 | btrfs_err(root->fs_info, "csum failed ino %llu off %llu csum %u expected csum %u", | |
c1c9ff7c GU |
6810 | btrfs_ino(inode), start, csum, |
6811 | csums[index]); | |
4b46fce2 JB |
6812 | err = -EIO; |
6813 | } | |
6814 | } | |
6815 | ||
6816 | start += bvec->bv_len; | |
4b46fce2 | 6817 | bvec++; |
facc8a22 | 6818 | index++; |
4b46fce2 JB |
6819 | } while (bvec <= bvec_end); |
6820 | ||
6821 | unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset, | |
d0082371 | 6822 | dip->logical_offset + dip->bytes - 1); |
9be3395b | 6823 | dio_bio = dip->dio_bio; |
4b46fce2 | 6824 | |
4b46fce2 | 6825 | kfree(dip); |
c0da7aa1 JB |
6826 | |
6827 | /* If we had a csum failure make sure to clear the uptodate flag */ | |
6828 | if (err) | |
9be3395b CM |
6829 | clear_bit(BIO_UPTODATE, &dio_bio->bi_flags); |
6830 | dio_end_io(dio_bio, err); | |
6831 | bio_put(bio); | |
4b46fce2 JB |
6832 | } |
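/*
 * Illustrative sketch, not part of the original file: the per-page checksum
 * verification done in the loop above, reduced to a single full page.  The
 * "expected" value would come from dip->csum in the real code.
 */
static int sketch_verify_page_csum(struct page *page, u32 expected)
{
	char *kaddr = kmap_atomic(page);
	u32 csum = ~(u32)0;

	csum = btrfs_csum_data(kaddr, csum, PAGE_CACHE_SIZE);
	btrfs_csum_final(csum, (char *)&csum);
	kunmap_atomic(kaddr);

	return csum == expected ? 0 : -EIO;
}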
6833 | ||
6834 | static void btrfs_endio_direct_write(struct bio *bio, int err) | |
6835 | { | |
6836 | struct btrfs_dio_private *dip = bio->bi_private; | |
6837 | struct inode *inode = dip->inode; | |
6838 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
4b46fce2 | 6839 | struct btrfs_ordered_extent *ordered = NULL; |
163cf09c CM |
6840 | u64 ordered_offset = dip->logical_offset; |
6841 | u64 ordered_bytes = dip->bytes; | |
9be3395b | 6842 | struct bio *dio_bio; |
4b46fce2 JB |
6843 | int ret; |
6844 | ||
6845 | if (err) | |
6846 | goto out_done; | |
163cf09c CM |
6847 | again: |
6848 | ret = btrfs_dec_test_first_ordered_pending(inode, &ordered, | |
6849 | &ordered_offset, | |
5fd02043 | 6850 | ordered_bytes, !err); |
4b46fce2 | 6851 | if (!ret) |
163cf09c | 6852 | goto out_test; |
4b46fce2 | 6853 | |
5fd02043 JB |
6854 | ordered->work.func = finish_ordered_fn; |
6855 | ordered->work.flags = 0; | |
6856 | btrfs_queue_worker(&root->fs_info->endio_write_workers, | |
6857 | &ordered->work); | |
163cf09c CM |
6858 | out_test: |
6859 | /* | |
6860 | * our bio might span multiple ordered extents. If we haven't | |
6861 | * completed the accounting for the whole dio, go back and try again | |
6862 | */ | |
6863 | if (ordered_offset < dip->logical_offset + dip->bytes) { | |
6864 | ordered_bytes = dip->logical_offset + dip->bytes - | |
6865 | ordered_offset; | |
5fd02043 | 6866 | ordered = NULL; |
163cf09c CM |
6867 | goto again; |
6868 | } | |
4b46fce2 | 6869 | out_done: |
9be3395b | 6870 | dio_bio = dip->dio_bio; |
4b46fce2 | 6871 | |
4b46fce2 | 6872 | kfree(dip); |
c0da7aa1 JB |
6873 | |
6874 | /* If we had an error make sure to clear the uptodate flag */ | |
6875 | if (err) | |
9be3395b CM |
6876 | clear_bit(BIO_UPTODATE, &dio_bio->bi_flags); |
6877 | dio_end_io(dio_bio, err); | |
6878 | bio_put(bio); | |
4b46fce2 JB |
6879 | } |
6880 | ||
eaf25d93 CM |
6881 | static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw, |
6882 | struct bio *bio, int mirror_num, | |
6883 | unsigned long bio_flags, u64 offset) | |
6884 | { | |
6885 | int ret; | |
6886 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
6887 | ret = btrfs_csum_one_bio(root, inode, bio, offset, 1); | |
79787eaa | 6888 | BUG_ON(ret); /* -ENOMEM */ |
eaf25d93 CM |
6889 | return 0; |
6890 | } | |
6891 | ||
e65e1535 MX |
6892 | static void btrfs_end_dio_bio(struct bio *bio, int err) |
6893 | { | |
6894 | struct btrfs_dio_private *dip = bio->bi_private; | |
6895 | ||
6896 | if (err) { | |
33345d01 | 6897 | printk(KERN_ERR "btrfs direct IO failed ino %llu rw %lu " |
3dd1462e | 6898 | "sector %#Lx len %u err no %d\n", |
c1c9ff7c | 6899 | btrfs_ino(dip->inode), bio->bi_rw, |
3dd1462e | 6900 | (unsigned long long)bio->bi_sector, bio->bi_size, err); |
e65e1535 MX |
6901 | dip->errors = 1; |
6902 | ||
6903 | /* | |
6904 | * before the atomic variable goes to zero, we must make sure | |
6905 | * dip->errors is perceived to be set. | |
6906 | */ | |
6907 | smp_mb__before_atomic_dec(); | |
6908 | } | |
6909 | ||
6910 | /* if there are more bios still pending for this dio, just exit */ | |
6911 | if (!atomic_dec_and_test(&dip->pending_bios)) | |
6912 | goto out; | |
6913 | ||
9be3395b | 6914 | if (dip->errors) { |
e65e1535 | 6915 | bio_io_error(dip->orig_bio); |
9be3395b CM |
6916 | } else { |
6917 | set_bit(BIO_UPTODATE, &dip->dio_bio->bi_flags); | |
e65e1535 MX |
6918 | bio_endio(dip->orig_bio, 0); |
6919 | } | |
6920 | out: | |
6921 | bio_put(bio); | |
6922 | } | |
6923 | ||
6924 | static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev, | |
6925 | u64 first_sector, gfp_t gfp_flags) | |
6926 | { | |
6927 | int nr_vecs = bio_get_nr_vecs(bdev); | |
6928 | return btrfs_bio_alloc(bdev, first_sector, nr_vecs, gfp_flags); | |
6929 | } | |
6930 | ||
6931 | static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode, | |
6932 | int rw, u64 file_offset, int skip_sum, | |
c329861d | 6933 | int async_submit) |
e65e1535 | 6934 | { |
facc8a22 | 6935 | struct btrfs_dio_private *dip = bio->bi_private; |
e65e1535 MX |
6936 | int write = rw & REQ_WRITE; |
6937 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
6938 | int ret; | |
6939 | ||
b812ce28 JB |
6940 | if (async_submit) |
6941 | async_submit = !atomic_read(&BTRFS_I(inode)->sync_writers); | |
6942 | ||
e65e1535 | 6943 | bio_get(bio); |
5fd02043 JB |
6944 | |
6945 | if (!write) { | |
6946 | ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0); | |
6947 | if (ret) | |
6948 | goto err; | |
6949 | } | |
e65e1535 | 6950 | |
1ae39938 JB |
6951 | if (skip_sum) |
6952 | goto map; | |
6953 | ||
6954 | if (write && async_submit) { | |
e65e1535 MX |
6955 | ret = btrfs_wq_submit_bio(root->fs_info, |
6956 | inode, rw, bio, 0, 0, | |
6957 | file_offset, | |
6958 | __btrfs_submit_bio_start_direct_io, | |
6959 | __btrfs_submit_bio_done); | |
6960 | goto err; | |
1ae39938 JB |
6961 | } else if (write) { |
6962 | /* | |
6963 | * If we aren't doing async submit, calculate the csum of the | |
6964 | * bio now. | |
6965 | */ | |
6966 | ret = btrfs_csum_one_bio(root, inode, bio, file_offset, 1); | |
6967 | if (ret) | |
6968 | goto err; | |
c2db1073 | 6969 | } else if (!skip_sum) { |
facc8a22 MX |
6970 | ret = btrfs_lookup_bio_sums_dio(root, inode, dip, bio, |
6971 | file_offset); | |
c2db1073 TI |
6972 | if (ret) |
6973 | goto err; | |
6974 | } | |
e65e1535 | 6975 | |
1ae39938 JB |
6976 | map: |
6977 | ret = btrfs_map_bio(root, rw, bio, 0, async_submit); | |
e65e1535 MX |
6978 | err: |
6979 | bio_put(bio); | |
6980 | return ret; | |
6981 | } | |
6982 | ||
6983 | static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip, | |
6984 | int skip_sum) | |
6985 | { | |
6986 | struct inode *inode = dip->inode; | |
6987 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
e65e1535 MX |
6988 | struct bio *bio; |
6989 | struct bio *orig_bio = dip->orig_bio; | |
6990 | struct bio_vec *bvec = orig_bio->bi_io_vec; | |
6991 | u64 start_sector = orig_bio->bi_sector; | |
6992 | u64 file_offset = dip->logical_offset; | |
6993 | u64 submit_len = 0; | |
6994 | u64 map_length; | |
6995 | int nr_pages = 0; | |
e65e1535 | 6996 | int ret = 0; |
1ae39938 | 6997 | int async_submit = 0; |
e65e1535 | 6998 | |
e65e1535 | 6999 | map_length = orig_bio->bi_size; |
53b381b3 | 7000 | ret = btrfs_map_block(root->fs_info, rw, start_sector << 9, |
e65e1535 MX |
7001 | &map_length, NULL, 0); |
7002 | if (ret) { | |
64728bbb | 7003 | bio_put(orig_bio); |
e65e1535 MX |
7004 | return -EIO; |
7005 | } | |
facc8a22 | 7006 | |
02f57c7a JB |
7007 | if (map_length >= orig_bio->bi_size) { |
7008 | bio = orig_bio; | |
7009 | goto submit; | |
7010 | } | |
7011 | ||
53b381b3 DW |
7012 | /* async crcs make it difficult to collect full stripe writes. */ |
7013 | if (btrfs_get_alloc_profile(root, 1) & | |
7014 | (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) | |
7015 | async_submit = 0; | |
7016 | else | |
7017 | async_submit = 1; | |
7018 | ||
02f57c7a JB |
7019 | bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS); |
7020 | if (!bio) | |
7021 | return -ENOMEM; | |
7022 | bio->bi_private = dip; | |
7023 | bio->bi_end_io = btrfs_end_dio_bio; | |
7024 | atomic_inc(&dip->pending_bios); | |
7025 | ||
e65e1535 MX |
7026 | while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) { |
7027 | if (unlikely(map_length < submit_len + bvec->bv_len || | |
7028 | bio_add_page(bio, bvec->bv_page, bvec->bv_len, | |
7029 | bvec->bv_offset) < bvec->bv_len)) { | |
7030 | /* | |
7031 | * inc the count before we submit the bio so | |
7032 | * we know the end IO handler won't happen before | |
7033 | * we inc the count. Otherwise, the dip might get freed | |
7034 | * before we're done setting it up | |
7035 | */ | |
7036 | atomic_inc(&dip->pending_bios); | |
7037 | ret = __btrfs_submit_dio_bio(bio, inode, rw, | |
7038 | file_offset, skip_sum, | |
c329861d | 7039 | async_submit); |
e65e1535 MX |
7040 | if (ret) { |
7041 | bio_put(bio); | |
7042 | atomic_dec(&dip->pending_bios); | |
7043 | goto out_err; | |
7044 | } | |
7045 | ||
e65e1535 MX |
7046 | start_sector += submit_len >> 9; |
7047 | file_offset += submit_len; | |
7048 | ||
7049 | submit_len = 0; | |
7050 | nr_pages = 0; | |
7051 | ||
7052 | bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, | |
7053 | start_sector, GFP_NOFS); | |
7054 | if (!bio) | |
7055 | goto out_err; | |
7056 | bio->bi_private = dip; | |
7057 | bio->bi_end_io = btrfs_end_dio_bio; | |
7058 | ||
7059 | map_length = orig_bio->bi_size; | |
53b381b3 | 7060 | ret = btrfs_map_block(root->fs_info, rw, |
3ec706c8 | 7061 | start_sector << 9, |
e65e1535 MX |
7062 | &map_length, NULL, 0); |
7063 | if (ret) { | |
7064 | bio_put(bio); | |
7065 | goto out_err; | |
7066 | } | |
7067 | } else { | |
7068 | submit_len += bvec->bv_len; | |
7069 | nr_pages++; | |
7070 | bvec++; | |
7071 | } | |
7072 | } | |
7073 | ||
02f57c7a | 7074 | submit: |
e65e1535 | 7075 | ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum, |
c329861d | 7076 | async_submit); |
e65e1535 MX |
7077 | if (!ret) |
7078 | return 0; | |
7079 | ||
7080 | bio_put(bio); | |
7081 | out_err: | |
7082 | dip->errors = 1; | |
7083 | /* | |
7084 | * before the atomic variable goes to zero, we must | |
7085 | * make sure dip->errors is perceived to be set. | |
7086 | */ | |
7087 | smp_mb__before_atomic_dec(); | |
7088 | if (atomic_dec_and_test(&dip->pending_bios)) | |
7089 | bio_io_error(dip->orig_bio); | |
7090 | ||
7091 | /* bio_end_io() will handle error, so we needn't return it */ | |
7092 | return 0; | |
7093 | } | |
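/*
 * Illustrative arithmetic, not part of the original file: the split decision
 * made inside the loop of btrfs_submit_direct_hook() above.  Numbers in the
 * comment are made up.
 */
static bool sketch_dio_bio_needs_split(u64 map_length, u64 submit_len,
				       unsigned int bvec_len)
{
	/*
	 * e.g. map_length = 64K (what one mapping can cover), submit_len = 60K
	 * already queued, bvec_len = 8K for the next segment: 60K + 8K > 64K,
	 * so the current bio is submitted and a fresh one is started.
	 */
	return map_length < submit_len + bvec_len;
}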
7094 | ||
9be3395b CM |
7095 | static void btrfs_submit_direct(int rw, struct bio *dio_bio, |
7096 | struct inode *inode, loff_t file_offset) | |
4b46fce2 JB |
7097 | { |
7098 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
7099 | struct btrfs_dio_private *dip; | |
9be3395b | 7100 | struct bio *io_bio; |
4b46fce2 | 7101 | int skip_sum; |
facc8a22 | 7102 | int sum_len; |
7b6d91da | 7103 | int write = rw & REQ_WRITE; |
4b46fce2 | 7104 | int ret = 0; |
facc8a22 | 7105 | u16 csum_size; |
4b46fce2 JB |
7106 | |
7107 | skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; | |
7108 | ||
9be3395b | 7109 | io_bio = btrfs_bio_clone(dio_bio, GFP_NOFS); |
9be3395b CM |
7110 | if (!io_bio) { |
7111 | ret = -ENOMEM; | |
7112 | goto free_ordered; | |
7113 | } | |
7114 | ||
facc8a22 MX |
7115 | if (!skip_sum && !write) { |
7116 | csum_size = btrfs_super_csum_size(root->fs_info->super_copy); | |
7117 | sum_len = dio_bio->bi_size >> inode->i_sb->s_blocksize_bits; | |
7118 | sum_len *= csum_size; | |
7119 | } else { | |
7120 | sum_len = 0; | |
7121 | } | |
7122 | ||
7123 | dip = kmalloc(sizeof(*dip) + sum_len, GFP_NOFS); | |
4b46fce2 JB |
7124 | if (!dip) { |
7125 | ret = -ENOMEM; | |
9be3395b | 7126 | goto free_io_bio; |
4b46fce2 | 7127 | } |
4b46fce2 | 7128 | |
9be3395b | 7129 | dip->private = dio_bio->bi_private; |
4b46fce2 JB |
7130 | dip->inode = inode; |
7131 | dip->logical_offset = file_offset; | |
e6da5d2e | 7132 | dip->bytes = dio_bio->bi_size; |
9be3395b CM |
7133 | dip->disk_bytenr = (u64)dio_bio->bi_sector << 9; |
7134 | io_bio->bi_private = dip; | |
e65e1535 | 7135 | dip->errors = 0; |
9be3395b CM |
7136 | dip->orig_bio = io_bio; |
7137 | dip->dio_bio = dio_bio; | |
e65e1535 | 7138 | atomic_set(&dip->pending_bios, 0); |
4b46fce2 JB |
7139 | |
7140 | if (write) | |
9be3395b | 7141 | io_bio->bi_end_io = btrfs_endio_direct_write; |
4b46fce2 | 7142 | else |
9be3395b | 7143 | io_bio->bi_end_io = btrfs_endio_direct_read; |
4b46fce2 | 7144 | |
e65e1535 MX |
7145 | ret = btrfs_submit_direct_hook(rw, dip, skip_sum); |
7146 | if (!ret) | |
eaf25d93 | 7147 | return; |
9be3395b CM |
7148 | |
7149 | free_io_bio: | |
7150 | bio_put(io_bio); | |
7151 | ||
4b46fce2 JB |
7152 | free_ordered: |
7153 | /* | |
7154 | * If this is a write, we need to clean up the reserved space and kill | |
7155 | * the ordered extent. | |
7156 | */ | |
7157 | if (write) { | |
7158 | struct btrfs_ordered_extent *ordered; | |
955256f2 | 7159 | ordered = btrfs_lookup_ordered_extent(inode, file_offset); |
4b46fce2 JB |
7160 | if (!test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags) && |
7161 | !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) | |
7162 | btrfs_free_reserved_extent(root, ordered->start, | |
7163 | ordered->disk_len); | |
7164 | btrfs_put_ordered_extent(ordered); | |
7165 | btrfs_put_ordered_extent(ordered); | |
7166 | } | |
9be3395b | 7167 | bio_endio(dio_bio, ret); |
4b46fce2 JB |
7168 | } |
7169 | ||
5a5f79b5 CM |
7170 | static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *iocb, |
7171 | const struct iovec *iov, loff_t offset, | |
7172 | unsigned long nr_segs) | |
7173 | { | |
7174 | int seg; | |
a1b75f7d | 7175 | int i; |
5a5f79b5 CM |
7176 | size_t size; |
7177 | unsigned long addr; | |
7178 | unsigned blocksize_mask = root->sectorsize - 1; | |
7179 | ssize_t retval = -EINVAL; | |
7180 | loff_t end = offset; | |
7181 | ||
7182 | if (offset & blocksize_mask) | |
7183 | goto out; | |
7184 | ||
7185 | /* Check the memory alignment. Blocks cannot straddle pages */ | |
7186 | for (seg = 0; seg < nr_segs; seg++) { | |
7187 | addr = (unsigned long)iov[seg].iov_base; | |
7188 | size = iov[seg].iov_len; | |
7189 | end += size; | |
a1b75f7d | 7190 | if ((addr & blocksize_mask) || (size & blocksize_mask)) |
5a5f79b5 | 7191 | goto out; |
a1b75f7d JB |
7192 | |
7193 | /* If this is a write we don't need to check anymore */ | |
7194 | if (rw & WRITE) | |
7195 | continue; | |
7196 | ||
7197 | /* | |
7198 | * Check to make sure we don't have duplicate iov_base's in this | |
7199 | * iovec, if so return EINVAL, otherwise we'll get csum errors | |
7200 | * when reading back. | |
7201 | */ | |
7202 | for (i = seg + 1; i < nr_segs; i++) { | |
7203 | if (iov[seg].iov_base == iov[i].iov_base) | |
7204 | goto out; | |
7205 | } | |
5a5f79b5 CM |
7206 | } |
7207 | retval = 0; | |
7208 | out: | |
7209 | return retval; | |
7210 | } | |
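/*
 * Illustrative sketch, not part of the original file: the alignment rule
 * check_direct_IO() above enforces, with a hard-coded 4096-byte sectorsize
 * standing in for root->sectorsize.
 */
static bool sketch_dio_segment_aligned(loff_t offset, unsigned long addr,
				       size_t size)
{
	unsigned blocksize_mask = 4096 - 1;

	/*
	 * offset = 8192, page-aligned addr, size = 16384 -> aligned, DIO proceeds.
	 * offset = 8700 -> misaligned; the real function returns -EINVAL, so
	 * btrfs_direct_IO() returns 0 and the caller falls back to buffered I/O.
	 */
	return !(offset & blocksize_mask) &&
	       !(addr & blocksize_mask) &&
	       !(size & blocksize_mask);
}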
eb838e73 | 7211 | |
16432985 CM |
7212 | static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb, |
7213 | const struct iovec *iov, loff_t offset, | |
7214 | unsigned long nr_segs) | |
7215 | { | |
4b46fce2 JB |
7216 | struct file *file = iocb->ki_filp; |
7217 | struct inode *inode = file->f_mapping->host; | |
0934856d | 7218 | size_t count = 0; |
2e60a51e | 7219 | int flags = 0; |
38851cc1 MX |
7220 | bool wakeup = true; |
7221 | bool relock = false; | |
0934856d | 7222 | ssize_t ret; |
4b46fce2 | 7223 | |
5a5f79b5 | 7224 | if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iov, |
eb838e73 | 7225 | offset, nr_segs)) |
5a5f79b5 | 7226 | return 0; |
3f7c579c | 7227 | |
38851cc1 MX |
7228 | atomic_inc(&inode->i_dio_count); |
7229 | smp_mb__after_atomic_inc(); | |
7230 | ||
0e267c44 JB |
7231 | /* |
7232 | * The generic stuff only does filemap_write_and_wait_range, which isn't | |
7233 | * enough if we've written compressed pages to this area, so we need to | |
7234 | * call btrfs_wait_ordered_range to make absolutely sure that any | |
7235 | * outstanding dirty pages are on disk. | |
7236 | */ | |
7237 | count = iov_length(iov, nr_segs); | |
0ef8b726 JB |
7238 | ret = btrfs_wait_ordered_range(inode, offset, count); |
7239 | if (ret) | |
7240 | return ret; | |
0e267c44 | 7241 | |
0934856d | 7242 | if (rw & WRITE) { |
38851cc1 MX |
7243 | /* |
7244 | * If the write DIO is beyond the EOF, we need to update | |
7245 | * the isize, but it is protected by i_mutex. So we | |
7246 | * cannot unlock the i_mutex in this case. | |
7247 | */ | |
7248 | if (offset + count <= inode->i_size) { | |
7249 | mutex_unlock(&inode->i_mutex); | |
7250 | relock = true; | |
7251 | } | |
0934856d MX |
7252 | ret = btrfs_delalloc_reserve_space(inode, count); |
7253 | if (ret) | |
38851cc1 MX |
7254 | goto out; |
7255 | } else if (unlikely(test_bit(BTRFS_INODE_READDIO_NEED_LOCK, | |
7256 | &BTRFS_I(inode)->runtime_flags))) { | |
7257 | inode_dio_done(inode); | |
7258 | flags = DIO_LOCKING | DIO_SKIP_HOLES; | |
7259 | wakeup = false; | |
0934856d MX |
7260 | } |
7261 | ||
7262 | ret = __blockdev_direct_IO(rw, iocb, inode, | |
7263 | BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev, | |
7264 | iov, offset, nr_segs, btrfs_get_blocks_direct, NULL, | |
2e60a51e | 7265 | btrfs_submit_direct, flags); |
0934856d MX |
7266 | if (rw & WRITE) { |
7267 | if (ret < 0 && ret != -EIOCBQUEUED) | |
7268 | btrfs_delalloc_release_space(inode, count); | |
172a5049 | 7269 | else if (ret >= 0 && (size_t)ret < count) |
0934856d MX |
7270 | btrfs_delalloc_release_space(inode, |
7271 | count - (size_t)ret); | |
172a5049 MX |
7272 | else |
7273 | btrfs_delalloc_release_metadata(inode, 0); | |
0934856d | 7274 | } |
38851cc1 | 7275 | out: |
2e60a51e MX |
7276 | if (wakeup) |
7277 | inode_dio_done(inode); | |
38851cc1 MX |
7278 | if (relock) |
7279 | mutex_lock(&inode->i_mutex); | |
0934856d MX |
7280 | |
7281 | return ret; | |
16432985 CM |
7282 | } |
7283 | ||
05dadc09 TI |
7284 | #define BTRFS_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC) |
7285 | ||
1506fcc8 YS |
7286 | static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, |
7287 | __u64 start, __u64 len) | |
7288 | { | |
05dadc09 TI |
7289 | int ret; |
7290 | ||
7291 | ret = fiemap_check_flags(fieinfo, BTRFS_FIEMAP_FLAGS); | |
7292 | if (ret) | |
7293 | return ret; | |
7294 | ||
ec29ed5b | 7295 | return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent_fiemap); |
1506fcc8 YS |
7296 | } |
7297 | ||
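btrfs_fiemap() only honours FIEMAP_FLAG_SYNC here; any other flag makes fiemap_check_flags() reject the request. A minimal userspace sketch of calling the FIEMAP ioctl against a file on a btrfs mount (the path is hypothetical and room for 32 extents is an arbitrary choice):

#include <fcntl.h>
#include <linux/fiemap.h>
#include <linux/fs.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/btrfs/testfile", O_RDONLY);	/* hypothetical path */
	if (fd < 0) { perror("open"); return 1; }

	size_t sz = sizeof(struct fiemap) + 32 * sizeof(struct fiemap_extent);
	struct fiemap *fm = calloc(1, sz);
	if (!fm) { close(fd); return 1; }

	fm->fm_start = 0;
	fm->fm_length = ~0ULL;			/* map the whole file */
	fm->fm_flags = FIEMAP_FLAG_SYNC;	/* the only flag btrfs accepts */
	fm->fm_extent_count = 32;

	if (ioctl(fd, FS_IOC_FIEMAP, fm) == 0) {
		for (unsigned int i = 0; i < fm->fm_mapped_extents; i++)
			printf("extent %u: logical %llu physical %llu len %llu\n",
			       i,
			       (unsigned long long)fm->fm_extents[i].fe_logical,
			       (unsigned long long)fm->fm_extents[i].fe_physical,
			       (unsigned long long)fm->fm_extents[i].fe_length);
	} else {
		perror("FS_IOC_FIEMAP");
	}

	free(fm);
	close(fd);
	return 0;
}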
a52d9a80 | 7298 | int btrfs_readpage(struct file *file, struct page *page) |
9ebefb18 | 7299 | { |
d1310b2e CM |
7300 | struct extent_io_tree *tree; |
7301 | tree = &BTRFS_I(page->mapping->host)->io_tree; | |
8ddc7d9c | 7302 | return extent_read_full_page(tree, page, btrfs_get_extent, 0); |
9ebefb18 | 7303 | } |
1832a6d5 | 7304 | |
a52d9a80 | 7305 | static int btrfs_writepage(struct page *page, struct writeback_control *wbc) |
39279cc3 | 7306 | { |
d1310b2e | 7307 | struct extent_io_tree *tree; |
b888db2b CM |
7308 | |
7309 | ||
7310 | if (current->flags & PF_MEMALLOC) { | |
7311 | redirty_page_for_writepage(wbc, page); | |
7312 | unlock_page(page); | |
7313 | return 0; | |
7314 | } | |
d1310b2e | 7315 | tree = &BTRFS_I(page->mapping->host)->io_tree; |
a52d9a80 | 7316 | return extent_write_full_page(tree, page, btrfs_get_extent, wbc); |
9ebefb18 CM |
7317 | } |
7318 | ||
48a3b636 ES |
7319 | static int btrfs_writepages(struct address_space *mapping, |
7320 | struct writeback_control *wbc) | |
b293f02e | 7321 | { |
d1310b2e | 7322 | struct extent_io_tree *tree; |
771ed689 | 7323 | |
d1310b2e | 7324 | tree = &BTRFS_I(mapping->host)->io_tree; |
b293f02e CM |
7325 | return extent_writepages(tree, mapping, btrfs_get_extent, wbc); |
7326 | } | |
7327 | ||
3ab2fb5a CM |
7328 | static int |
7329 | btrfs_readpages(struct file *file, struct address_space *mapping, | |
7330 | struct list_head *pages, unsigned nr_pages) | |
7331 | { | |
d1310b2e CM |
7332 | struct extent_io_tree *tree; |
7333 | tree = &BTRFS_I(mapping->host)->io_tree; | |
3ab2fb5a CM |
7334 | return extent_readpages(tree, mapping, pages, nr_pages, |
7335 | btrfs_get_extent); | |
7336 | } | |
e6dcd2dc | 7337 | static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags) |
9ebefb18 | 7338 | { |
d1310b2e CM |
7339 | struct extent_io_tree *tree; |
7340 | struct extent_map_tree *map; | |
a52d9a80 | 7341 | int ret; |
8c2383c3 | 7342 | |
d1310b2e CM |
7343 | tree = &BTRFS_I(page->mapping->host)->io_tree; |
7344 | map = &BTRFS_I(page->mapping->host)->extent_tree; | |
70dec807 | 7345 | ret = try_release_extent_mapping(map, tree, page, gfp_flags); |
a52d9a80 CM |
7346 | if (ret == 1) { |
7347 | ClearPagePrivate(page); | |
7348 | set_page_private(page, 0); | |
7349 | page_cache_release(page); | |
39279cc3 | 7350 | } |
a52d9a80 | 7351 | return ret; |
39279cc3 CM |
7352 | } |
7353 | ||
e6dcd2dc CM |
7354 | static int btrfs_releasepage(struct page *page, gfp_t gfp_flags) |
7355 | { | |
98509cfc CM |
7356 | if (PageWriteback(page) || PageDirty(page)) |
7357 | return 0; | |
b335b003 | 7358 | return __btrfs_releasepage(page, gfp_flags & GFP_NOFS); |
e6dcd2dc CM |
7359 | } |
7360 | ||
d47992f8 LC |
7361 | static void btrfs_invalidatepage(struct page *page, unsigned int offset, |
7362 | unsigned int length) | |
39279cc3 | 7363 | { |
5fd02043 | 7364 | struct inode *inode = page->mapping->host; |
d1310b2e | 7365 | struct extent_io_tree *tree; |
e6dcd2dc | 7366 | struct btrfs_ordered_extent *ordered; |
2ac55d41 | 7367 | struct extent_state *cached_state = NULL; |
e6dcd2dc CM |
7368 | u64 page_start = page_offset(page); |
7369 | u64 page_end = page_start + PAGE_CACHE_SIZE - 1; | |
39279cc3 | 7370 | |
8b62b72b CM |
7371 | /* |
7372 | * we have the page locked, so new writeback can't start, | |
7373 | * and the dirty bit won't be cleared while we are here. | |
7374 | * | |
7375 | * Wait for IO on this page so that we can safely clear | |
7376 | * the PagePrivate2 bit and do ordered accounting | |
7377 | */ | |
e6dcd2dc | 7378 | wait_on_page_writeback(page); |
8b62b72b | 7379 | |
5fd02043 | 7380 | tree = &BTRFS_I(inode)->io_tree; |
e6dcd2dc CM |
7381 | if (offset) { |
7382 | btrfs_releasepage(page, GFP_NOFS); | |
7383 | return; | |
7384 | } | |
d0082371 | 7385 | lock_extent_bits(tree, page_start, page_end, 0, &cached_state); |
4eee4fa4 | 7386 | ordered = btrfs_lookup_ordered_extent(inode, page_offset(page)); |
e6dcd2dc | 7387 | if (ordered) { |
eb84ae03 CM |
7388 | /* |
7389 | * IO on this page will never be started, so we need | |
7390 | * to account for any ordered extents now | |
7391 | */ | |
e6dcd2dc CM |
7392 | clear_extent_bit(tree, page_start, page_end, |
7393 | EXTENT_DIRTY | EXTENT_DELALLOC | | |
9e8a4a8b LB |
7394 | EXTENT_LOCKED | EXTENT_DO_ACCOUNTING | |
7395 | EXTENT_DEFRAG, 1, 0, &cached_state, GFP_NOFS); | |
8b62b72b CM |
7396 | /* |
7397 | * whoever cleared the private bit is responsible | |
7398 | * for the finish_ordered_io | |
7399 | */ | |
77cef2ec JB |
7400 | if (TestClearPagePrivate2(page)) { |
7401 | struct btrfs_ordered_inode_tree *tree; | |
7402 | u64 new_len; | |
7403 | ||
7404 | tree = &BTRFS_I(inode)->ordered_tree; | |
7405 | ||
7406 | spin_lock_irq(&tree->lock); | |
7407 | set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags); | |
7408 | new_len = page_start - ordered->file_offset; | |
7409 | if (new_len < ordered->truncated_len) | |
7410 | ordered->truncated_len = new_len; | |
7411 | spin_unlock_irq(&tree->lock); | |
7412 | ||
7413 | if (btrfs_dec_test_ordered_pending(inode, &ordered, | |
7414 | page_start, | |
7415 | PAGE_CACHE_SIZE, 1)) | |
7416 | btrfs_finish_ordered_io(ordered); | |
8b62b72b | 7417 | } |
e6dcd2dc | 7418 | btrfs_put_ordered_extent(ordered); |
2ac55d41 | 7419 | cached_state = NULL; |
d0082371 | 7420 | lock_extent_bits(tree, page_start, page_end, 0, &cached_state); |
e6dcd2dc CM |
7421 | } |
7422 | clear_extent_bit(tree, page_start, page_end, | |
32c00aff | 7423 | EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC | |
9e8a4a8b LB |
7424 | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 1, 1, |
7425 | &cached_state, GFP_NOFS); | |
e6dcd2dc CM |
7426 | __btrfs_releasepage(page, GFP_NOFS); |
7427 | ||
4a096752 | 7428 | ClearPageChecked(page); |
9ad6b7bc | 7429 | if (PagePrivate(page)) { |
9ad6b7bc CM |
7430 | ClearPagePrivate(page); |
7431 | set_page_private(page, 0); | |
7432 | page_cache_release(page); | |
7433 | } | |
39279cc3 CM |
7434 | } |
7435 | ||
9ebefb18 CM |
7436 | /* |
7437 | * btrfs_page_mkwrite() is not allowed to change the file size as it gets | |
7438 | * called from a page fault handler when a page is first dirtied. Hence we must | |
7439 | * be careful to check for EOF conditions here. We set the page up correctly | |
7440 | * for a written page which means we get ENOSPC checking when writing into | |
7441 | * holes and correct delalloc and unwritten extent mapping on filesystems that | |
7442 | * support these features. | |
7443 | * | |
7444 | * We are not allowed to take the i_mutex here so we have to play games to | |
7445 | * protect against truncate races as the page could now be beyond EOF. Because | |
7446 | * vmtruncate() writes the inode size before removing pages, once we have the | |
7447 | * page lock we can determine safely if the page is beyond EOF. If it is not | |
7448 | * beyond EOF, then the page is guaranteed safe against truncation until we | |
7449 | * unlock the page. | |
7450 | */ | |
c2ec175c | 7451 | int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) |
9ebefb18 | 7452 | { |
c2ec175c | 7453 | struct page *page = vmf->page; |
496ad9aa | 7454 | struct inode *inode = file_inode(vma->vm_file); |
1832a6d5 | 7455 | struct btrfs_root *root = BTRFS_I(inode)->root; |
e6dcd2dc CM |
7456 | struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; |
7457 | struct btrfs_ordered_extent *ordered; | |
2ac55d41 | 7458 | struct extent_state *cached_state = NULL; |
e6dcd2dc CM |
7459 | char *kaddr; |
7460 | unsigned long zero_start; | |
9ebefb18 | 7461 | loff_t size; |
1832a6d5 | 7462 | int ret; |
9998eb70 | 7463 | int reserved = 0; |
a52d9a80 | 7464 | u64 page_start; |
e6dcd2dc | 7465 | u64 page_end; |
9ebefb18 | 7466 | |
b2b5ef5c | 7467 | sb_start_pagefault(inode->i_sb); |
0ca1f7ce | 7468 | ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE); |
9998eb70 | 7469 | if (!ret) { |
e41f941a | 7470 | ret = file_update_time(vma->vm_file); |
9998eb70 CM |
7471 | reserved = 1; |
7472 | } | |
56a76f82 NP |
7473 | if (ret) { |
7474 | if (ret == -ENOMEM) | |
7475 | ret = VM_FAULT_OOM; | |
7476 | else /* -ENOSPC, -EIO, etc */ | |
7477 | ret = VM_FAULT_SIGBUS; | |
9998eb70 CM |
7478 | if (reserved) |
7479 | goto out; | |
7480 | goto out_noreserve; | |
56a76f82 | 7481 | } |
1832a6d5 | 7482 | |
56a76f82 | 7483 | ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */ |
e6dcd2dc | 7484 | again: |
9ebefb18 | 7485 | lock_page(page); |
9ebefb18 | 7486 | size = i_size_read(inode); |
e6dcd2dc CM |
7487 | page_start = page_offset(page); |
7488 | page_end = page_start + PAGE_CACHE_SIZE - 1; | |
a52d9a80 | 7489 | |
9ebefb18 | 7490 | if ((page->mapping != inode->i_mapping) || |
e6dcd2dc | 7491 | (page_start >= size)) { |
9ebefb18 CM |
7492 | /* page got truncated out from underneath us */ |
7493 | goto out_unlock; | |
7494 | } | |
e6dcd2dc CM |
7495 | wait_on_page_writeback(page); |
7496 | ||
d0082371 | 7497 | lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state); |
e6dcd2dc CM |
7498 | set_page_extent_mapped(page); |
7499 | ||
eb84ae03 CM |
7500 | /* |
7501 | * we can't set the delalloc bits if there are pending ordered | |
7502 | * extents. Drop our locks and wait for them to finish | |
7503 | */ | |
e6dcd2dc CM |
7504 | ordered = btrfs_lookup_ordered_extent(inode, page_start); |
7505 | if (ordered) { | |
2ac55d41 JB |
7506 | unlock_extent_cached(io_tree, page_start, page_end, |
7507 | &cached_state, GFP_NOFS); | |
e6dcd2dc | 7508 | unlock_page(page); |
eb84ae03 | 7509 | btrfs_start_ordered_extent(inode, ordered, 1); |
e6dcd2dc CM |
7510 | btrfs_put_ordered_extent(ordered); |
7511 | goto again; | |
7512 | } | |
7513 | ||
fbf19087 JB |
7514 | /* |
7515 | * XXX - page_mkwrite gets called every time the page is dirtied, even | |
7516 | * if it was already dirty, so for space accounting reasons we need to | |
7517 | * clear any delalloc bits for the range we are fixing to save. There | |
7518 | * is probably a better way to do this, but for now keep consistent with | |
7519 | * prepare_pages in the normal write path. | |
7520 | */ | |
2ac55d41 | 7521 | clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end, |
9e8a4a8b LB |
7522 | EXTENT_DIRTY | EXTENT_DELALLOC | |
7523 | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, | |
2ac55d41 | 7524 | 0, 0, &cached_state, GFP_NOFS); |
fbf19087 | 7525 | |
2ac55d41 JB |
7526 | ret = btrfs_set_extent_delalloc(inode, page_start, page_end, |
7527 | &cached_state); | |
9ed74f2d | 7528 | if (ret) { |
2ac55d41 JB |
7529 | unlock_extent_cached(io_tree, page_start, page_end, |
7530 | &cached_state, GFP_NOFS); | |
9ed74f2d JB |
7531 | ret = VM_FAULT_SIGBUS; |
7532 | goto out_unlock; | |
7533 | } | |
e6dcd2dc | 7534 | ret = 0; |
9ebefb18 CM |
7535 | |
7536 | /* page is wholly or partially inside EOF */ | |
a52d9a80 | 7537 | if (page_start + PAGE_CACHE_SIZE > size) |
e6dcd2dc | 7538 | zero_start = size & ~PAGE_CACHE_MASK; |
9ebefb18 | 7539 | else |
e6dcd2dc | 7540 | zero_start = PAGE_CACHE_SIZE; |
9ebefb18 | 7541 | |
e6dcd2dc CM |
7542 | if (zero_start != PAGE_CACHE_SIZE) { |
7543 | kaddr = kmap(page); | |
7544 | memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start); | |
7545 | flush_dcache_page(page); | |
7546 | kunmap(page); | |
7547 | } | |
247e743c | 7548 | ClearPageChecked(page); |
e6dcd2dc | 7549 | set_page_dirty(page); |
50a9b214 | 7550 | SetPageUptodate(page); |
5a3f23d5 | 7551 | |
257c62e1 CM |
7552 | BTRFS_I(inode)->last_trans = root->fs_info->generation; |
7553 | BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid; | |
46d8bc34 | 7554 | BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->root->last_log_commit; |
257c62e1 | 7555 | |
2ac55d41 | 7556 | unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS); |
9ebefb18 CM |
7557 | |
7558 | out_unlock: | |
b2b5ef5c JK |
7559 | if (!ret) { |
7560 | sb_end_pagefault(inode->i_sb); | |
50a9b214 | 7561 | return VM_FAULT_LOCKED; |
b2b5ef5c | 7562 | } |
9ebefb18 | 7563 | unlock_page(page); |
1832a6d5 | 7564 | out: |
ec39e180 | 7565 | btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE); |
9998eb70 | 7566 | out_noreserve: |
b2b5ef5c | 7567 | sb_end_pagefault(inode->i_sb); |
9ebefb18 CM |
7568 | return ret; |
7569 | } | |
7570 | ||
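From userspace, the path into btrfs_page_mkwrite() is simply the first store into a shared, writable mapping; that write fault is what forces the delalloc reservation above before the page may be dirtied. A minimal sketch with a hypothetical path, assuming the file is already at least one page long:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/btrfs/testfile", O_RDWR);	/* hypothetical path */
	if (fd < 0) { perror("open"); return 1; }

	long pagesz = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, pagesz, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) { perror("mmap"); close(fd); return 1; }

	/* first write fault on the shared mapping calls ->page_mkwrite */
	memcpy(p, "hello", 5);

	msync(p, pagesz, MS_SYNC);	/* push the dirtied page to disk */
	munmap(p, pagesz);
	close(fd);
	return 0;
}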
a41ad394 | 7571 | static int btrfs_truncate(struct inode *inode) |
39279cc3 CM |
7572 | { |
7573 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
fcb80c2a | 7574 | struct btrfs_block_rsv *rsv; |
a71754fc | 7575 | int ret = 0; |
3893e33b | 7576 | int err = 0; |
39279cc3 | 7577 | struct btrfs_trans_handle *trans; |
dbe674a9 | 7578 | u64 mask = root->sectorsize - 1; |
07127184 | 7579 | u64 min_size = btrfs_calc_trunc_metadata_size(root, 1); |
39279cc3 | 7580 | |
0ef8b726 JB |
7581 | ret = btrfs_wait_ordered_range(inode, inode->i_size & (~mask), |
7582 | (u64)-1); | |
7583 | if (ret) | |
7584 | return ret; | |
39279cc3 | 7585 | |
fcb80c2a JB |
7586 | /* |
7587 | * Yes ladies and gentlemen, this is indeed ugly. The fact is we have | |
7588 | * 3 things going on here | |
7589 | * | |
7590 | * 1) We need to reserve space for our orphan item and the space to | |
7591 | * delete our orphan item. Lord knows we don't want to have a dangling | |
7592 | * orphan item because we didn't reserve space to remove it. | |
7593 | * | |
7594 | * 2) We need to reserve space to update our inode. | |
7595 | * | |
7596 | * 3) We need to have something to cache all the space that is going to | |
7597 | * be freed up by the truncate operation, but also have some slack | |
7598 | * space reserved in case it uses space during the truncate (thank you | |
7599 | * very much snapshotting). | |
7600 | * | |
7601 | * And we need these to all be separate. The fact is we can use a lot of | |
7602 | * space doing the truncate, and we have no earthly idea how much space | |
7603 | * we will use, so we need the truncate reservation to be separate so it | |
7604 | * doesn't end up using space reserved for updating the inode or | |
7605 | * removing the orphan item. We also need to be able to stop the | |
7606 | * transaction and start a new one, which means we need to be able to | |
7607 | * update the inode several times, and we have no way of knowing how | |
7608 | * many times that will be, so we can't just reserve 1 item for the | |
7609 | * entirety of the operation, so that has to be done separately as well. | |
7610 | * Then there is the orphan item, which does indeed need to be held on | |
7611 | * to for the whole operation, and we need nobody to touch this reserved | |
7612 | * space except the orphan code. | |
7613 | * | |
7614 | * So that leaves us with | |
7615 | * | |
7616 | * 1) root->orphan_block_rsv - for the orphan deletion. | |
7617 | * 2) rsv - for the truncate reservation, which we will steal from the | |
7618 | * transaction reservation. | |
7619 | * 3) fs_info->trans_block_rsv - this will have 1 item's worth left for | |
7620 | * updating the inode. | |
7621 | */ | |
66d8f3dd | 7622 | rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP); |
fcb80c2a JB |
7623 | if (!rsv) |
7624 | return -ENOMEM; | |
4a338542 | 7625 | rsv->size = min_size; |
ca7e70f5 | 7626 | rsv->failfast = 1; |
f0cd846e | 7627 | |
907cbceb | 7628 | /* |
07127184 | 7629 | * 1 for the truncate slack space |
907cbceb JB |
7630 | * 1 for updating the inode. |
7631 | */ | |
f3fe820c | 7632 | trans = btrfs_start_transaction(root, 2); |
fcb80c2a JB |
7633 | if (IS_ERR(trans)) { |
7634 | err = PTR_ERR(trans); | |
7635 | goto out; | |
7636 | } | |
f0cd846e | 7637 | |
907cbceb JB |
7638 | /* Migrate the slack space for the truncate to our reserve */ |
7639 | ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv, | |
7640 | min_size); | |
fcb80c2a | 7641 | BUG_ON(ret); |
f0cd846e | 7642 | |
5a3f23d5 CM |
7643 | /* |
7644 | * setattr is responsible for setting the ordered_data_close flag, | |
7645 | * but that is only tested during the last file release. That | |
7646 | * could happen well after the next commit, leaving a great big | |
7647 | * window where new writes may get lost if someone chooses to write | |
7648 | * to this file after truncating to zero | |
7649 | * | |
7650 | * The inode doesn't have any dirty data here, and so if we commit | |
7651 | * this is a noop. If someone immediately starts writing to the inode | |
7652 | * it is very likely we'll catch some of their writes in this | |
7653 | * transaction, and the commit will find this file on the ordered | |
7654 | * data list with good things to send down. | |
7655 | * | |
7656 | * This is a best effort solution, there is still a window where | |
7657 | * using truncate to replace the contents of the file will | |
7658 | * end up with a zero length file after a crash. | |
7659 | */ | |
72ac3c0d JB |
7660 | if (inode->i_size == 0 && test_bit(BTRFS_INODE_ORDERED_DATA_CLOSE, |
7661 | &BTRFS_I(inode)->runtime_flags)) | |
5a3f23d5 CM |
7662 | btrfs_add_ordered_operation(trans, root, inode); |
7663 | ||
5dc562c5 JB |
7664 | /* |
7665 | * So if we truncate and then write and fsync we normally would just | |
7666 | * write the extents that changed, which is a problem if we need to | |
7667 | * first truncate that entire inode. So set this flag so we write out | |
7668 | * all of the extents in the inode to the sync log so we're completely | |
7669 | * safe. | |
7670 | */ | |
7671 | set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags); | |
ca7e70f5 | 7672 | trans->block_rsv = rsv; |
907cbceb | 7673 | |
8082510e YZ |
7674 | while (1) { |
7675 | ret = btrfs_truncate_inode_items(trans, root, inode, | |
7676 | inode->i_size, | |
7677 | BTRFS_EXTENT_DATA_KEY); | |
ca7e70f5 | 7678 | if (ret != -ENOSPC) { |
3893e33b | 7679 | err = ret; |
8082510e | 7680 | break; |
3893e33b | 7681 | } |
39279cc3 | 7682 | |
fcb80c2a | 7683 | trans->block_rsv = &root->fs_info->trans_block_rsv; |
8082510e | 7684 | ret = btrfs_update_inode(trans, root, inode); |
3893e33b JB |
7685 | if (ret) { |
7686 | err = ret; | |
7687 | break; | |
7688 | } | |
ca7e70f5 | 7689 | |
8082510e | 7690 | btrfs_end_transaction(trans, root); |
b53d3f5d | 7691 | btrfs_btree_balance_dirty(root); |
ca7e70f5 JB |
7692 | |
7693 | trans = btrfs_start_transaction(root, 2); | |
7694 | if (IS_ERR(trans)) { | |
7695 | ret = err = PTR_ERR(trans); | |
7696 | trans = NULL; | |
7697 | break; | |
7698 | } | |
7699 | ||
7700 | ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, | |
7701 | rsv, min_size); | |
7702 | BUG_ON(ret); /* shouldn't happen */ | |
7703 | trans->block_rsv = rsv; | |
8082510e YZ |
7704 | } |
7705 | ||
7706 | if (ret == 0 && inode->i_nlink > 0) { | |
fcb80c2a | 7707 | trans->block_rsv = root->orphan_block_rsv; |
8082510e | 7708 | ret = btrfs_orphan_del(trans, inode); |
3893e33b JB |
7709 | if (ret) |
7710 | err = ret; | |
8082510e YZ |
7711 | } |
7712 | ||
917c16b2 CM |
7713 | if (trans) { |
7714 | trans->block_rsv = &root->fs_info->trans_block_rsv; | |
7715 | ret = btrfs_update_inode(trans, root, inode); | |
7716 | if (ret && !err) | |
7717 | err = ret; | |
7b128766 | 7718 | |
7ad85bb7 | 7719 | ret = btrfs_end_transaction(trans, root); |
b53d3f5d | 7720 | btrfs_btree_balance_dirty(root); |
917c16b2 | 7721 | } |
fcb80c2a JB |
7722 | |
7723 | out: | |
7724 | btrfs_free_block_rsv(root, rsv); | |
7725 | ||
3893e33b JB |
7726 | if (ret && !err) |
7727 | err = ret; | |
a41ad394 | 7728 | |
3893e33b | 7729 | return err; |
39279cc3 CM |
7730 | } |
7731 | ||
d352ac68 CM |
7732 | /* |
7733 | * create a new subvolume directory/inode (helper for the ioctl). | |
7734 | */ | |
d2fb3437 | 7735 | int btrfs_create_subvol_root(struct btrfs_trans_handle *trans, |
d82a6f1d | 7736 | struct btrfs_root *new_root, u64 new_dirid) |
39279cc3 | 7737 | { |
39279cc3 | 7738 | struct inode *inode; |
76dda93c | 7739 | int err; |
00e4e6b3 | 7740 | u64 index = 0; |
39279cc3 | 7741 | |
12fc9d09 FA |
7742 | inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, |
7743 | new_dirid, new_dirid, | |
7744 | S_IFDIR | (~current_umask() & S_IRWXUGO), | |
7745 | &index); | |
54aa1f4d | 7746 | if (IS_ERR(inode)) |
f46b5a66 | 7747 | return PTR_ERR(inode); |
39279cc3 CM |
7748 | inode->i_op = &btrfs_dir_inode_operations; |
7749 | inode->i_fop = &btrfs_dir_file_operations; | |
7750 | ||
bfe86848 | 7751 | set_nlink(inode, 1); |
dbe674a9 | 7752 | btrfs_i_size_write(inode, 0); |
3b96362c | 7753 | |
76dda93c | 7754 | err = btrfs_update_inode(trans, new_root, inode); |
cb8e7090 | 7755 | |
76dda93c | 7756 | iput(inode); |
ce598979 | 7757 | return err; |
39279cc3 CM |
7758 | } |
7759 | ||
39279cc3 CM |
7760 | struct inode *btrfs_alloc_inode(struct super_block *sb) |
7761 | { | |
7762 | struct btrfs_inode *ei; | |
2ead6ae7 | 7763 | struct inode *inode; |
39279cc3 CM |
7764 | |
7765 | ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS); | |
7766 | if (!ei) | |
7767 | return NULL; | |
2ead6ae7 YZ |
7768 | |
7769 | ei->root = NULL; | |
2ead6ae7 | 7770 | ei->generation = 0; |
15ee9bc7 | 7771 | ei->last_trans = 0; |
257c62e1 | 7772 | ei->last_sub_trans = 0; |
e02119d5 | 7773 | ei->logged_trans = 0; |
2ead6ae7 | 7774 | ei->delalloc_bytes = 0; |
2ead6ae7 YZ |
7775 | ei->disk_i_size = 0; |
7776 | ei->flags = 0; | |
7709cde3 | 7777 | ei->csum_bytes = 0; |
2ead6ae7 YZ |
7778 | ei->index_cnt = (u64)-1; |
7779 | ei->last_unlink_trans = 0; | |
46d8bc34 | 7780 | ei->last_log_commit = 0; |
2ead6ae7 | 7781 | |
9e0baf60 JB |
7782 | spin_lock_init(&ei->lock); |
7783 | ei->outstanding_extents = 0; | |
7784 | ei->reserved_extents = 0; | |
2ead6ae7 | 7785 | |
72ac3c0d | 7786 | ei->runtime_flags = 0; |
261507a0 | 7787 | ei->force_compress = BTRFS_COMPRESS_NONE; |
2ead6ae7 | 7788 | |
16cdcec7 MX |
7789 | ei->delayed_node = NULL; |
7790 | ||
2ead6ae7 | 7791 | inode = &ei->vfs_inode; |
a8067e02 | 7792 | extent_map_tree_init(&ei->extent_tree); |
f993c883 DS |
7793 | extent_io_tree_init(&ei->io_tree, &inode->i_data); |
7794 | extent_io_tree_init(&ei->io_failure_tree, &inode->i_data); | |
0b32f4bb JB |
7795 | ei->io_tree.track_uptodate = 1; |
7796 | ei->io_failure_tree.track_uptodate = 1; | |
b812ce28 | 7797 | atomic_set(&ei->sync_writers, 0); |
2ead6ae7 | 7798 | mutex_init(&ei->log_mutex); |
f248679e | 7799 | mutex_init(&ei->delalloc_mutex); |
e6dcd2dc | 7800 | btrfs_ordered_inode_tree_init(&ei->ordered_tree); |
2ead6ae7 | 7801 | INIT_LIST_HEAD(&ei->delalloc_inodes); |
5a3f23d5 | 7802 | INIT_LIST_HEAD(&ei->ordered_operations); |
2ead6ae7 YZ |
7803 | RB_CLEAR_NODE(&ei->rb_node); |
7804 | ||
7805 | return inode; | |
39279cc3 CM |
7806 | } |
7807 | ||
aaedb55b JB |
7808 | #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS |
7809 | void btrfs_test_destroy_inode(struct inode *inode) | |
7810 | { | |
7811 | btrfs_drop_extent_cache(inode, 0, (u64)-1, 0); | |
7812 | kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode)); | |
7813 | } | |
7814 | #endif | |
7815 | ||
fa0d7e3d NP |
7816 | static void btrfs_i_callback(struct rcu_head *head) |
7817 | { | |
7818 | struct inode *inode = container_of(head, struct inode, i_rcu); | |
fa0d7e3d NP |
7819 | kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode)); |
7820 | } | |
7821 | ||
39279cc3 CM |
7822 | void btrfs_destroy_inode(struct inode *inode) |
7823 | { | |
e6dcd2dc | 7824 | struct btrfs_ordered_extent *ordered; |
5a3f23d5 CM |
7825 | struct btrfs_root *root = BTRFS_I(inode)->root; |
7826 | ||
b3d9b7a3 | 7827 | WARN_ON(!hlist_empty(&inode->i_dentry)); |
39279cc3 | 7828 | WARN_ON(inode->i_data.nrpages); |
9e0baf60 JB |
7829 | WARN_ON(BTRFS_I(inode)->outstanding_extents); |
7830 | WARN_ON(BTRFS_I(inode)->reserved_extents); | |
7709cde3 JB |
7831 | WARN_ON(BTRFS_I(inode)->delalloc_bytes); |
7832 | WARN_ON(BTRFS_I(inode)->csum_bytes); | |
39279cc3 | 7833 | |
a6dbd429 JB |
7834 | /* |
7835 | * This can happen where we create an inode, but somebody else also | |
7836 | * created the same inode and we need to destroy the one we already | |
7837 | * created. | |
7838 | */ | |
7839 | if (!root) | |
7840 | goto free; | |
7841 | ||
5a3f23d5 CM |
7842 | /* |
7843 | * Make sure we're properly removed from the ordered operation | |
7844 | * lists. | |
7845 | */ | |
7846 | smp_mb(); | |
7847 | if (!list_empty(&BTRFS_I(inode)->ordered_operations)) { | |
199c2a9c | 7848 | spin_lock(&root->fs_info->ordered_root_lock); |
5a3f23d5 | 7849 | list_del_init(&BTRFS_I(inode)->ordered_operations); |
199c2a9c | 7850 | spin_unlock(&root->fs_info->ordered_root_lock); |
5a3f23d5 CM |
7851 | } |
7852 | ||
8a35d95f JB |
7853 | if (test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, |
7854 | &BTRFS_I(inode)->runtime_flags)) { | |
c2cf52eb | 7855 | btrfs_info(root->fs_info, "inode %llu still on the orphan list", |
c1c9ff7c | 7856 | btrfs_ino(inode)); |
8a35d95f | 7857 | atomic_dec(&root->orphan_inodes); |
7b128766 | 7858 | } |
7b128766 | 7859 | |
d397712b | 7860 | while (1) { |
e6dcd2dc CM |
7861 | ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1); |
7862 | if (!ordered) | |
7863 | break; | |
7864 | else { | |
c2cf52eb | 7865 | btrfs_err(root->fs_info, "found ordered extent %llu %llu on inode cleanup", |
c1c9ff7c | 7866 | ordered->file_offset, ordered->len); |
e6dcd2dc CM |
7867 | btrfs_remove_ordered_extent(inode, ordered); |
7868 | btrfs_put_ordered_extent(ordered); | |
7869 | btrfs_put_ordered_extent(ordered); | |
7870 | } | |
7871 | } | |
5d4f98a2 | 7872 | inode_tree_del(inode); |
5b21f2ed | 7873 | btrfs_drop_extent_cache(inode, 0, (u64)-1, 0); |
a6dbd429 | 7874 | free: |
fa0d7e3d | 7875 | call_rcu(&inode->i_rcu, btrfs_i_callback); |
39279cc3 CM |
7876 | } |
7877 | ||
45321ac5 | 7878 | int btrfs_drop_inode(struct inode *inode) |
76dda93c YZ |
7879 | { |
7880 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
45321ac5 | 7881 | |
6379ef9f NA |
7882 | if (root == NULL) |
7883 | return 1; | |
7884 | ||
fa6ac876 | 7885 | /* the snap/subvol tree is on deleting */ |
69e9c6c6 | 7886 | if (btrfs_root_refs(&root->root_item) == 0) |
45321ac5 | 7887 | return 1; |
76dda93c | 7888 | else |
45321ac5 | 7889 | return generic_drop_inode(inode); |
76dda93c YZ |
7890 | } |
7891 | ||
0ee0fda0 | 7892 | static void init_once(void *foo) |
39279cc3 CM |
7893 | { |
7894 | struct btrfs_inode *ei = (struct btrfs_inode *) foo; | |
7895 | ||
7896 | inode_init_once(&ei->vfs_inode); | |
7897 | } | |
7898 | ||
7899 | void btrfs_destroy_cachep(void) | |
7900 | { | |
8c0a8537 KS |
7901 | /* |
7902 | * Make sure all delayed rcu free inodes are flushed before we | |
7903 | * destroy cache. | |
7904 | */ | |
7905 | rcu_barrier(); | |
39279cc3 CM |
7906 | if (btrfs_inode_cachep) |
7907 | kmem_cache_destroy(btrfs_inode_cachep); | |
7908 | if (btrfs_trans_handle_cachep) | |
7909 | kmem_cache_destroy(btrfs_trans_handle_cachep); | |
7910 | if (btrfs_transaction_cachep) | |
7911 | kmem_cache_destroy(btrfs_transaction_cachep); | |
39279cc3 CM |
7912 | if (btrfs_path_cachep) |
7913 | kmem_cache_destroy(btrfs_path_cachep); | |
dc89e982 JB |
7914 | if (btrfs_free_space_cachep) |
7915 | kmem_cache_destroy(btrfs_free_space_cachep); | |
8ccf6f19 MX |
7916 | if (btrfs_delalloc_work_cachep) |
7917 | kmem_cache_destroy(btrfs_delalloc_work_cachep); | |
39279cc3 CM |
7918 | } |
7919 | ||
7920 | int btrfs_init_cachep(void) | |
7921 | { | |
837e1972 | 7922 | btrfs_inode_cachep = kmem_cache_create("btrfs_inode", |
9601e3f6 CH |
7923 | sizeof(struct btrfs_inode), 0, |
7924 | SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, init_once); | |
39279cc3 CM |
7925 | if (!btrfs_inode_cachep) |
7926 | goto fail; | |
9601e3f6 | 7927 | |
837e1972 | 7928 | btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle", |
9601e3f6 CH |
7929 | sizeof(struct btrfs_trans_handle), 0, |
7930 | SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); | |
39279cc3 CM |
7931 | if (!btrfs_trans_handle_cachep) |
7932 | goto fail; | |
9601e3f6 | 7933 | |
837e1972 | 7934 | btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction", |
9601e3f6 CH |
7935 | sizeof(struct btrfs_transaction), 0, |
7936 | SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); | |
39279cc3 CM |
7937 | if (!btrfs_transaction_cachep) |
7938 | goto fail; | |
9601e3f6 | 7939 | |
837e1972 | 7940 | btrfs_path_cachep = kmem_cache_create("btrfs_path", |
9601e3f6 CH |
7941 | sizeof(struct btrfs_path), 0, |
7942 | SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); | |
39279cc3 CM |
7943 | if (!btrfs_path_cachep) |
7944 | goto fail; | |
9601e3f6 | 7945 | |
837e1972 | 7946 | btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space", |
dc89e982 JB |
7947 | sizeof(struct btrfs_free_space), 0, |
7948 | SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); | |
7949 | if (!btrfs_free_space_cachep) | |
7950 | goto fail; | |
7951 | ||
8ccf6f19 MX |
7952 | btrfs_delalloc_work_cachep = kmem_cache_create("btrfs_delalloc_work", |
7953 | sizeof(struct btrfs_delalloc_work), 0, | |
7954 | SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, | |
7955 | NULL); | |
7956 | if (!btrfs_delalloc_work_cachep) | |
7957 | goto fail; | |
7958 | ||
39279cc3 CM |
7959 | return 0; |
7960 | fail: | |
7961 | btrfs_destroy_cachep(); | |
7962 | return -ENOMEM; | |
7963 | } | |
7964 | ||
7965 | static int btrfs_getattr(struct vfsmount *mnt, | |
7966 | struct dentry *dentry, struct kstat *stat) | |
7967 | { | |
df0af1a5 | 7968 | u64 delalloc_bytes; |
39279cc3 | 7969 | struct inode *inode = dentry->d_inode; |
fadc0d8b DS |
7970 | u32 blocksize = inode->i_sb->s_blocksize; |
7971 | ||
39279cc3 | 7972 | generic_fillattr(inode, stat); |
0ee5dc67 | 7973 | stat->dev = BTRFS_I(inode)->root->anon_dev; |
d6667462 | 7974 | stat->blksize = PAGE_CACHE_SIZE; |
df0af1a5 MX |
7975 | |
7976 | spin_lock(&BTRFS_I(inode)->lock); | |
7977 | delalloc_bytes = BTRFS_I(inode)->delalloc_bytes; | |
7978 | spin_unlock(&BTRFS_I(inode)->lock); | |
fadc0d8b | 7979 | stat->blocks = (ALIGN(inode_get_bytes(inode), blocksize) + |
df0af1a5 | 7980 | ALIGN(delalloc_bytes, blocksize)) >> 9; |
39279cc3 CM |
7981 | return 0; |
7982 | } | |
7983 | ||
d397712b CM |
7984 | static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, |
7985 | struct inode *new_dir, struct dentry *new_dentry) | |
39279cc3 CM |
7986 | { |
7987 | struct btrfs_trans_handle *trans; | |
7988 | struct btrfs_root *root = BTRFS_I(old_dir)->root; | |
4df27c4d | 7989 | struct btrfs_root *dest = BTRFS_I(new_dir)->root; |
39279cc3 CM |
7990 | struct inode *new_inode = new_dentry->d_inode; |
7991 | struct inode *old_inode = old_dentry->d_inode; | |
7992 | struct timespec ctime = CURRENT_TIME; | |
00e4e6b3 | 7993 | u64 index = 0; |
4df27c4d | 7994 | u64 root_objectid; |
39279cc3 | 7995 | int ret; |
33345d01 | 7996 | u64 old_ino = btrfs_ino(old_inode); |
39279cc3 | 7997 | |
33345d01 | 7998 | if (btrfs_ino(new_dir) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) |
f679a840 YZ |
7999 | return -EPERM; |
8000 | ||
4df27c4d | 8001 | /* we only allow rename subvolume link between subvolumes */ |
33345d01 | 8002 | if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest) |
3394e160 CM |
8003 | return -EXDEV; |
8004 | ||
33345d01 LZ |
8005 | if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID || |
8006 | (new_inode && btrfs_ino(new_inode) == BTRFS_FIRST_FREE_OBJECTID)) | |
39279cc3 | 8007 | return -ENOTEMPTY; |
5f39d397 | 8008 | |
4df27c4d YZ |
8009 | if (S_ISDIR(old_inode->i_mode) && new_inode && |
8010 | new_inode->i_size > BTRFS_EMPTY_DIR_SIZE) | |
8011 | return -ENOTEMPTY; | |
9c52057c CM |
8012 | |
8013 | ||
8014 | /* check for collisions, even if the name isn't there */ | |
4871c158 | 8015 | ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino, |
9c52057c CM |
8016 | new_dentry->d_name.name, |
8017 | new_dentry->d_name.len); | |
8018 | ||
8019 | if (ret) { | |
8020 | if (ret == -EEXIST) { | |
8021 | /* we shouldn't get | |
8022 | * eexist without a new_inode */ | |
fae7f21c | 8023 | if (WARN_ON(!new_inode)) { |
9c52057c CM |
8024 | return ret; |
8025 | } | |
8026 | } else { | |
8027 | /* maybe -EOVERFLOW */ | |
8028 | return ret; | |
8029 | } | |
8030 | } | |
8031 | ret = 0; | |
8032 | ||
5a3f23d5 CM |
8033 | /* |
8034 | * we're using rename to replace one file with another. | |
8035 | * and the replacement file is large. Start IO on it now so | |
8036 | * we don't add too much work to the end of the transaction | |
8037 | */ | |
4baf8c92 | 8038 | if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size && |
5a3f23d5 CM |
8039 | old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT) |
8040 | filemap_flush(old_inode->i_mapping); | |
8041 | ||
76dda93c | 8042 | /* close the racy window with snapshot create/destroy ioctl */ |
33345d01 | 8043 | if (old_ino == BTRFS_FIRST_FREE_OBJECTID) |
76dda93c | 8044 | down_read(&root->fs_info->subvol_sem); |
a22285a6 YZ |
8045 | /* |
8046 | * We want to reserve the absolute worst case amount of items. So if | |
8047 | * both inodes are subvols and we need to unlink them then that would | |
8048 | * require 4 item modifications, but if they are both normal inodes it | |
8049 | * would require 5 item modifications, so we'll assume they're normal | |
8050 | * inodes. So 5 * 2 is 10, plus 1 for the new link, so 11 total items | |
8051 | * should cover the worst case number of items we'll modify. | |
8052 | */ | |
6e137ed3 | 8053 | trans = btrfs_start_transaction(root, 11); |
b44c59a8 JL |
8054 | if (IS_ERR(trans)) { |
8055 | ret = PTR_ERR(trans); | |
8056 | goto out_notrans; | |
8057 | } | |
76dda93c | 8058 | |
4df27c4d YZ |
8059 | if (dest != root) |
8060 | btrfs_record_root_in_trans(trans, dest); | |
5f39d397 | 8061 | |
a5719521 YZ |
8062 | ret = btrfs_set_inode_index(new_dir, &index); |
8063 | if (ret) | |
8064 | goto out_fail; | |
5a3f23d5 | 8065 | |
33345d01 | 8066 | if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) { |
4df27c4d YZ |
8067 | /* force full log commit if subvolume involved. */ |
8068 | root->fs_info->last_trans_log_full_commit = trans->transid; | |
8069 | } else { | |
a5719521 YZ |
8070 | ret = btrfs_insert_inode_ref(trans, dest, |
8071 | new_dentry->d_name.name, | |
8072 | new_dentry->d_name.len, | |
33345d01 LZ |
8073 | old_ino, |
8074 | btrfs_ino(new_dir), index); | |
a5719521 YZ |
8075 | if (ret) |
8076 | goto out_fail; | |
4df27c4d YZ |
8077 | /* |
8078 | * this is an ugly little race, but the rename is required | |
8079 | * to make sure that if we crash, the inode is either at the | |
8080 | * old name or the new one. pinning the log transaction lets | |
8081 | * us make sure we don't allow a log commit to come in after | |
8082 | * we unlink the name but before we add the new name back in. | |
8083 | */ | |
8084 | btrfs_pin_log_trans(root); | |
8085 | } | |
5a3f23d5 CM |
8086 | /* |
8087 | * make sure the inode gets flushed if it is replacing | |
8088 | * something. | |
8089 | */ | |
33345d01 | 8090 | if (new_inode && new_inode->i_size && S_ISREG(old_inode->i_mode)) |
5a3f23d5 | 8091 | btrfs_add_ordered_operation(trans, root, old_inode); |
5a3f23d5 | 8092 | |
0c4d2d95 JB |
8093 | inode_inc_iversion(old_dir); |
8094 | inode_inc_iversion(new_dir); | |
8095 | inode_inc_iversion(old_inode); | |
39279cc3 CM |
8096 | old_dir->i_ctime = old_dir->i_mtime = ctime; |
8097 | new_dir->i_ctime = new_dir->i_mtime = ctime; | |
8098 | old_inode->i_ctime = ctime; | |
5f39d397 | 8099 | |
12fcfd22 CM |
8100 | if (old_dentry->d_parent != new_dentry->d_parent) |
8101 | btrfs_record_unlink_dir(trans, old_dir, old_inode, 1); | |
8102 | ||
33345d01 | 8103 | if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) { |
4df27c4d YZ |
8104 | root_objectid = BTRFS_I(old_inode)->root->root_key.objectid; |
8105 | ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid, | |
8106 | old_dentry->d_name.name, | |
8107 | old_dentry->d_name.len); | |
8108 | } else { | |
92986796 AV |
8109 | ret = __btrfs_unlink_inode(trans, root, old_dir, |
8110 | old_dentry->d_inode, | |
8111 | old_dentry->d_name.name, | |
8112 | old_dentry->d_name.len); | |
8113 | if (!ret) | |
8114 | ret = btrfs_update_inode(trans, root, old_inode); | |
4df27c4d | 8115 | } |
79787eaa JM |
8116 | if (ret) { |
8117 | btrfs_abort_transaction(trans, root, ret); | |
8118 | goto out_fail; | |
8119 | } | |
39279cc3 CM |
8120 | |
8121 | if (new_inode) { | |
0c4d2d95 | 8122 | inode_inc_iversion(new_inode); |
39279cc3 | 8123 | new_inode->i_ctime = CURRENT_TIME; |
33345d01 | 8124 | if (unlikely(btrfs_ino(new_inode) == |
4df27c4d YZ |
8125 | BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { |
8126 | root_objectid = BTRFS_I(new_inode)->location.objectid; | |
8127 | ret = btrfs_unlink_subvol(trans, dest, new_dir, | |
8128 | root_objectid, | |
8129 | new_dentry->d_name.name, | |
8130 | new_dentry->d_name.len); | |
8131 | BUG_ON(new_inode->i_nlink == 0); | |
8132 | } else { | |
8133 | ret = btrfs_unlink_inode(trans, dest, new_dir, | |
8134 | new_dentry->d_inode, | |
8135 | new_dentry->d_name.name, | |
8136 | new_dentry->d_name.len); | |
8137 | } | |
4ef31a45 | 8138 | if (!ret && new_inode->i_nlink == 0) |
e02119d5 | 8139 | ret = btrfs_orphan_add(trans, new_dentry->d_inode); |
79787eaa JM |
8140 | if (ret) { |
8141 | btrfs_abort_transaction(trans, root, ret); | |
8142 | goto out_fail; | |
8143 | } | |
39279cc3 | 8144 | } |
aec7477b | 8145 | |
4df27c4d YZ |
8146 | ret = btrfs_add_link(trans, new_dir, old_inode, |
8147 | new_dentry->d_name.name, | |
a5719521 | 8148 | new_dentry->d_name.len, 0, index); |
79787eaa JM |
8149 | if (ret) { |
8150 | btrfs_abort_transaction(trans, root, ret); | |
8151 | goto out_fail; | |
8152 | } | |
39279cc3 | 8153 | |
33345d01 | 8154 | if (old_ino != BTRFS_FIRST_FREE_OBJECTID) { |
10d9f309 | 8155 | struct dentry *parent = new_dentry->d_parent; |
6a912213 | 8156 | btrfs_log_new_name(trans, old_inode, old_dir, parent); |
4df27c4d YZ |
8157 | btrfs_end_log_trans(root); |
8158 | } | |
39279cc3 | 8159 | out_fail: |
7ad85bb7 | 8160 | btrfs_end_transaction(trans, root); |
b44c59a8 | 8161 | out_notrans: |
33345d01 | 8162 | if (old_ino == BTRFS_FIRST_FREE_OBJECTID) |
76dda93c | 8163 | up_read(&root->fs_info->subvol_sem); |
9ed74f2d | 8164 | |
39279cc3 CM |
8165 | return ret; |
8166 | } | |
8167 | ||
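One user-visible consequence of the checks above: renaming a regular file from one subvolume into another hits the root != dest test and fails with EXDEV, exactly as if the two directories were on different filesystems, so tools like mv fall back to copy + unlink. A small sketch with hypothetical subvolume paths:

#include <errno.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* hypothetical paths: two different subvolumes on the same btrfs mount */
	if (rename("/mnt/btrfs/subvol_a/file", "/mnt/btrfs/subvol_b/file") != 0 &&
	    errno == EXDEV)
		fprintf(stderr, "cross-subvolume rename refused: %s\n",
			strerror(errno));
	return 0;
}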
8ccf6f19 MX |
8168 | static void btrfs_run_delalloc_work(struct btrfs_work *work) |
8169 | { | |
8170 | struct btrfs_delalloc_work *delalloc_work; | |
9f23e289 | 8171 | struct inode *inode; |
8ccf6f19 MX |
8172 | |
8173 | delalloc_work = container_of(work, struct btrfs_delalloc_work, | |
8174 | work); | |
9f23e289 JB |
8175 | inode = delalloc_work->inode; |
8176 | if (delalloc_work->wait) { | |
8177 | btrfs_wait_ordered_range(inode, 0, (u64)-1); | |
8178 | } else { | |
8179 | filemap_flush(inode->i_mapping); | |
8180 | if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, | |
8181 | &BTRFS_I(inode)->runtime_flags)) | |
8182 | filemap_flush(inode->i_mapping); | |
8183 | } | |
8ccf6f19 MX |
8184 | |
8185 | if (delalloc_work->delay_iput) | |
9f23e289 | 8186 | btrfs_add_delayed_iput(inode); |
8ccf6f19 | 8187 | else |
9f23e289 | 8188 | iput(inode); |
8ccf6f19 MX |
8189 | complete(&delalloc_work->completion); |
8190 | } | |
8191 | ||
8192 | struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode, | |
8193 | int wait, int delay_iput) | |
8194 | { | |
8195 | struct btrfs_delalloc_work *work; | |
8196 | ||
8197 | work = kmem_cache_zalloc(btrfs_delalloc_work_cachep, GFP_NOFS); | |
8198 | if (!work) | |
8199 | return NULL; | |
8200 | ||
8201 | init_completion(&work->completion); | |
8202 | INIT_LIST_HEAD(&work->list); | |
8203 | work->inode = inode; | |
8204 | work->wait = wait; | |
8205 | work->delay_iput = delay_iput; | |
8206 | work->work.func = btrfs_run_delalloc_work; | |
8207 | ||
8208 | return work; | |
8209 | } | |
8210 | ||
8211 | void btrfs_wait_and_free_delalloc_work(struct btrfs_delalloc_work *work) | |
8212 | { | |
8213 | wait_for_completion(&work->completion); | |
8214 | kmem_cache_free(btrfs_delalloc_work_cachep, work); | |
8215 | } | |
8216 | ||
d352ac68 CM |
8217 | /* |
8218 | * some fairly slow code that needs optimization. This walks the list | |
8219 | * of all the inodes with pending delalloc and forces them to disk. | |
8220 | */ | |
eb73c1b7 | 8221 | static int __start_delalloc_inodes(struct btrfs_root *root, int delay_iput) |
ea8c2819 | 8222 | { |
ea8c2819 | 8223 | struct btrfs_inode *binode; |
5b21f2ed | 8224 | struct inode *inode; |
8ccf6f19 MX |
8225 | struct btrfs_delalloc_work *work, *next; |
8226 | struct list_head works; | |
1eafa6c7 | 8227 | struct list_head splice; |
8ccf6f19 | 8228 | int ret = 0; |
ea8c2819 | 8229 | |
8ccf6f19 | 8230 | INIT_LIST_HEAD(&works); |
1eafa6c7 | 8231 | INIT_LIST_HEAD(&splice); |
63607cc8 | 8232 | |
eb73c1b7 MX |
8233 | spin_lock(&root->delalloc_lock); |
8234 | list_splice_init(&root->delalloc_inodes, &splice); | |
1eafa6c7 MX |
8235 | while (!list_empty(&splice)) { |
8236 | binode = list_entry(splice.next, struct btrfs_inode, | |
ea8c2819 | 8237 | delalloc_inodes); |
1eafa6c7 | 8238 | |
eb73c1b7 MX |
8239 | list_move_tail(&binode->delalloc_inodes, |
8240 | &root->delalloc_inodes); | |
5b21f2ed | 8241 | inode = igrab(&binode->vfs_inode); |
df0af1a5 | 8242 | if (!inode) { |
eb73c1b7 | 8243 | cond_resched_lock(&root->delalloc_lock); |
1eafa6c7 | 8244 | continue; |
df0af1a5 | 8245 | } |
eb73c1b7 | 8246 | spin_unlock(&root->delalloc_lock); |
1eafa6c7 MX |
8247 | |
8248 | work = btrfs_alloc_delalloc_work(inode, 0, delay_iput); | |
8249 | if (unlikely(!work)) { | |
f4ab9ea7 JB |
8250 | if (delay_iput) |
8251 | btrfs_add_delayed_iput(inode); | |
8252 | else | |
8253 | iput(inode); | |
1eafa6c7 MX |
8254 | ret = -ENOMEM; |
8255 | goto out; | |
5b21f2ed | 8256 | } |
1eafa6c7 MX |
8257 | list_add_tail(&work->list, &works); |
8258 | btrfs_queue_worker(&root->fs_info->flush_workers, | |
8259 | &work->work); | |
8260 | ||
5b21f2ed | 8261 | cond_resched(); |
eb73c1b7 | 8262 | spin_lock(&root->delalloc_lock); |
ea8c2819 | 8263 | } |
eb73c1b7 | 8264 | spin_unlock(&root->delalloc_lock); |
8c8bee1d | 8265 | |
1eafa6c7 MX |
8266 | list_for_each_entry_safe(work, next, &works, list) { |
8267 | list_del_init(&work->list); | |
8268 | btrfs_wait_and_free_delalloc_work(work); | |
8269 | } | |
eb73c1b7 MX |
8270 | return 0; |
8271 | out: | |
8272 | list_for_each_entry_safe(work, next, &works, list) { | |
8273 | list_del_init(&work->list); | |
8274 | btrfs_wait_and_free_delalloc_work(work); | |
8275 | } | |
8276 | ||
8277 | if (!list_empty_careful(&splice)) { | |
8278 | spin_lock(&root->delalloc_lock); | |
8279 | list_splice_tail(&splice, &root->delalloc_inodes); | |
8280 | spin_unlock(&root->delalloc_lock); | |
8281 | } | |
8282 | return ret; | |
8283 | } | |
1eafa6c7 | 8284 | |
eb73c1b7 MX |
8285 | int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput) |
8286 | { | |
8287 | int ret; | |
1eafa6c7 | 8288 | |
eb73c1b7 MX |
8289 | if (root->fs_info->sb->s_flags & MS_RDONLY) |
8290 | return -EROFS; | |
8291 | ||
8292 | ret = __start_delalloc_inodes(root, delay_iput); | |
8293 | /* | |
8294 | * the filemap_flush will queue IO into the worker threads, but | |
8c8bee1d CM |
8295 | * we have to make sure the IO is actually started and that |
8296 | * ordered extents get created before we return | |
8297 | */ | |
8298 | atomic_inc(&root->fs_info->async_submit_draining); | |
d397712b | 8299 | while (atomic_read(&root->fs_info->nr_async_submits) || |
771ed689 | 8300 | atomic_read(&root->fs_info->async_delalloc_pages)) { |
8c8bee1d | 8301 | wait_event(root->fs_info->async_submit_wait, |
771ed689 CM |
8302 | (atomic_read(&root->fs_info->nr_async_submits) == 0 && |
8303 | atomic_read(&root->fs_info->async_delalloc_pages) == 0)); | |
8c8bee1d CM |
8304 | } |
8305 | atomic_dec(&root->fs_info->async_submit_draining); | |
eb73c1b7 MX |
8306 | return ret; |
8307 | } | |
8308 | ||
8309 | int btrfs_start_all_delalloc_inodes(struct btrfs_fs_info *fs_info, | |
8310 | int delay_iput) | |
8311 | { | |
8312 | struct btrfs_root *root; | |
8313 | struct list_head splice; | |
8314 | int ret; | |
8315 | ||
8316 | if (fs_info->sb->s_flags & MS_RDONLY) | |
8317 | return -EROFS; | |
8318 | ||
8319 | INIT_LIST_HEAD(&splice); | |
8320 | ||
8321 | spin_lock(&fs_info->delalloc_root_lock); | |
8322 | list_splice_init(&fs_info->delalloc_roots, &splice); | |
8323 | while (!list_empty(&splice)) { | |
8324 | root = list_first_entry(&splice, struct btrfs_root, | |
8325 | delalloc_root); | |
8326 | root = btrfs_grab_fs_root(root); | |
8327 | BUG_ON(!root); | |
8328 | list_move_tail(&root->delalloc_root, | |
8329 | &fs_info->delalloc_roots); | |
8330 | spin_unlock(&fs_info->delalloc_root_lock); | |
8331 | ||
8332 | ret = __start_delalloc_inodes(root, delay_iput); | |
8333 | btrfs_put_fs_root(root); | |
8334 | if (ret) | |
8335 | goto out; | |
8336 | ||
8337 | spin_lock(&fs_info->delalloc_root_lock); | |
8ccf6f19 | 8338 | } |
eb73c1b7 | 8339 | spin_unlock(&fs_info->delalloc_root_lock); |
1eafa6c7 | 8340 | |
eb73c1b7 MX |
8341 | atomic_inc(&fs_info->async_submit_draining); |
8342 | while (atomic_read(&fs_info->nr_async_submits) || | |
8343 | atomic_read(&fs_info->async_delalloc_pages)) { | |
8344 | wait_event(fs_info->async_submit_wait, | |
8345 | (atomic_read(&fs_info->nr_async_submits) == 0 && | |
8346 | atomic_read(&fs_info->async_delalloc_pages) == 0)); | |
8347 | } | |
8348 | atomic_dec(&fs_info->async_submit_draining); | |
8349 | return 0; | |
8350 | out: | |
1eafa6c7 | 8351 | if (!list_empty_careful(&splice)) { |
eb73c1b7 MX |
8352 | spin_lock(&fs_info->delalloc_root_lock); |
8353 | list_splice_tail(&splice, &fs_info->delalloc_roots); | |
8354 | spin_unlock(&fs_info->delalloc_root_lock); | |
1eafa6c7 | 8355 | } |
8ccf6f19 | 8356 | return ret; |
ea8c2819 CM |
8357 | } |
8358 | ||
39279cc3 CM |
8359 | static int btrfs_symlink(struct inode *dir, struct dentry *dentry, |
8360 | const char *symname) | |
8361 | { | |
8362 | struct btrfs_trans_handle *trans; | |
8363 | struct btrfs_root *root = BTRFS_I(dir)->root; | |
8364 | struct btrfs_path *path; | |
8365 | struct btrfs_key key; | |
1832a6d5 | 8366 | struct inode *inode = NULL; |
39279cc3 CM |
8367 | int err; |
8368 | int drop_inode = 0; | |
8369 | u64 objectid; | |
00e4e6b3 | 8370 | u64 index = 0; |
39279cc3 CM |
8371 | int name_len; |
8372 | int datasize; | |
5f39d397 | 8373 | unsigned long ptr; |
39279cc3 | 8374 | struct btrfs_file_extent_item *ei; |
5f39d397 | 8375 | struct extent_buffer *leaf; |
39279cc3 | 8376 | |
f06becc4 | 8377 | name_len = strlen(symname); |
39279cc3 CM |
8378 | if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root)) |
8379 | return -ENAMETOOLONG; | |
1832a6d5 | 8380 | |
9ed74f2d JB |
8381 | /* |
8382 | * 2 items for inode item and ref | |
8383 | * 2 items for dir items | |
8384 | * 1 item for xattr if selinux is on | |
8385 | */ | |
a22285a6 YZ |
8386 | trans = btrfs_start_transaction(root, 5); |
8387 | if (IS_ERR(trans)) | |
8388 | return PTR_ERR(trans); | |
1832a6d5 | 8389 | |
581bb050 LZ |
8390 | err = btrfs_find_free_ino(root, &objectid); |
8391 | if (err) | |
8392 | goto out_unlock; | |
8393 | ||
aec7477b | 8394 | inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, |
33345d01 | 8395 | dentry->d_name.len, btrfs_ino(dir), objectid, |
d82a6f1d | 8396 | S_IFLNK|S_IRWXUGO, &index); |
7cf96da3 TI |
8397 | if (IS_ERR(inode)) { |
8398 | err = PTR_ERR(inode); | |
39279cc3 | 8399 | goto out_unlock; |
7cf96da3 | 8400 | } |
39279cc3 | 8401 | |
2a7dba39 | 8402 | err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); |
33268eaf JB |
8403 | if (err) { |
8404 | drop_inode = 1; | |
8405 | goto out_unlock; | |
8406 | } | |
8407 | ||
ad19db71 CS |
8408 | /* |
8409 | * If the active LSM wants to access the inode during | |
8410 | * d_instantiate it needs these. Smack checks to see | |
8411 | * if the filesystem supports xattrs by looking at the | |
8412 | * ops vector. | |
8413 | */ | |
8414 | inode->i_fop = &btrfs_file_operations; | |
8415 | inode->i_op = &btrfs_file_inode_operations; | |
8416 | ||
a1b075d2 | 8417 | err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); |
39279cc3 CM |
8418 | if (err) |
8419 | drop_inode = 1; | |
8420 | else { | |
8421 | inode->i_mapping->a_ops = &btrfs_aops; | |
04160088 | 8422 | inode->i_mapping->backing_dev_info = &root->fs_info->bdi; |
d1310b2e | 8423 | BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; |
39279cc3 | 8424 | } |
39279cc3 CM |
8425 | if (drop_inode) |
8426 | goto out_unlock; | |
8427 | ||
8428 | path = btrfs_alloc_path(); | |
d8926bb3 MF |
8429 | if (!path) { |
8430 | err = -ENOMEM; | |
8431 | drop_inode = 1; | |
8432 | goto out_unlock; | |
8433 | } | |
33345d01 | 8434 | key.objectid = btrfs_ino(inode); |
39279cc3 | 8435 | key.offset = 0; |
39279cc3 CM |
8436 | btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY); |
8437 | datasize = btrfs_file_extent_calc_inline_size(name_len); | |
8438 | err = btrfs_insert_empty_item(trans, root, path, &key, | |
8439 | datasize); | |
54aa1f4d CM |
8440 | if (err) { |
8441 | drop_inode = 1; | |
b0839166 | 8442 | btrfs_free_path(path); |
54aa1f4d CM |
8443 | goto out_unlock; |
8444 | } | |
5f39d397 CM |
8445 | leaf = path->nodes[0]; |
8446 | ei = btrfs_item_ptr(leaf, path->slots[0], | |
8447 | struct btrfs_file_extent_item); | |
8448 | btrfs_set_file_extent_generation(leaf, ei, trans->transid); | |
8449 | btrfs_set_file_extent_type(leaf, ei, | |
39279cc3 | 8450 | BTRFS_FILE_EXTENT_INLINE); |
c8b97818 CM |
8451 | btrfs_set_file_extent_encryption(leaf, ei, 0); |
8452 | btrfs_set_file_extent_compression(leaf, ei, 0); | |
8453 | btrfs_set_file_extent_other_encoding(leaf, ei, 0); | |
8454 | btrfs_set_file_extent_ram_bytes(leaf, ei, name_len); | |
8455 | ||
39279cc3 | 8456 | ptr = btrfs_file_extent_inline_start(ei); |
5f39d397 CM |
8457 | write_extent_buffer(leaf, symname, ptr, name_len); |
8458 | btrfs_mark_buffer_dirty(leaf); | |
39279cc3 | 8459 | btrfs_free_path(path); |
5f39d397 | 8460 | |
39279cc3 CM |
8461 | inode->i_op = &btrfs_symlink_inode_operations; |
8462 | inode->i_mapping->a_ops = &btrfs_symlink_aops; | |
04160088 | 8463 | inode->i_mapping->backing_dev_info = &root->fs_info->bdi; |
d899e052 | 8464 | inode_set_bytes(inode, name_len); |
f06becc4 | 8465 | btrfs_i_size_write(inode, name_len); |
54aa1f4d CM |
8466 | err = btrfs_update_inode(trans, root, inode); |
8467 | if (err) | |
8468 | drop_inode = 1; | |
39279cc3 CM |
8469 | |
8470 | out_unlock: | |
08c422c2 AV |
8471 | if (!err) |
8472 | d_instantiate(dentry, inode); | |
7ad85bb7 | 8473 | btrfs_end_transaction(trans, root); |
39279cc3 CM |
8474 | if (drop_inode) { |
8475 | inode_dec_link_count(inode); | |
8476 | iput(inode); | |
8477 | } | |
b53d3f5d | 8478 | btrfs_btree_balance_dirty(root); |
39279cc3 CM |
8479 | return err; |
8480 | } | |
16432985 | 8481 | |
0af3d00b JB |
8482 | static int __btrfs_prealloc_file_range(struct inode *inode, int mode, |
8483 | u64 start, u64 num_bytes, u64 min_size, | |
8484 | loff_t actual_len, u64 *alloc_hint, | |
8485 | struct btrfs_trans_handle *trans) | |
d899e052 | 8486 | { |
5dc562c5 JB |
8487 | struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; |
8488 | struct extent_map *em; | |
d899e052 YZ |
8489 | struct btrfs_root *root = BTRFS_I(inode)->root; |
8490 | struct btrfs_key ins; | |
d899e052 | 8491 | u64 cur_offset = start; |
55a61d1d | 8492 | u64 i_size; |
154ea289 | 8493 | u64 cur_bytes; |
d899e052 | 8494 | int ret = 0; |
0af3d00b | 8495 | bool own_trans = true; |
d899e052 | 8496 | |
0af3d00b JB |
8497 | if (trans) |
8498 | own_trans = false; | |
d899e052 | 8499 | while (num_bytes > 0) { |
0af3d00b JB |
8500 | if (own_trans) { |
8501 | trans = btrfs_start_transaction(root, 3); | |
8502 | if (IS_ERR(trans)) { | |
8503 | ret = PTR_ERR(trans); | |
8504 | break; | |
8505 | } | |
5a303d5d YZ |
8506 | } |
8507 | ||
154ea289 CM |
8508 | cur_bytes = min(num_bytes, 256ULL * 1024 * 1024); |
8509 | cur_bytes = max(cur_bytes, min_size); | |
00361589 JB |
8510 | ret = btrfs_reserve_extent(root, cur_bytes, min_size, 0, |
8511 | *alloc_hint, &ins, 1); | |
5a303d5d | 8512 | if (ret) { |
0af3d00b JB |
8513 | if (own_trans) |
8514 | btrfs_end_transaction(trans, root); | |
a22285a6 | 8515 | break; |
d899e052 | 8516 | } |
5a303d5d | 8517 | |
d899e052 YZ |
8518 | ret = insert_reserved_file_extent(trans, inode, |
8519 | cur_offset, ins.objectid, | |
8520 | ins.offset, ins.offset, | |
920bbbfb | 8521 | ins.offset, 0, 0, 0, |
d899e052 | 8522 | BTRFS_FILE_EXTENT_PREALLOC); |
79787eaa | 8523 | if (ret) { |
857cc2fc JB |
8524 | btrfs_free_reserved_extent(root, ins.objectid, |
8525 | ins.offset); | |
79787eaa JM |
8526 | btrfs_abort_transaction(trans, root, ret); |
8527 | if (own_trans) | |
8528 | btrfs_end_transaction(trans, root); | |
8529 | break; | |
8530 | } | |
a1ed835e CM |
8531 | btrfs_drop_extent_cache(inode, cur_offset, |
8532 | cur_offset + ins.offset - 1, 0); | |
5a303d5d | 8533 | |
5dc562c5 JB |
8534 | em = alloc_extent_map(); |
8535 | if (!em) { | |
8536 | set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, | |
8537 | &BTRFS_I(inode)->runtime_flags); | |
8538 | goto next; | |
8539 | } | |
8540 | ||
8541 | em->start = cur_offset; | |
8542 | em->orig_start = cur_offset; | |
8543 | em->len = ins.offset; | |
8544 | em->block_start = ins.objectid; | |
8545 | em->block_len = ins.offset; | |
b4939680 | 8546 | em->orig_block_len = ins.offset; |
cc95bef6 | 8547 | em->ram_bytes = ins.offset; |
5dc562c5 JB |
8548 | em->bdev = root->fs_info->fs_devices->latest_bdev; |
8549 | set_bit(EXTENT_FLAG_PREALLOC, &em->flags); | |
8550 | em->generation = trans->transid; | |
8551 | ||
8552 | while (1) { | |
8553 | write_lock(&em_tree->lock); | |
09a2a8f9 | 8554 | ret = add_extent_mapping(em_tree, em, 1); |
5dc562c5 JB |
8555 | write_unlock(&em_tree->lock); |
8556 | if (ret != -EEXIST) | |
8557 | break; | |
8558 | btrfs_drop_extent_cache(inode, cur_offset, | |
8559 | cur_offset + ins.offset - 1, | |
8560 | 0); | |
8561 | } | |
8562 | free_extent_map(em); | |
8563 | next: | |
d899e052 YZ |
8564 | num_bytes -= ins.offset; |
8565 | cur_offset += ins.offset; | |
efa56464 | 8566 | *alloc_hint = ins.objectid + ins.offset; |
5a303d5d | 8567 | |
0c4d2d95 | 8568 | inode_inc_iversion(inode); |
d899e052 | 8569 | inode->i_ctime = CURRENT_TIME; |
6cbff00f | 8570 | BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC; |
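/*
 * Unless the caller asked to keep the file size (FALLOC_FL_KEEP_SIZE),
 * extend i_size to cover what has been preallocated so far, capped at
 * actual_len.
 */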
d899e052 | 8571 | if (!(mode & FALLOC_FL_KEEP_SIZE) && |
efa56464 YZ |
8572 | (actual_len > inode->i_size) && |
8573 | (cur_offset > inode->i_size)) { | |
d1ea6a61 | 8574 | if (cur_offset > actual_len) |
55a61d1d | 8575 | i_size = actual_len; |
d1ea6a61 | 8576 | else |
55a61d1d JB |
8577 | i_size = cur_offset; |
8578 | i_size_write(inode, i_size); | |
8579 | btrfs_ordered_update_i_size(inode, i_size, NULL); | |
5a303d5d YZ |
8580 | } |
8581 | ||
d899e052 | 8582 | ret = btrfs_update_inode(trans, root, inode); |
79787eaa JM |
8583 | |
8584 | if (ret) { | |
8585 | btrfs_abort_transaction(trans, root, ret); | |
8586 | if (own_trans) | |
8587 | btrfs_end_transaction(trans, root); | |
8588 | break; | |
8589 | } | |
d899e052 | 8590 | |
0af3d00b JB |
8591 | if (own_trans) |
8592 | btrfs_end_transaction(trans, root); | |
5a303d5d | 8593 | } |
d899e052 YZ |
8594 | return ret; |
8595 | } | |
8596 | ||
0af3d00b JB |
8597 | int btrfs_prealloc_file_range(struct inode *inode, int mode, |
8598 | u64 start, u64 num_bytes, u64 min_size, | |
8599 | loff_t actual_len, u64 *alloc_hint) | |
8600 | { | |
8601 | return __btrfs_prealloc_file_range(inode, mode, start, num_bytes, | |
8602 | min_size, actual_len, alloc_hint, | |
8603 | NULL); | |
8604 | } | |
8605 | ||
8606 | int btrfs_prealloc_file_range_trans(struct inode *inode, | |
8607 | struct btrfs_trans_handle *trans, int mode, | |
8608 | u64 start, u64 num_bytes, u64 min_size, | |
8609 | loff_t actual_len, u64 *alloc_hint) | |
8610 | { | |
8611 | return __btrfs_prealloc_file_range(inode, mode, start, num_bytes, | |
8612 | min_size, actual_len, alloc_hint, trans); | |
8613 | } | |
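/*
 * A minimal illustrative sketch, not part of this file: how a caller
 * such as the fallocate path might drive btrfs_prealloc_file_range()
 * over a locked, space-reserved range.  The helper name
 * do_prealloc_range() and its argument list are hypothetical.
 */
static int do_prealloc_range(struct inode *inode, int mode,
			     u64 offset, u64 len)
{
	u64 alloc_hint = 0;

	/*
	 * A single call covers the whole range; the helper splits the
	 * reservation into chunks internally and keeps alloc_hint
	 * pointing just past the last extent it allocated.
	 */
	return btrfs_prealloc_file_range(inode, mode, offset, len,
					 1 << inode->i_blkbits,
					 offset + len, &alloc_hint);
}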
8614 | ||
e6dcd2dc CM |
8615 | static int btrfs_set_page_dirty(struct page *page) |
8616 | { | |
e6dcd2dc CM |
8617 | return __set_page_dirty_nobuffers(page); |
8618 | } | |
8619 | ||
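/*
 * Write access to regular files, directories and symlinks is refused
 * with -EROFS when the containing subvolume is read-only and with
 * -EACCES when the inode itself carries BTRFS_INODE_READONLY; anything
 * else falls through to the generic permission check.
 */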
10556cb2 | 8620 | static int btrfs_permission(struct inode *inode, int mask) |
fdebe2bd | 8621 | { |
b83cc969 | 8622 | struct btrfs_root *root = BTRFS_I(inode)->root; |
cb6db4e5 | 8623 | umode_t mode = inode->i_mode; |
b83cc969 | 8624 | |
cb6db4e5 JM |
8625 | if (mask & MAY_WRITE && |
8626 | (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) { | |
8627 | if (btrfs_root_readonly(root)) | |
8628 | return -EROFS; | |
8629 | if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY) | |
8630 | return -EACCES; | |
8631 | } | |
2830ba7f | 8632 | return generic_permission(inode, mask); |
fdebe2bd | 8633 | } |
39279cc3 | 8634 | |
6e1d5dcc | 8635 | static const struct inode_operations btrfs_dir_inode_operations = { |
3394e160 | 8636 | .getattr = btrfs_getattr, |
39279cc3 CM |
8637 | .lookup = btrfs_lookup, |
8638 | .create = btrfs_create, | |
8639 | .unlink = btrfs_unlink, | |
8640 | .link = btrfs_link, | |
8641 | .mkdir = btrfs_mkdir, | |
8642 | .rmdir = btrfs_rmdir, | |
8643 | .rename = btrfs_rename, | |
8644 | .symlink = btrfs_symlink, | |
8645 | .setattr = btrfs_setattr, | |
618e21d5 | 8646 | .mknod = btrfs_mknod, |
95819c05 CH |
8647 | .setxattr = btrfs_setxattr, |
8648 | .getxattr = btrfs_getxattr, | |
5103e947 | 8649 | .listxattr = btrfs_listxattr, |
95819c05 | 8650 | .removexattr = btrfs_removexattr, |
fdebe2bd | 8651 | .permission = btrfs_permission, |
4e34e719 | 8652 | .get_acl = btrfs_get_acl, |
93fd63c2 | 8653 | .update_time = btrfs_update_time, |
39279cc3 | 8654 | }; |
6e1d5dcc | 8655 | static const struct inode_operations btrfs_dir_ro_inode_operations = { |
39279cc3 | 8656 | .lookup = btrfs_lookup, |
fdebe2bd | 8657 | .permission = btrfs_permission, |
4e34e719 | 8658 | .get_acl = btrfs_get_acl, |
93fd63c2 | 8659 | .update_time = btrfs_update_time, |
39279cc3 | 8660 | }; |
76dda93c | 8661 | |
828c0950 | 8662 | static const struct file_operations btrfs_dir_file_operations = { |
39279cc3 CM |
8663 | .llseek = generic_file_llseek, |
8664 | .read = generic_read_dir, | |
9cdda8d3 | 8665 | .iterate = btrfs_real_readdir, |
34287aa3 | 8666 | .unlocked_ioctl = btrfs_ioctl, |
39279cc3 | 8667 | #ifdef CONFIG_COMPAT |
34287aa3 | 8668 | .compat_ioctl = btrfs_ioctl, |
39279cc3 | 8669 | #endif |
6bf13c0c | 8670 | .release = btrfs_release_file, |
e02119d5 | 8671 | .fsync = btrfs_sync_file, |
39279cc3 CM |
8672 | }; |
8673 | ||
d1310b2e | 8674 | static struct extent_io_ops btrfs_extent_io_ops = { |
07157aac | 8675 | .fill_delalloc = run_delalloc_range, |
065631f6 | 8676 | .submit_bio_hook = btrfs_submit_bio_hook, |
239b14b3 | 8677 | .merge_bio_hook = btrfs_merge_bio_hook, |
07157aac | 8678 | .readpage_end_io_hook = btrfs_readpage_end_io_hook, |
e6dcd2dc | 8679 | .writepage_end_io_hook = btrfs_writepage_end_io_hook, |
247e743c | 8680 | .writepage_start_hook = btrfs_writepage_start_hook, |
b0c68f8b CM |
8681 | .set_bit_hook = btrfs_set_bit_hook, |
8682 | .clear_bit_hook = btrfs_clear_bit_hook, | |
9ed74f2d JB |
8683 | .merge_extent_hook = btrfs_merge_extent_hook, |
8684 | .split_extent_hook = btrfs_split_extent_hook, | |
07157aac CM |
8685 | }; |
8686 | ||
35054394 CM |
8687 | /* |
8688 | * btrfs doesn't support the bmap operation because swapfiles | |
8689 | * use bmap to make a mapping of extents in the file. They assume | |
8690 | * these extents won't change over the life of the file and they | |
8691 | * use the bmap result to do IO directly to the drive. | |
8692 | * | |
8693 | * the btrfs bmap call would return logical addresses that aren't | |
8694 | * suitable for IO, and they will also change frequently as COW | |
8695 | * operations happen. So, swapfile + btrfs == corruption. | |
8696 | * | |
8697 | * For now we're avoiding this by dropping bmap. | |
8698 | */ | |
7f09410b | 8699 | static const struct address_space_operations btrfs_aops = { |
39279cc3 CM |
8700 | .readpage = btrfs_readpage, |
8701 | .writepage = btrfs_writepage, | |
b293f02e | 8702 | .writepages = btrfs_writepages, |
3ab2fb5a | 8703 | .readpages = btrfs_readpages, |
16432985 | 8704 | .direct_IO = btrfs_direct_IO, |
a52d9a80 CM |
8705 | .invalidatepage = btrfs_invalidatepage, |
8706 | .releasepage = btrfs_releasepage, | |
e6dcd2dc | 8707 | .set_page_dirty = btrfs_set_page_dirty, |
465fdd97 | 8708 | .error_remove_page = generic_error_remove_page, |
39279cc3 CM |
8709 | }; |
8710 | ||
7f09410b | 8711 | static const struct address_space_operations btrfs_symlink_aops = { |
39279cc3 CM |
8712 | .readpage = btrfs_readpage, |
8713 | .writepage = btrfs_writepage, | |
2bf5a725 CM |
8714 | .invalidatepage = btrfs_invalidatepage, |
8715 | .releasepage = btrfs_releasepage, | |
39279cc3 CM |
8716 | }; |
8717 | ||
6e1d5dcc | 8718 | static const struct inode_operations btrfs_file_inode_operations = { |
39279cc3 CM |
8719 | .getattr = btrfs_getattr, |
8720 | .setattr = btrfs_setattr, | |
95819c05 CH |
8721 | .setxattr = btrfs_setxattr, |
8722 | .getxattr = btrfs_getxattr, | |
5103e947 | 8723 | .listxattr = btrfs_listxattr, |
95819c05 | 8724 | .removexattr = btrfs_removexattr, |
fdebe2bd | 8725 | .permission = btrfs_permission, |
1506fcc8 | 8726 | .fiemap = btrfs_fiemap, |
4e34e719 | 8727 | .get_acl = btrfs_get_acl, |
e41f941a | 8728 | .update_time = btrfs_update_time, |
39279cc3 | 8729 | }; |
6e1d5dcc | 8730 | static const struct inode_operations btrfs_special_inode_operations = { |
618e21d5 JB |
8731 | .getattr = btrfs_getattr, |
8732 | .setattr = btrfs_setattr, | |
fdebe2bd | 8733 | .permission = btrfs_permission, |
95819c05 CH |
8734 | .setxattr = btrfs_setxattr, |
8735 | .getxattr = btrfs_getxattr, | |
33268eaf | 8736 | .listxattr = btrfs_listxattr, |
95819c05 | 8737 | .removexattr = btrfs_removexattr, |
4e34e719 | 8738 | .get_acl = btrfs_get_acl, |
e41f941a | 8739 | .update_time = btrfs_update_time, |
618e21d5 | 8740 | }; |
6e1d5dcc | 8741 | static const struct inode_operations btrfs_symlink_inode_operations = { |
39279cc3 CM |
8742 | .readlink = generic_readlink, |
8743 | .follow_link = page_follow_link_light, | |
8744 | .put_link = page_put_link, | |
f209561a | 8745 | .getattr = btrfs_getattr, |
22c44fe6 | 8746 | .setattr = btrfs_setattr, |
fdebe2bd | 8747 | .permission = btrfs_permission, |
0279b4cd JO |
8748 | .setxattr = btrfs_setxattr, |
8749 | .getxattr = btrfs_getxattr, | |
8750 | .listxattr = btrfs_listxattr, | |
8751 | .removexattr = btrfs_removexattr, | |
4e34e719 | 8752 | .get_acl = btrfs_get_acl, |
e41f941a | 8753 | .update_time = btrfs_update_time, |
39279cc3 | 8754 | }; |
76dda93c | 8755 | |
82d339d9 | 8756 | const struct dentry_operations btrfs_dentry_operations = { |
76dda93c | 8757 | .d_delete = btrfs_dentry_delete, |
b4aff1f8 | 8758 | .d_release = btrfs_dentry_release, |
76dda93c | 8759 | }; |