// SPDX-License-Identifier: GPL-2.0
#ifndef NO_BCACHEFS_FS

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_buf.h"
#include "btree_update.h"
#include "buckets.h"
#include "clock.h"
#include "error.h"
#include "extents.h"
#include "extent_update.h"
#include "fs.h"
#include "fs-io.h"
#include "fs-io-buffered.h"
#include "fs-io-pagecache.h"
#include "fsck.h"
#include "inode.h"
#include "journal.h"
#include "io_misc.h"
#include "keylist.h"
#include "quota.h"
#include "reflink.h"
#include "trace.h"

#include <linux/aio.h>
#include <linux/backing-dev.h>
#include <linux/falloc.h>
#include <linux/migrate.h>
#include <linux/mmu_context.h>
#include <linux/pagevec.h>
#include <linux/rmap.h>
#include <linux/sched/signal.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/uio.h>

#include <trace/events/writeback.h>

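/*
 * An fsync of an inode with outstanding nocow writes must flush the write
 * caches of the devices those writes went to. A nocow_flush wraps an empty
 * REQ_PREFLUSH bio per device; the endio handler releases the refs taken at
 * submission.
 */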
struct nocow_flush {
        struct closure  *cl;
        struct bch_dev  *ca;
        struct bio      bio;
};

static void nocow_flush_endio(struct bio *_bio)
{
        struct nocow_flush *bio = container_of(_bio, struct nocow_flush, bio);

        closure_put(bio->cl);
        percpu_ref_put(&bio->ca->io_ref);
        bio_put(&bio->bio);
}

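/*
 * Issue a cache flush to every device recorded in ei_devs_need_flush,
 * clearing the mask; completions are accounted against @cl so the caller
 * can wait for them with closure_sync().
 */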
void bch2_inode_flush_nocow_writes_async(struct bch_fs *c,
                                         struct bch_inode_info *inode,
                                         struct closure *cl)
{
        struct nocow_flush *bio;
        struct bch_dev *ca;
        struct bch_devs_mask devs;
        unsigned dev;

        dev = find_first_bit(inode->ei_devs_need_flush.d, BCH_SB_MEMBERS_MAX);
        if (dev == BCH_SB_MEMBERS_MAX)
                return;

        devs = inode->ei_devs_need_flush;
        memset(&inode->ei_devs_need_flush, 0, sizeof(inode->ei_devs_need_flush));

        for_each_set_bit(dev, devs.d, BCH_SB_MEMBERS_MAX) {
                rcu_read_lock();
                ca = rcu_dereference(c->devs[dev]);
                if (ca && !percpu_ref_tryget(&ca->io_ref))
                        ca = NULL;
                rcu_read_unlock();

                if (!ca)
                        continue;

                bio = container_of(bio_alloc_bioset(ca->disk_sb.bdev, 0,
                                                    REQ_OP_WRITE|REQ_PREFLUSH,
                                                    GFP_KERNEL,
                                                    &c->nocow_flush_bioset),
                                   struct nocow_flush, bio);
                bio->cl                 = cl;
                bio->ca                 = ca;
                bio->bio.bi_end_io      = nocow_flush_endio;
                closure_bio_submit(&bio->bio, cl);
        }
}

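/* Synchronous wrapper: issue the flushes and wait for them all to complete. */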
static int bch2_inode_flush_nocow_writes(struct bch_fs *c,
                                         struct bch_inode_info *inode)
{
        struct closure cl;

        closure_init_stack(&cl);
        bch2_inode_flush_nocow_writes_async(c, inode, &cl);
        closure_sync(&cl);

        return 0;
}

/* i_size updates: */

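/*
 * The new size and timestamps are applied to the btree inode inside a
 * transaction, via the inode_set_size() callback passed to
 * bch2_write_inode().
 */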
struct inode_new_size {
        loff_t          new_size;
        u64             now;
        unsigned        fields;
};

static int inode_set_size(struct btree_trans *trans,
                          struct bch_inode_info *inode,
                          struct bch_inode_unpacked *bi,
                          void *p)
{
        struct inode_new_size *s = p;

        bi->bi_size = s->new_size;
        if (s->fields & ATTR_ATIME)
                bi->bi_atime = s->now;
        if (s->fields & ATTR_MTIME)
                bi->bi_mtime = s->now;
        if (s->fields & ATTR_CTIME)
                bi->bi_ctime = s->now;

        return 0;
}

int __must_check bch2_write_inode_size(struct bch_fs *c,
                                       struct bch_inode_info *inode,
                                       loff_t new_size, unsigned fields)
{
        struct inode_new_size s = {
                .new_size       = new_size,
                .now            = bch2_current_time(c),
                .fields         = fields,
        };

        return bch2_write_inode(c, inode, inode_set_size, &s, fields);
}

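/*
 * Apply a delta to the in-memory i_blocks and to quota accounting: positive
 * deltas may be taken out of an existing quota reservation, anything else
 * is accounted directly with bch2_quota_acct().
 */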
void __bch2_i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
                           struct quota_res *quota_res, s64 sectors)
{
        bch2_fs_inconsistent_on((s64) inode->v.i_blocks + sectors < 0, c,
                                "inode %lu i_blocks underflow: %llu + %lli < 0 (ondisk %lli)",
                                inode->v.i_ino, (u64) inode->v.i_blocks, sectors,
                                inode->ei_inode.bi_sectors);
        inode->v.i_blocks += sectors;

#ifdef CONFIG_BCACHEFS_QUOTA
        if (quota_res &&
            !test_bit(EI_INODE_SNAPSHOT, &inode->ei_flags) &&
            sectors > 0) {
                BUG_ON(sectors > quota_res->sectors);
                BUG_ON(sectors > inode->ei_quota_reserved);

                quota_res->sectors -= sectors;
                inode->ei_quota_reserved -= sectors;
        } else {
                bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors, KEY_TYPE_QUOTA_WARN);
        }
#endif
}

/* fsync: */

/*
 * inode->ei_inode.bi_journal_seq won't be up to date since it's set in an
 * insert trigger: look up the btree inode instead
 */
static int bch2_flush_inode(struct bch_fs *c,
                            struct bch_inode_info *inode)
{
        if (c->opts.journal_flush_disabled)
                return 0;

        if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_fsync))
                return -EROFS;

        struct bch_inode_unpacked u;
        int ret = bch2_inode_find_by_inum(c, inode_inum(inode), &u) ?:
                  bch2_journal_flush_seq(&c->journal, u.bi_journal_seq) ?:
                  bch2_inode_flush_nocow_writes(c, inode);
        bch2_write_ref_put(c, BCH_WRITE_REF_fsync);
        return ret;
}

int bch2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
        struct bch_inode_info *inode = file_bch_inode(file);
        struct bch_fs *c = inode->v.i_sb->s_fs_info;
        int ret;

        ret = file_write_and_wait_range(file, start, end);
        if (ret)
                goto out;
        ret = sync_inode_metadata(&inode->v, 1);
        if (ret)
                goto out;
        ret = bch2_flush_inode(c, inode);
out:
        ret = bch2_err_class(ret);
        if (ret == -EROFS)
                ret = -EIO;
        return ret;
}

/* truncate: */

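/*
 * Returns 1 if the given range contains written extents, 0 if it's entirely
 * holes or unwritten extents, or a negative error code.
 */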
static inline int range_has_data(struct bch_fs *c, u32 subvol,
                                 struct bpos start,
                                 struct bpos end)
{
        struct btree_trans *trans = bch2_trans_get(c);
        struct btree_iter iter;
        struct bkey_s_c k;
        int ret = 0;
retry:
        bch2_trans_begin(trans);

        ret = bch2_subvolume_get_snapshot(trans, subvol, &start.snapshot);
        if (ret)
                goto err;

        for_each_btree_key_upto_norestart(trans, iter, BTREE_ID_extents, start, end, 0, k, ret)
                if (bkey_extent_is_data(k.k) && !bkey_extent_is_unwritten(k)) {
                        ret = 1;
                        break;
                }
        start = iter.pos;
        bch2_trans_iter_exit(trans, &iter);
err:
        if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
                goto retry;

        bch2_trans_put(trans);
        return ret;
}

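/*
 * Zero out the part of [start, end) that falls within the folio at @index,
 * marking fully zeroed blocks unallocated.
 *
 * Returns 1 if the folio is dirty and writeback will write it out (updating
 * i_size if needed), 0 if the caller is responsible for the i_size update,
 * or a negative error code.
 */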
static int __bch2_truncate_folio(struct bch_inode_info *inode,
                                 pgoff_t index, loff_t start, loff_t end)
{
        struct bch_fs *c = inode->v.i_sb->s_fs_info;
        struct address_space *mapping = inode->v.i_mapping;
        struct bch_folio *s;
        unsigned start_offset;
        unsigned end_offset;
        unsigned i;
        struct folio *folio;
        s64 i_sectors_delta = 0;
        int ret = 0;
        u64 end_pos;

        folio = filemap_lock_folio(mapping, index);
        if (IS_ERR_OR_NULL(folio)) {
                /*
                 * XXX: we're doing two index lookups when we end up reading the
                 * folio
                 */
                ret = range_has_data(c, inode->ei_subvol,
                                POS(inode->v.i_ino, (index << PAGE_SECTORS_SHIFT)),
                                POS(inode->v.i_ino, (index << PAGE_SECTORS_SHIFT) + PAGE_SECTORS));
                if (ret <= 0)
                        return ret;

                folio = __filemap_get_folio(mapping, index,
                                            FGP_LOCK|FGP_CREAT, GFP_KERNEL);
                if (IS_ERR_OR_NULL(folio)) {
                        ret = -ENOMEM;
                        goto out;
                }
        }

        BUG_ON(start    >= folio_end_pos(folio));
        BUG_ON(end      <= folio_pos(folio));

        start_offset    = max(start, folio_pos(folio)) - folio_pos(folio);
        end_offset      = min_t(u64, end, folio_end_pos(folio)) - folio_pos(folio);

        /* Folio boundary? Nothing to do */
        if (start_offset == 0 &&
            end_offset == folio_size(folio)) {
                ret = 0;
                goto unlock;
        }

        s = bch2_folio_create(folio, 0);
        if (!s) {
                ret = -ENOMEM;
                goto unlock;
        }

        if (!folio_test_uptodate(folio)) {
                ret = bch2_read_single_folio(folio, mapping);
                if (ret)
                        goto unlock;
        }

        ret = bch2_folio_set(c, inode_inum(inode), &folio, 1);
        if (ret)
                goto unlock;

        for (i = round_up(start_offset, block_bytes(c)) >> 9;
             i < round_down(end_offset, block_bytes(c)) >> 9;
             i++) {
                s->s[i].nr_replicas     = 0;

                i_sectors_delta -= s->s[i].state == SECTOR_dirty;
                bch2_folio_sector_set(folio, s, i, SECTOR_unallocated);
        }

        bch2_i_sectors_acct(c, inode, NULL, i_sectors_delta);

        /*
         * Caller needs to know whether this folio will be written out by
         * writeback - doing an i_size update if necessary - or whether it will
         * be responsible for the i_size update.
         *
         * Note that we shouldn't ever see a folio beyond EOF, but check and
         * warn if so. This has been observed to happen when folios aren't
         * cleaned up after a short write, and there's still a chance reclaim
         * will fix things up.
         */
        WARN_ON_ONCE(folio_pos(folio) >= inode->v.i_size);
        end_pos = folio_end_pos(folio);
        if (inode->v.i_size > folio_pos(folio))
                end_pos = min_t(u64, inode->v.i_size, end_pos);
        ret = s->s[folio_pos_to_s(folio, end_pos - 1)].state >= SECTOR_dirty;

        folio_zero_segment(folio, start_offset, end_offset);

        /*
         * Bit of a hack - we don't want truncate to fail due to -ENOSPC.
         *
         * XXX: because we aren't currently tracking whether the folio has
         * actual data in it (vs. just 0s, or only partially written) this is
         * wrong. ick.
         */
        BUG_ON(bch2_get_folio_disk_reservation(c, inode, folio, false));

        /*
         * This removes any writeable userspace mappings; we need to force
         * .page_mkwrite to be called again before any mmapped writes, to
         * redirty the full page:
         */
        folio_mkclean(folio);
        filemap_dirty_folio(mapping, folio);
unlock:
        folio_unlock(folio);
        folio_put(folio);
out:
        return ret;
}

static int bch2_truncate_folio(struct bch_inode_info *inode, loff_t from)
{
        return __bch2_truncate_folio(inode, from >> PAGE_SHIFT,
                                     from, ANYSINT_MAX(loff_t));
}

static int bch2_truncate_folios(struct bch_inode_info *inode,
                                loff_t start, loff_t end)
{
        int ret = __bch2_truncate_folio(inode, start >> PAGE_SHIFT,
                                        start, end);

        if (ret >= 0 &&
            start >> PAGE_SHIFT != end >> PAGE_SHIFT)
                ret = __bch2_truncate_folio(inode,
                                        (end - 1) >> PAGE_SHIFT,
                                        start, end);
        return ret;
}

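/* Extending truncate: flush any dirty appends first, then grow i_size. */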
static int bch2_extend(struct mnt_idmap *idmap,
                       struct bch_inode_info *inode,
                       struct bch_inode_unpacked *inode_u,
                       struct iattr *iattr)
{
        struct address_space *mapping = inode->v.i_mapping;
        int ret;

        /*
         * sync appends:
         *
         * this has to be done _before_ extending i_size:
         */
        ret = filemap_write_and_wait_range(mapping, inode_u->bi_size, S64_MAX);
        if (ret)
                return ret;

        truncate_setsize(&inode->v, iattr->ia_size);

        return bch2_setattr_nonsize(idmap, inode, iattr);
}

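/*
 * Truncate entrypoint: extending truncates are handed off to bch2_extend();
 * shrinking truncates zero the partial folio at the new EOF, shrink the
 * page cache and in-memory i_size, then delete extents past the new size.
 */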
int bchfs_truncate(struct mnt_idmap *idmap,
                  struct bch_inode_info *inode, struct iattr *iattr)
{
        struct bch_fs *c = inode->v.i_sb->s_fs_info;
        struct address_space *mapping = inode->v.i_mapping;
        struct bch_inode_unpacked inode_u;
        s64 i_sectors_delta = 0;
        int ret = 0;

        /*
         * If the truncate will change the size of the file, the [cm]times
         * should be updated; if not, leave them alone.
         */
        if (iattr->ia_size != inode->v.i_size) {
                if (!(iattr->ia_valid & ATTR_MTIME))
                        ktime_get_coarse_real_ts64(&iattr->ia_mtime);
                if (!(iattr->ia_valid & ATTR_CTIME))
                        ktime_get_coarse_real_ts64(&iattr->ia_ctime);
                iattr->ia_valid |= ATTR_MTIME|ATTR_CTIME;
        }

        inode_dio_wait(&inode->v);
        bch2_pagecache_block_get(inode);

        ret = bch2_inode_find_by_inum(c, inode_inum(inode), &inode_u);
        if (ret)
                goto err;

        /*
         * check this before next assertion; on filesystem error our normal
         * invariants are a bit broken (truncate has to truncate the page cache
         * before the inode).
         */
        ret = bch2_journal_error(&c->journal);
        if (ret)
                goto err;

        WARN_ONCE(!test_bit(EI_INODE_ERROR, &inode->ei_flags) &&
                  inode->v.i_size < inode_u.bi_size,
                  "truncate spotted in mem i_size < btree i_size: %llu < %llu\n",
                  (u64) inode->v.i_size, inode_u.bi_size);

        if (iattr->ia_size > inode->v.i_size) {
                ret = bch2_extend(idmap, inode, &inode_u, iattr);
                goto err;
        }

        iattr->ia_valid &= ~ATTR_SIZE;

        ret = bch2_truncate_folio(inode, iattr->ia_size);
        if (unlikely(ret < 0))
                goto err;

        truncate_setsize(&inode->v, iattr->ia_size);

        /*
         * When extending, we're going to write the new i_size to disk
         * immediately so we need to flush anything above the current on disk
         * i_size first:
         *
         * Also, when extending we need to flush the page that i_size currently
         * straddles - if it's mapped to userspace, we need to ensure that
         * userspace has to redirty it and call .mkwrite -> set_page_dirty
         * again to allocate the part of the page that was extended.
         */
        if (iattr->ia_size > inode_u.bi_size)
                ret = filemap_write_and_wait_range(mapping,
                                inode_u.bi_size,
                                iattr->ia_size - 1);
        else if (iattr->ia_size & (PAGE_SIZE - 1))
                ret = filemap_write_and_wait_range(mapping,
                                round_down(iattr->ia_size, PAGE_SIZE),
                                iattr->ia_size - 1);
        if (ret)
                goto err;

        ret = bch2_truncate(c, inode_inum(inode), iattr->ia_size, &i_sectors_delta);
        bch2_i_sectors_acct(c, inode, NULL, i_sectors_delta);

        if (unlikely(ret)) {
                /*
                 * If we error here, VFS caches are now inconsistent with btree
                 */
                set_bit(EI_INODE_ERROR, &inode->ei_flags);
                goto err;
        }

        bch2_fs_inconsistent_on(!inode->v.i_size && inode->v.i_blocks &&
                                !bch2_journal_error(&c->journal), c,
                                "inode %lu truncated to 0 but i_blocks %llu (ondisk %lli)",
                                inode->v.i_ino, (u64) inode->v.i_blocks,
                                inode->ei_inode.bi_sectors);

        ret = bch2_setattr_nonsize(idmap, inode, iattr);
err:
        bch2_pagecache_block_put(inode);
        return bch2_err_class(ret);
}

/* fallocate: */

static int inode_update_times_fn(struct btree_trans *trans,
                                 struct bch_inode_info *inode,
                                 struct bch_inode_unpacked *bi, void *p)
{
        struct bch_fs *c = inode->v.i_sb->s_fs_info;

        bi->bi_mtime = bi->bi_ctime = bch2_current_time(c);
        return 0;
}

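/*
 * FALLOC_FL_PUNCH_HOLE: zero the partial folios at either end of the range,
 * drop the affected page cache, then delete the fully covered blocks from
 * the extents btree.
 */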
static long bchfs_fpunch(struct bch_inode_info *inode, loff_t offset, loff_t len)
{
        struct bch_fs *c = inode->v.i_sb->s_fs_info;
        u64 end         = offset + len;
        u64 block_start = round_up(offset, block_bytes(c));
        u64 block_end   = round_down(end, block_bytes(c));
        bool truncated_last_page;
        int ret = 0;

        ret = bch2_truncate_folios(inode, offset, end);
        if (unlikely(ret < 0))
                goto err;

        truncated_last_page = ret;

        truncate_pagecache_range(&inode->v, offset, end - 1);

        if (block_start < block_end) {
                s64 i_sectors_delta = 0;

                ret = bch2_fpunch(c, inode_inum(inode),
                                  block_start >> 9, block_end >> 9,
                                  &i_sectors_delta);
                bch2_i_sectors_acct(c, inode, NULL, i_sectors_delta);
        }

        mutex_lock(&inode->ei_update_lock);
        if (end >= inode->v.i_size && !truncated_last_page) {
                ret = bch2_write_inode_size(c, inode, inode->v.i_size,
                                            ATTR_MTIME|ATTR_CTIME);
        } else {
                ret = bch2_write_inode(c, inode, inode_update_times_fn, NULL,
                                       ATTR_MTIME|ATTR_CTIME);
        }
        mutex_unlock(&inode->ei_update_lock);
err:
        return ret;
}

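/*
 * FALLOC_FL_COLLAPSE_RANGE/FALLOC_FL_INSERT_RANGE: both shift extents with
 * bch2_fcollapse_finsert(); i_size is grown before an insert and shrunk
 * after a successful collapse.
 */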
static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
                                   loff_t offset, loff_t len,
                                   bool insert)
{
        struct bch_fs *c = inode->v.i_sb->s_fs_info;
        struct address_space *mapping = inode->v.i_mapping;
        s64 i_sectors_delta = 0;
        int ret = 0;

        if ((offset | len) & (block_bytes(c) - 1))
                return -EINVAL;

        if (insert) {
                if (offset >= inode->v.i_size)
                        return -EINVAL;
        } else {
                if (offset + len >= inode->v.i_size)
                        return -EINVAL;
        }

        ret = bch2_write_invalidate_inode_pages_range(mapping, offset, LLONG_MAX);
        if (ret)
                return ret;

        if (insert)
                i_size_write(&inode->v, inode->v.i_size + len);

        ret = bch2_fcollapse_finsert(c, inode_inum(inode), offset >> 9, len >> 9,
                                     insert, &i_sectors_delta);
        if (!ret && !insert)
                i_size_write(&inode->v, inode->v.i_size - len);
        bch2_i_sectors_acct(c, inode, NULL, i_sectors_delta);

        return ret;
}

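/*
 * Walk the extents btree over [start_sector, end_sector), allocating or
 * reserving space in each hole; with FALLOC_FL_ZERO_RANGE, existing data
 * extents are overwritten too. Quota is reserved per hole before the
 * allocation.
 */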
static int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
                             u64 start_sector, u64 end_sector)
{
        struct bch_fs *c = inode->v.i_sb->s_fs_info;
        struct btree_trans *trans = bch2_trans_get(c);
        struct btree_iter iter;
        struct bpos end_pos = POS(inode->v.i_ino, end_sector);
        struct bch_io_opts opts;
        int ret = 0;

        bch2_inode_opts_get(&opts, c, &inode->ei_inode);

        bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
                        POS(inode->v.i_ino, start_sector),
                        BTREE_ITER_slots|BTREE_ITER_intent);

        while (!ret && bkey_lt(iter.pos, end_pos)) {
                s64 i_sectors_delta = 0;
                struct quota_res quota_res = { 0 };
                struct bkey_s_c k;
                unsigned sectors;
                bool is_allocation;
                u64 hole_start, hole_end;
                u32 snapshot;

                bch2_trans_begin(trans);

                ret = bch2_subvolume_get_snapshot(trans,
                                        inode->ei_subvol, &snapshot);
                if (ret)
                        goto bkey_err;

                bch2_btree_iter_set_snapshot(&iter, snapshot);

                k = bch2_btree_iter_peek_slot(&iter);
                if ((ret = bkey_err(k)))
                        goto bkey_err;

                hole_start      = iter.pos.offset;
                hole_end        = bpos_min(k.k->p, end_pos).offset;
                is_allocation   = bkey_extent_is_allocation(k.k);

                /* already reserved */
                if (bkey_extent_is_reservation(k) &&
                    bch2_bkey_nr_ptrs_fully_allocated(k) >= opts.data_replicas) {
                        bch2_btree_iter_advance(&iter);
                        continue;
                }

                if (bkey_extent_is_data(k.k) &&
                    !(mode & FALLOC_FL_ZERO_RANGE)) {
                        bch2_btree_iter_advance(&iter);
                        continue;
                }

                if (!(mode & FALLOC_FL_ZERO_RANGE)) {
                        /*
                         * Lock ordering - can't be holding btree locks while
                         * blocking on a folio lock:
                         */
                        if (bch2_clamp_data_hole(&inode->v,
                                                 &hole_start,
                                                 &hole_end,
                                                 opts.data_replicas, true))
                                ret = drop_locks_do(trans,
                                        (bch2_clamp_data_hole(&inode->v,
                                                              &hole_start,
                                                              &hole_end,
                                                              opts.data_replicas, false), 0));
                        bch2_btree_iter_set_pos(&iter, POS(iter.pos.inode, hole_start));

                        if (ret)
                                goto bkey_err;

                        if (hole_start == hole_end)
                                continue;
                }

                sectors = hole_end - hole_start;

                if (!is_allocation) {
                        ret = bch2_quota_reservation_add(c, inode,
                                        &quota_res, sectors, true);
                        if (unlikely(ret))
                                goto bkey_err;
                }

                ret = bch2_extent_fallocate(trans, inode_inum(inode), &iter,
                                            sectors, opts, &i_sectors_delta,
                                            writepoint_hashed((unsigned long) current));
                if (ret)
                        goto bkey_err;

                bch2_i_sectors_acct(c, inode, &quota_res, i_sectors_delta);

                if (bch2_mark_pagecache_reserved(inode, &hole_start,
                                                 iter.pos.offset, true))
                        drop_locks_do(trans,
                                bch2_mark_pagecache_reserved(inode, &hole_start,
                                                             iter.pos.offset, false));
bkey_err:
                bch2_quota_reservation_put(c, inode, &quota_res);
                if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
                        ret = 0;
        }

        if (bch2_err_matches(ret, ENOSPC) && (mode & FALLOC_FL_ZERO_RANGE)) {
                struct quota_res quota_res = { 0 };
                s64 i_sectors_delta = 0;

                bch2_fpunch_at(trans, &iter, inode_inum(inode),
                               end_sector, &i_sectors_delta);
                bch2_i_sectors_acct(c, inode, &quota_res, i_sectors_delta);
                bch2_quota_reservation_put(c, inode, &quota_res);
        }

        bch2_trans_iter_exit(trans, &iter);
        bch2_trans_put(trans);
        return ret;
}

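/*
 * fallocate() proper (mode 0, FALLOC_FL_KEEP_SIZE and/or
 * FALLOC_FL_ZERO_RANGE): zero-range zeroes the partial folios through the
 * page cache first; the block-aligned remainder is handled by
 * __bchfs_fallocate().
 */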
static long bchfs_fallocate(struct bch_inode_info *inode, int mode,
                            loff_t offset, loff_t len)
{
        struct bch_fs *c = inode->v.i_sb->s_fs_info;
        u64 end         = offset + len;
        u64 block_start = round_down(offset,    block_bytes(c));
        u64 block_end   = round_up(end,         block_bytes(c));
        bool truncated_last_page = false;
        int ret, ret2 = 0;

        if (!(mode & FALLOC_FL_KEEP_SIZE) && end > inode->v.i_size) {
                ret = inode_newsize_ok(&inode->v, end);
                if (ret)
                        return ret;
        }

        if (mode & FALLOC_FL_ZERO_RANGE) {
                ret = bch2_truncate_folios(inode, offset, end);
                if (unlikely(ret < 0))
                        return ret;

                truncated_last_page = ret;

                truncate_pagecache_range(&inode->v, offset, end - 1);

                block_start     = round_up(offset,      block_bytes(c));
                block_end       = round_down(end,       block_bytes(c));
        }

        ret = __bchfs_fallocate(inode, mode, block_start >> 9, block_end >> 9);

        /*
         * On -ENOSPC in ZERO_RANGE mode, we still want to do the inode update,
         * so that the VFS cache i_size is consistent with the btree i_size:
         */
        if (ret &&
            !(bch2_err_matches(ret, ENOSPC) && (mode & FALLOC_FL_ZERO_RANGE)))
                return ret;

        if (mode & FALLOC_FL_KEEP_SIZE && end > inode->v.i_size)
                end = inode->v.i_size;

        if (end >= inode->v.i_size &&
            (((mode & FALLOC_FL_ZERO_RANGE) && !truncated_last_page) ||
             !(mode & FALLOC_FL_KEEP_SIZE))) {
                spin_lock(&inode->v.i_lock);
                i_size_write(&inode->v, end);
                spin_unlock(&inode->v.i_lock);

                mutex_lock(&inode->ei_update_lock);
                ret2 = bch2_write_inode_size(c, inode, end, 0);
                mutex_unlock(&inode->ei_update_lock);
        }

        return ret ?: ret2;
}

long bch2_fallocate_dispatch(struct file *file, int mode,
                             loff_t offset, loff_t len)
{
        struct bch_inode_info *inode = file_bch_inode(file);
        struct bch_fs *c = inode->v.i_sb->s_fs_info;
        long ret;

        if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_fallocate))
                return -EROFS;

        inode_lock(&inode->v);
        inode_dio_wait(&inode->v);
        bch2_pagecache_block_get(inode);

        ret = file_modified(file);
        if (ret)
                goto err;

        if (!(mode & ~(FALLOC_FL_KEEP_SIZE|FALLOC_FL_ZERO_RANGE)))
                ret = bchfs_fallocate(inode, mode, offset, len);
        else if (mode == (FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE))
                ret = bchfs_fpunch(inode, offset, len);
        else if (mode == FALLOC_FL_INSERT_RANGE)
                ret = bchfs_fcollapse_finsert(inode, offset, len, true);
        else if (mode == FALLOC_FL_COLLAPSE_RANGE)
                ret = bchfs_fcollapse_finsert(inode, offset, len, false);
        else
                ret = -EOPNOTSUPP;
err:
        bch2_pagecache_block_put(inode);
        inode_unlock(&inode->v);
        bch2_write_ref_put(c, BCH_WRITE_REF_fallocate);

        return bch2_err_class(ret);
}

/*
 * Take a quota reservation for unallocated blocks in a given file range.
 * Does not check the page cache.
 */
static int quota_reserve_range(struct bch_inode_info *inode,
                               struct quota_res *res,
                               u64 start, u64 end)
{
        struct bch_fs *c = inode->v.i_sb->s_fs_info;
        struct btree_trans *trans = bch2_trans_get(c);
        struct btree_iter iter;
        struct bkey_s_c k;
        u32 snapshot;
        u64 sectors = end - start;
        u64 pos = start;
        int ret;
retry:
        bch2_trans_begin(trans);

        ret = bch2_subvolume_get_snapshot(trans, inode->ei_subvol, &snapshot);
        if (ret)
                goto err;

        bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
                             SPOS(inode->v.i_ino, pos, snapshot), 0);

        while (!(ret = btree_trans_too_many_iters(trans)) &&
               (k = bch2_btree_iter_peek_upto(&iter, POS(inode->v.i_ino, end - 1))).k &&
               !(ret = bkey_err(k))) {
                if (bkey_extent_is_allocation(k.k)) {
                        u64 s = min(end, k.k->p.offset) -
                                max(start, bkey_start_offset(k.k));
                        BUG_ON(s > sectors);
                        sectors -= s;
                }
                bch2_btree_iter_advance(&iter);
        }
        pos = iter.pos.offset;
        bch2_trans_iter_exit(trans, &iter);
err:
        if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
                goto retry;

        bch2_trans_put(trans);

        return ret ?: bch2_quota_reservation_add(c, inode, res, sectors, true);
}

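/*
 * reflink: remap a block-aligned range of the source file into the
 * destination with bch2_remap_range(). REMAP_FILE_DEDUP is not supported.
 */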
loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
                             struct file *file_dst, loff_t pos_dst,
                             loff_t len, unsigned remap_flags)
{
        struct bch_inode_info *src = file_bch_inode(file_src);
        struct bch_inode_info *dst = file_bch_inode(file_dst);
        struct bch_fs *c = src->v.i_sb->s_fs_info;
        struct quota_res quota_res = { 0 };
        s64 i_sectors_delta = 0;
        u64 aligned_len;
        loff_t ret = 0;

        if (remap_flags & ~(REMAP_FILE_DEDUP|REMAP_FILE_ADVISORY))
                return -EINVAL;

        if (remap_flags & REMAP_FILE_DEDUP)
                return -EOPNOTSUPP;

        if ((pos_src & (block_bytes(c) - 1)) ||
            (pos_dst & (block_bytes(c) - 1)))
                return -EINVAL;

        if (src == dst &&
            abs(pos_src - pos_dst) < len)
                return -EINVAL;

        lock_two_nondirectories(&src->v, &dst->v);
        bch2_lock_inodes(INODE_PAGECACHE_BLOCK, src, dst);

        inode_dio_wait(&src->v);
        inode_dio_wait(&dst->v);

        ret = generic_remap_file_range_prep(file_src, pos_src,
                                            file_dst, pos_dst,
                                            &len, remap_flags);
        if (ret < 0 || len == 0)
                goto err;

        aligned_len = round_up((u64) len, block_bytes(c));

        ret = bch2_write_invalidate_inode_pages_range(dst->v.i_mapping,
                                pos_dst, pos_dst + len - 1);
        if (ret)
                goto err;

        ret = quota_reserve_range(dst, &quota_res, pos_dst >> 9,
                                  (pos_dst + aligned_len) >> 9);
        if (ret)
                goto err;

        file_update_time(file_dst);

        bch2_mark_pagecache_unallocated(src, pos_src >> 9,
                                   (pos_src + aligned_len) >> 9);

        ret = bch2_remap_range(c,
                               inode_inum(dst), pos_dst >> 9,
                               inode_inum(src), pos_src >> 9,
                               aligned_len >> 9,
                               pos_dst + len, &i_sectors_delta);
        if (ret < 0)
                goto err;

        /*
         * Due to alignment, we might have remapped slightly more than
         * requested:
         */
        ret = min((u64) ret << 9, (u64) len);

        bch2_i_sectors_acct(c, dst, &quota_res, i_sectors_delta);

        spin_lock(&dst->v.i_lock);
        if (pos_dst + ret > dst->v.i_size)
                i_size_write(&dst->v, pos_dst + ret);
        spin_unlock(&dst->v.i_lock);

        if ((file_dst->f_flags & (__O_SYNC | O_DSYNC)) ||
            IS_SYNC(file_inode(file_dst)))
                ret = bch2_flush_inode(c, dst);
err:
        bch2_quota_reservation_put(c, dst, &quota_res);
        bch2_unlock_inodes(INODE_PAGECACHE_BLOCK, src, dst);
        unlock_two_nondirectories(&src->v, &dst->v);

        return bch2_err_class(ret);
}

/* fseek: */

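/*
 * SEEK_DATA: scan the extents btree for the next written extent at or after
 * @offset, then let the page cache supply any dirty data in between.
 */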
static loff_t bch2_seek_data(struct file *file, u64 offset)
{
        struct bch_inode_info *inode = file_bch_inode(file);
        struct bch_fs *c = inode->v.i_sb->s_fs_info;
        struct btree_trans *trans;
        struct btree_iter iter;
        struct bkey_s_c k;
        subvol_inum inum = inode_inum(inode);
        u64 isize, next_data = MAX_LFS_FILESIZE;
        u32 snapshot;
        int ret;

        isize = i_size_read(&inode->v);
        if (offset >= isize)
                return -ENXIO;

        trans = bch2_trans_get(c);
retry:
        bch2_trans_begin(trans);

        ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
        if (ret)
                goto err;

        for_each_btree_key_upto_norestart(trans, iter, BTREE_ID_extents,
                           SPOS(inode->v.i_ino, offset >> 9, snapshot),
                           POS(inode->v.i_ino, U64_MAX),
                           0, k, ret) {
                if (bkey_extent_is_data(k.k)) {
                        next_data = max(offset, bkey_start_offset(k.k) << 9);
                        break;
                } else if (k.k->p.offset >> 9 > isize)
                        break;
        }
        bch2_trans_iter_exit(trans, &iter);
err:
        if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
                goto retry;

        bch2_trans_put(trans);
        if (ret)
                return ret;

        if (next_data > offset)
                next_data = bch2_seek_pagecache_data(&inode->v,
                                        offset, next_data, 0, false);

        if (next_data >= isize)
                return -ENXIO;

        return vfs_setpos(file, next_data, MAX_LFS_FILESIZE);
}

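/*
 * SEEK_HOLE: iterate extent slots from @offset; a hole in the btree only
 * counts if the page cache doesn't have data over it.
 */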
static loff_t bch2_seek_hole(struct file *file, u64 offset)
{
        struct bch_inode_info *inode = file_bch_inode(file);
        struct bch_fs *c = inode->v.i_sb->s_fs_info;
        struct btree_trans *trans;
        struct btree_iter iter;
        struct bkey_s_c k;
        subvol_inum inum = inode_inum(inode);
        u64 isize, next_hole = MAX_LFS_FILESIZE;
        u32 snapshot;
        int ret;

        isize = i_size_read(&inode->v);
        if (offset >= isize)
                return -ENXIO;

        trans = bch2_trans_get(c);
retry:
        bch2_trans_begin(trans);

        ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
        if (ret)
                goto err;

        for_each_btree_key_norestart(trans, iter, BTREE_ID_extents,
                           SPOS(inode->v.i_ino, offset >> 9, snapshot),
                           BTREE_ITER_slots, k, ret) {
                if (k.k->p.inode != inode->v.i_ino) {
                        next_hole = bch2_seek_pagecache_hole(&inode->v,
                                        offset, MAX_LFS_FILESIZE, 0, false);
                        break;
                } else if (!bkey_extent_is_data(k.k)) {
                        next_hole = bch2_seek_pagecache_hole(&inode->v,
                                        max(offset, bkey_start_offset(k.k) << 9),
                                        k.k->p.offset << 9, 0, false);

                        if (next_hole < k.k->p.offset << 9)
                                break;
                } else {
                        offset = max(offset, bkey_start_offset(k.k) << 9);
                }
        }
        bch2_trans_iter_exit(trans, &iter);
err:
        if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
                goto retry;

        bch2_trans_put(trans);
        if (ret)
                return ret;

        if (next_hole > isize)
                next_hole = isize;

        return vfs_setpos(file, next_hole, MAX_LFS_FILESIZE);
}

loff_t bch2_llseek(struct file *file, loff_t offset, int whence)
{
        loff_t ret;

        switch (whence) {
        case SEEK_SET:
        case SEEK_CUR:
        case SEEK_END:
                ret = generic_file_llseek(file, offset, whence);
                break;
        case SEEK_DATA:
                ret = bch2_seek_data(file, offset);
                break;
        case SEEK_HOLE:
                ret = bch2_seek_hole(file, offset);
                break;
        default:
                ret = -EINVAL;
                break;
        }

        return bch2_err_class(ret);
}

void bch2_fs_fsio_exit(struct bch_fs *c)
{
        bioset_exit(&c->nocow_flush_bioset);
}

int bch2_fs_fsio_init(struct bch_fs *c)
{
        if (bioset_init(&c->nocow_flush_bioset,
                        1, offsetof(struct nocow_flush, bio), 0))
                return -BCH_ERR_ENOMEM_nocow_flush_bioset_init;

        return 0;
}

#endif /* NO_BCACHEFS_FS */