// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_trans_space.h"
#include "xfs_inode_item.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_quota.h"
#include "xfs_rtgroup.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_reflink.h"
#include "xfs_health.h"
#include "xfs_rtbitmap.h"

#define XFS_ALLOC_ALIGN(mp, off) \
        (((off) >> mp->m_allocsize_log) << mp->m_allocsize_log)

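/*
 * Complain about a mapping that points at block zero, which always indicates
 * on-disk corruption, and mark the data fork sick.
 */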
static int
xfs_alert_fsblock_zero(
        xfs_inode_t     *ip,
        xfs_bmbt_irec_t *imap)
{
        xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
                        "Access to block zero in inode %llu "
                        "start_block: %llx start_off: %llx "
                        "blkcnt: %llx extent-state: %x",
                (unsigned long long)ip->i_ino,
                (unsigned long long)imap->br_startblock,
                (unsigned long long)imap->br_startoff,
                (unsigned long long)imap->br_blockcount,
                imap->br_state);
        xfs_bmap_mark_sick(ip, XFS_DATA_FORK);
        return -EFSCORRUPTED;
}

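/*
 * Build the validity cookie for an iomap from the fork sequence counters:
 * the data fork count goes in the low 32 bits and, for shared (COW)
 * mappings, the COW fork count in the high 32 bits.  Attribute fork
 * mappings use the attr fork count alone.
 */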
u64
xfs_iomap_inode_sequence(
        struct xfs_inode        *ip,
        u16                     iomap_flags)
{
        u64                     cookie = 0;

        if (iomap_flags & IOMAP_F_XATTR)
                return READ_ONCE(ip->i_af.if_seq);
        if ((iomap_flags & IOMAP_F_SHARED) && ip->i_cowfp)
                cookie = (u64)READ_ONCE(ip->i_cowfp->if_seq) << 32;
        return cookie | READ_ONCE(ip->i_df.if_seq);
}

/*
 * Check that the iomap passed to us is still valid for the given offset and
 * length.
 */
static bool
xfs_iomap_valid(
        struct inode            *inode,
        const struct iomap      *iomap)
{
        struct xfs_inode        *ip = XFS_I(inode);

        if (iomap->validity_cookie !=
                        xfs_iomap_inode_sequence(ip, iomap->flags)) {
                trace_xfs_iomap_invalid(ip, iomap);
                return false;
        }

        XFS_ERRORTAG_DELAY(ip->i_mount, XFS_ERRTAG_WRITE_DELAY_MS);
        return true;
}

static const struct iomap_folio_ops xfs_iomap_folio_ops = {
        .iomap_valid            = xfs_iomap_valid,
};

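/*
 * Convert an XFS extent mapping (struct xfs_bmbt_irec) into the generic
 * iomap representation consumed by the iomap layer.
 */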
int
xfs_bmbt_to_iomap(
        struct xfs_inode        *ip,
        struct iomap            *iomap,
        struct xfs_bmbt_irec    *imap,
        unsigned int            mapping_flags,
        u16                     iomap_flags,
        u64                     sequence_cookie)
{
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_buftarg      *target = xfs_inode_buftarg(ip);

        if (unlikely(!xfs_valid_startblock(ip, imap->br_startblock))) {
                xfs_bmap_mark_sick(ip, XFS_DATA_FORK);
                return xfs_alert_fsblock_zero(ip, imap);
        }

        if (imap->br_startblock == HOLESTARTBLOCK) {
                iomap->addr = IOMAP_NULL_ADDR;
                iomap->type = IOMAP_HOLE;
        } else if (imap->br_startblock == DELAYSTARTBLOCK ||
                   isnullstartblock(imap->br_startblock)) {
                iomap->addr = IOMAP_NULL_ADDR;
                iomap->type = IOMAP_DELALLOC;
        } else {
                xfs_daddr_t     daddr = xfs_fsb_to_db(ip, imap->br_startblock);

                iomap->addr = BBTOB(daddr);
                if (mapping_flags & IOMAP_DAX)
                        iomap->addr += target->bt_dax_part_off;

                if (imap->br_state == XFS_EXT_UNWRITTEN)
                        iomap->type = IOMAP_UNWRITTEN;
                else
                        iomap->type = IOMAP_MAPPED;

                /*
                 * Mark iomaps starting at the first sector of an RTG as a
                 * merge boundary so that each I/O completion is contained
                 * within a single RTG.
                 */
                if (XFS_IS_REALTIME_INODE(ip) && xfs_has_rtgroups(mp) &&
                    xfs_rtbno_is_group_start(mp, imap->br_startblock))
                        iomap->flags |= IOMAP_F_BOUNDARY;
        }
        iomap->offset = XFS_FSB_TO_B(mp, imap->br_startoff);
        iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount);
        if (mapping_flags & IOMAP_DAX)
                iomap->dax_dev = target->bt_daxdev;
        else
                iomap->bdev = target->bt_bdev;
        iomap->flags = iomap_flags;

        if (xfs_ipincount(ip) &&
            (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
                iomap->flags |= IOMAP_F_DIRTY;

        iomap->validity_cookie = sequence_cookie;
        iomap->folio_ops = &xfs_iomap_folio_ops;
        return 0;
}

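/*
 * Fill out an iomap describing a hole covering the given file block range.
 */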
static void
xfs_hole_to_iomap(
        struct xfs_inode        *ip,
        struct iomap            *iomap,
        xfs_fileoff_t           offset_fsb,
        xfs_fileoff_t           end_fsb)
{
        struct xfs_buftarg      *target = xfs_inode_buftarg(ip);

        iomap->addr = IOMAP_NULL_ADDR;
        iomap->type = IOMAP_HOLE;
        iomap->offset = XFS_FSB_TO_B(ip->i_mount, offset_fsb);
        iomap->length = XFS_FSB_TO_B(ip->i_mount, end_fsb - offset_fsb);
        iomap->bdev = target->bt_bdev;
        iomap->dax_dev = target->bt_daxdev;
}

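/*
 * Return the end of the requested byte range in file system blocks, clamped
 * to the maximum file size supported by this filesystem.
 */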
static inline xfs_fileoff_t
xfs_iomap_end_fsb(
        struct xfs_mount        *mp,
        loff_t                  offset,
        loff_t                  count)
{
        ASSERT(offset <= mp->m_super->s_maxbytes);
        return min(XFS_B_TO_FSB(mp, offset + count),
                   XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes));
}

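/*
 * Return the stripe-based alignment to apply to allocations at EOF, or zero
 * if no alignment should be used.
 */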
static xfs_extlen_t
xfs_eof_alignment(
        struct xfs_inode        *ip)
{
        struct xfs_mount        *mp = ip->i_mount;
        xfs_extlen_t            align = 0;

        if (!XFS_IS_REALTIME_INODE(ip)) {
                /*
                 * Round up the allocation request to a stripe unit
                 * (m_dalign) boundary if the file size is >= stripe unit
                 * size, and we are allocating past the allocation eof.
                 *
                 * If mounted with the "-o swalloc" option the alignment is
                 * increased from the stripe unit size to the stripe width.
                 */
                if (mp->m_swidth && xfs_has_swalloc(mp))
                        align = mp->m_swidth;
                else if (mp->m_dalign)
                        align = mp->m_dalign;

                if (align && XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, align))
                        align = 0;
        }

        return align;
}

/*
 * Check if last_fsb is outside the last extent, and if so grow it to the next
 * stripe unit boundary.
 */
xfs_fileoff_t
xfs_iomap_eof_align_last_fsb(
        struct xfs_inode        *ip,
        xfs_fileoff_t           end_fsb)
{
        struct xfs_ifork        *ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK);
        xfs_extlen_t            extsz = xfs_get_extsz_hint(ip);
        xfs_extlen_t            align = xfs_eof_alignment(ip);
        struct xfs_bmbt_irec    irec;
        struct xfs_iext_cursor  icur;

        ASSERT(!xfs_need_iread_extents(ifp));

        /*
         * Always round up the allocation request to the extent hint boundary.
         */
        if (extsz) {
                if (align)
                        align = roundup_64(align, extsz);
                else
                        align = extsz;
        }

        if (align) {
                xfs_fileoff_t   aligned_end_fsb = roundup_64(end_fsb, align);

                xfs_iext_last(ifp, &icur);
                if (!xfs_iext_get_extent(ifp, &icur, &irec) ||
                    aligned_end_fsb >= irec.br_startoff + irec.br_blockcount)
                        return aligned_end_fsb;
        }

        return end_fsb;
}

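/*
 * Allocate real blocks for the given file range, on behalf of a direct I/O
 * or DAX write that found a hole or delalloc extent to fill.
 */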
int
xfs_iomap_write_direct(
        struct xfs_inode        *ip,
        xfs_fileoff_t           offset_fsb,
        xfs_fileoff_t           count_fsb,
        unsigned int            flags,
        struct xfs_bmbt_irec    *imap,
        u64                     *seq)
{
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_trans        *tp;
        xfs_filblks_t           resaligned;
        int                     nimaps;
        unsigned int            dblocks, rblocks;
        bool                    force = false;
        int                     error;
        int                     bmapi_flags = XFS_BMAPI_PREALLOC;
        int                     nr_exts = XFS_IEXT_ADD_NOSPLIT_CNT;

        ASSERT(count_fsb > 0);

        resaligned = xfs_aligned_fsb_count(offset_fsb, count_fsb,
                                           xfs_get_extsz_hint(ip));
        if (unlikely(XFS_IS_REALTIME_INODE(ip))) {
                dblocks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
                rblocks = resaligned;
        } else {
                dblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
                rblocks = 0;
        }

        error = xfs_qm_dqattach(ip);
        if (error)
                return error;

        /*
         * For DAX, we do not allocate unwritten extents, but instead we zero
         * the block before we commit the transaction.  Ideally we'd like to do
         * this outside the transaction context, but if we commit and then crash
         * we may not have zeroed the blocks and this will be exposed on
         * recovery of the allocation. Hence we must zero before commit.
         *
         * Further, if we are mapping unwritten extents here, we need to zero
         * and convert them to written so that we don't need an unwritten extent
         * callback for DAX. This also means that we need to be able to dip into
         * the reserve block pool for bmbt block allocation if there is no space
         * left but we need to do unwritten extent conversion.
         */
        if (flags & IOMAP_DAX) {
                bmapi_flags = XFS_BMAPI_CONVERT | XFS_BMAPI_ZERO;
                if (imap->br_state == XFS_EXT_UNWRITTEN) {
                        force = true;
                        nr_exts = XFS_IEXT_WRITE_UNWRITTEN_CNT;
                        dblocks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;
                }
        }

        error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, dblocks,
                        rblocks, force, &tp);
        if (error)
                return error;

        error = xfs_iext_count_extend(tp, ip, XFS_DATA_FORK, nr_exts);
        if (error)
                goto out_trans_cancel;

        /*
         * From this point onwards we overwrite the imap pointer that the
         * caller gave to us.
         */
        nimaps = 1;
        error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb, bmapi_flags, 0,
                                imap, &nimaps);
        if (error)
                goto out_trans_cancel;

        /*
         * Complete the transaction
         */
        error = xfs_trans_commit(tp);
        if (error)
                goto out_unlock;

        if (unlikely(!xfs_valid_startblock(ip, imap->br_startblock))) {
                xfs_bmap_mark_sick(ip, XFS_DATA_FORK);
                error = xfs_alert_fsblock_zero(ip, imap);
        }

out_unlock:
        *seq = xfs_iomap_inode_sequence(ip, 0);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return error;

out_trans_cancel:
        xfs_trans_cancel(tp);
        goto out_unlock;
}

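/*
 * Decide whether speculative preallocation needs to be throttled because the
 * inode's dquot of the given type is approaching its preallocation
 * watermarks.
 */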
STATIC bool
xfs_quota_need_throttle(
        struct xfs_inode        *ip,
        xfs_dqtype_t            type,
        xfs_fsblock_t           alloc_blocks)
{
        struct xfs_dquot        *dq = xfs_inode_dquot(ip, type);
        struct xfs_dquot_res    *res;
        struct xfs_dquot_pre    *pre;

        if (!dq || !xfs_this_quota_on(ip->i_mount, type))
                return false;

        if (XFS_IS_REALTIME_INODE(ip)) {
                res = &dq->q_rtb;
                pre = &dq->q_rtb_prealloc;
        } else {
                res = &dq->q_blk;
                pre = &dq->q_blk_prealloc;
        }

        /* no hi watermark, no throttle */
        if (!pre->q_prealloc_hi_wmark)
                return false;

        /* under the lo watermark, no throttle */
        if (res->reserved + alloc_blocks < pre->q_prealloc_lo_wmark)
                return false;

        return true;
}

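/*
 * Scale the preallocation back based on how close the dquot is to its high
 * watermark, and clamp the remaining free quota space reported to the
 * caller.
 */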
STATIC void
xfs_quota_calc_throttle(
        struct xfs_inode        *ip,
        xfs_dqtype_t            type,
        xfs_fsblock_t           *qblocks,
        int                     *qshift,
        int64_t                 *qfreesp)
{
        struct xfs_dquot        *dq = xfs_inode_dquot(ip, type);
        struct xfs_dquot_res    *res;
        struct xfs_dquot_pre    *pre;
        int64_t                 freesp;
        int                     shift = 0;

        if (!dq) {
                res = NULL;
                pre = NULL;
        } else if (XFS_IS_REALTIME_INODE(ip)) {
                res = &dq->q_rtb;
                pre = &dq->q_rtb_prealloc;
        } else {
                res = &dq->q_blk;
                pre = &dq->q_blk_prealloc;
        }

        /* no dq, or over hi wmark, squash the prealloc completely */
        if (!res || res->reserved >= pre->q_prealloc_hi_wmark) {
                *qblocks = 0;
                *qfreesp = 0;
                return;
        }

        freesp = pre->q_prealloc_hi_wmark - res->reserved;
        if (freesp < pre->q_low_space[XFS_QLOWSP_5_PCNT]) {
                shift = 2;
                if (freesp < pre->q_low_space[XFS_QLOWSP_3_PCNT])
                        shift += 2;
                if (freesp < pre->q_low_space[XFS_QLOWSP_1_PCNT])
                        shift += 2;
        }

        if (freesp < *qfreesp)
                *qfreesp = freesp;

        /* only overwrite the throttle values if we are more aggressive */
        if ((freesp >> shift) < (*qblocks >> *qshift)) {
                *qblocks = freesp;
                *qshift = shift;
        }
}

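/*
 * Read the (clamped to positive) free space counter and derive a throttle
 * shift from the filesystem low space thresholds.
 */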
static int64_t
xfs_iomap_freesp(
        struct percpu_counter   *counter,
        uint64_t                low_space[XFS_LOWSP_MAX],
        int                     *shift)
{
        int64_t                 freesp;

        freesp = percpu_counter_read_positive(counter);
        if (freesp < low_space[XFS_LOWSP_5_PCNT]) {
                *shift = 2;
                if (freesp < low_space[XFS_LOWSP_4_PCNT])
                        (*shift)++;
                if (freesp < low_space[XFS_LOWSP_3_PCNT])
                        (*shift)++;
                if (freesp < low_space[XFS_LOWSP_2_PCNT])
                        (*shift)++;
                if (freesp < low_space[XFS_LOWSP_1_PCNT])
                        (*shift)++;
        }
        return freesp;
}

/*
 * If we don't have a user specified preallocation size, dynamically increase
 * the preallocation size as the size of the file grows.  Cap the maximum size
 * at a single extent or less if the filesystem is near full. The closer the
 * filesystem is to being full, the smaller the maximum preallocation.
 */
STATIC xfs_fsblock_t
xfs_iomap_prealloc_size(
        struct xfs_inode        *ip,
        int                     whichfork,
        loff_t                  offset,
        loff_t                  count,
        struct xfs_iext_cursor  *icur)
{
        struct xfs_iext_cursor  ncur = *icur;
        struct xfs_bmbt_irec    prev, got;
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_ifork        *ifp = xfs_ifork_ptr(ip, whichfork);
        xfs_fileoff_t           offset_fsb = XFS_B_TO_FSBT(mp, offset);
        int64_t                 freesp;
        xfs_fsblock_t           qblocks;
        xfs_fsblock_t           alloc_blocks = 0;
        xfs_extlen_t            plen;
        int                     shift = 0;
        int                     qshift = 0;

        /*
         * As an exception we don't do any preallocation at all if the file is
         * smaller than the minimum preallocation and we are using the default
         * dynamic preallocation scheme, as it is likely this is the only write
         * to the file that is going to be done.
         */
        if (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_allocsize_blocks))
                return 0;

        /*
         * Use the minimum preallocation size for small files or if we are
         * writing right after a hole.
         */
        if (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_dalign) ||
            !xfs_iext_prev_extent(ifp, &ncur, &prev) ||
            prev.br_startoff + prev.br_blockcount < offset_fsb)
                return mp->m_allocsize_blocks;

        /*
         * Take the size of the preceding data extents as the basis for the
         * preallocation size. Note that we don't care if the previous extents
         * are written or not.
         */
        plen = prev.br_blockcount;
        while (xfs_iext_prev_extent(ifp, &ncur, &got)) {
                if (plen > XFS_MAX_BMBT_EXTLEN / 2 ||
                    isnullstartblock(got.br_startblock) ||
                    got.br_startoff + got.br_blockcount != prev.br_startoff ||
                    got.br_startblock + got.br_blockcount != prev.br_startblock)
                        break;
                plen += got.br_blockcount;
                prev = got;
        }

        /*
         * If the size of the extents is greater than half the maximum extent
         * length, then use the current offset as the basis.  This ensures that
         * for large files the preallocation size always extends to
         * XFS_MAX_BMBT_EXTLEN rather than falling short due to things like
         * stripe unit/width alignment of real extents.
         */
        alloc_blocks = plen * 2;
        if (alloc_blocks > XFS_MAX_BMBT_EXTLEN)
                alloc_blocks = XFS_B_TO_FSB(mp, offset);
        qblocks = alloc_blocks;

        /*
         * XFS_MAX_BMBT_EXTLEN is not a power of two value but we round the
         * prealloc down to the nearest power of two value after throttling. To
         * prevent the round down from unconditionally reducing the maximum
         * supported prealloc size, we round up first, apply appropriate
         * throttling, round down and cap the value to XFS_MAX_BMBT_EXTLEN.
         */
        alloc_blocks = XFS_FILEOFF_MIN(roundup_pow_of_two(XFS_MAX_BMBT_EXTLEN),
                                       alloc_blocks);

        if (unlikely(XFS_IS_REALTIME_INODE(ip)))
                freesp = xfs_rtbxlen_to_blen(mp,
                                xfs_iomap_freesp(&mp->m_frextents,
                                        mp->m_low_rtexts, &shift));
        else
                freesp = xfs_iomap_freesp(&mp->m_fdblocks, mp->m_low_space,
                                &shift);

        /*
         * Check each quota to cap the prealloc size, provide a shift value to
         * throttle with and adjust amount of available space.
         */
        if (xfs_quota_need_throttle(ip, XFS_DQTYPE_USER, alloc_blocks))
                xfs_quota_calc_throttle(ip, XFS_DQTYPE_USER, &qblocks, &qshift,
                                        &freesp);
        if (xfs_quota_need_throttle(ip, XFS_DQTYPE_GROUP, alloc_blocks))
                xfs_quota_calc_throttle(ip, XFS_DQTYPE_GROUP, &qblocks, &qshift,
                                        &freesp);
        if (xfs_quota_need_throttle(ip, XFS_DQTYPE_PROJ, alloc_blocks))
                xfs_quota_calc_throttle(ip, XFS_DQTYPE_PROJ, &qblocks, &qshift,
                                        &freesp);

        /*
         * The final prealloc size is set to the minimum of free space available
         * in each of the quotas and the overall filesystem.
         *
         * The shift throttle value is set to the maximum value as determined by
         * the global low free space values and per-quota low free space values.
         */
        alloc_blocks = min(alloc_blocks, qblocks);
        shift = max(shift, qshift);

        if (shift)
                alloc_blocks >>= shift;
        /*
         * rounddown_pow_of_two() returns an undefined result if we pass in
         * alloc_blocks = 0.
         */
        if (alloc_blocks)
                alloc_blocks = rounddown_pow_of_two(alloc_blocks);
        if (alloc_blocks > XFS_MAX_BMBT_EXTLEN)
                alloc_blocks = XFS_MAX_BMBT_EXTLEN;

        /*
         * If we are still trying to allocate more space than is
         * available, squash the prealloc hard. This can happen if we
         * have a large file on a small filesystem and the above
         * lowspace thresholds are smaller than XFS_MAX_BMBT_EXTLEN.
         */
        while (alloc_blocks && alloc_blocks >= freesp)
                alloc_blocks >>= 4;
        if (alloc_blocks < mp->m_allocsize_blocks)
                alloc_blocks = mp->m_allocsize_blocks;
        trace_xfs_iomap_prealloc_size(ip, alloc_blocks, shift,
                                      mp->m_allocsize_blocks);
        return alloc_blocks;
}

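/*
 * Convert unwritten extents in the given range to written, updating the
 * in-core and on-disk inode size when the conversion extends EOF.  This is
 * typically called once the data covering the range has actually been
 * written, e.g. from I/O completion.
 */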
int
xfs_iomap_write_unwritten(
        xfs_inode_t     *ip,
        xfs_off_t       offset,
        xfs_off_t       count,
        bool            update_isize)
{
        xfs_mount_t     *mp = ip->i_mount;
        xfs_fileoff_t   offset_fsb;
        xfs_filblks_t   count_fsb;
        xfs_filblks_t   numblks_fsb;
        int             nimaps;
        xfs_trans_t     *tp;
        xfs_bmbt_irec_t imap;
        struct inode    *inode = VFS_I(ip);
        xfs_fsize_t     i_size;
        uint            resblks;
        int             error;

        trace_xfs_unwritten_convert(ip, offset, count);

        offset_fsb = XFS_B_TO_FSBT(mp, offset);
        count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
        count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb);

        /*
         * Reserve enough blocks in this transaction for two complete extent
         * btree splits.  We may be converting the middle part of an unwritten
         * extent and in this case we will insert two new extents in the btree
         * each of which could cause a full split.
         *
         * This reservation amount will be used in the first call to
         * xfs_bmbt_split() to select an AG with enough space to satisfy the
         * rest of the operation.
         */
        resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;

        /* Attach dquots so that bmbt splits are accounted correctly. */
        error = xfs_qm_dqattach(ip);
        if (error)
                return error;

        do {
                /*
                 * Set up a transaction to convert the range of extents
                 * from unwritten to real. Do allocations in a loop until
                 * we have covered the range passed in.
                 *
                 * Note that we can't risk recursing back into the filesystem
                 * here as we might be asked to write out the same inode that we
                 * complete here and might deadlock on the iolock.
                 */
                error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, resblks,
                                0, true, &tp);
                if (error)
                        return error;

                error = xfs_iext_count_extend(tp, ip, XFS_DATA_FORK,
                                XFS_IEXT_WRITE_UNWRITTEN_CNT);
                if (error)
                        goto error_on_bmapi_transaction;

                /*
                 * Modify the unwritten extent state of the buffer.
                 */
                nimaps = 1;
                error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
                                        XFS_BMAPI_CONVERT, resblks, &imap,
                                        &nimaps);
                if (error)
                        goto error_on_bmapi_transaction;

                /*
                 * Log the updated inode size as we go.  We have to be careful
                 * to only log it up to the actual write offset if it is
                 * halfway into a block.
                 */
                i_size = XFS_FSB_TO_B(mp, offset_fsb + count_fsb);
                if (i_size > offset + count)
                        i_size = offset + count;
                if (update_isize && i_size > i_size_read(inode))
                        i_size_write(inode, i_size);
                i_size = xfs_new_eof(ip, i_size);
                if (i_size) {
                        ip->i_disk_size = i_size;
                        xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
                }

                error = xfs_trans_commit(tp);
                xfs_iunlock(ip, XFS_ILOCK_EXCL);
                if (error)
                        return error;

                if (unlikely(!xfs_valid_startblock(ip, imap.br_startblock))) {
                        xfs_bmap_mark_sick(ip, XFS_DATA_FORK);
                        return xfs_alert_fsblock_zero(ip, &imap);
                }

                if ((numblks_fsb = imap.br_blockcount) == 0) {
                        /*
                         * The numblks_fsb value should always get
                         * smaller, otherwise the loop is stuck.
                         */
                        ASSERT(imap.br_blockcount);
                        break;
                }
                offset_fsb += numblks_fsb;
                count_fsb -= numblks_fsb;
        } while (count_fsb > 0);

        return 0;

error_on_bmapi_transaction:
        xfs_trans_cancel(tp);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return error;
}

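/*
 * Decide whether a write to this range needs a real block allocation before
 * the I/O can proceed.
 */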
static inline bool
imap_needs_alloc(
        struct inode            *inode,
        unsigned                flags,
        struct xfs_bmbt_irec    *imap,
        int                     nimaps)
{
        /* don't allocate blocks when just zeroing */
        if (flags & IOMAP_ZERO)
                return false;
        if (!nimaps ||
            imap->br_startblock == HOLESTARTBLOCK ||
            imap->br_startblock == DELAYSTARTBLOCK)
                return true;
        /* we convert unwritten extents before copying the data for DAX */
        if ((flags & IOMAP_DAX) && imap->br_state == XFS_EXT_UNWRITTEN)
                return true;
        return false;
}

static inline bool
imap_needs_cow(
        struct xfs_inode        *ip,
        unsigned int            flags,
        struct xfs_bmbt_irec    *imap,
        int                     nimaps)
{
        if (!xfs_is_cow_inode(ip))
                return false;

        /* when zeroing we don't have to COW holes or unwritten extents */
        if (flags & (IOMAP_UNSHARE | IOMAP_ZERO)) {
                if (!nimaps ||
                    imap->br_startblock == HOLESTARTBLOCK ||
                    imap->br_state == XFS_EXT_UNWRITTEN)
                        return false;
        }

        return true;
}

/*
 * Extents not yet cached require exclusive access; don't block for
 * IOMAP_NOWAIT.
 *
 * This is basically an opencoded xfs_ilock_data_map_shared() call, but with
 * support for IOMAP_NOWAIT.
 */
static int
xfs_ilock_for_iomap(
        struct xfs_inode        *ip,
        unsigned                flags,
        unsigned                *lockmode)
{
        if (flags & IOMAP_NOWAIT) {
                if (xfs_need_iread_extents(&ip->i_df))
                        return -EAGAIN;
                if (!xfs_ilock_nowait(ip, *lockmode))
                        return -EAGAIN;
        } else {
                if (xfs_need_iread_extents(&ip->i_df))
                        *lockmode = XFS_ILOCK_EXCL;
                xfs_ilock(ip, *lockmode);
        }

        return 0;
}

/*
 * Check that the imap we are going to return to the caller spans the entire
 * range that the caller requested for the IO.
 */
static bool
imap_spans_range(
        struct xfs_bmbt_irec    *imap,
        xfs_fileoff_t           offset_fsb,
        xfs_fileoff_t           end_fsb)
{
        if (imap->br_startoff > offset_fsb)
                return false;
        if (imap->br_startoff + imap->br_blockcount < end_fsb)
                return false;
        return true;
}

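/*
 * ->iomap_begin handler for direct I/O (and DAX) writes: look up or allocate
 * a mapping for the requested range, handling COW, NOWAIT and overwrite-only
 * semantics.
 */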
static int
xfs_direct_write_iomap_begin(
        struct inode            *inode,
        loff_t                  offset,
        loff_t                  length,
        unsigned                flags,
        struct iomap            *iomap,
        struct iomap            *srcmap)
{
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_bmbt_irec    imap, cmap;
        xfs_fileoff_t           offset_fsb = XFS_B_TO_FSBT(mp, offset);
        xfs_fileoff_t           end_fsb = xfs_iomap_end_fsb(mp, offset, length);
        int                     nimaps = 1, error = 0;
        bool                    shared = false;
        u16                     iomap_flags = 0;
        unsigned int            lockmode;
        u64                     seq;

        ASSERT(flags & (IOMAP_WRITE | IOMAP_ZERO));

        if (xfs_is_shutdown(mp))
                return -EIO;

        /*
         * Writes that span EOF might trigger an IO size update on completion,
         * so consider them to be dirty for the purposes of O_DSYNC even if
         * no other metadata changes are pending or have been made here.
         */
        if (offset + length > i_size_read(inode))
                iomap_flags |= IOMAP_F_DIRTY;

        /*
         * COW writes may allocate delalloc space or convert unwritten COW
         * extents, so we need to make sure to take the lock exclusively here.
         */
        if (xfs_is_cow_inode(ip))
                lockmode = XFS_ILOCK_EXCL;
        else
                lockmode = XFS_ILOCK_SHARED;

relock:
        error = xfs_ilock_for_iomap(ip, flags, &lockmode);
        if (error)
                return error;

        /*
         * The reflink iflag could have changed since the earlier unlocked
         * check, so check it again and relock if needed.
         */
        if (xfs_is_cow_inode(ip) && lockmode == XFS_ILOCK_SHARED) {
                xfs_iunlock(ip, lockmode);
                lockmode = XFS_ILOCK_EXCL;
                goto relock;
        }

        error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
                               &nimaps, 0);
        if (error)
                goto out_unlock;

        if (imap_needs_cow(ip, flags, &imap, nimaps)) {
                error = -EAGAIN;
                if (flags & IOMAP_NOWAIT)
                        goto out_unlock;

                /* may drop and re-acquire the ilock */
                error = xfs_reflink_allocate_cow(ip, &imap, &cmap, &shared,
                                &lockmode,
                                (flags & IOMAP_DIRECT) || IS_DAX(inode));
                if (error)
                        goto out_unlock;
                if (shared)
                        goto out_found_cow;
                end_fsb = imap.br_startoff + imap.br_blockcount;
                length = XFS_FSB_TO_B(mp, end_fsb) - offset;
        }

        if (imap_needs_alloc(inode, flags, &imap, nimaps))
                goto allocate_blocks;

        /*
         * NOWAIT and OVERWRITE I/O needs to span the entire requested range
         * with a single map.  Otherwise the part of the range not covered by
         * this map would trigger an EAGAIN condition when it is subsequently
         * mapped, aborting the I/O with a partial failure.
         */
        if (flags & (IOMAP_NOWAIT | IOMAP_OVERWRITE_ONLY)) {
                error = -EAGAIN;
                if (!imap_spans_range(&imap, offset_fsb, end_fsb))
                        goto out_unlock;
        }

        /*
         * For overwrite only I/O, we cannot convert unwritten extents without
         * requiring sub-block zeroing.  This can only be done under an
         * exclusive IOLOCK, hence return -EAGAIN if this is not a written
         * extent to tell the caller to try again.
         */
        if (flags & IOMAP_OVERWRITE_ONLY) {
                error = -EAGAIN;
                if (imap.br_state != XFS_EXT_NORM &&
                    ((offset | length) & mp->m_blockmask))
                        goto out_unlock;
        }

        seq = xfs_iomap_inode_sequence(ip, iomap_flags);
        xfs_iunlock(ip, lockmode);
        trace_xfs_iomap_found(ip, offset, length, XFS_DATA_FORK, &imap);
        return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, iomap_flags, seq);

allocate_blocks:
        error = -EAGAIN;
        if (flags & (IOMAP_NOWAIT | IOMAP_OVERWRITE_ONLY))
                goto out_unlock;

        /*
         * We cap the maximum length we map to a sane size to keep the chunks
         * of work done here somewhat symmetric with the work writeback does.
         * This is a completely arbitrary number pulled out of thin air as a
         * best guess for initial testing.
         *
         * Note that the value needs to be less than 32 bits wide until the
         * lower level functions are updated.
         */
        length = min_t(loff_t, length, 1024 * PAGE_SIZE);
        end_fsb = xfs_iomap_end_fsb(mp, offset, length);

        if (offset + length > XFS_ISIZE(ip))
                end_fsb = xfs_iomap_eof_align_last_fsb(ip, end_fsb);
        else if (nimaps && imap.br_startblock == HOLESTARTBLOCK)
                end_fsb = min(end_fsb, imap.br_startoff + imap.br_blockcount);
        xfs_iunlock(ip, lockmode);

        error = xfs_iomap_write_direct(ip, offset_fsb, end_fsb - offset_fsb,
                        flags, &imap, &seq);
        if (error)
                return error;

        trace_xfs_iomap_alloc(ip, offset, length, XFS_DATA_FORK, &imap);
        return xfs_bmbt_to_iomap(ip, iomap, &imap, flags,
                                 iomap_flags | IOMAP_F_NEW, seq);

out_found_cow:
        length = XFS_FSB_TO_B(mp, cmap.br_startoff + cmap.br_blockcount);
        trace_xfs_iomap_found(ip, offset, length - offset, XFS_COW_FORK, &cmap);
        if (imap.br_startblock != HOLESTARTBLOCK) {
                seq = xfs_iomap_inode_sequence(ip, 0);
                error = xfs_bmbt_to_iomap(ip, srcmap, &imap, flags, 0, seq);
                if (error)
                        goto out_unlock;
        }
        seq = xfs_iomap_inode_sequence(ip, IOMAP_F_SHARED);
        xfs_iunlock(ip, lockmode);
        return xfs_bmbt_to_iomap(ip, iomap, &cmap, flags, IOMAP_F_SHARED, seq);

out_unlock:
        if (lockmode)
                xfs_iunlock(ip, lockmode);
        return error;
}

const struct iomap_ops xfs_direct_write_iomap_ops = {
        .iomap_begin            = xfs_direct_write_iomap_begin,
};

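/*
 * ->iomap_end handler for DAX writes: cancel the COW reservation if nothing
 * was written, otherwise commit the written part of the COW range.
 */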
static int
xfs_dax_write_iomap_end(
        struct inode            *inode,
        loff_t                  pos,
        loff_t                  length,
        ssize_t                 written,
        unsigned                flags,
        struct iomap            *iomap)
{
        struct xfs_inode        *ip = XFS_I(inode);

        if (!xfs_is_cow_inode(ip))
                return 0;

        if (!written) {
                xfs_reflink_cancel_cow_range(ip, pos, length, true);
                return 0;
        }

        return xfs_reflink_end_cow(ip, pos, written);
}

const struct iomap_ops xfs_dax_write_iomap_ops = {
        .iomap_begin    = xfs_direct_write_iomap_begin,
        .iomap_end      = xfs_dax_write_iomap_end,
};

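/*
 * ->iomap_begin handler for buffered writes: find an existing mapping or
 * reserve a (possibly preallocated) delalloc extent covering the write.
 */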
static int
xfs_buffered_write_iomap_begin(
        struct inode            *inode,
        loff_t                  offset,
        loff_t                  count,
        unsigned                flags,
        struct iomap            *iomap,
        struct iomap            *srcmap)
{
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_mount        *mp = ip->i_mount;
        xfs_fileoff_t           offset_fsb = XFS_B_TO_FSBT(mp, offset);
        xfs_fileoff_t           end_fsb = xfs_iomap_end_fsb(mp, offset, count);
        struct xfs_bmbt_irec    imap, cmap;
        struct xfs_iext_cursor  icur, ccur;
        xfs_fsblock_t           prealloc_blocks = 0;
        bool                    eof = false, cow_eof = false, shared = false;
        int                     allocfork = XFS_DATA_FORK;
        int                     error = 0;
        unsigned int            lockmode = XFS_ILOCK_EXCL;
        unsigned int            iomap_flags = 0;
        u64                     seq;

        if (xfs_is_shutdown(mp))
                return -EIO;

        /* we can't use delayed allocations when using extent size hints */
        if (xfs_get_extsz_hint(ip))
                return xfs_direct_write_iomap_begin(inode, offset, count,
                                flags, iomap, srcmap);

        error = xfs_qm_dqattach(ip);
        if (error)
                return error;

        error = xfs_ilock_for_iomap(ip, flags, &lockmode);
        if (error)
                return error;

        if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(&ip->i_df)) ||
            XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
                xfs_bmap_mark_sick(ip, XFS_DATA_FORK);
                error = -EFSCORRUPTED;
                goto out_unlock;
        }

        XFS_STATS_INC(mp, xs_blk_mapw);

        error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
        if (error)
                goto out_unlock;

        /*
         * Search the data fork first to look up our source mapping.  We
         * always need the data fork map, as we have to return it to the
         * iomap code so that the higher level write code can read data in to
         * perform read-modify-write cycles for unaligned writes.
         */
        eof = !xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap);
        if (eof)
                imap.br_startoff = end_fsb; /* fake hole until the end */

        /* We never need to allocate blocks for zeroing or unsharing a hole. */
        if ((flags & (IOMAP_UNSHARE | IOMAP_ZERO)) &&
            imap.br_startoff > offset_fsb) {
                xfs_hole_to_iomap(ip, iomap, offset_fsb, imap.br_startoff);
                goto out_unlock;
        }

        /*
         * For zeroing, trim a delalloc extent that extends beyond the EOF
         * block.  If it starts beyond the EOF block, convert it to an
         * unwritten extent.
         */
        if ((flags & IOMAP_ZERO) && imap.br_startoff <= offset_fsb &&
            isnullstartblock(imap.br_startblock)) {
                xfs_fileoff_t eof_fsb = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));

                if (offset_fsb >= eof_fsb)
                        goto convert_delay;
                if (end_fsb > eof_fsb) {
                        end_fsb = eof_fsb;
                        xfs_trim_extent(&imap, offset_fsb,
                                        end_fsb - offset_fsb);
                }
        }

        /*
         * Search the COW fork extent list even if we did not find a data fork
         * extent.  This serves two purposes: first this implements the
         * speculative preallocation using cowextsize, so that we also unshare
         * blocks adjacent to shared blocks instead of just the shared blocks
         * themselves.  Second the lookup in the extent list is generally faster
         * than going out to the shared extent tree.
         */
        if (xfs_is_cow_inode(ip)) {
                if (!ip->i_cowfp) {
                        ASSERT(!xfs_is_reflink_inode(ip));
                        xfs_ifork_init_cow(ip);
                }
                cow_eof = !xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb,
                                &ccur, &cmap);
                if (!cow_eof && cmap.br_startoff <= offset_fsb) {
                        trace_xfs_reflink_cow_found(ip, &cmap);
                        goto found_cow;
                }
        }

        if (imap.br_startoff <= offset_fsb) {
                /*
                 * For reflink files we may need a delalloc reservation when
                 * overwriting shared extents.  This includes zeroing of
                 * existing extents that contain data.
                 */
                if (!xfs_is_cow_inode(ip) ||
                    ((flags & IOMAP_ZERO) && imap.br_state != XFS_EXT_NORM)) {
                        trace_xfs_iomap_found(ip, offset, count, XFS_DATA_FORK,
                                        &imap);
                        goto found_imap;
                }

                xfs_trim_extent(&imap, offset_fsb, end_fsb - offset_fsb);

                /* Trim the mapping to the nearest shared extent boundary. */
                error = xfs_bmap_trim_cow(ip, &imap, &shared);
                if (error)
                        goto out_unlock;

                /* Not shared?  Just report the (potentially capped) extent. */
                if (!shared) {
                        trace_xfs_iomap_found(ip, offset, count, XFS_DATA_FORK,
                                        &imap);
                        goto found_imap;
                }

                /*
                 * Fork all the shared blocks from our write offset until the
                 * end of the extent.
                 */
                allocfork = XFS_COW_FORK;
                end_fsb = imap.br_startoff + imap.br_blockcount;
        } else {
                /*
                 * We cap the maximum length we map here to MAX_WRITEBACK_PAGES
                 * pages to keep the chunks of work done here somewhat
                 * symmetric with the work writeback does.  This is a
                 * completely arbitrary number pulled out of thin air.
                 *
                 * Note that the value needs to be less than 32 bits wide until
                 * the lower level functions are updated.
                 */
                count = min_t(loff_t, count, 1024 * PAGE_SIZE);
                end_fsb = xfs_iomap_end_fsb(mp, offset, count);

                if (xfs_is_always_cow_inode(ip))
                        allocfork = XFS_COW_FORK;
        }

        if (eof && offset + count > XFS_ISIZE(ip)) {
                /*
                 * Determine the initial size of the preallocation.
                 * We clean up any extra preallocation when the file is closed.
                 */
                if (xfs_has_allocsize(mp))
                        prealloc_blocks = mp->m_allocsize_blocks;
                else if (allocfork == XFS_DATA_FORK)
                        prealloc_blocks = xfs_iomap_prealloc_size(ip, allocfork,
                                                offset, count, &icur);
                else
                        prealloc_blocks = xfs_iomap_prealloc_size(ip, allocfork,
                                                offset, count, &ccur);
                if (prealloc_blocks) {
                        xfs_extlen_t    align;
                        xfs_off_t       end_offset;
                        xfs_fileoff_t   p_end_fsb;

                        end_offset = XFS_ALLOC_ALIGN(mp, offset + count - 1);
                        p_end_fsb = XFS_B_TO_FSBT(mp, end_offset) +
                                        prealloc_blocks;

                        align = xfs_eof_alignment(ip);
                        if (align)
                                p_end_fsb = roundup_64(p_end_fsb, align);

                        p_end_fsb = min(p_end_fsb,
                                XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes));
                        ASSERT(p_end_fsb > offset_fsb);
                        prealloc_blocks = p_end_fsb - end_fsb;
                }
        }

        /*
         * Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch
         * them out if the write happens to fail.
         */
        iomap_flags |= IOMAP_F_NEW;
        if (allocfork == XFS_COW_FORK) {
                error = xfs_bmapi_reserve_delalloc(ip, allocfork, offset_fsb,
                                end_fsb - offset_fsb, prealloc_blocks, &cmap,
                                &ccur, cow_eof);
                if (error)
                        goto out_unlock;

                trace_xfs_iomap_alloc(ip, offset, count, allocfork, &cmap);
                goto found_cow;
        }

        error = xfs_bmapi_reserve_delalloc(ip, allocfork, offset_fsb,
                        end_fsb - offset_fsb, prealloc_blocks, &imap, &icur,
                        eof);
        if (error)
                goto out_unlock;

        trace_xfs_iomap_alloc(ip, offset, count, allocfork, &imap);
found_imap:
        seq = xfs_iomap_inode_sequence(ip, iomap_flags);
        xfs_iunlock(ip, lockmode);
        return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, iomap_flags, seq);

convert_delay:
        xfs_iunlock(ip, lockmode);
        truncate_pagecache(inode, offset);
        error = xfs_bmapi_convert_delalloc(ip, XFS_DATA_FORK, offset,
                                           iomap, NULL);
        if (error)
                return error;

        trace_xfs_iomap_alloc(ip, offset, count, XFS_DATA_FORK, &imap);
        return 0;

found_cow:
        if (imap.br_startoff <= offset_fsb) {
                error = xfs_bmbt_to_iomap(ip, srcmap, &imap, flags, 0,
                                xfs_iomap_inode_sequence(ip, 0));
                if (error)
                        goto out_unlock;
        } else {
                xfs_trim_extent(&cmap, offset_fsb,
                                imap.br_startoff - offset_fsb);
        }

        iomap_flags |= IOMAP_F_SHARED;
        seq = xfs_iomap_inode_sequence(ip, iomap_flags);
        xfs_iunlock(ip, lockmode);
        return xfs_bmbt_to_iomap(ip, iomap, &cmap, flags, iomap_flags, seq);

out_unlock:
        xfs_iunlock(ip, lockmode);
        return error;
}

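/*
 * Punch out the delalloc blocks backing the given range, picking the COW or
 * data fork based on how the iomap was flagged when it was created.
 */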
static void
xfs_buffered_write_delalloc_punch(
        struct inode            *inode,
        loff_t                  offset,
        loff_t                  length,
        struct iomap            *iomap)
{
        xfs_bmap_punch_delalloc_range(XFS_I(inode),
                        (iomap->flags & IOMAP_F_SHARED) ?
                                XFS_COW_FORK : XFS_DATA_FORK,
                        offset, offset + length);
}

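/*
 * ->iomap_end handler for buffered writes: release any newly reserved
 * delalloc blocks that the write did not end up covering.
 */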
static int
xfs_buffered_write_iomap_end(
        struct inode            *inode,
        loff_t                  offset,
        loff_t                  length,
        ssize_t                 written,
        unsigned                flags,
        struct iomap            *iomap)
{
        loff_t                  start_byte, end_byte;

        /* If we didn't reserve the blocks, we're not allowed to punch them. */
        if (iomap->type != IOMAP_DELALLOC || !(iomap->flags & IOMAP_F_NEW))
                return 0;

        /*
         * iomap_page_mkwrite() will never fail in a way that requires delalloc
         * extents that it allocated to be revoked.  Hence never try to release
         * them here.
         */
        if (flags & IOMAP_FAULT)
                return 0;

        /* Nothing to do if we've written the entire delalloc extent */
        start_byte = iomap_last_written_block(inode, offset, written);
        end_byte = round_up(offset + length, i_blocksize(inode));
        if (start_byte >= end_byte)
                return 0;

        /* For zeroing operations the callers already hold invalidate_lock. */
        if (flags & (IOMAP_UNSHARE | IOMAP_ZERO)) {
                rwsem_assert_held_write(&inode->i_mapping->invalidate_lock);
                iomap_write_delalloc_release(inode, start_byte, end_byte, flags,
                                iomap, xfs_buffered_write_delalloc_punch);
        } else {
                filemap_invalidate_lock(inode->i_mapping);
                iomap_write_delalloc_release(inode, start_byte, end_byte, flags,
                                iomap, xfs_buffered_write_delalloc_punch);
                filemap_invalidate_unlock(inode->i_mapping);
        }

        return 0;
}

const struct iomap_ops xfs_buffered_write_iomap_ops = {
        .iomap_begin            = xfs_buffered_write_iomap_begin,
        .iomap_end              = xfs_buffered_write_iomap_end,
};

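/*
 * ->iomap_begin handler for reads: look up the existing mapping without ever
 * allocating blocks, trimming around shared extents when reporting or for
 * DAX.
 */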
static int
xfs_read_iomap_begin(
        struct inode            *inode,
        loff_t                  offset,
        loff_t                  length,
        unsigned                flags,
        struct iomap            *iomap,
        struct iomap            *srcmap)
{
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_bmbt_irec    imap;
        xfs_fileoff_t           offset_fsb = XFS_B_TO_FSBT(mp, offset);
        xfs_fileoff_t           end_fsb = xfs_iomap_end_fsb(mp, offset, length);
        int                     nimaps = 1, error = 0;
        bool                    shared = false;
        unsigned int            lockmode = XFS_ILOCK_SHARED;
        u64                     seq;

        ASSERT(!(flags & (IOMAP_WRITE | IOMAP_ZERO)));

        if (xfs_is_shutdown(mp))
                return -EIO;

        error = xfs_ilock_for_iomap(ip, flags, &lockmode);
        if (error)
                return error;
        error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
                               &nimaps, 0);
        if (!error && ((flags & IOMAP_REPORT) || IS_DAX(inode)))
                error = xfs_reflink_trim_around_shared(ip, &imap, &shared);
        seq = xfs_iomap_inode_sequence(ip, shared ? IOMAP_F_SHARED : 0);
        xfs_iunlock(ip, lockmode);

        if (error)
                return error;
        trace_xfs_iomap_found(ip, offset, length, XFS_DATA_FORK, &imap);
        return xfs_bmbt_to_iomap(ip, iomap, &imap, flags,
                                 shared ? IOMAP_F_SHARED : 0, seq);
}

const struct iomap_ops xfs_read_iomap_ops = {
        .iomap_begin            = xfs_read_iomap_begin,
};

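/*
 * ->iomap_begin handler used by the seek code: report the existing data
 * extent, COW extent, or hole covering the given range without allocating
 * anything.  COW extents are reported as unwritten so that the caller probes
 * the page cache for dirty data.
 */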
static int
xfs_seek_iomap_begin(
        struct inode            *inode,
        loff_t                  offset,
        loff_t                  length,
        unsigned                flags,
        struct iomap            *iomap,
        struct iomap            *srcmap)
{
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_mount        *mp = ip->i_mount;
        xfs_fileoff_t           offset_fsb = XFS_B_TO_FSBT(mp, offset);
        xfs_fileoff_t           end_fsb = XFS_B_TO_FSB(mp, offset + length);
        xfs_fileoff_t           cow_fsb = NULLFILEOFF, data_fsb = NULLFILEOFF;
        struct xfs_iext_cursor  icur;
        struct xfs_bmbt_irec    imap, cmap;
        int                     error = 0;
        unsigned                lockmode;
        u64                     seq;

        if (xfs_is_shutdown(mp))
                return -EIO;

        lockmode = xfs_ilock_data_map_shared(ip);
        error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
        if (error)
                goto out_unlock;

        if (xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap)) {
                /*
                 * If we found a data extent we are done.
                 */
                if (imap.br_startoff <= offset_fsb)
                        goto done;
                data_fsb = imap.br_startoff;
        } else {
                /*
                 * Fake a hole until the end of the file.
                 */
                data_fsb = xfs_iomap_end_fsb(mp, offset, length);
        }

        /*
         * If a COW fork extent covers the hole, report it - capped to the next
         * data fork extent:
         */
        if (xfs_inode_has_cow_data(ip) &&
            xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &cmap))
                cow_fsb = cmap.br_startoff;
        if (cow_fsb != NULLFILEOFF && cow_fsb <= offset_fsb) {
                if (data_fsb < cow_fsb + cmap.br_blockcount)
                        end_fsb = min(end_fsb, data_fsb);
                xfs_trim_extent(&cmap, offset_fsb, end_fsb - offset_fsb);
                seq = xfs_iomap_inode_sequence(ip, IOMAP_F_SHARED);
                error = xfs_bmbt_to_iomap(ip, iomap, &cmap, flags,
                                IOMAP_F_SHARED, seq);
                /*
                 * This is a COW extent, so we must probe the page cache
                 * because there could be dirty page cache backed by this
                 * extent.
                 */
                iomap->type = IOMAP_UNWRITTEN;
                goto out_unlock;
        }

        /*
         * Else report a hole, capped to the next found data or COW extent.
         */
        if (cow_fsb != NULLFILEOFF && cow_fsb < data_fsb)
                imap.br_blockcount = cow_fsb - offset_fsb;
        else
                imap.br_blockcount = data_fsb - offset_fsb;
        imap.br_startoff = offset_fsb;
        imap.br_startblock = HOLESTARTBLOCK;
        imap.br_state = XFS_EXT_NORM;
done:
        seq = xfs_iomap_inode_sequence(ip, 0);
        xfs_trim_extent(&imap, offset_fsb, end_fsb - offset_fsb);
        error = xfs_bmbt_to_iomap(ip, iomap, &imap, flags, 0, seq);
out_unlock:
        xfs_iunlock(ip, lockmode);
        return error;
}

const struct iomap_ops xfs_seek_iomap_ops = {
        .iomap_begin            = xfs_seek_iomap_begin,
};

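/*
 * ->iomap_begin handler for the attribute fork.  Returns ENOENT if the inode
 * has no attribute fork or the fork has no extents.
 */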
static int
xfs_xattr_iomap_begin(
        struct inode            *inode,
        loff_t                  offset,
        loff_t                  length,
        unsigned                flags,
        struct iomap            *iomap,
        struct iomap            *srcmap)
{
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_mount        *mp = ip->i_mount;
        xfs_fileoff_t           offset_fsb = XFS_B_TO_FSBT(mp, offset);
        xfs_fileoff_t           end_fsb = XFS_B_TO_FSB(mp, offset + length);
        struct xfs_bmbt_irec    imap;
        int                     nimaps = 1, error = 0;
        unsigned                lockmode;
        int                     seq;

        if (xfs_is_shutdown(mp))
                return -EIO;

        lockmode = xfs_ilock_attr_map_shared(ip);

        /* if there is no attribute fork or it has no extents, return ENOENT */
        if (!xfs_inode_has_attr_fork(ip) || !ip->i_af.if_nextents) {
                error = -ENOENT;
                goto out_unlock;
        }

        ASSERT(ip->i_af.if_format != XFS_DINODE_FMT_LOCAL);
        error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
                               &nimaps, XFS_BMAPI_ATTRFORK);
out_unlock:

        seq = xfs_iomap_inode_sequence(ip, IOMAP_F_XATTR);
        xfs_iunlock(ip, lockmode);

        if (error)
                return error;
        ASSERT(nimaps);
        return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, IOMAP_F_XATTR, seq);
}

const struct iomap_ops xfs_xattr_iomap_ops = {
        .iomap_begin            = xfs_xattr_iomap_begin,
};

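/*
 * Zero a range of the file, dispatching to the DAX or buffered iomap
 * implementation as appropriate.
 */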
int
xfs_zero_range(
        struct xfs_inode        *ip,
        loff_t                  pos,
        loff_t                  len,
        bool                    *did_zero)
{
        struct inode            *inode = VFS_I(ip);

        xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL);

        if (IS_DAX(inode))
                return dax_zero_range(inode, pos, len, did_zero,
                                      &xfs_dax_write_iomap_ops);
        return iomap_zero_range(inode, pos, len, did_zero,
                                &xfs_buffered_write_iomap_ops);
}

int
xfs_truncate_page(
        struct xfs_inode        *ip,
        loff_t                  pos,
        bool                    *did_zero)
{
        struct inode            *inode = VFS_I(ip);

        if (IS_DAX(inode))
                return dax_truncate_page(inode, pos, did_zero,
                                        &xfs_dax_write_iomap_ops);
        return iomap_truncate_page(inode, pos, did_zero,
                                   &xfs_buffered_write_iomap_ops);
}