// SPDX-License-Identifier: GPL-2.0-only
/* Network filesystem high-level buffered write support.
 *
 * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/pagevec.h>
#include "internal.h"

/*
 * Determined write method.  Adjust netfs_folio_traces if this is changed.
 */
enum netfs_how_to_modify {
	NETFS_FOLIO_IS_UPTODATE,	/* Folio is uptodate already */
	NETFS_JUST_PREFETCH,		/* We have to read the folio anyway */
	NETFS_WHOLE_FOLIO_MODIFY,	/* We're going to overwrite the whole folio */
	NETFS_MODIFY_AND_CLEAR,		/* We can assume there is no data to be downloaded. */
	NETFS_STREAMING_WRITE,		/* Store incomplete data in non-uptodate page. */
	NETFS_STREAMING_WRITE_CONT,	/* Continue streaming write. */
	NETFS_FLUSH_CONTENT,		/* Flush incompatible content. */
};

static void netfs_set_group(struct folio *folio, struct netfs_group *netfs_group)
{
	void *priv = folio_get_private(folio);

	if (netfs_group && (!priv || priv == NETFS_FOLIO_COPY_TO_CACHE))
		folio_attach_private(folio, netfs_get_group(netfs_group));
	else if (!netfs_group && priv == NETFS_FOLIO_COPY_TO_CACHE)
		folio_detach_private(folio);
}

/*
 * Decide how we should modify a folio.  We might be attempting to do
 * write-streaming, in which case we don't want to do a local RMW cycle if we
 * can avoid it.  If we're doing local caching or content crypto, we award that
 * priority over avoiding RMW.  If the file is open readably, then we also
 * assume that we may want to read what we wrote.
 */
static enum netfs_how_to_modify netfs_how_to_modify(struct netfs_inode *ctx,
						    struct file *file,
						    struct folio *folio,
						    void *netfs_group,
						    size_t flen,
						    size_t offset,
						    size_t len,
						    bool maybe_trouble)
{
	struct netfs_folio *finfo = netfs_folio_info(folio);
	struct netfs_group *group = netfs_folio_group(folio);
	loff_t pos = folio_pos(folio);

	_enter("");

	if (group != netfs_group && group != NETFS_FOLIO_COPY_TO_CACHE)
		return NETFS_FLUSH_CONTENT;

	if (folio_test_uptodate(folio))
		return NETFS_FOLIO_IS_UPTODATE;

	if (pos >= ctx->zero_point)
		return NETFS_MODIFY_AND_CLEAR;

	if (!maybe_trouble && offset == 0 && len >= flen)
		return NETFS_WHOLE_FOLIO_MODIFY;

	if (file->f_mode & FMODE_READ)
		goto no_write_streaming;

	if (netfs_is_cache_enabled(ctx)) {
		/* We don't want to get a streaming write on a file that loses
		 * caching service temporarily because the backing store got
		 * culled.
		 */
		goto no_write_streaming;
	}

	if (!finfo)
		return NETFS_STREAMING_WRITE;

	/* We can continue a streaming write only if it continues on from the
	 * previous.  If it overlaps, we must flush lest we suffer a partial
	 * copy and disjoint dirty regions.
	 */
	if (offset == finfo->dirty_offset + finfo->dirty_len)
		return NETFS_STREAMING_WRITE_CONT;
	return NETFS_FLUSH_CONTENT;

no_write_streaming:
	if (finfo) {
		netfs_stat(&netfs_n_wh_wstream_conflict);
		return NETFS_FLUSH_CONTENT;
	}
	return NETFS_JUST_PREFETCH;
}

/*
 * Grab a folio for writing and lock it.  Attempt to allocate as large a folio
 * as possible to hold as much of the remaining length as possible in one go.
 */
static struct folio *netfs_grab_folio_for_write(struct address_space *mapping,
						loff_t pos, size_t part)
{
	pgoff_t index = pos / PAGE_SIZE;
	fgf_t fgp_flags = FGP_WRITEBEGIN;

	if (mapping_large_folio_support(mapping))
		fgp_flags |= fgf_set_order(pos % PAGE_SIZE + part);

	return __filemap_get_folio(mapping, index, fgp_flags,
				   mapping_gfp_mask(mapping));
}

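/*
 * Illustrative note, not part of the original file: with 4KiB pages, a 20KiB
 * write landing at the start of a folio passes 20480 to fgf_set_order(),
 * which (as defined in <linux/pagemap.h> at the time of writing) rounds the
 * size down to a power of two, so __filemap_get_folio() is asked for an
 * order-2 (16KiB) folio and may still fall back to a smaller order if that
 * cannot be allocated.
 */
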
/*
 * Update i_size and estimate the update to i_blocks to reflect the additional
 * data written into the pagecache until we can find out from the server what
 * the values actually are.
 */
static void netfs_update_i_size(struct netfs_inode *ctx, struct inode *inode,
				loff_t i_size, loff_t pos, size_t copied)
{
	blkcnt_t add;
	size_t gap;

	if (ctx->ops->update_i_size) {
		ctx->ops->update_i_size(inode, pos);
		return;
	}

	i_size_write(inode, pos);
#if IS_ENABLED(CONFIG_FSCACHE)
	fscache_update_cookie(ctx->cache, NULL, &pos);
#endif

	gap = SECTOR_SIZE - (i_size & (SECTOR_SIZE - 1));
	if (copied > gap) {
		add = DIV_ROUND_UP(copied - gap, SECTOR_SIZE);
		inode->i_blocks = min_t(blkcnt_t,
					DIV_ROUND_UP(pos, SECTOR_SIZE),
					inode->i_blocks + add);
	}
}

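/*
 * Worked example of the i_blocks estimate above, illustrative only and not
 * part of the original file: with SECTOR_SIZE of 512, an old i_size of 1000
 * and a 3000-byte copy ending at pos 4000, gap = 512 - (1000 & 511) = 24, so
 * add = DIV_ROUND_UP(3000 - 24, 512) = 6 sectors, and i_blocks grows by at
 * most that, capped at DIV_ROUND_UP(4000, 512) = 8 sectors in total.
 */
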
/**
 * netfs_perform_write - Copy data into the pagecache.
 * @iocb: The operation parameters
 * @iter: The source buffer
 * @netfs_group: Grouping for dirty pages (eg. ceph snaps).
 *
 * Copy data into pagecache pages attached to the inode specified by @iocb.
 * The caller must hold appropriate inode locks.
 *
 * Dirty pages are tagged with a netfs_folio struct if they're not up to date
 * to indicate the range modified.  Dirty pages may also be tagged with a
 * netfs-specific grouping such that data from an old group gets flushed before
 * a new one is started.
 */
ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
			    struct netfs_group *netfs_group)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct address_space *mapping = inode->i_mapping;
	struct netfs_inode *ctx = netfs_inode(inode);
	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_NONE,
		.for_sync	= true,
		.nr_to_write	= LONG_MAX,
		.range_start	= iocb->ki_pos,
		.range_end	= iocb->ki_pos + iter->count,
	};
	struct netfs_io_request *wreq = NULL;
	struct netfs_folio *finfo;
	struct folio *folio, *writethrough = NULL;
	enum netfs_how_to_modify howto;
	enum netfs_folio_trace trace;
	unsigned int bdp_flags = (iocb->ki_flags & IOCB_NOWAIT) ? BDP_ASYNC : 0;
	ssize_t written = 0, ret, ret2;
	loff_t i_size, pos = iocb->ki_pos, from, to;
	size_t max_chunk = mapping_max_folio_size(mapping);
	bool maybe_trouble = false;

	if (unlikely(test_bit(NETFS_ICTX_WRITETHROUGH, &ctx->flags) ||
		     iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC))
	    ) {
		wbc_attach_fdatawrite_inode(&wbc, mapping->host);

		ret = filemap_write_and_wait_range(mapping, pos, pos + iter->count);
		if (ret < 0) {
			wbc_detach_inode(&wbc);
			goto out;
		}

		wreq = netfs_begin_writethrough(iocb, iter->count);
		if (IS_ERR(wreq)) {
			wbc_detach_inode(&wbc);
			ret = PTR_ERR(wreq);
			wreq = NULL;
			goto out;
		}
		if (!is_sync_kiocb(iocb))
			wreq->iocb = iocb;
		netfs_stat(&netfs_n_wh_writethrough);
	} else {
		netfs_stat(&netfs_n_wh_buffered_write);
	}

	do {
		size_t flen;
		size_t offset;	/* Offset into pagecache folio */
		size_t part;	/* Bytes to write to folio */
		size_t copied;	/* Bytes copied from user */
		ret = balance_dirty_pages_ratelimited_flags(mapping, bdp_flags);
		if (unlikely(ret < 0))
			break;

		offset = pos & (max_chunk - 1);
		part = min(max_chunk - offset, iov_iter_count(iter));

		/* Bring in the user pages that we will copy from _first_ lest
		 * we hit a nasty deadlock on copying from the same page as
		 * we're writing to, without it being marked uptodate.
		 *
		 * Not only is this an optimisation, but it is also required to
		 * check that the address is actually valid, when atomic
		 * usercopies are used below.
		 *
		 * We rely on the page being held onto long enough by the LRU
		 * that we can grab it below if this causes it to be read.
		 */
		if (unlikely(fault_in_iov_iter_readable(iter, part) == part))
			break;

		folio = netfs_grab_folio_for_write(mapping, pos, part);
		if (IS_ERR(folio)) {
			ret = PTR_ERR(folio);
			break;
		}

		flen = folio_size(folio);
		offset = pos & (flen - 1);
		part = min_t(size_t, flen - offset, part);

		/* Wait for writeback to complete.  The writeback engine owns
		 * the info in folio->private and may change it until it
		 * removes the WB mark.
		 */
		if (folio_get_private(folio) &&
		    folio_wait_writeback_killable(folio)) {
			ret = written ? -EINTR : -ERESTARTSYS;
			goto error_folio_unlock;
		}

		if (signal_pending(current)) {
			ret = written ? -EINTR : -ERESTARTSYS;
			goto error_folio_unlock;
		}

		/* See if we need to prefetch the area we're going to modify.
		 * We need to do this before we get a lock on the folio in case
		 * there's more than one writer competing for the same cache
		 * block.
		 */
		howto = netfs_how_to_modify(ctx, file, folio, netfs_group,
					    flen, offset, part, maybe_trouble);
		_debug("howto %u", howto);
		switch (howto) {
		case NETFS_JUST_PREFETCH:
			ret = netfs_prefetch_for_write(file, folio, offset, part);
			if (ret < 0) {
				_debug("prefetch = %zd", ret);
				goto error_folio_unlock;
			}
			break;
		case NETFS_FOLIO_IS_UPTODATE:
		case NETFS_WHOLE_FOLIO_MODIFY:
		case NETFS_STREAMING_WRITE_CONT:
			break;
		case NETFS_MODIFY_AND_CLEAR:
			zero_user_segment(&folio->page, 0, offset);
			break;
		case NETFS_STREAMING_WRITE:
			ret = -EIO;
			if (WARN_ON(folio_get_private(folio)))
				goto error_folio_unlock;
			break;
		case NETFS_FLUSH_CONTENT:
			trace_netfs_folio(folio, netfs_flush_content);
			from = folio_pos(folio);
			to = from + folio_size(folio) - 1;
			folio_unlock(folio);
			folio_put(folio);
			ret = filemap_write_and_wait_range(mapping, from, to);
			if (ret < 0)
				goto error_folio_unlock;
			continue;
		}

		if (mapping_writably_mapped(mapping))
			flush_dcache_folio(folio);

		copied = copy_folio_from_iter_atomic(folio, offset, part, iter);

		flush_dcache_folio(folio);

		/* Deal with a (partially) failed copy */
		if (copied == 0) {
			ret = -EFAULT;
			goto error_folio_unlock;
		}

		trace = (enum netfs_folio_trace)howto;
		switch (howto) {
		case NETFS_FOLIO_IS_UPTODATE:
		case NETFS_JUST_PREFETCH:
			netfs_set_group(folio, netfs_group);
			break;
		case NETFS_MODIFY_AND_CLEAR:
			zero_user_segment(&folio->page, offset + copied, flen);
			netfs_set_group(folio, netfs_group);
			folio_mark_uptodate(folio);
			break;
		case NETFS_WHOLE_FOLIO_MODIFY:
			if (unlikely(copied < part)) {
				maybe_trouble = true;
				iov_iter_revert(iter, copied);
				copied = 0;
				folio_unlock(folio);
				goto retry;
			}
			netfs_set_group(folio, netfs_group);
			folio_mark_uptodate(folio);
			break;
		case NETFS_STREAMING_WRITE:
			if (offset == 0 && copied == flen) {
				netfs_set_group(folio, netfs_group);
				folio_mark_uptodate(folio);
				trace = netfs_streaming_filled_page;
				break;
			}
			finfo = kzalloc(sizeof(*finfo), GFP_KERNEL);
			if (!finfo) {
				iov_iter_revert(iter, copied);
				ret = -ENOMEM;
				goto error_folio_unlock;
			}
			finfo->netfs_group = netfs_get_group(netfs_group);
			finfo->dirty_offset = offset;
			finfo->dirty_len = copied;
			folio_attach_private(folio, (void *)((unsigned long)finfo |
							     NETFS_FOLIO_INFO));
			break;
		case NETFS_STREAMING_WRITE_CONT:
			finfo = netfs_folio_info(folio);
			finfo->dirty_len += copied;
			if (finfo->dirty_offset == 0 && finfo->dirty_len == flen) {
				if (finfo->netfs_group)
					folio_change_private(folio, finfo->netfs_group);
				else
					folio_detach_private(folio);
				folio_mark_uptodate(folio);
				kfree(finfo);
				trace = netfs_streaming_cont_filled_page;
			}
			break;
		default:
			WARN(true, "Unexpected modify type %u ix=%lx\n",
			     howto, folio->index);
			ret = -EIO;
			goto error_folio_unlock;
		}

		trace_netfs_folio(folio, trace);

		/* Update the inode size if we moved the EOF marker */
		pos += copied;
		i_size = i_size_read(inode);
		if (pos > i_size)
			netfs_update_i_size(ctx, inode, i_size, pos, copied);
		written += copied;

		if (likely(!wreq)) {
			folio_mark_dirty(folio);
			folio_unlock(folio);
		} else {
			netfs_advance_writethrough(wreq, &wbc, folio, copied,
						   offset + copied == flen,
						   &writethrough);
			/* Folio unlocked */
		}
retry:
		folio_put(folio);
		folio = NULL;

		cond_resched();
	} while (iov_iter_count(iter));

out:
	if (likely(written) && ctx->ops->post_modify)
		ctx->ops->post_modify(inode);

	if (unlikely(wreq)) {
		ret2 = netfs_end_writethrough(wreq, &wbc, writethrough);
		wbc_detach_inode(&wbc);
		if (ret2 == -EIOCBQUEUED)
			return ret2;
		if (ret == 0)
			ret = ret2;
	}

	iocb->ki_pos += written;
	_leave(" = %zd [%zd]", written, ret);
	return written ? written : ret;

error_folio_unlock:
	folio_unlock(folio);
	folio_put(folio);
	goto out;
}
EXPORT_SYMBOL(netfs_perform_write);

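/*
 * Illustrative note, not part of the original file: on a dirty folio,
 * folio->private holds either a netfs_group pointer, the special
 * NETFS_FOLIO_COPY_TO_CACHE marker, or, for a streaming write, a netfs_folio
 * pointer with its bottom bit (NETFS_FOLIO_INFO) set; netfs_folio_info() and
 * netfs_folio_group() distinguish these cases (see linux/netfs.h at the time
 * of writing).
 */
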
/**
 * netfs_buffered_write_iter_locked - write data to a file
 * @iocb: IO state structure (file, offset, etc.)
 * @from: iov_iter with data to write
 * @netfs_group: Grouping for dirty pages (eg. ceph snaps).
 *
 * This function does all the work needed for actually writing data to a
 * file. It does all basic checks, removes SUID from the file, updates
 * modification times and calls proper subroutines depending on whether we
 * do direct IO or a standard buffered write.
 *
 * The caller must hold appropriate locks around this function and have called
 * generic_write_checks() already.  The caller is also responsible for doing
 * any necessary syncing afterwards.
 *
 * This function does *not* take care of syncing data in case of O_SYNC write.
 * A caller has to handle it. This is mainly due to the fact that we want to
 * avoid syncing under i_rwsem.
 *
 * Return:
 * * number of bytes written, even for truncated writes
 * * negative error code if no data has been written at all
 */
ssize_t netfs_buffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *from,
					 struct netfs_group *netfs_group)
{
	struct file *file = iocb->ki_filp;
	ssize_t ret;

	trace_netfs_write_iter(iocb, from);

	ret = file_remove_privs(file);
	if (ret)
		return ret;

	ret = file_update_time(file);
	if (ret)
		return ret;

	return netfs_perform_write(iocb, from, netfs_group);
}
EXPORT_SYMBOL(netfs_buffered_write_iter_locked);

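/*
 * Illustrative sketch, not part of the original file: how a filesystem might
 * drive netfs_buffered_write_iter_locked() with its own dirty-page grouping.
 * The myfs_* names (including the group-lookup helper) are hypothetical; only
 * the locking pattern and the netfs/VFS calls are taken from this file.
 */
#if 0
static ssize_t myfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct netfs_group *group;
	ssize_t ret;

	ret = netfs_start_io_write(inode);	/* Lock out conflicting unbuffered I/O */
	if (ret < 0)
		return ret;

	ret = generic_write_checks(iocb, from);
	if (ret > 0) {
		group = myfs_current_write_group(inode); /* Hypothetical helper */
		ret = netfs_buffered_write_iter_locked(iocb, from, group);
	}

	netfs_end_io_write(inode);
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
#endif
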
/**
 * netfs_file_write_iter - write data to a file
 * @iocb: IO state structure
 * @from: iov_iter with data to write
 *
 * Perform a write to a file, writing into the pagecache if possible and doing
 * an unbuffered write instead if not.
 *
 * Return:
 * * Negative error code if no data has been written at all or
 *   vfs_fsync_range() failed for a synchronous write
 * * Number of bytes written, even for truncated writes
 */
ssize_t netfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct netfs_inode *ictx = netfs_inode(inode);
	ssize_t ret;

	_enter("%llx,%zx,%llx", iocb->ki_pos, iov_iter_count(from), i_size_read(inode));

	if (!iov_iter_count(from))
		return 0;

	if ((iocb->ki_flags & IOCB_DIRECT) ||
	    test_bit(NETFS_ICTX_UNBUFFERED, &ictx->flags))
		return netfs_unbuffered_write_iter(iocb, from);

	ret = netfs_start_io_write(inode);
	if (ret < 0)
		return ret;

	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = netfs_buffered_write_iter_locked(iocb, from, NULL);
	netfs_end_io_write(inode);
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
EXPORT_SYMBOL(netfs_file_write_iter);

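/*
 * Illustrative sketch, not part of the original file: netfs_file_write_iter()
 * is designed to be plugged straight into a network filesystem's
 * file_operations.  "myfs_file_operations" is a hypothetical name and a real
 * client would add its own open, release, mmap and fsync handlers.
 */
#if 0
const struct file_operations myfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= netfs_file_read_iter,
	.write_iter	= netfs_file_write_iter,
};
#endif
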
/*
 * Notification that a previously read-only page is about to become writable.
 * Note that the caller indicates a single page of a multipage folio.
 */
vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_group)
{
	struct netfs_group *group;
	struct folio *folio = page_folio(vmf->page);
	struct file *file = vmf->vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = file_inode(file);
	struct netfs_inode *ictx = netfs_inode(inode);
	vm_fault_t ret = VM_FAULT_RETRY;
	int err;

	_enter("%lx", folio->index);

	sb_start_pagefault(inode->i_sb);

	if (folio_lock_killable(folio) < 0)
		goto out;
	if (folio->mapping != mapping) {
		folio_unlock(folio);
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (folio_wait_writeback_killable(folio)) {
		ret = VM_FAULT_LOCKED;
		goto out;
	}

	/* Can we see a streaming write here? */
	if (WARN_ON(!folio_test_uptodate(folio))) {
		ret = VM_FAULT_SIGBUS | VM_FAULT_LOCKED;
		goto out;
	}

	group = netfs_folio_group(folio);
	if (group != netfs_group && group != NETFS_FOLIO_COPY_TO_CACHE) {
		folio_unlock(folio);
		err = filemap_fdatawrite_range(mapping,
					       folio_pos(folio),
					       folio_pos(folio) + folio_size(folio));
		switch (err) {
		case 0:
			ret = VM_FAULT_RETRY;
			goto out;
		case -ENOMEM:
			ret = VM_FAULT_OOM;
			goto out;
		default:
			ret = VM_FAULT_SIGBUS;
			goto out;
		}
	}

	if (folio_test_dirty(folio))
		trace_netfs_folio(folio, netfs_folio_trace_mkwrite_plus);
	else
		trace_netfs_folio(folio, netfs_folio_trace_mkwrite);
	netfs_set_group(folio, netfs_group);
	file_update_time(file);
	if (ictx->ops->post_modify)
		ictx->ops->post_modify(inode);
	ret = VM_FAULT_LOCKED;
out:
	sb_end_pagefault(inode->i_sb);
	return ret;
}
EXPORT_SYMBOL(netfs_page_mkwrite);
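
/*
 * Illustrative sketch, not part of the original file: netfs_page_mkwrite() is
 * normally reached via a small per-filesystem wrapper in vm_operations_struct
 * so that the filesystem can pass its own netfs_group (NULL here).  The
 * myfs_* names are hypothetical.
 */
#if 0
static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
{
	return netfs_page_mkwrite(vmf, NULL);
}

static const struct vm_operations_struct myfs_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= myfs_page_mkwrite,
};
#endif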