// SPDX-License-Identifier: GPL-2.0-or-later
/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 */

#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/netfs.h>
#include "internal.h"
static void afs_write_to_cache(struct afs_vnode *vnode, loff_t start, size_t len,
			       loff_t i_size, bool caching);
#ifdef CONFIG_AFS_FSCACHE
/*
 * Mark a page as having been made dirty and thus needing writeback.  We also
 * need to pin the cache object to write back to.
 */
bool afs_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	return fscache_dirty_folio(mapping, folio,
				   afs_vnode_cache(AFS_FS_I(mapping->host)));
}
static void afs_folio_start_fscache(bool caching, struct folio *folio)
{
	if (caching)
		folio_start_fscache(folio);
}
#else
static void afs_folio_start_fscache(bool caching, struct folio *folio)
{
}
#endif
/*
 * prepare to perform part of a write to a page
 */
int afs_write_begin(struct file *file, struct address_space *mapping,
		    loff_t pos, unsigned len,
		    struct page **_page, void **fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct folio *folio;
	unsigned long priv;
	unsigned f, from;
	unsigned t, to;
	pgoff_t index;
	int ret;

	_enter("{%llx:%llu},%llx,%x",
	       vnode->fid.vid, vnode->fid.vnode, pos, len);

	/* Prefetch area to be written into the cache if we're caching this
	 * file.  We need to do this before we get a lock on the page in case
	 * there's more than one writer competing for the same cache block.
	 */
	ret = netfs_write_begin(file, mapping, pos, len, &folio, fsdata);
	if (ret < 0)
		return ret;

	index = folio_index(folio);
	from = pos - index * PAGE_SIZE;
	to = from + len;

try_again:
	/* See if this page is already partially written in a way that we can
	 * merge the new write with.
	 */
	if (folio_test_private(folio)) {
		priv = (unsigned long)folio_get_private(folio);
		f = afs_folio_dirty_from(folio, priv);
		t = afs_folio_dirty_to(folio, priv);
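		/* The folio's private word encodes the byte range [f, t) that
		 * has been dirtied but not yet written back.  While the folio
		 * is under writeback that range can't be extended, so a
		 * conflicting write has to be flushed out first.
		 */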
		if (folio_test_writeback(folio)) {
			trace_afs_folio_dirty(vnode, tracepoint_string("alrdy"), folio);
			goto flush_conflicting_write;
		}
		/* If the file is being filled locally, allow inter-write
		 * spaces to be merged into writes.  If it's not, only write
		 * back what the user gives us.
		 */
		if (!test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags) &&
		    (to < f || from > t))
			goto flush_conflicting_write;
	}

	*_page = &folio->page;
	_leave(" = 0");
	return 0;

	/* The previous write and this write aren't adjacent or overlapping, so
	 * flush the page out.
	 */
flush_conflicting_write:
	_debug("flush conflict");
	ret = folio_write_one(folio);
	if (ret < 0)
		goto error;

	ret = folio_lock_killable(folio);
	if (ret < 0)
		goto error;
	goto try_again;

error:
	folio_put(folio);
	_leave(" = %d", ret);
	return ret;
}
/*
 * finalise part of a write to a page
 */
int afs_write_end(struct file *file, struct address_space *mapping,
		  loff_t pos, unsigned len, unsigned copied,
		  struct page *subpage, void *fsdata)
{
	struct folio *folio = page_folio(subpage);
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	unsigned long priv;
	unsigned int f, from = offset_in_folio(folio, pos);
	unsigned int t, to = from + copied;
	loff_t i_size, write_end_pos;

	_enter("{%llx:%llu},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, folio_index(folio));

	if (!folio_test_uptodate(folio)) {
		if (copied < len) {
			copied = 0;
			goto out;
		}

		folio_mark_uptodate(folio);
	}

	if (copied == 0)
		goto out;
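	/* If the write reaches beyond the current file size, advance i_size.
	 * The size is re-checked under the vnode's cb_lock so that a racing,
	 * larger extension isn't overwritten with a smaller value.
	 */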
	write_end_pos = pos + copied;

	i_size = i_size_read(&vnode->vfs_inode);
	if (write_end_pos > i_size) {
		write_seqlock(&vnode->cb_lock);
		i_size = i_size_read(&vnode->vfs_inode);
		if (write_end_pos > i_size)
			afs_set_i_size(vnode, write_end_pos);
		write_sequnlock(&vnode->cb_lock);
		fscache_update_cookie(afs_vnode_cache(vnode), NULL, &write_end_pos);
	}

	if (folio_test_private(folio)) {
		priv = (unsigned long)folio_get_private(folio);
		f = afs_folio_dirty_from(folio, priv);
		t = afs_folio_dirty_to(folio, priv);
		if (from < f)
			f = from;
		if (to > t)
			t = to;
		priv = afs_folio_dirty(folio, f, t);
		folio_change_private(folio, (void *)priv);
		trace_afs_folio_dirty(vnode, tracepoint_string("dirty+"), folio);
	} else {
		priv = afs_folio_dirty(folio, from, to);
		folio_attach_private(folio, (void *)priv);
		trace_afs_folio_dirty(vnode, tracepoint_string("dirty"), folio);
	}

	if (folio_mark_dirty(folio))
		_debug("dirtied %lx", folio_index(folio));

out:
	folio_unlock(folio);
	folio_put(folio);
	return copied;
}
/*
 * kill all the pages in the given range
 */
static void afs_kill_pages(struct address_space *mapping,
			   loff_t start, loff_t len)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct folio *folio;
	pgoff_t index = start / PAGE_SIZE;
	pgoff_t last = (start + len - 1) / PAGE_SIZE, next;

	_enter("{%llx:%llu},%llx @%llx",
	       vnode->fid.vid, vnode->fid.vnode, len, start);

	do {
		_debug("kill %lx (to %lx)", index, last);

		folio = filemap_get_folio(mapping, index);
		if (!folio) {
			next = index + 1;
			continue;
		}

		next = folio_next_index(folio);

		folio_clear_uptodate(folio);
		folio_end_writeback(folio);
		folio_lock(folio);
		generic_error_remove_page(mapping, &folio->page);
		folio_unlock(folio);
		folio_put(folio);
	} while (index = next, index <= last);

	_leave("");
}
/*
 * Redirty all the pages in a given range.
 */
static void afs_redirty_pages(struct writeback_control *wbc,
			      struct address_space *mapping,
			      loff_t start, loff_t len)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct folio *folio;
	pgoff_t index = start / PAGE_SIZE;
	pgoff_t last = (start + len - 1) / PAGE_SIZE, next;

	_enter("{%llx:%llu},%llx @%llx",
	       vnode->fid.vid, vnode->fid.vnode, len, start);

	do {
		_debug("redirty %llx @%llx", len, start);

		folio = filemap_get_folio(mapping, index);
		if (!folio) {
			next = index + 1;
			continue;
		}

		next = index + folio_nr_pages(folio);
		folio_redirty_for_writepage(wbc, folio);
		folio_end_writeback(folio);
		folio_put(folio);
	} while (index = next, index <= last);

	_leave("");
}
/*
 * completion of write to server
 */
static void afs_pages_written_back(struct afs_vnode *vnode, loff_t start, unsigned int len)
{
	struct address_space *mapping = vnode->vfs_inode.i_mapping;
	struct folio *folio;
	pgoff_t end;

	XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE);

	_enter("{%llx:%llu},{%x @%llx}",
	       vnode->fid.vid, vnode->fid.vnode, len, start);

	rcu_read_lock();

	end = (start + len - 1) / PAGE_SIZE;
	xas_for_each(&xas, folio, end) {
		if (!folio_test_writeback(folio)) {
			kdebug("bad %x @%llx page %lx %lx",
			       len, start, folio_index(folio), end);
			ASSERT(folio_test_writeback(folio));
		}

		trace_afs_folio_dirty(vnode, tracepoint_string("clear"), folio);
		folio_detach_private(folio);
		folio_end_writeback(folio);
	}

	rcu_read_unlock();

	afs_prune_wb_keys(vnode);
	_leave("");
}
/*
 * Find a key to use for the writeback.  We cached the keys used to author the
 * writes on the vnode.  *_wbk will contain the last writeback key used or NULL
 * and we need to start from there if it's set.
 */
static int afs_get_writeback_key(struct afs_vnode *vnode,
				 struct afs_wb_key **_wbk)
{
	struct afs_wb_key *wbk = NULL;
	struct list_head *p;
	int ret = -ENOKEY, ret2;

	spin_lock(&vnode->wb_lock);
	if (*_wbk)
		p = (*_wbk)->vnode_link.next;
	else
		p = vnode->wb_keys.next;

	while (p != &vnode->wb_keys) {
		wbk = list_entry(p, struct afs_wb_key, vnode_link);
		_debug("wbk %u", key_serial(wbk->key));
		ret2 = key_validate(wbk->key);
		if (ret2 == 0) {
			refcount_inc(&wbk->usage);
			_debug("USE WB KEY %u", key_serial(wbk->key));
			break;
		}

		wbk = NULL;
		if (ret == -ENOKEY)
			ret = ret2;
		p = p->next;
	}

	spin_unlock(&vnode->wb_lock);
	if (*_wbk)
		afs_put_wb_key(*_wbk);
	*_wbk = wbk;
	return wbk ? 0 : ret;
}
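/*
 * Completion handler for a store operation: fold the returned status into the
 * vnode and, unless this was a laundering write, clear the writeback state on
 * the pages that were stored.
 */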
static void afs_store_data_success(struct afs_operation *op)
{
	struct afs_vnode *vnode = op->file[0].vnode;

	op->ctime = op->file[0].scb.status.mtime_client;
	afs_vnode_commit_status(op, &op->file[0]);
	if (op->error == 0) {
		if (!op->store.laundering)
			afs_pages_written_back(vnode, op->store.pos, op->store.size);
		afs_stat_v(vnode, n_stores);
		atomic_long_add(op->store.size, &afs_v2net(vnode)->n_store_bytes);
	}
}
static const struct afs_operation_ops afs_store_data_operation = {
	.issue_afs_rpc	= afs_fs_store_data,
	.issue_yfs_rpc	= yfs_fs_store_data,
	.success	= afs_store_data_success,
};
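/*
 * Write the dirty data described by the iterator back to the server at the
 * given file position.  "laundering" indicates the call came from
 * afs_launder_folio(), in which case the folio state is handled by the caller
 * rather than by afs_pages_written_back().
 */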
static int afs_store_data(struct afs_vnode *vnode, struct iov_iter *iter, loff_t pos,
			  bool laundering)
{
	struct netfs_i_context *ictx = &vnode->netfs_ctx;
	struct afs_operation *op;
	struct afs_wb_key *wbk = NULL;
	loff_t size = iov_iter_count(iter);
	int ret = -ENOKEY;

	_enter("%s{%llx:%llu.%u},%llx,%llx",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       size, pos);

	ret = afs_get_writeback_key(vnode, &wbk);
	if (ret) {
		_leave(" = %d [no keys]", ret);
		return ret;
	}

	op = afs_alloc_operation(wbk->key, vnode->volume);
	if (IS_ERR(op)) {
		afs_put_wb_key(wbk);
		return -ENOMEM;
	}

	afs_op_set_vnode(op, 0, vnode);
	op->file[0].dv_delta = 1;
	op->file[0].modification = true;
	op->store.write_iter = iter;
	op->store.pos = pos;
	op->store.size = size;
	op->store.i_size = max(pos + size, ictx->remote_i_size);
	op->store.laundering = laundering;
	op->mtime = vnode->vfs_inode.i_mtime;
	op->flags |= AFS_OPERATION_UNINTR;
	op->ops = &afs_store_data_operation;

try_next_key:
	afs_begin_vnode_operation(op);
	afs_wait_for_operation(op);
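	/* If the server rejected the key we used, work through the vnode's
	 * remaining cached writeback keys and retry the store with the next
	 * one that validates.
	 */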
	switch (op->error) {
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		_debug("next");

		ret = afs_get_writeback_key(vnode, &wbk);
		if (ret == 0) {
			key_put(op->key);
			op->key = key_get(wbk->key);
			goto try_next_key;
		}
		break;
	}

	afs_put_wb_key(wbk);
	_leave(" = %d", op->error);
	return afs_put_operation(op);
}
/*
 * Extend the region to be written back to include subsequent contiguously
 * dirty pages if possible, but don't sleep while doing so.
 *
 * If this page holds new content, then we can include filler zeros in the
 * writeback.
 */
static void afs_extend_writeback(struct address_space *mapping,
				 struct afs_vnode *vnode,
				 long *_count,
				 loff_t start,
				 loff_t max_len,
				 bool new_content,
				 bool caching,
				 unsigned int *_len)
{
	struct pagevec pvec;
	struct folio *folio;
	unsigned long priv;
	unsigned int psize, filler = 0;
	unsigned int f, t;
	loff_t len = *_len;
	pgoff_t index = (start + len) / PAGE_SIZE;
	bool stop = true;
	unsigned int i;

	XA_STATE(xas, &mapping->i_pages, index);
	pagevec_init(&pvec);

	do {
		/* Firstly, we gather up a batch of contiguous dirty pages
		 * under the RCU read lock - but we can't clear the dirty flags
		 * there if any of those pages are mapped.
		 */
		rcu_read_lock();

		xas_for_each(&xas, folio, ULONG_MAX) {
			stop = true;
			if (xas_retry(&xas, folio))
				continue;
			if (xa_is_value(folio))
				break;
			if (folio_index(folio) != index)
				break;

			if (!folio_try_get_rcu(folio)) {
				xas_reset(&xas);
				continue;
			}

			/* Has the page moved or been split? */
			if (unlikely(folio != xas_reload(&xas))) {
				folio_put(folio);
				break;
			}

			if (!folio_trylock(folio)) {
				folio_put(folio);
				break;
			}
			if (!folio_test_dirty(folio) ||
			    folio_test_writeback(folio) ||
			    folio_test_fscache(folio)) {
				folio_unlock(folio);
				folio_put(folio);
				break;
			}

			psize = folio_size(folio);
			priv = (unsigned long)folio_get_private(folio);
			f = afs_folio_dirty_from(folio, priv);
			t = afs_folio_dirty_to(folio, priv);
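			/* A folio whose dirty region doesn't start at offset 0
			 * would leave a hole in the write, so it only extends
			 * the region when the file is being filled with new
			 * content and the gap can be sent as filler zeros.
			 */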
			if (f != 0 && !new_content) {
				folio_unlock(folio);
				folio_put(folio);
				break;
			}

			len += filler + t;
			filler = psize - t;
			if (len >= max_len || *_count <= 0)
				stop = true;
			else if (t == psize || new_content)
				stop = false;

			index += folio_nr_pages(folio);
			if (!pagevec_add(&pvec, &folio->page))
				break;
			if (stop)
				break;
		}

		if (!stop)
			xas_pause(&xas);
		rcu_read_unlock();

		/* Now, if we obtained any pages, we can shift them to being
		 * writable and mark them for caching.
		 */
		if (!pagevec_count(&pvec))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			folio = page_folio(pvec.pages[i]);
			trace_afs_folio_dirty(vnode, tracepoint_string("store+"), folio);

			if (!folio_clear_dirty_for_io(folio))
				BUG();
			if (folio_start_writeback(folio))
				BUG();
			afs_folio_start_fscache(caching, folio);

			*_count -= folio_nr_pages(folio);
			folio_unlock(folio);
		}

		pagevec_release(&pvec);
		cond_resched();
	} while (!stop);

	*_len = len;
}
/*
 * Synchronously write back the locked page and any subsequent non-locked dirty
 * pages.
 */
static ssize_t afs_write_back_from_locked_folio(struct address_space *mapping,
						struct writeback_control *wbc,
						struct folio *folio,
						loff_t start, loff_t end)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct iov_iter iter;
	unsigned long priv;
	unsigned int offset, to, len, max_len;
	loff_t i_size = i_size_read(&vnode->vfs_inode);
	bool new_content = test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
	bool caching = fscache_cookie_enabled(afs_vnode_cache(vnode));
	long count = wbc->nr_to_write;
	int ret;

	_enter(",%lx,%llx-%llx", folio_index(folio), start, end);

	if (folio_start_writeback(folio))
		BUG();
	afs_folio_start_fscache(caching, folio);

	count -= folio_nr_pages(folio);
	/* Find all consecutive lockable dirty pages that have contiguous
	 * written regions, stopping when we find a page that is not
	 * immediately lockable, is not dirty or is missing, or we reach the
	 * end of the range.
	 */
	priv = (unsigned long)folio_get_private(folio);
	offset = afs_folio_dirty_from(folio, priv);
	to = afs_folio_dirty_to(folio, priv);
	trace_afs_folio_dirty(vnode, tracepoint_string("store"), folio);

	len = to - offset;
	start += offset;
	if (start < i_size) {
		/* Trim the write to the EOF; the extra data is ignored.  Also
		 * put an upper limit on the size of a single storedata op.
		 */
		max_len = 65536 * 4096;
		max_len = min_t(unsigned long long, max_len, end - start + 1);
		max_len = min_t(unsigned long long, max_len, i_size - start);
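		/* Only try to pull more dirty folios into this write if the
		 * current one is dirty all the way to its end (or the file is
		 * newly created), since anything else would leave a hole.
		 */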
		if (len < max_len &&
		    (to == folio_size(folio) || new_content))
			afs_extend_writeback(mapping, vnode, &count,
					     start, max_len, new_content,
					     caching, &len);
		len = min_t(loff_t, len, max_len);
	}
	/* We now have a contiguous set of dirty pages, each with writeback
	 * set; the first page is still locked at this point, but all the rest
	 * have been unlocked.
	 */
	folio_unlock(folio);

	if (start < i_size) {
		_debug("write back %x @%llx [%llx]", len, start, i_size);

		/* Speculatively write to the cache.  We have to fix this up
		 * later if the store fails.
		 */
		afs_write_to_cache(vnode, start, len, i_size, caching);

		iov_iter_xarray(&iter, WRITE, &mapping->i_pages, start, len);
		ret = afs_store_data(vnode, &iter, start, false);
	} else {
		_debug("write discard %x @%llx [%llx]", len, start, i_size);

		/* The dirty region was entirely beyond the EOF. */
		fscache_clear_page_bits(mapping, start, len, caching);
		afs_pages_written_back(vnode, start, len);
		ret = 0;
	}

	switch (ret) {
	case 0:
		wbc->nr_to_write = count;
		ret = len;
		break;

	default:
		pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret);
		fallthrough;
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		afs_redirty_pages(wbc, mapping, start, len);
		mapping_set_error(mapping, ret);
		break;

	case -EDQUOT:
	case -ENOSPC:
		afs_redirty_pages(wbc, mapping, start, len);
		mapping_set_error(mapping, -ENOSPC);
		break;

	case -EROFS:
	case -EIO:
	case -EREMOTEIO:
	case -EFBIG:
	case -ENOENT:
	case -ENOMEDIUM:
	case -ENXIO:
		trace_afs_file_error(vnode, ret, afs_file_error_writeback_fail);
		afs_kill_pages(mapping, start, len);
		mapping_set_error(mapping, ret);
		break;
	}

	_leave(" = %d", ret);
	return ret;
}
/*
 * write a page back to the server
 * - the caller locked the page for us
 */
int afs_writepage(struct page *subpage, struct writeback_control *wbc)
{
	struct folio *folio = page_folio(subpage);
	ssize_t ret;
	loff_t start;

	_enter("{%lx},", folio_index(folio));

#ifdef CONFIG_AFS_FSCACHE
	folio_wait_fscache(folio);
#endif

	start = folio_index(folio) * PAGE_SIZE;
	ret = afs_write_back_from_locked_folio(folio_mapping(folio), wbc,
					       folio, start, LLONG_MAX - start);
	if (ret < 0) {
		_leave(" = %zd", ret);
		return ret;
	}

	_leave(" = 0");
	return 0;
}
/*
 * write a region of pages back to the server
 */
static int afs_writepages_region(struct address_space *mapping,
				 struct writeback_control *wbc,
				 loff_t start, loff_t end, loff_t *_next)
{
	struct folio *folio;
	struct page *head_page;
	ssize_t ret;
	int n, skips = 0;

	_enter("%llx,%llx,", start, end);

	do {
		pgoff_t index = start / PAGE_SIZE;

		n = find_get_pages_range_tag(mapping, &index, end / PAGE_SIZE,
					     PAGECACHE_TAG_DIRTY, 1, &head_page);
		if (!n)
			break;
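		/* Only one dirty page is looked up at a time here;
		 * afs_write_back_from_locked_folio() then extends the write to
		 * cover any contiguously dirty pages that follow it.
		 */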
		folio = page_folio(head_page);
		start = folio_pos(folio); /* May regress with THPs */

		_debug("wback %lx", folio_index(folio));

		/* At this point we hold neither the i_pages lock nor the
		 * page lock: the page may be truncated or invalidated
		 * (changing page->mapping to NULL), or even swizzled
		 * back from swapper_space to tmpfs file mapping
		 */
		if (wbc->sync_mode != WB_SYNC_NONE) {
			ret = folio_lock_killable(folio);
			if (ret < 0) {
				folio_put(folio);
				return ret;
			}
		} else {
			if (!folio_trylock(folio)) {
				folio_put(folio);
				return 0;
			}
		}

		if (folio_mapping(folio) != mapping ||
		    !folio_test_dirty(folio)) {
			start += folio_size(folio);
			folio_unlock(folio);
			folio_put(folio);
			continue;
		}

		if (folio_test_writeback(folio) ||
		    folio_test_fscache(folio)) {
			folio_unlock(folio);
			if (wbc->sync_mode != WB_SYNC_NONE) {
				folio_wait_writeback(folio);
#ifdef CONFIG_AFS_FSCACHE
				folio_wait_fscache(folio);
#endif
			} else {
				start += folio_size(folio);
			}
			folio_put(folio);
			if (wbc->sync_mode == WB_SYNC_NONE) {
				if (skips >= 5 || need_resched())
					break;
				skips++;
			}
			continue;
		}

		if (!folio_clear_dirty_for_io(folio))
			BUG();
		ret = afs_write_back_from_locked_folio(mapping, wbc, folio, start, end);
		folio_put(folio);
		if (ret < 0) {
			_leave(" = %zd", ret);
			return ret;
		}

		start += ret;

		cond_resched();
	} while (wbc->nr_to_write > 0);

	*_next = start;
	_leave(" = 0 [%llx]", *_next);
	return 0;
}
/*
 * write some of the pending data back to the server
 */
int afs_writepages(struct address_space *mapping,
		   struct writeback_control *wbc)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	loff_t start, next;
	int ret;

	_enter("");

	/* We have to be careful as we can end up racing with setattr()
	 * truncating the pagecache since the caller doesn't take a lock here
	 * to prevent it.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL)
		down_read(&vnode->validate_lock);
	else if (!down_read_trylock(&vnode->validate_lock))
		return 0;
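	/* For cyclic writeback we resume from where the last pass stopped
	 * (mapping->writeback_index) and, if there is still quota left in
	 * nr_to_write, wrap around to sweep the start of the file as well.
	 */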
	if (wbc->range_cyclic) {
		start = mapping->writeback_index * PAGE_SIZE;
		ret = afs_writepages_region(mapping, wbc, start, LLONG_MAX, &next);
		if (ret == 0) {
			mapping->writeback_index = next / PAGE_SIZE;
			if (start > 0 && wbc->nr_to_write > 0) {
				ret = afs_writepages_region(mapping, wbc, 0,
							    start, &next);
				if (ret == 0)
					mapping->writeback_index =
						next / PAGE_SIZE;
			}
		}
	} else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
		ret = afs_writepages_region(mapping, wbc, 0, LLONG_MAX, &next);
		if (wbc->nr_to_write > 0 && ret == 0)
			mapping->writeback_index = next / PAGE_SIZE;
	} else {
		ret = afs_writepages_region(mapping, wbc,
					    wbc->range_start, wbc->range_end, &next);
	}

	up_read(&vnode->validate_lock);
	_leave(" = %d", ret);
	return ret;
}
/*
 * write to an AFS file
 */
ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
	struct afs_file *af = iocb->ki_filp->private_data;
	ssize_t result;
	size_t count = iov_iter_count(from);

	_enter("{%llx:%llu},{%zu},",
	       vnode->fid.vid, vnode->fid.vnode, count);

	if (IS_SWAPFILE(&vnode->vfs_inode)) {
		printk(KERN_INFO
		       "AFS: Attempt to write to active swap file!\n");
		return -EBUSY;
	}

	if (!count)
		return 0;

	result = afs_validate(vnode, af->key);
	if (result < 0)
		return result;

	result = generic_file_write_iter(iocb, from);

	_leave(" = %zd", result);
	return result;
}
/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct afs_file *af = file->private_data;
	int ret;

	_enter("{%llx:%llu},{n=%pD},%d",
	       vnode->fid.vid, vnode->fid.vnode, file,
	       datasync);

	ret = afs_validate(vnode, af->key);
	if (ret < 0)
		return ret;

	return file_write_and_wait_range(file, start, end);
}
/*
 * notification that a previously read-only page is about to become writable
 * - if it returns an error, the caller will deliver a bus error signal
 */
vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
{
	struct folio *folio = page_folio(vmf->page);
	struct file *file = vmf->vma->vm_file;
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);
	struct afs_file *af = file->private_data;
	unsigned long priv;
	vm_fault_t ret = VM_FAULT_RETRY;

	_enter("{{%llx:%llu}},{%lx}", vnode->fid.vid, vnode->fid.vnode, folio_index(folio));

	afs_validate(vnode, af->key);

	sb_start_pagefault(inode->i_sb);

	/* Wait for the page to be written to the cache before we allow it to
	 * be modified.  We then assume the entire page will need writing back.
	 */
#ifdef CONFIG_AFS_FSCACHE
	if (folio_test_fscache(folio) &&
	    folio_wait_fscache_killable(folio) < 0)
		goto out;
#endif

	if (folio_wait_writeback_killable(folio))
		goto out;

	if (folio_lock_killable(folio) < 0)
		goto out;
	/* We mustn't change folio->private until writeback is complete as that
	 * details the portion of the page we need to write back and we might
	 * need to redirty the page if there's a problem.
	 */
	if (folio_wait_writeback_killable(folio) < 0) {
		folio_unlock(folio);
		goto out;
	}

	priv = afs_folio_dirty(folio, 0, folio_size(folio));
	priv = afs_folio_dirty_mmapped(priv);
	if (folio_test_private(folio)) {
		folio_change_private(folio, (void *)priv);
		trace_afs_folio_dirty(vnode, tracepoint_string("mkwrite+"), folio);
	} else {
		folio_attach_private(folio, (void *)priv);
		trace_afs_folio_dirty(vnode, tracepoint_string("mkwrite"), folio);
	}
	file_update_time(file);

	ret = VM_FAULT_LOCKED;
out:
	sb_end_pagefault(inode->i_sb);
	return ret;
}
/*
 * Prune the keys cached for writeback.  The wb_lock is taken here, so the
 * caller must not already hold it.
 */
void afs_prune_wb_keys(struct afs_vnode *vnode)
{
	LIST_HEAD(graveyard);
	struct afs_wb_key *wbk, *tmp;

	/* Discard unused keys */
	spin_lock(&vnode->wb_lock);

	if (!mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
	    !mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_DIRTY)) {
		list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
			if (refcount_read(&wbk->usage) == 1)
				list_move(&wbk->vnode_link, &graveyard);
		}
	}

	spin_unlock(&vnode->wb_lock);

	while (!list_empty(&graveyard)) {
		wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link);
		list_del(&wbk->vnode_link);
		afs_put_wb_key(wbk);
	}
}
/*
 * Clean up a page during invalidation.
 */
int afs_launder_folio(struct folio *folio)
{
	struct afs_vnode *vnode = AFS_FS_I(folio_inode(folio));
	struct iov_iter iter;
	struct bio_vec bv[1];
	unsigned long priv;
	unsigned int f, t;
	int ret = 0;

	_enter("{%lx}", folio->index);

	priv = (unsigned long)folio_get_private(folio);
	if (folio_clear_dirty_for_io(folio)) {
		f = 0;
		t = folio_size(folio);
		if (folio_test_private(folio)) {
			f = afs_folio_dirty_from(folio, priv);
			t = afs_folio_dirty_to(folio, priv);
		}

		bv[0].bv_page = &folio->page;
		bv[0].bv_offset = f;
		bv[0].bv_len = t - f;
		iov_iter_bvec(&iter, WRITE, bv, 1, bv[0].bv_len);
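		/* The dirty subrange is written back synchronously here with a
		 * single bio_vec iterator, since the folio is about to be
		 * invalidated and can't wait for ordinary writeback.
		 */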
		trace_afs_folio_dirty(vnode, tracepoint_string("launder"), folio);
		ret = afs_store_data(vnode, &iter, folio_pos(folio) + f, true);
	}

	trace_afs_folio_dirty(vnode, tracepoint_string("laundered"), folio);
	folio_detach_private(folio);
	folio_wait_fscache(folio);
	return ret;
}
/*
 * Deal with the completion of writing the data to the cache.
 */
static void afs_write_to_cache_done(void *priv, ssize_t transferred_or_error,
				    bool was_async)
{
	struct afs_vnode *vnode = priv;
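	/* -ENOBUFS just means the cache declined the write; any other error
	 * leaves the cached copy suspect, so it is invalidated.
	 */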
	if (IS_ERR_VALUE(transferred_or_error) &&
	    transferred_or_error != -ENOBUFS)
		afs_invalidate_cache(vnode, 0);
}
/*
 * Save the write to the cache also.
 */
static void afs_write_to_cache(struct afs_vnode *vnode,
			       loff_t start, size_t len, loff_t i_size,
			       bool caching)
{
	fscache_write_to_cache(afs_vnode_cache(vnode),
			       vnode->vfs_inode.i_mapping, start, len, i_size,
			       afs_write_to_cache_done, vnode, caching);
}