// SPDX-License-Identifier: GPL-2.0-only
/* Miscellaneous routines.
 *
 * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
 */

#include <linux/swap.h>
#include "internal.h"
/**
 * netfs_dirty_folio - Mark folio dirty and pin a cache object for writeback
 * @mapping: The mapping the folio belongs to.
 * @folio: The folio being dirtied.
 *
 * Set the dirty flag on a folio and pin an in-use cache object in memory so
 * that writeback can later write to it. This is intended to be called from
 * the filesystem's ->dirty_folio() method.
 *
 * Return: true if the dirty flag was set on the folio, false otherwise.
 */
bool netfs_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	struct inode *inode = mapping->host;
	struct netfs_inode *ictx = netfs_inode(inode);
	struct fscache_cookie *cookie = netfs_i_cookie(ictx);
	bool need_use = false;

	if (!filemap_dirty_folio(mapping, folio))
		return false;
	if (!fscache_cookie_valid(cookie))
		return true;

	if (!(inode->i_state & I_PINNING_NETFS_WB)) {
		spin_lock(&inode->i_lock);
		if (!(inode->i_state & I_PINNING_NETFS_WB)) {
			inode->i_state |= I_PINNING_NETFS_WB;
			need_use = true;
		}
		spin_unlock(&inode->i_lock);

		if (need_use)
			fscache_use_cookie(cookie, true);
	}
	return true;
}
EXPORT_SYMBOL(netfs_dirty_folio);
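
/*
 * Illustrative sketch (assumption, not part of this file): a filesystem that
 * caches through fscache would typically point its ->dirty_folio() method
 * straight at the helper above; "myfs" is a hypothetical example.
 *
 *	const struct address_space_operations myfs_aops = {
 *		.dirty_folio	= netfs_dirty_folio,
 *		...
 *	};
 */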

/**
 * netfs_unpin_writeback - Unpin writeback resources
 * @inode: The inode on which the cookie resides
 * @wbc: The writeback control
 *
 * Unpin the writeback resources pinned by netfs_dirty_folio(). This is
 * intended to be called as/by the netfs's ->write_inode() method.
 *
 * Return: 0 in all cases.
 */
int netfs_unpin_writeback(struct inode *inode, struct writeback_control *wbc)
{
	struct fscache_cookie *cookie = netfs_i_cookie(netfs_inode(inode));

	if (wbc->unpinned_netfs_wb)
		fscache_unuse_cookie(cookie, NULL, NULL);
	return 0;
}
EXPORT_SYMBOL(netfs_unpin_writeback);
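
/*
 * Illustrative sketch (assumption): since netfs_unpin_writeback() matches the
 * ->write_inode() prototype, a filesystem can either use it directly as that
 * method or call it from its own implementation; "myfs" is hypothetical.
 *
 *	static int myfs_write_inode(struct inode *inode,
 *				    struct writeback_control *wbc)
 *	{
 *		// ... filesystem-specific metadata writeback ...
 *		return netfs_unpin_writeback(inode, wbc);
 *	}
 */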

/**
 * netfs_clear_inode_writeback - Clear writeback resources pinned by an inode
 * @inode: The inode to clean up
 * @aux: Auxiliary data to apply to the inode
 *
 * Clear any writeback resources held by an inode when the inode is evicted.
 * This must be called before clear_inode() is called.
 */
void netfs_clear_inode_writeback(struct inode *inode, const void *aux)
{
	struct fscache_cookie *cookie = netfs_i_cookie(netfs_inode(inode));

	if (inode->i_state & I_PINNING_NETFS_WB) {
		loff_t i_size = i_size_read(inode);
		fscache_unuse_cookie(cookie, aux, &i_size);
	}
}
EXPORT_SYMBOL(netfs_clear_inode_writeback);
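
/*
 * Illustrative sketch (assumption): a filesystem would call this from its
 * ->evict_inode() method, ahead of clear_inode(), passing whatever auxiliary
 * data its cache coherency scheme uses (NULL if none).  "myfs" is
 * hypothetical and the surrounding steps are only indicative.
 *
 *	static void myfs_evict_inode(struct inode *inode)
 *	{
 *		truncate_inode_pages_final(&inode->i_data);
 *		netfs_clear_inode_writeback(inode, NULL);
 *		clear_inode(inode);
 *		// ... release filesystem-private state ...
 *	}
 */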

/**
 * netfs_invalidate_folio - Invalidate or partially invalidate a folio
 * @folio: The folio being invalidated
 * @offset: Offset of the invalidated region
 * @length: Length of the invalidated region
 *
 * Invalidate part or all of a folio for a network filesystem. The folio will
 * be removed afterwards if the invalidated region covers the entire folio.
 */
void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length)
{
	struct netfs_folio *finfo;
	struct netfs_inode *ctx = netfs_inode(folio_inode(folio));
	size_t flen = folio_size(folio);

	_enter("{%lx},%zx,%zx", folio->index, offset, length);

	if (offset == 0 && length == flen) {
		unsigned long long i_size = i_size_read(&ctx->inode);
		unsigned long long fpos = folio_pos(folio), end;

		end = umin(fpos + flen, i_size);
		if (fpos < i_size && end > ctx->zero_point)
			ctx->zero_point = end;
	}

	folio_wait_private_2(folio); /* [DEPRECATED] */

	if (!folio_test_private(folio))
		return;

	finfo = netfs_folio_info(folio);

	if (offset == 0 && length >= flen)
		goto erase_completely;

	if (finfo) {
		/* We have a partially uptodate page from a streaming write. */
		unsigned int fstart = finfo->dirty_offset;
		unsigned int fend = fstart + finfo->dirty_len;
		unsigned int iend = offset + length;

		if (offset >= fend)
			return;
		if (iend <= fstart)
			return;

		/* The invalidation region overlaps the data. If the region
		 * covers the start of the data, we either move along the start
		 * or just erase the data entirely.
		 */
		if (offset <= fstart) {
			if (iend >= fend)
				goto erase_completely;
			/* Move the start of the data. */
			finfo->dirty_len = fend - iend;
			finfo->dirty_offset = offset;
			return;
		}

		/* Reduce the length of the data if the invalidation region
		 * covers the tail part.
		 */
		if (iend >= fend) {
			finfo->dirty_len = offset - fstart;
			return;
		}

		/* A partial write was split. The caller has already zeroed
		 * it, so just absorb the hole.
		 */
	}
	return;

erase_completely:
	netfs_put_group(netfs_folio_group(folio));
	folio_detach_private(folio);
	folio_clear_uptodate(folio);
	kfree(finfo);
	return;
}
EXPORT_SYMBOL(netfs_invalidate_folio);
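
/*
 * Worked example (illustrative; the values are hypothetical): take a
 * 4096-byte folio carrying a streaming write with dirty_offset = 1024 and
 * dirty_len = 2048, i.e. dirty bytes [1024, 3072):
 *
 *  - invalidating [0, 512)     misses the dirty data; nothing is adjusted.
 *  - invalidating [0, 2048)    clips the head; dirty_len becomes 1024.
 *  - invalidating [2048, 4096) clips the tail; dirty_len becomes 1024.
 *  - invalidating [0, 4096)    covers the whole folio; the private state is
 *                              erased and the folio loses its uptodate flag.
 */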

/**
 * netfs_release_folio - Try to release a folio
 * @folio: Folio proposed for release
 * @gfp: Flags qualifying the release
 *
 * Request release of a folio and clean up its private state if it's not busy.
 *
 * Return: true if the folio can now be released, false if not.
 */
bool netfs_release_folio(struct folio *folio, gfp_t gfp)
{
	struct netfs_inode *ctx = netfs_inode(folio_inode(folio));
	unsigned long long end;

	if (folio_test_dirty(folio))
		return false;

	end = umin(folio_pos(folio) + folio_size(folio), i_size_read(&ctx->inode));
	if (end > ctx->zero_point)
		ctx->zero_point = end;

	if (folio_test_private(folio))
		return false;
	if (unlikely(folio_test_private_2(folio))) { /* [DEPRECATED] */
		if (current_is_kswapd() || !(gfp & __GFP_FS))
			return false;
		folio_wait_private_2(folio);
	}
	fscache_note_page_release(netfs_i_cookie(ctx));
	return true;
}
EXPORT_SYMBOL(netfs_release_folio);
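
/*
 * Illustrative sketch (assumption): netfs_invalidate_folio() and
 * netfs_release_folio() are meant to back the matching
 * address_space_operations hooks, alongside the ->dirty_folio() wiring
 * sketched after netfs_dirty_folio() above; "myfs" is hypothetical.
 *
 *	const struct address_space_operations myfs_aops = {
 *		.invalidate_folio	= netfs_invalidate_folio,
 *		.release_folio		= netfs_release_folio,
 *		...
 *	};
 */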