// SPDX-License-Identifier: GPL-2.0-only
/* Miscellaneous routines.
 *
 * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/swap.h>
#include "internal.h"

/**
 * netfs_alloc_folioq_buffer - Allocate buffer space into a folio queue
 * @mapping: Address space to set on the folio (or NULL).
 * @_buffer: Pointer to the folio queue to add to (may point to a NULL; updated).
 * @_cur_size: Current size of the buffer (updated).
 * @size: Target size of the buffer.
 * @gfp: The allocation constraints.
 *
 * Return: 0 on success or -ENOMEM on allocation failure.
 */
int netfs_alloc_folioq_buffer(struct address_space *mapping,
			      struct folio_queue **_buffer,
			      size_t *_cur_size, ssize_t size, gfp_t gfp)
{
	struct folio_queue *tail = *_buffer, *p;

	size = round_up(size, PAGE_SIZE);
	if (*_cur_size >= size)
		return 0;

	if (tail)
		while (tail->next)
			tail = tail->next;

	do {
		struct folio *folio;
		int order = 0, slot;

		if (!tail || folioq_full(tail)) {
			p = netfs_folioq_alloc(0, GFP_NOFS, netfs_trace_folioq_alloc_buffer);
			if (!p)
				return -ENOMEM;
			if (tail) {
				tail->next = p;
				p->prev = tail;
			} else {
				*_buffer = p;
			}
			tail = p;
		}

		if (size - *_cur_size > PAGE_SIZE)
			order = umin(ilog2(size - *_cur_size) - PAGE_SHIFT,
				     MAX_PAGECACHE_ORDER);

		folio = folio_alloc(gfp, order);
		if (!folio && order > 0)
			folio = folio_alloc(gfp, 0);
		if (!folio)
			return -ENOMEM;

		folio->mapping = mapping;
		folio->index = *_cur_size / PAGE_SIZE;
		trace_netfs_folio(folio, netfs_folio_trace_alloc_buffer);
		slot = folioq_append_mark(tail, folio);
		*_cur_size += folioq_folio_size(tail, slot);
	} while (*_cur_size < size);

	return 0;
}
EXPORT_SYMBOL(netfs_alloc_folioq_buffer);

/**
 * netfs_free_folioq_buffer - Free a folio queue.
 * @fq: The start of the folio queue to free
 *
 * Free up a chain of folio_queues and, if marked, the marked folios they point
 * to.
 */
void netfs_free_folioq_buffer(struct folio_queue *fq)
{
	struct folio_queue *next;
	struct folio_batch fbatch;

	folio_batch_init(&fbatch);

	for (; fq; fq = next) {
		for (int slot = 0; slot < folioq_count(fq); slot++) {
			struct folio *folio = folioq_folio(fq, slot);

			if (!folio ||
			    !folioq_is_marked(fq, slot))
				continue;

			trace_netfs_folio(folio, netfs_folio_trace_put);
			if (folio_batch_add(&fbatch, folio))
				folio_batch_release(&fbatch);
		}

		netfs_stat_d(&netfs_n_folioq);
		next = fq->next;
		kfree(fq);
	}

	folio_batch_release(&fbatch);
}
EXPORT_SYMBOL(netfs_free_folioq_buffer);

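/*
 * Example (illustrative sketch, not part of the exported API): a caller that
 * needs a scratch buffer of at least "need" bytes might pair the two helpers
 * above like this; the variable names are hypothetical.
 *
 *	struct folio_queue *buffer = NULL;
 *	size_t cur_size = 0;
 *	int ret;
 *
 *	ret = netfs_alloc_folioq_buffer(NULL, &buffer, &cur_size, need, GFP_NOFS);
 *	if (ret == 0) {
 *		... use the buffer ...
 *	}
 *	netfs_free_folioq_buffer(buffer);
 */
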
/*
 * Reset the subrequest iterator to refer just to the region remaining to be
 * read.  The iterator may or may not have been advanced by socket ops or
 * extraction ops to an extent that may or may not match the amount actually
 * read.
 */
void netfs_reset_iter(struct netfs_io_subrequest *subreq)
{
	struct iov_iter *io_iter = &subreq->io_iter;
	size_t remain = subreq->len - subreq->transferred;

	if (io_iter->count > remain)
		iov_iter_advance(io_iter, io_iter->count - remain);
	else if (io_iter->count < remain)
		iov_iter_revert(io_iter, remain - io_iter->count);
	iov_iter_truncate(&subreq->io_iter, remain);
}

/**
 * netfs_dirty_folio - Mark folio dirty and pin a cache object for writeback
 * @mapping: The mapping the folio belongs to.
 * @folio: The folio being dirtied.
 *
 * Set the dirty flag on a folio and pin an in-use cache object in memory so
 * that writeback can later write to it. This is intended to be called from
 * the filesystem's ->dirty_folio() method.
 *
 * Return: true if the dirty flag was set on the folio, false otherwise.
 */
bool netfs_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	struct inode *inode = mapping->host;
	struct netfs_inode *ictx = netfs_inode(inode);
	struct fscache_cookie *cookie = netfs_i_cookie(ictx);
	bool need_use = false;

	_enter("");

	if (!filemap_dirty_folio(mapping, folio))
		return false;
	if (!fscache_cookie_valid(cookie))
		return true;

	if (!(inode->i_state & I_PINNING_NETFS_WB)) {
		spin_lock(&inode->i_lock);
		if (!(inode->i_state & I_PINNING_NETFS_WB)) {
			inode->i_state |= I_PINNING_NETFS_WB;
			need_use = true;
		}
		spin_unlock(&inode->i_lock);

		if (need_use)
			fscache_use_cookie(cookie, true);
	}
	return true;
}
EXPORT_SYMBOL(netfs_dirty_folio);

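/*
 * Example (illustrative sketch): a netfs-based filesystem would normally plug
 * this helper straight into its address_space_operations; "myfs_aops" is a
 * hypothetical name.
 *
 *	const struct address_space_operations myfs_aops = {
 *		.dirty_folio	= netfs_dirty_folio,
 *		...
 *	};
 */
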
/**
 * netfs_unpin_writeback - Unpin writeback resources
 * @inode: The inode on which the cookie resides
 * @wbc: The writeback control
 *
 * Unpin the writeback resources pinned by netfs_dirty_folio(). This is
 * intended to be called as/by the netfs's ->write_inode() method.
 */
int netfs_unpin_writeback(struct inode *inode, struct writeback_control *wbc)
{
	struct fscache_cookie *cookie = netfs_i_cookie(netfs_inode(inode));

	if (wbc->unpinned_netfs_wb)
		fscache_unuse_cookie(cookie, NULL, NULL);
	return 0;
}
EXPORT_SYMBOL(netfs_unpin_writeback);

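/*
 * Example (illustrative sketch): a minimal ->write_inode() whose only job is
 * to drop the pin taken by netfs_dirty_folio(); "myfs_write_inode" is a
 * hypothetical name.
 *
 *	static int myfs_write_inode(struct inode *inode,
 *				    struct writeback_control *wbc)
 *	{
 *		return netfs_unpin_writeback(inode, wbc);
 *	}
 */
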
/**
 * netfs_clear_inode_writeback - Clear writeback resources pinned by an inode
 * @inode: The inode to clean up
 * @aux: Auxiliary data to apply to the inode
 *
 * Clear any writeback resources held by an inode when the inode is evicted.
 * This must be called before clear_inode() is called.
 */
void netfs_clear_inode_writeback(struct inode *inode, const void *aux)
{
	struct fscache_cookie *cookie = netfs_i_cookie(netfs_inode(inode));

	if (inode->i_state & I_PINNING_NETFS_WB) {
		loff_t i_size = i_size_read(inode);
		fscache_unuse_cookie(cookie, aux, &i_size);
	}
}
EXPORT_SYMBOL(netfs_clear_inode_writeback);

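/*
 * Example (illustrative sketch): the ordering this imposes on a hypothetical
 * ->evict_inode() implementation; the aux pointer (NULL here) would carry
 * whatever coherency data the filesystem stores with its cookie.
 *
 *	static void myfs_evict_inode(struct inode *inode)
 *	{
 *		truncate_inode_pages_final(&inode->i_data);
 *		netfs_clear_inode_writeback(inode, NULL);
 *		clear_inode(inode);
 *	}
 */
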
/**
 * netfs_invalidate_folio - Invalidate or partially invalidate a folio
 * @folio: Folio proposed for release
 * @offset: Offset of the invalidated region
 * @length: Length of the invalidated region
 *
 * Invalidate part or all of a folio for a network filesystem. The folio will
 * be removed afterwards if the invalidated region covers the entire folio.
 */
void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length)
{
	struct netfs_folio *finfo;
	struct netfs_inode *ctx = netfs_inode(folio_inode(folio));
	size_t flen = folio_size(folio);

	_enter("{%lx},%zx,%zx", folio->index, offset, length);

	if (offset == 0 && length == flen) {
		unsigned long long i_size = i_size_read(&ctx->inode);
		unsigned long long fpos = folio_pos(folio), end;

		end = umin(fpos + flen, i_size);
		if (fpos < i_size && end > ctx->zero_point)
			ctx->zero_point = end;
	}

	folio_wait_private_2(folio); /* [DEPRECATED] */

	if (!folio_test_private(folio))
		return;

	finfo = netfs_folio_info(folio);

	if (offset == 0 && length >= flen)
		goto erase_completely;

	if (finfo) {
		/* We have a partially uptodate page from a streaming write. */
		unsigned int fstart = finfo->dirty_offset;
		unsigned int fend = fstart + finfo->dirty_len;
		unsigned int iend = offset + length;

		if (offset >= fend)
			return;
		if (iend <= fstart)
			return;

		/* The invalidation region overlaps the data. If the region
		 * covers the start of the data, we either move along the start
		 * or just erase the data entirely.
		 */
		if (offset <= fstart) {
			if (iend >= fend)
				goto erase_completely;
			/* Move the start of the data. */
			finfo->dirty_len = fend - iend;
			finfo->dirty_offset = offset;
			return;
		}

		/* Reduce the length of the data if the invalidation region
		 * covers the tail part.
		 */
		if (iend >= fend) {
			finfo->dirty_len = offset - fstart;
			return;
		}

		/* A partial write was split. The caller has already zeroed
		 * it, so just absorb the hole.
		 */
	}
	return;

erase_completely:
	netfs_put_group(netfs_folio_group(folio));
	folio_detach_private(folio);
	folio_clear_uptodate(folio);
	kfree(finfo);
}
EXPORT_SYMBOL(netfs_invalidate_folio);

/**
 * netfs_release_folio - Try to release a folio
 * @folio: Folio proposed for release
 * @gfp: Flags qualifying the release
 *
 * Request release of a folio and clean up its private state if it's not busy.
 * Returns true if the folio can now be released, false if not.
 */
bool netfs_release_folio(struct folio *folio, gfp_t gfp)
{
	struct netfs_inode *ctx = netfs_inode(folio_inode(folio));
	unsigned long long end;

	if (folio_test_dirty(folio))
		return false;

	end = umin(folio_pos(folio) + folio_size(folio), i_size_read(&ctx->inode));
	if (end > ctx->zero_point)
		ctx->zero_point = end;

	if (folio_test_private(folio))
		return false;
	if (unlikely(folio_test_private_2(folio))) { /* [DEPRECATED] */
		if (current_is_kswapd() || !(gfp & __GFP_FS))
			return false;
		folio_wait_private_2(folio);
	}
	fscache_note_page_release(netfs_i_cookie(ctx));
	return true;
}
EXPORT_SYMBOL(netfs_release_folio);

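/*
 * Example (illustrative sketch): like netfs_dirty_folio(), the two helpers
 * above are meant to be wired directly into the filesystem's
 * address_space_operations; "myfs_aops" is a hypothetical name.
 *
 *	const struct address_space_operations myfs_aops = {
 *		...
 *		.invalidate_folio	= netfs_invalidate_folio,
 *		.release_folio		= netfs_release_folio,
 *	};
 */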