// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/readahead.c - address_space-level file readahead.
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 09Apr2002	Andrew Morton
 *		Initial version.
 */

/**
 * DOC: Readahead Overview
 *
 * Readahead is used to read content into the page cache before it is
 * explicitly requested by the application.  Readahead only ever
 * attempts to read folios that are not yet in the page cache.  If a
 * folio is present but not up-to-date, readahead will not try to read
 * it.  In that case a simple ->read_folio() will be requested.
 *
 * Readahead is triggered when an application read request (whether a
 * system call or a page fault) finds that the requested folio is not in
 * the page cache, or that it is in the page cache and has the
 * readahead flag set.  This flag indicates that the folio was read
 * as part of a previous readahead request and now that it has been
 * accessed, it is time for the next readahead.
 *
 * Each readahead request is partly a synchronous read, and partly async
 * readahead.  This is reflected in the struct file_ra_state which
 * contains ->size being the total number of pages, and ->async_size
 * which is the number of pages in the async section.  The readahead
 * flag will be set on the first folio in this async section to trigger
 * a subsequent readahead.  Once a series of sequential reads has been
 * established, there should be no need for a synchronous component and
 * all readahead requests will be fully asynchronous.
 *
 * When either of the triggers causes a readahead, three numbers need
 * to be determined: the start of the region to read, the size of the
 * region, and the size of the async tail.
 *
 * The start of the region is simply the first page address at or after
 * the accessed address which is not currently populated in the page
 * cache.  This is found with a simple search in the page cache.
 *
 * The size of the async tail is determined by subtracting the size that
 * was explicitly requested from the determined request size, unless
 * this would be less than zero - then zero is used.  NOTE THIS
 * CALCULATION IS WRONG WHEN THE START OF THE REGION IS NOT THE ACCESSED
 * PAGE.  ALSO THIS CALCULATION IS NOT USED CONSISTENTLY.
 *
 * The size of the region is normally determined from the size of the
 * previous readahead which loaded the preceding pages.  This may be
 * discovered from the struct file_ra_state for simple sequential reads,
 * or from examining the state of the page cache when multiple
 * sequential reads are interleaved.  Specifically: where the readahead
 * was triggered by the readahead flag, the size of the previous
 * readahead is assumed to be the number of pages from the triggering
 * page to the start of the new readahead.  In these cases, the size of
 * the previous readahead is scaled, often doubled, for the new
 * readahead, though see get_next_ra_size() for details.
 *
 * If the size of the previous read cannot be determined, the number of
 * preceding pages in the page cache is used to estimate the size of
 * a previous read.  This estimate could easily be misled by random
 * reads being coincidentally adjacent, so it is ignored unless it is
 * larger than the current request, and it is not scaled up, unless it
 * is at the start of the file.
 *
 * In general readahead is accelerated at the start of the file, as
 * reads from there are often sequential.  There are other minor
 * adjustments to the readahead size in various special cases and these
 * are best discovered by reading the code.
 *
 * The above calculation, based on the previous readahead size,
 * determines the size of the readahead, to which any requested read
 * size may be added.
 *
 * Readahead requests are sent to the filesystem using the ->readahead()
 * address space operation, for which mpage_readahead() is a canonical
 * implementation.  ->readahead() should normally initiate reads on all
 * folios, but may fail to read any or all folios without causing an I/O
 * error.  The page cache reading code will issue a ->read_folio() request
 * for any folio which ->readahead() did not read, and only an error
 * from this will be final.
 *
 * ->readahead() will generally call readahead_folio() repeatedly to get
 * each folio from those prepared for readahead.  It may fail to read a
 * folio by:
 *
 * * not calling readahead_folio() sufficiently many times, effectively
 *   ignoring some folios, as might be appropriate if the path to
 *   storage is congested.
 *
 * * failing to actually submit a read request for a given folio,
 *   possibly due to insufficient resources, or
 *
 * * getting an error during subsequent processing of a request.
 *
 * In the last two cases, the folio should be unlocked by the filesystem
 * to indicate that the read attempt has failed.  In the first case the
 * folio will be unlocked by the VFS.
 *
 * Those folios not in the final ``async_size`` of the request should be
 * considered to be important and ->readahead() should not fail them due
 * to congestion or temporary resource unavailability, but should wait
 * for necessary resources (e.g. memory or indexing information) to
 * become available.  Folios in the final ``async_size`` may be
 * considered less urgent and failure to read them is more acceptable.
 * In this case it is best to use filemap_remove_folio() to remove the
 * folios from the page cache as is automatically done for folios that
 * were not fetched with readahead_folio().  This will allow a
 * subsequent synchronous readahead request to try them again.  If they
 * are left in the page cache, then they will be read individually using
 * ->read_folio() which may be less efficient.
 */
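
/*
 * Illustrative sketch only (not used by this file and not any specific
 * filesystem's implementation): a minimal ->readahead() written in the
 * style described above.  example_submit_read() is a hypothetical
 * per-filesystem helper that starts I/O on one locked folio and, on
 * completion, marks the folio uptodate and unlocks it.
 *
 *	static void example_readahead(struct readahead_control *rac)
 *	{
 *		struct folio *folio;
 *
 *		while ((folio = readahead_folio(rac)) != NULL) {
 *			if (example_submit_read(rac->file, folio) < 0)
 *				folio_unlock(folio);	// failed: a later ->read_folio() retries
 *		}
 *	}
 *
 * Any folio the loop never fetches with readahead_folio() is removed from
 * the page cache and unlocked by read_pages() below.
 */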

#include <linux/blkdev.h>
#include <linux/kernel.h>
#include <linux/dax.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/pagemap.h>
#include <linux/psi.h>
#include <linux/syscalls.h>
#include <linux/file.h>
#include <linux/mm_inline.h>
#include <linux/blk-cgroup.h>
#include <linux/fadvise.h>
#include <linux/sched/mm.h>
#include <linux/fsnotify.h>

#include "internal.h"

/*
 * Initialise a struct file's readahead state.  Assumes that the caller has
 * memset *ra to zero.
 */
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
	ra->ra_pages = inode_to_bdi(mapping->host)->ra_pages;
	ra->prev_pos = -1;
}
EXPORT_SYMBOL_GPL(file_ra_state_init);

static void read_pages(struct readahead_control *rac)
{
	const struct address_space_operations *aops = rac->mapping->a_ops;
	struct folio *folio;
	struct blk_plug plug;

	if (!readahead_count(rac))
		return;

	if (unlikely(rac->_workingset))
		psi_memstall_enter(&rac->_pflags);
	blk_start_plug(&plug);

	if (aops->readahead) {
		aops->readahead(rac);
		/* Clean up the remaining folios. */
		while ((folio = readahead_folio(rac)) != NULL) {
			folio_get(folio);
			filemap_remove_folio(folio);
			folio_unlock(folio);
			folio_put(folio);
		}
	} else {
		while ((folio = readahead_folio(rac)) != NULL)
			aops->read_folio(rac->file, folio);
	}

	blk_finish_plug(&plug);
	if (unlikely(rac->_workingset))
		psi_memstall_leave(&rac->_pflags);
	rac->_workingset = false;

	BUG_ON(readahead_count(rac));
}

static struct folio *ractl_alloc_folio(struct readahead_control *ractl,
		gfp_t gfp_mask, unsigned int order)
{
	struct folio *folio;

	folio = filemap_alloc_folio(gfp_mask, order);
	if (folio && ractl->dropbehind)
		__folio_set_dropbehind(folio);

	return folio;
}

/**
 * page_cache_ra_unbounded - Start unchecked readahead.
 * @ractl: Readahead control.
 * @nr_to_read: The number of pages to read.
 * @lookahead_size: Where to start the next readahead.
 *
 * This function is for filesystems to call when they want to start
 * readahead beyond a file's stated i_size.  This is almost certainly
 * not the function you want to call.  Use page_cache_async_readahead()
 * or page_cache_sync_readahead() instead.
 *
 * Context: File is referenced by caller.  Mutexes may be held by caller.
 * May sleep, but will not reenter filesystem to reclaim memory.
 */
void page_cache_ra_unbounded(struct readahead_control *ractl,
		unsigned long nr_to_read, unsigned long lookahead_size)
{
	struct address_space *mapping = ractl->mapping;
	unsigned long index = readahead_index(ractl);
	gfp_t gfp_mask = readahead_gfp_mask(mapping);
	unsigned long mark = ULONG_MAX, i = 0;
	unsigned int min_nrpages = mapping_min_folio_nrpages(mapping);

	/*
	 * Partway through the readahead operation, we will have added
	 * locked pages to the page cache, but will not yet have submitted
	 * them for I/O.  Adding another page may need to allocate memory,
	 * which can trigger memory reclaim.  Telling the VM we're in
	 * the middle of a filesystem operation will cause it to not
	 * touch file-backed pages, preventing a deadlock.  Most (all?)
	 * filesystems already specify __GFP_NOFS in their mapping's
	 * gfp_mask, but let's be explicit here.
	 */
	unsigned int nofs = memalloc_nofs_save();

	filemap_invalidate_lock_shared(mapping);
	index = mapping_align_index(mapping, index);

	/*
	 * As iterator `i` is aligned to min_nrpages, round_up the
	 * difference between nr_to_read and lookahead_size to mark the
	 * index that only has lookahead or "async_region" to set the
	 * readahead flag.
	 */
	if (lookahead_size <= nr_to_read) {
		unsigned long ra_folio_index;

		ra_folio_index = round_up(readahead_index(ractl) +
					  nr_to_read - lookahead_size,
					  min_nrpages);
		mark = ra_folio_index - index;
	}
	nr_to_read += readahead_index(ractl) - index;
	ractl->_index = index;

	/*
	 * Preallocate as many pages as we will need.
	 */
	while (i < nr_to_read) {
		struct folio *folio = xa_load(&mapping->i_pages, index + i);
		int ret;

		if (folio && !xa_is_value(folio)) {
			/*
			 * Page already present?  Kick off the current batch
			 * of contiguous pages before continuing with the
			 * next batch.  This page may be the one we would
			 * have intended to mark as Readahead, but we don't
			 * have a stable reference to this page, and it's
			 * not worth getting one just for that.
			 */
			read_pages(ractl);
			ractl->_index += min_nrpages;
			i = ractl->_index + ractl->_nr_pages - index;
			continue;
		}

		folio = ractl_alloc_folio(ractl, gfp_mask,
					mapping_min_folio_order(mapping));
		if (!folio)
			break;

		ret = filemap_add_folio(mapping, folio, index + i, gfp_mask);
		if (ret < 0) {
			folio_put(folio);
			if (ret == -ENOMEM)
				break;
			read_pages(ractl);
			ractl->_index += min_nrpages;
			i = ractl->_index + ractl->_nr_pages - index;
			continue;
		}
		if (i == mark)
			folio_set_readahead(folio);
		ractl->_workingset |= folio_test_workingset(folio);
		ractl->_nr_pages += min_nrpages;
		i += min_nrpages;
	}

	/*
	 * Now start the IO.  We ignore I/O errors - if the folio is not
	 * uptodate then the caller will launch read_folio again, and
	 * will then handle the error.
	 */
	read_pages(ractl);
	filemap_invalidate_unlock_shared(mapping);
	memalloc_nofs_restore(nofs);
}
EXPORT_SYMBOL_GPL(page_cache_ra_unbounded);
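
/*
 * Illustrative usage sketch (an assumption, not taken from any particular
 * caller): a filesystem that needs to pre-populate the page cache past the
 * file's stated i_size could drive this interface directly.  "file", "ra",
 * "mapping" and "index" stand for whatever the caller already has at hand.
 *
 *	DEFINE_READAHEAD(ractl, file, ra, mapping, index);
 *	page_cache_ra_unbounded(&ractl, nr_to_read, 0);
 *
 * With a lookahead_size of 0 the mark lands past the end of the batch, so
 * none of the allocated folios gets the readahead flag.
 */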

/*
 * do_page_cache_ra() actually reads a chunk of disk.  It allocates
 * the pages first, then submits them for I/O.  This avoids the very bad
 * behaviour which would occur if page allocations are causing VM writeback.
 * We really don't want to intermingle reads and writes like that.
 */
static void do_page_cache_ra(struct readahead_control *ractl,
		unsigned long nr_to_read, unsigned long lookahead_size)
{
	struct inode *inode = ractl->mapping->host;
	unsigned long index = readahead_index(ractl);
	loff_t isize = i_size_read(inode);
	pgoff_t end_index;	/* The last page we want to read */

	if (isize == 0)
		return;

	end_index = (isize - 1) >> PAGE_SHIFT;
	if (index > end_index)
		return;
	/* Don't read past the page containing the last byte of the file */
	if (nr_to_read > end_index - index)
		nr_to_read = end_index - index + 1;

	page_cache_ra_unbounded(ractl, nr_to_read, lookahead_size);
}

/*
 * Chunk the readahead into 2 megabyte units, so that we don't pin too much
 * memory at once.
 */
void force_page_cache_ra(struct readahead_control *ractl,
		unsigned long nr_to_read)
{
	struct address_space *mapping = ractl->mapping;
	struct file_ra_state *ra = ractl->ra;
	struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
	unsigned long max_pages;

	if (unlikely(!mapping->a_ops->read_folio && !mapping->a_ops->readahead))
		return;

	/*
	 * If the request exceeds the readahead window, allow the read to
	 * be up to the optimal hardware IO size
	 */
	max_pages = max_t(unsigned long, bdi->io_pages, ra->ra_pages);
	nr_to_read = min_t(unsigned long, nr_to_read, max_pages);
	while (nr_to_read) {
		unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_SIZE;

		if (this_chunk > nr_to_read)
			this_chunk = nr_to_read;
		do_page_cache_ra(ractl, this_chunk, 0);

		nr_to_read -= this_chunk;
	}
}

/*
 * Set the initial window size, round to next power of 2 and square
 * for small size, x 4 for medium, and x 2 for large
 * for 128k (32 page) max ra
 * 1-2 page = 16k, 3-4 page 32k, 5-8 page = 64k, > 8 page = 128k initial
 */
static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
{
	unsigned long newsize = roundup_pow_of_two(size);

	if (newsize <= max / 32)
		newsize = newsize * 4;
	else if (newsize <= max / 4)
		newsize = newsize * 2;
	else
		newsize = max;

	return newsize;
}
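
/*
 * Worked example (derived from the code above) for the common max of 32
 * pages (128k): a 2-page request rounds to 2 pages, takes the "x 2" branch
 * and becomes 4 pages (16k); a 6-page request rounds to 8 and becomes
 * 16 pages (64k); anything above 8 pages rounds to at least 16 and is
 * clamped to the 32-page (128k) maximum.
 */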

/*
 * Get the previous window size, ramp it up, and
 * return it as the new window size.
 */
static unsigned long get_next_ra_size(struct file_ra_state *ra,
				      unsigned long max)
{
	unsigned long cur = ra->size;

	if (cur < max / 16)
		return 4 * cur;
	if (cur <= max / 2)
		return 2 * cur;
	return max;
}
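
/*
 * Worked example (derived from the code above) with max = 32 pages: a
 * previous window of 1 page grows to 4, a window of 4 grows to 8, 8 grows
 * to 16, 16 grows to 32, and anything already above max / 2 jumps straight
 * to the 32-page maximum.
 */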

/*
 * On-demand readahead design.
 *
 * The fields in struct file_ra_state represent the most-recently-executed
 * readahead attempt:
 *
 *                        |<----- async_size ---------|
 *     |------------------- size -------------------->|
 *     |==================#===========================|
 *     ^start             ^page marked with PG_readahead
 *
 * To overlap application thinking time and disk I/O time, we do
 * `readahead pipelining': Do not wait until the application consumed all
 * readahead pages and stalled on the missing page at readahead_index;
 * Instead, submit an asynchronous readahead I/O as soon as there are
 * only async_size pages left in the readahead window. Normally async_size
 * will be equal to size, for maximum pipelining.
 *
 * In interleaved sequential reads, concurrent streams on the same fd can
 * be invalidating each other's readahead state. So we flag the new readahead
 * page at (start+size-async_size) with PG_readahead, and use it as readahead
 * indicator. The flag won't be set on already cached pages, to avoid the
 * readahead-for-nothing fuss, saving pointless page cache lookups.
 *
 * prev_pos tracks the last visited byte in the _previous_ read request.
 * It should be maintained by the caller, and will be used for detecting
 * small random reads. Note that the readahead algorithm checks loosely
 * for sequential patterns. Hence interleaved reads might be served as
 * sequential ones.
 *
 * There is a special-case: if the first page which the application tries to
 * read happens to be the first page of the file, it is assumed that a linear
 * read is about to happen and the window is immediately set to the initial
 * size based on the I/O request size and the max_readahead.
 *
 * The code ramps up the readahead size aggressively at first, but slows down
 * as it approaches max_readahead.
 */

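/*
 * Worked example of the pipelining above (numbers follow from
 * get_init_ra_size()/get_next_ra_size() with the default 32-page window,
 * assuming a cold file, order-0 folios and no interleaving): a 16-page read
 * at index 0 takes the start-of-file path in page_cache_sync_ra(), giving
 * start = 0, size = 32, async_size = 16, with PG_readahead set on page 16.
 * When the application later touches page 16, page_cache_async_ra() sees
 * the expected index and moves the window forward: start = 32, size = 32,
 * async_size = 32, PG_readahead now on page 32.  From then on every window
 * is fully asynchronous.
 */
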
static inline int ra_alloc_folio(struct readahead_control *ractl, pgoff_t index,
		pgoff_t mark, unsigned int order, gfp_t gfp)
{
	int err;
	struct folio *folio = ractl_alloc_folio(ractl, gfp, order);

	if (!folio)
		return -ENOMEM;
	mark = round_down(mark, 1UL << order);
	if (index == mark)
		folio_set_readahead(folio);
	err = filemap_add_folio(ractl->mapping, folio, index, gfp);
	if (err) {
		folio_put(folio);
		return err;
	}

	ractl->_nr_pages += 1UL << order;
	ractl->_workingset |= folio_test_workingset(folio);
	return 0;
}

void page_cache_ra_order(struct readahead_control *ractl,
		struct file_ra_state *ra, unsigned int new_order)
{
	struct address_space *mapping = ractl->mapping;
	pgoff_t start = readahead_index(ractl);
	pgoff_t index = start;
	unsigned int min_order = mapping_min_folio_order(mapping);
	pgoff_t limit = (i_size_read(mapping->host) - 1) >> PAGE_SHIFT;
	pgoff_t mark = index + ra->size - ra->async_size;
	unsigned int nofs;
	int err = 0;
	gfp_t gfp = readahead_gfp_mask(mapping);
	unsigned int min_ra_size = max(4, mapping_min_folio_nrpages(mapping));

	/*
	 * Fallback when size < min_nrpages as each folio should be
	 * at least min_nrpages anyway.
	 */
	if (!mapping_large_folio_support(mapping) || ra->size < min_ra_size)
		goto fallback;

	limit = min(limit, index + ra->size - 1);

	if (new_order < mapping_max_folio_order(mapping))
		new_order += 2;

	new_order = min(mapping_max_folio_order(mapping), new_order);
	new_order = min_t(unsigned int, new_order, ilog2(ra->size));
	new_order = max(new_order, min_order);

	/* See comment in page_cache_ra_unbounded() */
	nofs = memalloc_nofs_save();
	filemap_invalidate_lock_shared(mapping);
	/*
	 * If the new_order is greater than min_order and index is
	 * already aligned to new_order, then this will be noop as index
	 * aligned to new_order should also be aligned to min_order.
	 */
	ractl->_index = mapping_align_index(mapping, index);
	index = readahead_index(ractl);

	while (index <= limit) {
		unsigned int order = new_order;

		/* Align with smaller pages if needed */
		if (index & ((1UL << order) - 1))
			order = __ffs(index);
		/* Don't allocate pages past EOF */
		while (order > min_order && index + (1UL << order) - 1 > limit)
			order--;
		err = ra_alloc_folio(ractl, index, mark, order, gfp);
		if (err)
			break;
		index += 1UL << order;
	}

	read_pages(ractl);
	filemap_invalidate_unlock_shared(mapping);
	memalloc_nofs_restore(nofs);

	/*
	 * If there were already pages in the page cache, then we may have
	 * left some gaps.  Let the regular readahead code take care of this
	 * situation below.
	 */
	if (!err)
		return;
fallback:
	/*
	 * ->readahead() may have updated readahead window size so we have to
	 * check there's still something to read.
	 */
	if (ra->size > index - start)
		do_page_cache_ra(ractl, ra->size - (index - start),
				ra->async_size);
}

static unsigned long ractl_max_pages(struct readahead_control *ractl,
		unsigned long req_size)
{
	struct backing_dev_info *bdi = inode_to_bdi(ractl->mapping->host);
	unsigned long max_pages = ractl->ra->ra_pages;

	/*
	 * If the request exceeds the readahead window, allow the read to
	 * be up to the optimal hardware IO size
	 */
	if (req_size > max_pages && bdi->io_pages > max_pages)
		max_pages = min(req_size, bdi->io_pages);
	return max_pages;
}

void page_cache_sync_ra(struct readahead_control *ractl,
		unsigned long req_count)
{
	pgoff_t index = readahead_index(ractl);
	bool do_forced_ra = ractl->file && (ractl->file->f_mode & FMODE_RANDOM);
	struct file_ra_state *ra = ractl->ra;
	unsigned long max_pages, contig_count;
	pgoff_t prev_index, miss;

	/*
	 * If we have pre-content watches we need to disable readahead to make
	 * sure that we don't find 0 filled pages in cache that we never emitted
	 * events for. Filesystems supporting HSM must make sure to not call
	 * this function with ractl->file unset for files handled by HSM.
	 */
	if (ractl->file && unlikely(FMODE_FSNOTIFY_HSM(ractl->file->f_mode)))
		return;

	/*
	 * Even if readahead is disabled, issue this request as readahead
	 * as we'll need it to satisfy the requested range. The forced
	 * readahead will do the right thing and limit the read to just the
	 * requested range, which we'll set to 1 page for this case.
	 */
	if (!ra->ra_pages || blk_cgroup_congested()) {
		if (!ractl->file)
			return;
		req_count = 1;
		do_forced_ra = true;
	}

	/* be dumb */
	if (do_forced_ra) {
		force_page_cache_ra(ractl, req_count);
		return;
	}

	max_pages = ractl_max_pages(ractl, req_count);
	prev_index = (unsigned long long)ra->prev_pos >> PAGE_SHIFT;
	/*
	 * A start of file, oversized read, or sequential cache miss:
	 * trivial case: (index - prev_index) == 1
	 * unaligned reads: (index - prev_index) == 0
	 */
	if (!index || req_count > max_pages || index - prev_index <= 1UL) {
		ra->start = index;
		ra->size = get_init_ra_size(req_count, max_pages);
		ra->async_size = ra->size > req_count ? ra->size - req_count :
							ra->size >> 1;
		goto readit;
	}

	/*
	 * Query the page cache and look for the traces (cached history pages)
	 * that a sequential stream would leave behind.
	 */
	rcu_read_lock();
	miss = page_cache_prev_miss(ractl->mapping, index - 1, max_pages);
	rcu_read_unlock();
	contig_count = index - miss - 1;
	/*
	 * Standalone, small random read.  Read as is, and do not pollute the
	 * readahead state.
	 */
	if (contig_count <= req_count) {
		do_page_cache_ra(ractl, req_count, 0);
		return;
	}
	/*
	 * File cached from the beginning:
	 * it is a strong indication of long-run stream (or whole-file-read)
	 */
	if (miss == ULONG_MAX)
		contig_count *= 2;
	ra->start = index;
	ra->size = min(contig_count + req_count, max_pages);
	ra->async_size = 1;
readit:
	ractl->_index = ra->start;
	page_cache_ra_order(ractl, ra, 0);
}
EXPORT_SYMBOL_GPL(page_cache_sync_ra);
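
/*
 * Illustrative note (the wrapper lives in pagemap.h, not in this file):
 * callers in the generic read and fault paths normally reach
 * page_cache_sync_ra() through the page_cache_sync_readahead() helper,
 * roughly:
 *
 *	DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index);
 *	page_cache_sync_ra(&ractl, req_count);
 *
 * i.e. the readahead state is the per-struct-file f_ra and the window is
 * sized from the number of pages the caller is about to read.
 */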

void page_cache_async_ra(struct readahead_control *ractl,
		struct folio *folio, unsigned long req_count)
{
	unsigned long max_pages;
	struct file_ra_state *ra = ractl->ra;
	pgoff_t index = readahead_index(ractl);
	pgoff_t expected, start;
	unsigned int order = folio_order(folio);

	/* no readahead */
	if (!ra->ra_pages)
		return;

	/* See the comment in page_cache_sync_ra. */
	if (ractl->file && unlikely(FMODE_FSNOTIFY_HSM(ractl->file->f_mode)))
		return;

	/*
	 * Same bit is used for PG_readahead and PG_reclaim.
	 */
	if (folio_test_writeback(folio))
		return;

	folio_clear_readahead(folio);

	if (blk_cgroup_congested())
		return;

	max_pages = ractl_max_pages(ractl, req_count);
	/*
	 * It's the expected callback index, assume sequential access.
	 * Ramp up sizes, and push forward the readahead window.
	 */
	expected = round_down(ra->start + ra->size - ra->async_size,
			1UL << order);
	if (index == expected) {
		ra->start += ra->size;
		/*
		 * In the case of MADV_HUGEPAGE, the actual size might exceed
		 * the readahead window.
		 */
		ra->size = max(ra->size, get_next_ra_size(ra, max_pages));
		ra->async_size = ra->size;
		goto readit;
	}

	/*
	 * Hit a marked folio without valid readahead state.
	 * E.g. interleaved reads.
	 * Query the pagecache for async_size, which normally equals to
	 * readahead size. Ramp it up and use it as the new readahead size.
	 */
	rcu_read_lock();
	start = page_cache_next_miss(ractl->mapping, index + 1, max_pages);
	rcu_read_unlock();

	if (!start || start - index > max_pages)
		return;

	ra->start = start;
	ra->size = start - index;	/* old async_size */
	ra->size += req_count;
	ra->size = get_next_ra_size(ra, max_pages);
	ra->async_size = ra->size;
readit:
	ractl->_index = ra->start;
	page_cache_ra_order(ractl, ra, order);
}
EXPORT_SYMBOL_GPL(page_cache_async_ra);

ssize_t ksys_readahead(int fd, loff_t offset, size_t count)
{
	CLASS(fd, f)(fd);

	if (fd_empty(f) || !(fd_file(f)->f_mode & FMODE_READ))
		return -EBADF;

	/*
	 * The readahead() syscall is intended to run only on files
	 * that can execute readahead. If readahead is not possible
	 * on this file, then we must return -EINVAL.
	 */
	if (!fd_file(f)->f_mapping || !fd_file(f)->f_mapping->a_ops ||
	    (!S_ISREG(file_inode(fd_file(f))->i_mode) &&
	    !S_ISBLK(file_inode(fd_file(f))->i_mode)))
		return -EINVAL;

	return vfs_fadvise(fd_file(f), offset, count, POSIX_FADV_WILLNEED);
}

SYSCALL_DEFINE3(readahead, int, fd, loff_t, offset, size_t, count)
{
	return ksys_readahead(fd, offset, count);
}

#if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_READAHEAD)
COMPAT_SYSCALL_DEFINE4(readahead, int, fd, compat_arg_u64_dual(offset), size_t, count)
{
	return ksys_readahead(fd, compat_arg_u64_glue(offset), count);
}
#endif

/**
 * readahead_expand - Expand a readahead request
 * @ractl: The request to be expanded
 * @new_start: The revised start
 * @new_len: The revised size of the request
 *
 * Attempt to expand a readahead request outwards from the current size to the
 * specified size by inserting locked pages before and after the current window
 * to increase the size to the new window.  This may involve the insertion of
 * THPs, in which case the window may get expanded even beyond what was
 * requested.
 *
 * The algorithm will stop if it encounters a conflicting page already in the
 * pagecache and leave a smaller expansion than requested.
 *
 * The caller must check for this by examining the revised @ractl object for a
 * different expansion than was requested.
 */
void readahead_expand(struct readahead_control *ractl,
		      loff_t new_start, size_t new_len)
{
	struct address_space *mapping = ractl->mapping;
	struct file_ra_state *ra = ractl->ra;
	pgoff_t new_index, new_nr_pages;
	gfp_t gfp_mask = readahead_gfp_mask(mapping);
	unsigned long min_nrpages = mapping_min_folio_nrpages(mapping);
	unsigned int min_order = mapping_min_folio_order(mapping);

	new_index = new_start / PAGE_SIZE;
	/*
	 * Readahead code should have aligned the ractl->_index to
	 * min_nrpages before calling readahead aops.
	 */
	VM_BUG_ON(!IS_ALIGNED(ractl->_index, min_nrpages));

	/* Expand the leading edge downwards */
	while (ractl->_index > new_index) {
		unsigned long index = ractl->_index - 1;
		struct folio *folio = xa_load(&mapping->i_pages, index);

		if (folio && !xa_is_value(folio))
			return; /* Folio apparently present */

		folio = ractl_alloc_folio(ractl, gfp_mask, min_order);
		if (!folio)
			return;

		index = mapping_align_index(mapping, index);
		if (filemap_add_folio(mapping, folio, index, gfp_mask) < 0) {
			folio_put(folio);
			return;
		}
		if (unlikely(folio_test_workingset(folio)) &&
				!ractl->_workingset) {
			ractl->_workingset = true;
			psi_memstall_enter(&ractl->_pflags);
		}
		ractl->_nr_pages += min_nrpages;
		ractl->_index = folio->index;
	}

	new_len += new_start - readahead_pos(ractl);
	new_nr_pages = DIV_ROUND_UP(new_len, PAGE_SIZE);

	/* Expand the trailing edge upwards */
	while (ractl->_nr_pages < new_nr_pages) {
		unsigned long index = ractl->_index + ractl->_nr_pages;
		struct folio *folio = xa_load(&mapping->i_pages, index);

		if (folio && !xa_is_value(folio))
			return; /* Folio apparently present */

		folio = ractl_alloc_folio(ractl, gfp_mask, min_order);
		if (!folio)
			return;

		index = mapping_align_index(mapping, index);
		if (filemap_add_folio(mapping, folio, index, gfp_mask) < 0) {
			folio_put(folio);
			return;
		}
		if (unlikely(folio_test_workingset(folio)) &&
				!ractl->_workingset) {
			ractl->_workingset = true;
			psi_memstall_enter(&ractl->_pflags);
		}
		ractl->_nr_pages += min_nrpages;
		if (ra) {
			ra->size += min_nrpages;
			ra->async_size += min_nrpages;
		}
	}
}
EXPORT_SYMBOL(readahead_expand);
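
/*
 * Illustrative sketch (an assumption, not lifted from a real filesystem): a
 * ->readahead() implementation whose on-disk unit is larger than a page -
 * say a 64KiB compression block - might round the request out to block
 * boundaries before doing any I/O:
 *
 *	static void example_block_readahead(struct readahead_control *rac)
 *	{
 *		loff_t start = round_down(readahead_pos(rac), SZ_64K);
 *		size_t len = round_up(readahead_pos(rac) + readahead_length(rac),
 *				      SZ_64K) - start;
 *
 *		readahead_expand(rac, start, len);
 *		// ... then iterate with readahead_folio() as usual ...
 *	}
 *
 * readahead_expand() may give back less than was asked for, so the
 * implementation must still cope with a window that is not block aligned.
 */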