/*
 * mm/readahead.c - address_space-level file readahead.
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 09Apr2002	[email protected]
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/pagevec.h>

void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
}
EXPORT_SYMBOL(default_unplug_io_fn);

struct backing_dev_info default_backing_dev_info = {
        .ra_pages       = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE,
        .state          = 0,
        .capabilities   = BDI_CAP_MAP_COPY,
        .unplug_io_fn   = default_unplug_io_fn,
};
EXPORT_SYMBOL_GPL(default_backing_dev_info);

/*
 * Initialise a struct file's readahead state.  Assumes that the caller has
 * memset *ra to zero.
 */
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
        ra->ra_pages = mapping->backing_dev_info->ra_pages;
        ra->prev_index = -1;
}
EXPORT_SYMBOL_GPL(file_ra_state_init);
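
/*
 * Typical usage (a sketch, not quoted from any particular caller): the VFS
 * initialises the per-file readahead state once at open() time, after the
 * struct file has been zeroed, along the lines of
 *
 *	file_ra_state_init(&filp->f_ra, filp->f_mapping);
 *
 * where "filp" is the freshly opened struct file.
 */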

/*
 * Return max readahead size for this inode in number-of-pages.
 */
static inline unsigned long get_max_readahead(struct file_ra_state *ra)
{
        return ra->ra_pages;
}

static inline unsigned long get_min_readahead(struct file_ra_state *ra)
{
        return (VM_MIN_READAHEAD * 1024) / PAGE_CACHE_SIZE;
}

static inline void reset_ahead_window(struct file_ra_state *ra)
{
        /*
         * ... but preserve ahead_start + ahead_size value,
         * see 'recheck:' label in page_cache_readahead().
         * Note: We never use ->ahead_size as rvalue without
         * checking ->ahead_start != 0 first.
         */
        ra->ahead_size += ra->ahead_start;
        ra->ahead_start = 0;
}

static inline void ra_off(struct file_ra_state *ra)
{
        ra->start = 0;
        ra->flags = 0;
        ra->size = 0;
        reset_ahead_window(ra);
        return;
}

/*
 * Set the initial window size: round the request size up to the next power
 * of 2, then scale it up - x4 for small requests, x2 for medium ones - and
 * cap it at the max readahead for large requests.
 */
static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
{
        unsigned long newsize = roundup_pow_of_two(size);

        if (newsize <= max / 32)
                newsize = newsize * 4;
        else if (newsize <= max / 4)
                newsize = newsize * 2;
        else
                newsize = max;
        return newsize;
}
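
/*
 * Worked example for get_init_ra_size() (a sketch; assumes 4k pages, so the
 * default 128k maximum readahead is max == 32 pages):
 *
 *	get_init_ra_size(1, 32):  roundup -> 1,  1 <= 32/32, so 1 * 4 = 4 pages (16k)
 *	get_init_ra_size(6, 32):  roundup -> 8,  8 <= 32/4,  so 8 * 2 = 16 pages (64k)
 *	get_init_ra_size(20, 32): roundup -> 32, above 32/4,  so capped at 32 pages (128k)
 *
 * In other words, for the default limits: 1-2 page requests start with a 16k
 * window, 3-4 pages with 32k, 5-8 pages with 64k, and anything bigger gets
 * the full 128k straight away.
 */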

/*
 * Set the new window size, this is called only when I/O is to be submitted,
 * not for each call to readahead.  If a cache miss occurred, reduce the next
 * I/O size, else increase it depending on how close to max we are.
 */
static inline unsigned long get_next_ra_size(struct file_ra_state *ra)
{
        unsigned long max = get_max_readahead(ra);
        unsigned long min = get_min_readahead(ra);
        unsigned long cur = ra->size;
        unsigned long newsize;

        if (ra->flags & RA_FLAG_MISS) {
                ra->flags &= ~RA_FLAG_MISS;
                newsize = max((cur - 2), min);
        } else if (cur < max / 16) {
                newsize = 4 * cur;
        } else {
                newsize = 2 * cur;
        }
        return min(newsize, max);
}
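
/*
 * Worked example for get_next_ra_size() (a sketch; assumes a 32 page max and
 * the default VM_MIN_READAHEAD of 16k, i.e. min == 4 pages with 4k pages):
 *
 *	sequential growth:            4 -> 8 -> 16 -> 32, then pinned at max
 *	after a miss with cur == 16:  max(16 - 2, 4) = 14 pages
 *	after a miss with cur == 4:   max(4 - 2, 4)  = 4 pages (the floor)
 */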

#define list_to_page(head) (list_entry((head)->prev, struct page, lru))

/**
 * read_cache_pages - populate an address space with some pages & start reads against them
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 * @filler: callback routine for filling a single page.
 * @data: private data for the callback routine.
 *
 * Hides the details of the LRU cache etc from the filesystems.
 */
int read_cache_pages(struct address_space *mapping, struct list_head *pages,
                        int (*filler)(void *, struct page *), void *data)
{
        struct page *page;
        struct pagevec lru_pvec;
        int ret = 0;

        pagevec_init(&lru_pvec, 0);

        while (!list_empty(pages)) {
                page = list_to_page(pages);
                list_del(&page->lru);
                if (add_to_page_cache(page, mapping, page->index, GFP_KERNEL)) {
                        page_cache_release(page);
                        continue;
                }
                ret = filler(data, page);
                if (!pagevec_add(&lru_pvec, page))
                        __pagevec_lru_add(&lru_pvec);
                if (ret) {
                        put_pages_list(pages);
                        break;
                }
                task_io_account_read(PAGE_CACHE_SIZE);
        }
        pagevec_lru_add(&lru_pvec);
        return ret;
}
EXPORT_SYMBOL(read_cache_pages);
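
/*
 * Example of how a filesystem's ->readpages() method might use
 * read_cache_pages() (a hedged sketch - "myfs_fill_page", "myfs_start_read"
 * and "myfs_readpages" are hypothetical names, not existing kernel API):
 *
 *	static int myfs_fill_page(void *data, struct page *page)
 *	{
 *		struct file *filp = data;
 *
 *		return myfs_start_read(filp, page);	// starts the I/O and unlocks the page
 *	}
 *
 *	static int myfs_readpages(struct file *filp,
 *			struct address_space *mapping,
 *			struct list_head *pages, unsigned nr_pages)
 *	{
 *		return read_cache_pages(mapping, pages, myfs_fill_page, filp);
 *	}
 *
 * read_cache_pages() adds each page to the page cache before calling the
 * filler and puts it on the LRU afterwards, so the filler only has to start
 * the read and unlock the page, much like a ->readpage() implementation.
 */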

static int read_pages(struct address_space *mapping, struct file *filp,
                struct list_head *pages, unsigned nr_pages)
{
        unsigned page_idx;
        struct pagevec lru_pvec;
        int ret;

        if (mapping->a_ops->readpages) {
                ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages);
                /* Clean up the remaining pages */
                put_pages_list(pages);
                goto out;
        }

        pagevec_init(&lru_pvec, 0);
        for (page_idx = 0; page_idx < nr_pages; page_idx++) {
                struct page *page = list_to_page(pages);
                list_del(&page->lru);
                if (!add_to_page_cache(page, mapping,
                                        page->index, GFP_KERNEL)) {
                        mapping->a_ops->readpage(filp, page);
                        if (!pagevec_add(&lru_pvec, page))
                                __pagevec_lru_add(&lru_pvec);
                } else
                        page_cache_release(page);
        }
        pagevec_lru_add(&lru_pvec);
        ret = 0;
out:
        return ret;
}

/*
 * Readahead design.
 *
 * The fields in struct file_ra_state represent the most-recently-executed
 * readahead attempt:
 *
 * start:	Page index at which we started the readahead
 * size:	Number of pages in that read
 *		Together, start and size form the "current window".
 * prev_index:	The page which the readahead algorithm most-recently inspected.
 *		It is mainly used to detect sequential file reading.
 *		If page_cache_readahead sees that it is again being called for
 *		a page which it just looked at, it can return immediately
 *		without making any state changes.
 * prev_offset:	Offset in prev_index where the last read ended - used for
 *		detection of sequential file reading.
 * ahead_start,
 * ahead_size:	Together, these form the "ahead window".
 * ra_pages:	The externally controlled max readahead for this fd.
 *
 * When readahead is in the off state (size == 0), readahead is disabled.
 * In this state, prev_index is used to detect the resumption of sequential I/O.
 *
 * The readahead code manages two windows - the "current" and the "ahead"
 * windows.  The intent is that while the application is walking the pages
 * in the current window, I/O is underway on the ahead window.  When the
 * current window is fully traversed, it is replaced by the ahead window
 * and the ahead window is invalidated.  When this copying happens, the
 * new current window's pages are probably still locked.  So
 * we submit a new batch of I/O immediately, creating a new ahead window.
 *
 * So:
 *
 *   ----|----------------|----------------|-----
 *       ^start           ^start+size
 *                        ^ahead_start     ^ahead_start+ahead_size
 *
 *         ^ When this page is read, we submit I/O for the
 *           ahead window.
 *
 * A `readahead hit' occurs when a read request is made against a page which is
 * the next sequential page.  Ahead window calculations are done only when it
 * is time to submit a new IO.  The code ramps up the size aggressively at
 * first, but slows down as it approaches max_readahead.
 *
 * Any seek/random IO will result in readahead being turned off.  It will
 * resume at the first sequential access.
 *
 * There is a special-case: if the first page which the application tries to
 * read happens to be the first page of the file, it is assumed that a linear
 * read is about to happen and the window is immediately set to the initial
 * size based on the I/O request size and the max_readahead.
 *
 * This function is to be called for every read request, rather than when
 * it is time to perform readahead.  It is called only once for the entire I/O
 * regardless of size unless readahead is unable to start enough I/O to satisfy
 * the request (I/O request > max_readahead).
 */
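
/*
 * Worked example of the two windows for a purely sequential reader doing
 * 1-page reads from page 0 (a sketch; assumes 4k pages and the default
 * 32 page max readahead):
 *
 *	read page 0:  first read at the start of the file, so the current
 *		      window becomes get_init_ra_size(1, 32) = 4 pages
 *		      (pages 0-3) and I/O is submitted for them.  There is
 *		      no ahead window yet.
 *	read page 1:  sequential and ahead_start == 0, so an ahead window of
 *		      get_next_ra_size() = 8 pages is started at pages 4-11.
 *	read page 4:  prev_index crosses into the ahead window, so the
 *		      current window becomes pages 4-11 and a new 16 page
 *		      ahead window is issued at pages 12-27.
 *	read page 12: the windows shift again; the next ahead window is
 *		      min(2 * 16, 32) = 32 pages at pages 28-59.
 *
 * From then on every window is the full 32 pages, and each crossing into the
 * ahead window submits the next 32 pages of I/O.
 */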

/*
 * __do_page_cache_readahead() actually reads a chunk of disk.  It allocates
 * all the pages first, then submits them all for I/O.  This avoids the very
 * bad behaviour which would occur if page allocations are causing VM
 * writeback.  We really don't want to intermingle reads and writes like that.
 *
 * Returns the number of pages requested, or the maximum amount of I/O allowed.
 *
 * do_page_cache_readahead() returns -1 if it encountered request queue
 * congestion.
 */
static int
__do_page_cache_readahead(struct address_space *mapping, struct file *filp,
                        pgoff_t offset, unsigned long nr_to_read)
{
        struct inode *inode = mapping->host;
        struct page *page;
        unsigned long end_index;        /* The last page we want to read */
        LIST_HEAD(page_pool);
        int page_idx;
        int ret = 0;
        loff_t isize = i_size_read(inode);

        if (isize == 0)
                goto out;

        end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);

        /*
         * Preallocate as many pages as we will need.
         */
        read_lock_irq(&mapping->tree_lock);
        for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
                pgoff_t page_offset = offset + page_idx;

                if (page_offset > end_index)
                        break;

                page = radix_tree_lookup(&mapping->page_tree, page_offset);
                if (page)
                        continue;

                read_unlock_irq(&mapping->tree_lock);
                page = page_cache_alloc_cold(mapping);
                read_lock_irq(&mapping->tree_lock);
                if (!page)
                        break;
                page->index = page_offset;
                list_add(&page->lru, &page_pool);
                ret++;
        }
        read_unlock_irq(&mapping->tree_lock);

        /*
         * Now start the IO.  We ignore I/O errors - if the page is not
         * uptodate then the caller will launch readpage again, and
         * will then handle the error.
         */
        if (ret)
                read_pages(mapping, filp, &page_pool, ret);
        BUG_ON(!list_empty(&page_pool));
out:
        return ret;
}

/*
 * Chunk the readahead into 2 megabyte units, so that we don't pin too much
 * memory at once.
 */
int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
                pgoff_t offset, unsigned long nr_to_read)
{
        int ret = 0;

        if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages))
                return -EINVAL;

        while (nr_to_read) {
                int err;

                unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_CACHE_SIZE;

                if (this_chunk > nr_to_read)
                        this_chunk = nr_to_read;
                err = __do_page_cache_readahead(mapping, filp,
                                                offset, this_chunk);
                if (err < 0) {
                        ret = err;
                        break;
                }
                ret += err;
                offset += this_chunk;
                nr_to_read -= this_chunk;
        }
        return ret;
}
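
/*
 * With 4k pages the 2MB chunk above is 512 pages, so e.g. a forced readahead
 * covering 3MB worth of pages goes out as one 512 page chunk followed by one
 * 256 page chunk.  This is the path intended for callers that must not be
 * skipped on congestion (sys_readahead()-style requests, madvise(MADV_WILLNEED));
 * the exact caller list lives outside this file, so treat this note as a
 * sketch rather than a definitive statement.
 */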

/*
 * Check how effective readahead is being.  If the amount of started IO is
 * less than expected then the file is partly or fully in pagecache and
 * readahead isn't helping.
 */
static inline int check_ra_success(struct file_ra_state *ra,
                        unsigned long nr_to_read, unsigned long actual)
{
        if (actual == 0) {
                ra->cache_hit += nr_to_read;
                if (ra->cache_hit >= VM_MAX_CACHE_HIT) {
                        ra_off(ra);
                        ra->flags |= RA_FLAG_INCACHE;
                        return 0;
                }
        } else {
                ra->cache_hit = 0;
        }
        return 1;
}

/*
 * This version skips the IO if the queue is read-congested, and will tell the
 * block layer to abandon the readahead if request allocation would block.
 *
 * force_page_cache_readahead() will ignore queue congestion and will block on
 * request queues.
 */
int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
                        pgoff_t offset, unsigned long nr_to_read)
{
        if (bdi_read_congested(mapping->backing_dev_info))
                return -1;

        return __do_page_cache_readahead(mapping, filp, offset, nr_to_read);
}

/*
 * Read 'nr_to_read' pages starting at page 'offset'.  If the flag 'block'
 * is set, wait until the read completes.  Otherwise attempt to read without
 * blocking.
 * Returns 1 ('success') if the read was issued without switching off
 * readahead mode; otherwise returns 0 (failure).
 */
static int
blockable_page_cache_readahead(struct address_space *mapping, struct file *filp,
                        pgoff_t offset, unsigned long nr_to_read,
                        struct file_ra_state *ra, int block)
{
        int actual;

        if (!block && bdi_read_congested(mapping->backing_dev_info))
                return 0;

        actual = __do_page_cache_readahead(mapping, filp, offset, nr_to_read);

        return check_ra_success(ra, nr_to_read, actual);
}

static int make_ahead_window(struct address_space *mapping, struct file *filp,
                                struct file_ra_state *ra, int force)
{
        int block, ret;

        ra->ahead_size = get_next_ra_size(ra);
        ra->ahead_start = ra->start + ra->size;

        block = force || (ra->prev_index >= ra->ahead_start);
        ret = blockable_page_cache_readahead(mapping, filp,
                        ra->ahead_start, ra->ahead_size, ra, block);

        if (!ret && !force) {
                /* A read failure in blocking mode implies that the pages
                 * are all cached, so we can safely assume we have taken
                 * care of all the pages requested in this call.
                 * A read failure in non-blocking mode implies we are
                 * reading more pages than requested in this call, so
                 * we safely assume we have taken care of all the pages
                 * requested in this call.
                 *
                 * Just reset the ahead window in case we failed due to
                 * congestion.  The ahead window will anyway be closed
                 * in case we failed due to excessive page cache hits.
                 */
                reset_ahead_window(ra);
        }

        return ret;
}

/**
 * page_cache_readahead - generic adaptive readahead
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @offset: start offset into @mapping, in PAGE_CACHE_SIZE units
 * @req_size: hint: total size of the read which the caller is performing in
 *            PAGE_CACHE_SIZE units
 *
 * page_cache_readahead() is the main function.  It performs the adaptive
 * readahead window size management and submits the readahead I/O.
 *
 * Note that @filp is purely used for passing on to the ->readpage[s]()
 * handler: it may refer to a different file from @mapping (so we may not use
 * @filp->f_mapping or @filp->f_path.dentry->d_inode here).
 * Also, @ra may not be equal to &@filp->f_ra.
 */
unsigned long
page_cache_readahead(struct address_space *mapping, struct file_ra_state *ra,
                     struct file *filp, pgoff_t offset, unsigned long req_size)
{
        unsigned long max, newsize;
        int sequential;

        /*
         * We avoid doing extra work and bogusly perturbing the readahead
         * window expansion logic.
         */
        if (offset == ra->prev_index && --req_size)
                ++offset;

        /* Note that prev_index == -1 if it is a first read */
        sequential = (offset == ra->prev_index + 1);
        ra->prev_index = offset;
        ra->prev_offset = 0;

        max = get_max_readahead(ra);
        newsize = min(req_size, max);

        /* No readahead or sub-page sized read or file already in cache */
        if (newsize == 0 || (ra->flags & RA_FLAG_INCACHE))
                goto out;

        ra->prev_index += newsize - 1;

        /*
         * Special case - first read at start of file.  We'll assume it's
         * a whole-file read and grow the window fast.  Or detect first
         * sequential access.
         */
        if (sequential && ra->size == 0) {
                ra->size = get_init_ra_size(newsize, max);
                ra->start = offset;
                if (!blockable_page_cache_readahead(mapping, filp, offset,
                                                        ra->size, ra, 1))
                        goto out;

                /*
                 * If the request size is larger than our max readahead, we
                 * at least want to be sure that we get 2 IOs in flight and
                 * we know that we will definitely need the new I/O.
                 * Once we do this, subsequent calls should be able to
                 * overlap IOs, thus preventing stalls.  So issue the ahead
                 * window immediately.
                 */
                if (req_size >= max)
                        make_ahead_window(mapping, filp, ra, 1);

                goto out;
        }

        /*
         * Now handle the random case:
         * partial page reads and first access were handled above,
         * so this must be the next page otherwise it is random.
         */
        if (!sequential) {
                ra_off(ra);
                blockable_page_cache_readahead(mapping, filp, offset,
                                                newsize, ra, 1);
                goto out;
        }

        /*
         * If we get here we are doing sequential IO and this was not the
         * first occurrence (i.e. we have an existing window).
         */
        if (ra->ahead_start == 0) {             /* no ahead window yet */
                if (!make_ahead_window(mapping, filp, ra, 0))
                        goto recheck;
        }

        /*
         * Already have an ahead window, check if we crossed into it.
         * If so, shift windows and issue a new ahead window.
         * Only return the #pages that are in the current window, so that
         * we get called back on the first page of the ahead window which
         * will allow us to submit more IO.
         */
        if (ra->prev_index >= ra->ahead_start) {
                ra->start = ra->ahead_start;
                ra->size = ra->ahead_size;
                make_ahead_window(mapping, filp, ra, 0);
recheck:
                /* prev_index shouldn't overrun the ahead window */
                ra->prev_index = min(ra->prev_index,
                        ra->ahead_start + ra->ahead_size - 1);
        }

out:
        return ra->prev_index + 1;
}
EXPORT_SYMBOL_GPL(page_cache_readahead);
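
/*
 * Call-pattern sketch (hedged - the caller lives in the generic read path in
 * mm/filemap.c, not in this file): page_cache_readahead() is invoked once per
 * read request before the pages are looked up, roughly as
 *
 *	if (index == next_index)
 *		next_index = page_cache_readahead(mapping, &ra, filp,
 *					index, last_index - index);
 *
 * so the returned value (prev_index + 1) tells the caller at which page
 * index readahead wants to be consulted again.
 */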

/*
 * handle_ra_miss() is called when it is known that a page which should have
 * been present in the pagecache (we just did some readahead there) was in
 * fact not found.  This will happen if it was evicted by the VM (readahead
 * thrashing).
 *
 * Turn on the cache miss flag in the RA struct; this will cause the RA code
 * to reduce the RA size on the next read.
 */
void handle_ra_miss(struct address_space *mapping,
                struct file_ra_state *ra, pgoff_t offset)
{
        ra->flags |= RA_FLAG_MISS;
        ra->flags &= ~RA_FLAG_INCACHE;
        ra->cache_hit = 0;
}
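
/*
 * Usage sketch (the caller is the generic read/fault path, not this file):
 * when find_get_page() comes back empty for a page that readahead should
 * already have brought into the page cache, the caller does
 *
 *	handle_ra_miss(mapping, &ra, index);
 *
 * before falling back to a synchronous ->readpage(), since the miss means
 * the readahead pages were reclaimed before the reader got to them.
 */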

/*
 * Given a desired number of PAGE_CACHE_SIZE readahead pages, return a
 * sensible upper limit.
 */
unsigned long max_sane_readahead(unsigned long nr)
{
        return min(nr, (node_page_state(numa_node_id(), NR_INACTIVE)
                        + node_page_state(numa_node_id(), NR_FREE_PAGES)) / 2);
}