/*
 * mm/readahead.c - address_space-level file readahead.
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 09Apr2002	akpm@zip.com.au
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>

void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
}
EXPORT_SYMBOL(default_unplug_io_fn);

struct backing_dev_info default_backing_dev_info = {
	.ra_pages	= (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE,
	.state		= 0,
	.capabilities	= BDI_CAP_MAP_COPY,
	.unplug_io_fn	= default_unplug_io_fn,
};
EXPORT_SYMBOL_GPL(default_backing_dev_info);

/*
 * Initialise a struct file's readahead state.  Assumes that the caller has
 * memset *ra to zero.
 */
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
	ra->ra_pages = mapping->backing_dev_info->ra_pages;
	ra->prev_page = -1;
}
EXPORT_SYMBOL_GPL(file_ra_state_init);

/*
 * Return max readahead size for this inode in number-of-pages.
 */
static inline unsigned long get_max_readahead(struct file_ra_state *ra)
{
	return ra->ra_pages;
}

static inline unsigned long get_min_readahead(struct file_ra_state *ra)
{
	return (VM_MIN_READAHEAD * 1024) / PAGE_CACHE_SIZE;
}

static inline void reset_ahead_window(struct file_ra_state *ra)
{
	/*
	 * ... but preserve ahead_start + ahead_size value,
	 * see 'recheck:' label in page_cache_readahead().
	 * Note: We never use ->ahead_size as rvalue without
	 * checking ->ahead_start != 0 first.
	 */
	ra->ahead_size += ra->ahead_start;
	ra->ahead_start = 0;
}

static inline void ra_off(struct file_ra_state *ra)
{
	ra->start = 0;
	ra->flags = 0;
	ra->size = 0;
	reset_ahead_window(ra);
	return;
}

/*
 * Set the initial window size: round the request up to the next power of 2,
 * then scale it (x4 for small requests, x2 for medium ones, clamped to the
 * maximum readahead for large ones).  For a 128k (32 page) max readahead
 * this ramps the first window up quickly; see the worked example below.
 */
static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
{
	unsigned long newsize = roundup_pow_of_two(size);

	if (newsize <= max / 32)
		newsize = newsize * 4;
	else if (newsize <= max / 4)
		newsize = newsize * 2;
	else
		newsize = max;
	return newsize;
}
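
/*
 * Worked example (illustrative only, assuming 4K pages and the default
 * 128k / 32 page max readahead, so max/32 == 1 and max/4 == 8):
 *
 *   request  1 page  -> rounds to  1 ->  1 <= max/32 -> window  4 pages (16k)
 *   request  4 pages -> rounds to  4 ->  4 <= max/4  -> window  8 pages (32k)
 *   request  8 pages -> rounds to  8 ->  8 <= max/4  -> window 16 pages (64k)
 *   request 16 pages -> rounds to 16 -> above max/4  -> window 32 pages (128k)
 */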

/*
 * Set the new window size.  This is called only when I/O is to be submitted,
 * not for each call to readahead.  If a cache miss occurred, reduce the next
 * I/O size, else increase it depending on how close to max we are.
 */
static inline unsigned long get_next_ra_size(struct file_ra_state *ra)
{
	unsigned long max = get_max_readahead(ra);
	unsigned long min = get_min_readahead(ra);
	unsigned long cur = ra->size;
	unsigned long newsize;

	if (ra->flags & RA_FLAG_MISS) {
		ra->flags &= ~RA_FLAG_MISS;
		newsize = max((cur - 2), min);
	} else if (cur < max / 16) {
		newsize = 4 * cur;
	} else {
		newsize = 2 * cur;
	}
	return min(newsize, max);
}
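
/*
 * Worked example for get_next_ra_size() (illustrative only, again assuming
 * a 32 page max, so max/16 == 2, and a 4 page minimum readahead):
 *
 *   current  1 page,  no miss -> 1 < max/16, grow x4 -> next window 4 pages
 *   current  4 pages, no miss -> grow x2             -> next window 8 pages
 *   current 20 pages, no miss -> 2 * 20 = 40, clamped to max -> 32 pages
 *   current 20 pages, miss    -> shrink to max(20 - 2, min)  -> 18 pages
 */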

#define list_to_page(head) (list_entry((head)->prev, struct page, lru))

/**
 * read_cache_pages - populate an address space with some pages, and
 * start reads against them.
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 * @filler: callback routine for filling a single page.
 * @data: private data for the callback routine.
 *
 * Hides the details of the LRU cache etc from the filesystems.
 */
int read_cache_pages(struct address_space *mapping, struct list_head *pages,
			int (*filler)(void *, struct page *), void *data)
{
	struct page *page;
	struct pagevec lru_pvec;
	int ret = 0;

	pagevec_init(&lru_pvec, 0);

	while (!list_empty(pages)) {
		page = list_to_page(pages);
		list_del(&page->lru);
		if (add_to_page_cache(page, mapping, page->index, GFP_KERNEL)) {
			page_cache_release(page);
			continue;
		}
		ret = filler(data, page);
		if (!pagevec_add(&lru_pvec, page))
			__pagevec_lru_add(&lru_pvec);
		if (ret) {
			while (!list_empty(pages)) {
				struct page *victim;

				victim = list_to_page(pages);
				list_del(&victim->lru);
				page_cache_release(victim);
			}
			break;
		}
	}
	pagevec_lru_add(&lru_pvec);
	return ret;
}

EXPORT_SYMBOL(read_cache_pages);
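
/*
 * Hedged usage sketch (not part of this file): a filesystem's ->readpages()
 * implementation can hand its page list straight to read_cache_pages(),
 * supplying a per-page filler callback.  "myfs_fill_page" and
 * "myfs_read_one_page" are hypothetical names used only for illustration.
 */
#if 0
static int myfs_fill_page(void *data, struct page *page)
{
	struct file *filp = data;

	/* read one page's worth of data; the filler is expected to unlock
	 * the page (or mark it with an error), much like ->readpage() */
	return myfs_read_one_page(filp, page);
}

static int myfs_readpages(struct file *filp, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	return read_cache_pages(mapping, pages, myfs_fill_page, filp);
}
#endif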

static int read_pages(struct address_space *mapping, struct file *filp,
		struct list_head *pages, unsigned nr_pages)
{
	unsigned page_idx;
	struct pagevec lru_pvec;
	int ret;

	if (mapping->a_ops->readpages) {
		ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages);
		goto out;
	}

	pagevec_init(&lru_pvec, 0);
	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		struct page *page = list_to_page(pages);
		list_del(&page->lru);
		if (!add_to_page_cache(page, mapping,
					page->index, GFP_KERNEL)) {
			ret = mapping->a_ops->readpage(filp, page);
			if (ret != AOP_TRUNCATED_PAGE) {
				if (!pagevec_add(&lru_pvec, page))
					__pagevec_lru_add(&lru_pvec);
				continue;
			} /* else fall through to release */
		}
		page_cache_release(page);
	}
	pagevec_lru_add(&lru_pvec);
	ret = 0;
out:
	return ret;
}

/*
 * Readahead design.
 *
 * The fields in struct file_ra_state represent the most-recently-executed
 * readahead attempt:
 *
 * start:	Page index at which we started the readahead
 * size:	Number of pages in that read
 *		Together, these form the "current window" (also called the
 *		readahead window).
 * prev_page:	The page which the readahead algorithm most-recently inspected.
 *		It is mainly used to detect sequential file reading.
 *		If page_cache_readahead sees that it is again being called for
 *		a page which it just looked at, it can return immediately without
 *		making any state changes.
 * ahead_start,
 * ahead_size:	Together, these form the "ahead window".
 * ra_pages:	The externally controlled max readahead for this fd.
 *
 * When readahead is in the off state (size == 0), readahead is disabled.
 * In this state, prev_page is used to detect the resumption of sequential I/O.
 *
 * The readahead code manages two windows - the "current" and the "ahead"
 * windows.  The intent is that while the application is walking the pages
 * in the current window, I/O is underway on the ahead window.  When the
 * current window is fully traversed, it is replaced by the ahead window
 * and the ahead window is invalidated.  When this copying happens, the
 * new current window's pages are probably still locked.  So we submit a new
 * batch of I/O immediately, creating a new ahead window.
 *
 * So:
 *
 *  ----|----------------|----------------|-----
 *      ^start           ^start+size
 *                       ^ahead_start     ^ahead_start+ahead_size
 *
 *                       ^ When this page is read, we submit I/O for the
 *                         ahead window.
 *
 * A `readahead hit' occurs when a read request is made against a page which is
 * the next sequential page.  Ahead window calculations are done only when it
 * is time to submit a new IO.  The code ramps up the size aggressively at
 * first, but slows down as it approaches max_readahead.
 *
 * Any seek/random IO will result in readahead being turned off.  It will
 * resume at the first sequential access.
 *
 * There is a special case: if the first page which the application tries to
 * read happens to be the first page of the file, it is assumed that a linear
 * read is about to happen and the window is immediately set to the initial
 * size based on the I/O request size and the max_readahead.
 *
 * This function is to be called for every read request, rather than when
 * it is time to perform readahead.  It is called only once for the entire I/O
 * regardless of size unless readahead is unable to start enough I/O to satisfy
 * the request (I/O request > max_readahead).
 */
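
/*
 * Illustrative trace (a sketch, not taken from the code; assumes a 32 page
 * max readahead and a strictly sequential reader):
 *
 *   1. First read at offset 0: the current window is set up and read,
 *      e.g. start = 0, size = 16.
 *   2. On a later call an ahead window is built with make_ahead_window(),
 *      e.g. ahead_start = 16, ahead_size = 32, and its I/O is submitted.
 *   3. The reader walks pages 0..15 while the ahead window's I/O completes.
 *   4. When the reader crosses into page 16 (prev_page >= ahead_start), the
 *      ahead window becomes the current window (start = 16, size = 32) and
 *      a new ahead window starting at page 48 is submitted.
 */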

/*
 * __do_page_cache_readahead() actually reads a chunk of disk.  It allocates
 * all the pages first, then submits them all for I/O.  This avoids the very
 * bad behaviour which would occur if page allocations are causing VM writeback.
 * We really don't want to intermingle reads and writes like that.
 *
 * Returns the number of pages requested, or the maximum amount of I/O allowed.
 *
 * do_page_cache_readahead() returns -1 if it encountered request queue
 * congestion.
 */
static int
__do_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read)
{
	struct inode *inode = mapping->host;
	struct page *page;
	unsigned long end_index;	/* The last page we want to read */
	LIST_HEAD(page_pool);
	int page_idx;
	int ret = 0;
	loff_t isize = i_size_read(inode);

	if (isize == 0)
		goto out;

	end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);

	/*
	 * Preallocate as many pages as we will need.
	 */
	read_lock_irq(&mapping->tree_lock);
	for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
		pgoff_t page_offset = offset + page_idx;

		if (page_offset > end_index)
			break;

		page = radix_tree_lookup(&mapping->page_tree, page_offset);
		if (page)
			continue;

		read_unlock_irq(&mapping->tree_lock);
		page = page_cache_alloc_cold(mapping);
		read_lock_irq(&mapping->tree_lock);
		if (!page)
			break;
		page->index = page_offset;
		list_add(&page->lru, &page_pool);
		ret++;
	}
	read_unlock_irq(&mapping->tree_lock);

	/*
	 * Now start the IO.  We ignore I/O errors - if the page is not
	 * uptodate then the caller will launch readpage again, and
	 * will then handle the error.
	 */
	if (ret)
		read_pages(mapping, filp, &page_pool, ret);
	BUG_ON(!list_empty(&page_pool));
out:
	return ret;
}

/*
 * Chunk the readahead into 2 megabyte units, so that we don't pin too much
 * memory at once.
 */
int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
		pgoff_t offset, unsigned long nr_to_read)
{
	int ret = 0;

	if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages))
		return -EINVAL;

	while (nr_to_read) {
		int err;

		unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_CACHE_SIZE;

		if (this_chunk > nr_to_read)
			this_chunk = nr_to_read;
		err = __do_page_cache_readahead(mapping, filp,
						offset, this_chunk);
		if (err < 0) {
			ret = err;
			break;
		}
		ret += err;
		offset += this_chunk;
		nr_to_read -= this_chunk;
	}
	return ret;
}
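
/*
 * Worked example (illustrative only, assuming 4K pages): each chunk is
 * (2 * 1024 * 1024) / 4096 = 512 pages, so a request for 1300 pages is
 * issued as three calls of 512, 512 and 276 pages.
 */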

/*
 * Check how effective readahead is being.  If the amount of started IO is
 * less than expected then the file is partly or fully in pagecache and
 * readahead isn't helping.
 */
static inline int check_ra_success(struct file_ra_state *ra,
			unsigned long nr_to_read, unsigned long actual)
{
	if (actual == 0) {
		ra->cache_hit += nr_to_read;
		if (ra->cache_hit >= VM_MAX_CACHE_HIT) {
			ra_off(ra);
			ra->flags |= RA_FLAG_INCACHE;
			return 0;
		}
	} else {
		ra->cache_hit = 0;
	}
	return 1;
}

/*
 * This version skips the IO if the queue is read-congested, and will tell the
 * block layer to abandon the readahead if request allocation would block.
 *
 * force_page_cache_readahead() will ignore queue congestion and will block on
 * request queues.
 */
int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read)
{
	if (bdi_read_congested(mapping->backing_dev_info))
		return -1;

	return __do_page_cache_readahead(mapping, filp, offset, nr_to_read);
}

/*
 * Read 'nr_to_read' pages starting at page 'offset'.  If the flag 'block'
 * is set, wait until the read completes.  Otherwise attempt to read without
 * blocking.
 * Returns 1 meaning 'success' if the read was successful without switching
 * off readahead mode.  Otherwise return failure.
 */
static int
blockable_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read,
			struct file_ra_state *ra, int block)
{
	int actual;

	if (!block && bdi_read_congested(mapping->backing_dev_info))
		return 0;

	actual = __do_page_cache_readahead(mapping, filp, offset, nr_to_read);

	return check_ra_success(ra, nr_to_read, actual);
}

static int make_ahead_window(struct address_space *mapping, struct file *filp,
			struct file_ra_state *ra, int force)
{
	int block, ret;

	ra->ahead_size = get_next_ra_size(ra);
	ra->ahead_start = ra->start + ra->size;

	block = force || (ra->prev_page >= ra->ahead_start);
	ret = blockable_page_cache_readahead(mapping, filp,
			ra->ahead_start, ra->ahead_size, ra, block);

	if (!ret && !force) {
		/* A read failure in blocking mode implies the pages are
		 * all cached.  So we can safely assume we have taken
		 * care of all the pages requested in this call.
		 * A read failure in non-blocking mode implies we are
		 * reading more pages than requested in this call.  So
		 * we safely assume we have taken care of all the pages
		 * requested in this call.
		 *
		 * Just reset the ahead window in case we failed due to
		 * congestion.  The ahead window will anyway be closed
		 * in case we failed due to excessive page cache hits.
		 */
		reset_ahead_window(ra);
	}

	return ret;
}

/**
 * page_cache_readahead - generic adaptive readahead
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @offset: start offset into @mapping, in PAGE_CACHE_SIZE units
 * @req_size: hint: total size of the read which the caller is performing in
 *            PAGE_CACHE_SIZE units
 *
 * page_cache_readahead() is the main function.  It performs the adaptive
 * readahead window size management and submits the readahead I/O.
 *
 * Note that @filp is purely used for passing on to the ->readpage[s]()
 * handler: it may refer to a different file from @mapping (so we may not use
 * @filp->f_mapping or @filp->f_dentry->d_inode here).
 * Also, @ra may not be equal to &@filp->f_ra.
 *
 */
unsigned long
page_cache_readahead(struct address_space *mapping, struct file_ra_state *ra,
		     struct file *filp, pgoff_t offset, unsigned long req_size)
{
	unsigned long max, newsize;
	int sequential;

	/*
	 * We avoid doing extra work and bogusly perturbing the readahead
	 * window expansion logic.
	 */
	if (offset == ra->prev_page && --req_size)
		++offset;

	/* Note that prev_page == -1 if it is a first read */
	sequential = (offset == ra->prev_page + 1);
	ra->prev_page = offset;

	max = get_max_readahead(ra);
	newsize = min(req_size, max);

	/* No readahead or sub-page sized read or file already in cache */
	if (newsize == 0 || (ra->flags & RA_FLAG_INCACHE))
		goto out;

	ra->prev_page += newsize - 1;

	/*
	 * Special case - first read at start of file.  We'll assume it's
	 * a whole-file read and grow the window fast.  Or detect the first
	 * sequential access.
	 */
	if (sequential && ra->size == 0) {
		ra->size = get_init_ra_size(newsize, max);
		ra->start = offset;
		if (!blockable_page_cache_readahead(mapping, filp, offset,
							 ra->size, ra, 1))
			goto out;

		/*
		 * If the request size is larger than our max readahead, we
		 * at least want to be sure that we get 2 IOs in flight and
		 * we know that we will definitely need the new I/O.
		 * Once we do this, subsequent calls should be able to overlap
		 * IOs, thus preventing stalls.  So issue the ahead window
		 * immediately.
		 */
		if (req_size >= max)
			make_ahead_window(mapping, filp, ra, 1);

		goto out;
	}

	/*
	 * Now handle the random case:
	 * partial page reads and first access were handled above,
	 * so this must be the next page; otherwise it is random.
	 */
	if (!sequential) {
		ra_off(ra);
		blockable_page_cache_readahead(mapping, filp, offset,
						 newsize, ra, 1);
		goto out;
	}

	/*
	 * If we get here we are doing sequential IO and this was not the first
	 * occurrence (i.e. we have an existing window)
	 */
	if (ra->ahead_start == 0) {	 /* no ahead window yet */
		if (!make_ahead_window(mapping, filp, ra, 0))
			goto recheck;
	}

	/*
	 * Already have an ahead window, check if we crossed into it.
	 * If so, shift windows and issue a new ahead window.
	 * Only return the #pages that are in the current window, so that
	 * we get called back on the first page of the ahead window which
	 * will allow us to submit more IO.
	 */
	if (ra->prev_page >= ra->ahead_start) {
		ra->start = ra->ahead_start;
		ra->size = ra->ahead_size;
		make_ahead_window(mapping, filp, ra, 0);
recheck:
		/* prev_page shouldn't overrun the ahead window */
		ra->prev_page = min(ra->prev_page,
			ra->ahead_start + ra->ahead_size - 1);
	}

out:
	return ra->prev_page + 1;
}
EXPORT_SYMBOL_GPL(page_cache_readahead);
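
/*
 * Hedged usage sketch (not part of this file): the generic read path calls
 * page_cache_readahead() once per read request before looking pages up in
 * the page cache, and remembers the returned index so it knows when to call
 * it again.  The snippet below is a simplified illustration of such a
 * caller; "ra", "index", "next_index" and "last_index" are assumed local
 * variables, not identifiers from this file.
 */
#if 0
	if (index == next_index)
		next_index = page_cache_readahead(mapping, &ra, filp,
						  index, last_index - index);
#endif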

/*
 * handle_ra_miss() is called when it is known that a page which should have
 * been present in the pagecache (we just did some readahead there) was in fact
 * not found.  This will happen if it was evicted by the VM (readahead
 * thrashing).
 *
 * Turn on the cache miss flag in the RA struct; this will cause the RA code
 * to reduce the RA size on the next read.
 */
void handle_ra_miss(struct address_space *mapping,
		struct file_ra_state *ra, pgoff_t offset)
{
	ra->flags |= RA_FLAG_MISS;
	ra->flags &= ~RA_FLAG_INCACHE;
	ra->cache_hit = 0;
}

/*
 * Given a desired number of PAGE_CACHE_SIZE readahead pages, return a
 * sensible upper limit.
 */
unsigned long max_sane_readahead(unsigned long nr)
{
	unsigned long active;
	unsigned long inactive;
	unsigned long free;

	__get_zone_counts(&active, &inactive, &free, NODE_DATA(numa_node_id()));
	return min(nr, (inactive + free) / 2);
}