author    Fengguang Wu <wfg@mail.ustc.edu.cn>  2007-07-19 01:48:02 -0700
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-07-19 10:04:44 -0700
commit    3ea89ee86a82e9fbde37018d9b9e92a552e5fd13 (patch)
tree      5fef7216ef39d66684754e4aadf368ce9adfd72e /mm
parent    122a21d11cbfda6d1e33cbc8ae9e4c4ee2f1886e (diff)
readahead: convert filemap invocations
Convert filemap reads to use on-demand readahead.

The new call scheme is to:

- call readahead on a non-cached page
- call readahead on a look-ahead page
- update prev_index when finished with the read request

Signed-off-by: Fengguang Wu <wfg@mail.ustc.edu.cn>
Cc: Steven Pratt <slpratt@austin.ibm.com>
Cc: Ram Pai <linuxram@us.ibm.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
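Distilled into straight-line code, the scheme reads roughly as follows. This is a condensed sketch assembled from the hunks below, not the literal kernel loop; locking, error handling, and the copy-out step are omitted, and NULL is written out where the patch passes the then-NULL page variable:

    page = find_get_page(mapping, index);
    if (!page) {
            /* (1) non-cached page: let on-demand readahead populate the cache */
            page_cache_readahead_ondemand(mapping, &ra, filp, NULL,
                            index, last_index - index);
            page = find_get_page(mapping, index);
            if (!page)
                    goto no_cached_page;    /* still absent: fall back to a synchronous read */
    }
    if (PageReadahead(page)) {
            /* (2) look-ahead page: start the next readahead window early,
             * before the reader stalls on uncached pages */
            page_cache_readahead_ondemand(mapping, &ra, filp, page,
                            index, last_index - index);
    }
    /* ... copy the page to the read request ... */
    /* (3) request finished: record where it ended for the next read */
    _ra->prev_index = prev_index;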
Diffstat (limited to 'mm')
-rw-r--r--   mm/filemap.c   51
1 file changed, 31 insertions(+), 20 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 4fd9e3f..5eb0a6b 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -891,15 +891,20 @@ void do_generic_mapping_read(struct address_space *mapping,
 		unsigned long nr, ret;
 
 		cond_resched();
-		if (index == next_index)
-			next_index = page_cache_readahead(mapping, &ra, filp,
-					index, last_index - index);
-
 find_page:
 		page = find_get_page(mapping, index);
-		if (unlikely(page == NULL)) {
-			handle_ra_miss(mapping, &ra, index);
-			goto no_cached_page;
+		if (!page) {
+			page_cache_readahead_ondemand(mapping,
+					&ra, filp, page,
+					index, last_index - index);
+			page = find_get_page(mapping, index);
+			if (unlikely(page == NULL))
+				goto no_cached_page;
+		}
+		if (PageReadahead(page)) {
+			page_cache_readahead_ondemand(mapping,
+					&ra, filp, page,
+					index, last_index - index);
 		}
 		if (!PageUptodate(page))
 			goto page_not_up_to_date;
@@ -1051,6 +1056,7 @@ no_cached_page:
 
 out:
 	*_ra = ra;
+	_ra->prev_index = prev_index;
 
 	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
 	if (cached_page)
@@ -1333,26 +1339,30 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		goto no_cached_page;
 
 	/*
-	 * The readahead code wants to be told about each and every page
-	 * so it can build and shrink its windows appropriately
-	 *
-	 * For sequential accesses, we use the generic readahead logic.
-	 */
-	if (VM_SequentialReadHint(vma))
-		page_cache_readahead(mapping, ra, file, vmf->pgoff, 1);
-
-	/*
 	 * Do we have something in the page cache already?
 	 */
 retry_find:
 	page = find_lock_page(mapping, vmf->pgoff);
+	/*
+	 * For sequential accesses, we use the generic readahead logic.
+	 */
+	if (VM_SequentialReadHint(vma)) {
+		if (!page) {
+			page_cache_readahead_ondemand(mapping, ra, file, page,
+						      vmf->pgoff, 1);
+			page = find_lock_page(mapping, vmf->pgoff);
+			if (!page)
+				goto no_cached_page;
+		}
+		if (PageReadahead(page)) {
+			page_cache_readahead_ondemand(mapping, ra, file, page,
+						      vmf->pgoff, 1);
+		}
+	}
+
 	if (!page) {
 		unsigned long ra_pages;
 
-		if (VM_SequentialReadHint(vma)) {
-			handle_ra_miss(mapping, ra, vmf->pgoff);
-			goto no_cached_page;
-		}
 		ra->mmap_miss++;
 
 		/*
@@ -1405,6 +1415,7 @@ retry_find:
 	 * Found the page and have a reference on it.
 	 */
 	mark_page_accessed(page);
+	ra->prev_index = page->index;
 	vmf->page = page;
 	return ret | VM_FAULT_LOCKED;
 
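The fault-path hunks only engage readahead when the VMA carries VM_SequentialReadHint. Nothing in this patch sets that hint; as general kernel behavior (outside this diff) it comes from madvise(MADV_SEQUENTIAL), which sets VM_SEQ_READ on the mapping. A minimal userspace sketch of a reader that opts in, assuming 4 KiB pages:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <sys/stat.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
            struct stat st;
            unsigned long sum = 0;
            char *buf;
            off_t i;
            int fd;

            if (argc < 2)
                    return 1;
            fd = open(argv[1], O_RDONLY);
            if (fd < 0 || fstat(fd, &st) < 0)
                    return 1;
            buf = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
            if (buf == MAP_FAILED)
                    return 1;
            /* Sets VM_SEQ_READ on the vma, so VM_SequentialReadHint(vma) is
             * true and filemap_fault() takes the readahead path above. */
            madvise(buf, st.st_size, MADV_SEQUENTIAL);
            for (i = 0; i < st.st_size; i += 4096)  /* touch each page in order */
                    sum += buf[i];
            printf("checksum: %lu\n", sum);
            munmap(buf, st.st_size);
            close(fd);
            return 0;
    }

Mappings without the hint skip both readahead calls and keep only the mmap_miss and prev_index bookkeeping, so random-access faults pay no readahead cost.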