Diffstat (limited to 'mm/filemap.c')
-rw-r--r--	mm/filemap.c	51
1 file changed, 31 insertions, 20 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 4fd9e3f0f48a..5eb0a6b9d607 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -891,15 +891,20 @@ void do_generic_mapping_read(struct address_space *mapping,
 		unsigned long nr, ret;
 
 		cond_resched();
-		if (index == next_index)
-			next_index = page_cache_readahead(mapping, &ra, filp,
-					index, last_index - index);
-
 find_page:
 		page = find_get_page(mapping, index);
-		if (unlikely(page == NULL)) {
-			handle_ra_miss(mapping, &ra, index);
-			goto no_cached_page;
+		if (!page) {
+			page_cache_readahead_ondemand(mapping,
+					&ra, filp, page,
+					index, last_index - index);
+			page = find_get_page(mapping, index);
+			if (unlikely(page == NULL))
+				goto no_cached_page;
+		}
+		if (PageReadahead(page)) {
+			page_cache_readahead_ondemand(mapping,
+					&ra, filp, page,
+					index, last_index - index);
 		}
 		if (!PageUptodate(page))
 			goto page_not_up_to_date;
@@ -1051,6 +1056,7 @@ no_cached_page:
 
 out:
 	*_ra = ra;
+	_ra->prev_index = prev_index;
 
 	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
 	if (cached_page)
@@ -1333,26 +1339,30 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		goto no_cached_page;
 
 	/*
-	 * The readahead code wants to be told about each and every page
-	 * so it can build and shrink its windows appropriately
-	 *
-	 * For sequential accesses, we use the generic readahead logic.
-	 */
-	if (VM_SequentialReadHint(vma))
-		page_cache_readahead(mapping, ra, file, vmf->pgoff, 1);
-
-	/*
 	 * Do we have something in the page cache already?
 	 */
 retry_find:
 	page = find_lock_page(mapping, vmf->pgoff);
+	/*
+	 * For sequential accesses, we use the generic readahead logic.
+	 */
+	if (VM_SequentialReadHint(vma)) {
+		if (!page) {
+			page_cache_readahead_ondemand(mapping, ra, file, page,
+							   vmf->pgoff, 1);
+			page = find_lock_page(mapping, vmf->pgoff);
+			if (!page)
+				goto no_cached_page;
+		}
+		if (PageReadahead(page)) {
+			page_cache_readahead_ondemand(mapping, ra, file, page,
+							   vmf->pgoff, 1);
+		}
+	}
+
 	if (!page) {
 		unsigned long ra_pages;
 
-		if (VM_SequentialReadHint(vma)) {
-			handle_ra_miss(mapping, ra, vmf->pgoff);
-			goto no_cached_page;
-		}
 		ra->mmap_miss++;
 
 		/*
@@ -1405,6 +1415,7 @@ retry_find:
 	 * Found the page and have a reference on it.
 	 */
 	mark_page_accessed(page);
+	ra->prev_index = page->index;
 	vmf->page = page;
 	return ret | VM_FAULT_LOCKED;
 