author		Wu Fengguang <fengguang.wu@intel.com>	2011-05-24 20:12:30 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-05-25 11:39:27 -0400
commit		2cbea1d3ab11946885d37a2461072ee4d687cb4e (patch)
tree		aab301cb3da1e633bbd7df2acc4b4c2e4f777b35 /mm/filemap.c
parent		207d04baa3591a354711e863dd90087fc75873b3 (diff)
readahead: trigger mmap sequential readahead on PG_readahead
Previously, mmap sequential readahead was triggered by updating ra->prev_pos on each page fault and comparing it with the current page offset. That costs dirtying a cache line on every _minor_ page fault.

In the mosbench exim benchmark, which does multi-threaded page faults on a shared struct file, the ra->mmap_miss and ra->prev_pos updates were found to cause excessive cache line bouncing on tmpfs, which actually disabled readahead completely (shmem_backing_dev_info.ra_pages == 0).

So remove the ra->prev_pos recording, and instead tag PG_readahead to trigger the possible sequential readahead. This is not only simpler, it also works more reliably and reduces cache line bouncing on concurrent page faults against a shared struct file.

Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
Tested-by: Tim Chen <tim.c.chen@intel.com>
Reported-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/filemap.c')
-rw-r--r--	mm/filemap.c	6
1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index e5131392d32e..68e782b3d3de 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1559,8 +1559,7 @@ static void do_sync_mmap_readahead(struct vm_area_struct *vma,
 	if (!ra->ra_pages)
 		return;
 
-	if (VM_SequentialReadHint(vma) ||
-			offset - 1 == (ra->prev_pos >> PAGE_CACHE_SHIFT)) {
+	if (VM_SequentialReadHint(vma)) {
 		page_cache_sync_readahead(mapping, ra, file, offset,
 					  ra->ra_pages);
 		return;
@@ -1583,7 +1582,7 @@ static void do_sync_mmap_readahead(struct vm_area_struct *vma,
 	ra_pages = max_sane_readahead(ra->ra_pages);
 	ra->start = max_t(long, 0, offset - ra_pages / 2);
 	ra->size = ra_pages;
-	ra->async_size = 0;
+	ra->async_size = ra_pages / 4;
 	ra_submit(ra, mapping, file);
 }
 
@@ -1689,7 +1688,6 @@ retry_find:
 		return VM_FAULT_SIGBUS;
 	}
 
-	ra->prev_pos = (loff_t)offset << PAGE_CACHE_SHIFT;
 	vmf->page = page;
 	return ret | VM_FAULT_LOCKED;
 
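For context on how the PG_readahead tag produced by the new ra->async_size = ra_pages / 4 setting is later consumed, below is a minimal sketch of the async counterpart of do_sync_mmap_readahead(), modeled on do_async_mmap_readahead() in mm/filemap.c of roughly this era. It is not part of this commit, and the exact body is a simplified reconstruction, not authoritative.

/*
 * Sketch only (assumed, not taken from this patch): the async side of
 * mmap readahead.  ra_submit() above marks the page ra->async_size pages
 * before the end of the readahead window with PG_readahead; when a later
 * minor fault hits that tagged page, the next chunk of sequential
 * readahead is pipelined from here.
 */
static void do_async_mmap_readahead(struct vm_area_struct *vma,
				    struct file_ra_state *ra,
				    struct file *file,
				    struct page *page,
				    pgoff_t offset)
{
	struct address_space *mapping = file->f_mapping;

	/* Explicitly random access: no readahead at all */
	if (VM_RandomReadHint(vma))
		return;
	if (ra->mmap_miss > 0)
		ra->mmap_miss--;
	/* PG_readahead set by a previous window submission: extend it */
	if (PageReadahead(page))
		page_cache_async_readahead(mapping, ra, file,
					   page, offset, ra->ra_pages);
}

Because the trigger lives in the page flag rather than in ra->prev_pos, concurrent faulting threads sharing one struct file no longer dirty the file_ra_state cache line on every minor fault.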