author		Fengguang Wu <wfg@mail.ustc.edu.cn>	2007-10-16 04:24:35 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-16 12:42:52 -0400
commit		7ff81078d8b9f3d05a27b7bd3786ffb1ef1b0d1f (patch)
tree		ac73cf0c8325783a28c4d16c783f6fd96d17be7c
parent		6b10c6c9fbfe754e8482efb8c8b84f8e40c0f2eb (diff)
readahead: remove the local copy of ra in do_generic_mapping_read()
The local copy of ra in do_generic_mapping_read() can now go away.

It predates readahead(req_size), from a time when the readahead code was
called on *every* single page.  Back then, a local copy was needed to
reduce the chance of the readahead state being overwritten by a
concurrent reader.  More details in:

	Linux: Random File I/O Regressions In 2.6
	<http://kerneltrap.org/node/3039>

Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Fengguang Wu <wfg@mail.ustc.edu.cn>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
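For illustration only, a minimal userspace sketch (using a hypothetical toy
ra_state type, not the kernel's struct file_ra_state) of the pattern being
removed versus the one replacing it:

	/* Hypothetical toy stand-in for the kernel's readahead state. */
	struct ra_state {
		unsigned long start;	/* start of current readahead window */
		unsigned long size;	/* size of current readahead window */
	};

	/*
	 * Before: readahead ran on every page, so the shared per-file
	 * state was snapshotted into a local to limit the damage a
	 * concurrent reader could do, then written back once at the end.
	 */
	static void read_request_old(struct ra_state *_ra)
	{
		struct ra_state ra = *_ra;	/* private working copy */

		/* ... per-page readahead decisions update the local 'ra' ... */
		ra.start += ra.size;

		*_ra = ra;			/* publish the result once */
	}

	/*
	 * After: readahead is decided in batches for the whole request
	 * (readahead(req_size)), so the state can safely be used through
	 * the pointer in place.
	 */
	static void read_request_new(struct ra_state *ra)
	{
		/* ... one batched decision per request updates *ra ... */
		ra->start += ra->size;
	}

A concurrent reader can still race on *ra, but with one update per request
instead of one per page the window is small enough that the copy no longer
pays for itself.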
-rw-r--r--	mm/filemap.c	20
1 file changed, 9 insertions(+), 11 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index bbcca456d8a6..3c97bdc74a85 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -859,7 +859,7 @@ static void shrink_readahead_size_eio(struct file *filp,
  * It may be NULL.
  */
 void do_generic_mapping_read(struct address_space *mapping,
-			     struct file_ra_state *_ra,
+			     struct file_ra_state *ra,
 			     struct file *filp,
 			     loff_t *ppos,
 			     read_descriptor_t *desc,
@@ -874,13 +874,12 @@ void do_generic_mapping_read(struct address_space *mapping,
 	unsigned int prev_offset;
 	struct page *cached_page;
 	int error;
-	struct file_ra_state ra = *_ra;
 
 	cached_page = NULL;
 	index = *ppos >> PAGE_CACHE_SHIFT;
 	next_index = index;
-	prev_index = ra.prev_pos >> PAGE_CACHE_SHIFT;
-	prev_offset = ra.prev_pos & (PAGE_CACHE_SIZE-1);
+	prev_index = ra->prev_pos >> PAGE_CACHE_SHIFT;
+	prev_offset = ra->prev_pos & (PAGE_CACHE_SIZE-1);
 	last_index = (*ppos + desc->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
 	offset = *ppos & ~PAGE_CACHE_MASK;
 
@@ -895,7 +894,7 @@ find_page:
 		page = find_get_page(mapping, index);
 		if (!page) {
 			page_cache_sync_readahead(mapping,
-					&ra, filp,
+					ra, filp,
 					index, last_index - index);
 			page = find_get_page(mapping, index);
 			if (unlikely(page == NULL))
@@ -903,7 +902,7 @@ find_page:
 		}
 		if (PageReadahead(page)) {
 			page_cache_async_readahead(mapping,
-					&ra, filp, page,
+					ra, filp, page,
 					index, last_index - index);
 		}
 		if (!PageUptodate(page))
@@ -1014,7 +1013,7 @@ readpage:
 			}
 			unlock_page(page);
 			error = -EIO;
-			shrink_readahead_size_eio(filp, &ra);
+			shrink_readahead_size_eio(filp, ra);
 			goto readpage_error;
 		}
 		unlock_page(page);
@@ -1054,10 +1053,9 @@ no_cached_page:
 	}
 
 out:
-	*_ra = ra;
-	_ra->prev_pos = prev_index;
-	_ra->prev_pos <<= PAGE_CACHE_SHIFT;
-	_ra->prev_pos |= prev_offset;
+	ra->prev_pos = prev_index;
+	ra->prev_pos <<= PAGE_CACHE_SHIFT;
+	ra->prev_pos |= prev_offset;
 
 	*ppos = ((loff_t)index << PAGE_CACHE_SHIFT) + offset;
 	if (cached_page)
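For context, a sketch of how a caller now hands the per-file readahead state
in directly, modeled on the 2.6.23-era do_generic_file_read() inline helper
in include/linux/fs.h (treat the exact signature here as an assumption):

	static inline void do_generic_file_read(struct file *filp, loff_t *ppos,
						read_descriptor_t *desc,
						read_actor_t actor)
	{
		do_generic_mapping_read(filp->f_mapping,
					&filp->f_ra,	/* used in place now */
					filp, ppos, desc, actor);
	}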