-rw-r--r--  fs/ext3/dir.c       |  2
-rw-r--r--  fs/ext4/dir.c       |  2
-rw-r--r--  fs/splice.c         |  2
-rw-r--r--  include/linux/fs.h  |  3
-rw-r--r--  mm/filemap.c        | 13
-rw-r--r--  mm/readahead.c      | 15
6 files changed, 19 insertions(+), 18 deletions(-)
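
The hunks below retire the pair of readahead bookkeeping fields in struct file_ra_state, prev_index (a page cache index) and prev_offset (the byte offset where the last read ended inside that page), in favour of a single byte-granular prev_pos. Every file touched applies the same page/byte conversion. A minimal sketch of that conversion, written as stand-alone user-space C rather than kernel code, with PAGE_SHIFT and PAGE_SIZE standing in for the kernel's PAGE_CACHE_SHIFT and PAGE_CACHE_SIZE:

/*
 * Illustration only, not part of the patch: packing a page index and an
 * in-page offset into one byte position and splitting it back apart,
 * the same shape as the prev_pos reads and writes in the hunks below.
 */
#include <assert.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long index = 3;	/* page index */
	unsigned long offset = 100;	/* byte offset inside that page */
	long long prev_pos;

	/* pack, as in "_ra->prev_pos <<= ...; _ra->prev_pos |= ..." */
	prev_pos = ((long long)index << PAGE_SHIFT) | offset;

	/* unpack, as in "ra.prev_pos >> ..." and "ra.prev_pos & (...-1)" */
	assert(prev_pos >> PAGE_SHIFT == (long long)index);
	assert((prev_pos & (PAGE_SIZE - 1)) == offset);
	return 0;
}
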
diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c
index c00723a99f44..c2c3491b18cf 100644
--- a/fs/ext3/dir.c
+++ b/fs/ext3/dir.c
@@ -143,7 +143,7 @@ static int ext3_readdir(struct file * filp,
 					sb->s_bdev->bd_inode->i_mapping,
 					&filp->f_ra, filp,
 					index, 1);
-			filp->f_ra.prev_index = index;
+			filp->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;
 			bh = ext3_bread(NULL, inode, blk, 0, &err);
 		}
 
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index 3ab01c04e00c..e11890acfa21 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -142,7 +142,7 @@ static int ext4_readdir(struct file * filp,
 					sb->s_bdev->bd_inode->i_mapping,
 					&filp->f_ra, filp,
 					index, 1);
-			filp->f_ra.prev_index = index;
+			filp->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;
 			bh = ext4_bread(NULL, inode, blk, 0, &err);
 		}
 
diff --git a/fs/splice.c b/fs/splice.c
index e95a36228863..2df6be43c667 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -447,7 +447,7 @@ fill_it:
 	 */
 	while (page_nr < nr_pages)
 		page_cache_release(pages[page_nr++]);
-	in->f_ra.prev_index = index;
+	in->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;
 
 	if (spd.nr_pages)
 		return splice_to_pipe(pipe, &spd);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 8250811081ff..500ffc0e4ac7 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -704,8 +704,7 @@ struct file_ra_state {
 
 	unsigned int ra_pages;		/* Maximum readahead window */
 	int mmap_miss;			/* Cache miss stat for mmap accesses */
-	unsigned long prev_index;	/* Cache last read() position */
-	unsigned int prev_offset;	/* Offset where last read() ended in a page */
+	loff_t prev_pos;		/* Cache last read() position */
 };
 
 /*
diff --git a/mm/filemap.c b/mm/filemap.c
index 5dc18d76e703..bbcca456d8a6 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -879,8 +879,8 @@ void do_generic_mapping_read(struct address_space *mapping,
 	cached_page = NULL;
 	index = *ppos >> PAGE_CACHE_SHIFT;
 	next_index = index;
-	prev_index = ra.prev_index;
-	prev_offset = ra.prev_offset;
+	prev_index = ra.prev_pos >> PAGE_CACHE_SHIFT;
+	prev_offset = ra.prev_pos & (PAGE_CACHE_SIZE-1);
 	last_index = (*ppos + desc->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
 	offset = *ppos & ~PAGE_CACHE_MASK;
 
@@ -966,7 +966,6 @@ page_ok:
 		index += offset >> PAGE_CACHE_SHIFT;
 		offset &= ~PAGE_CACHE_MASK;
 		prev_offset = offset;
-		ra.prev_offset = offset;
 
 		page_cache_release(page);
 		if (ret == nr && desc->count)
@@ -1056,9 +1055,11 @@ no_cached_page:
 
 out:
 	*_ra = ra;
-	_ra->prev_index = prev_index;
+	_ra->prev_pos = prev_index;
+	_ra->prev_pos <<= PAGE_CACHE_SHIFT;
+	_ra->prev_pos |= prev_offset;
 
 	*ppos = ((loff_t)index << PAGE_CACHE_SHIFT) + offset;
 	if (cached_page)
 		page_cache_release(cached_page);
 	if (filp)
@@ -1396,7 +1397,7 @@ retry_find:
 	 * Found the page and have a reference on it.
 	 */
 	mark_page_accessed(page);
-	ra->prev_index = page->index;
+	ra->prev_pos = (loff_t)page->index << PAGE_CACHE_SHIFT;
 	vmf->page = page;
 	return ret | VM_FAULT_LOCKED;
 
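
One detail in the out: hunk of do_generic_mapping_read above is worth a note: prev_pos is rebuilt in three statements, assigning the page index into the 64-bit prev_pos before shifting it, rather than shifting the index in place. Presumably this avoids truncation on 32-bit builds, where the page index is only 32 bits wide. A small illustrative sketch (user-space C, not part of the patch; PAGE_SHIFT stands in for PAGE_CACHE_SHIFT):

/*
 * Illustration only: widening before the shift keeps byte positions
 * past 4 GiB intact even where unsigned long is 32 bits; shifting the
 * 32-bit index first would overflow and wrap to a small value.
 */
#include <stdio.h>

#define PAGE_SHIFT	12

int main(void)
{
	unsigned long index = 0x200000UL;	/* page 2^21, i.e. byte 8 GiB */
	unsigned long offset = 5;
	long long pos;

	pos = index;		/* widen first ... */
	pos <<= PAGE_SHIFT;	/* ... then shift in 64 bits */
	pos |= offset;
	printf("%lld\n", pos);	/* 8589934597 */
	return 0;
}
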
diff --git a/mm/readahead.c b/mm/readahead.c
index d2504877b269..4a58befbde4a 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -46,7 +46,7 @@ void
 file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
 {
 	ra->ra_pages = mapping->backing_dev_info->ra_pages;
-	ra->prev_index = -1;
+	ra->prev_pos = -1;
 }
 EXPORT_SYMBOL_GPL(file_ra_state_init);
 
@@ -327,7 +327,7 @@ static unsigned long get_next_ra_size(struct file_ra_state *ra,
  * indicator. The flag won't be set on already cached pages, to avoid the
  * readahead-for-nothing fuss, saving pointless page cache lookups.
  *
- * prev_index tracks the last visited page in the _previous_ read request.
+ * prev_pos tracks the last visited byte in the _previous_ read request.
  * It should be maintained by the caller, and will be used for detecting
  * small random reads. Note that the readahead algorithm checks loosely
  * for sequential patterns. Hence interleaved reads might be served as
@@ -351,11 +351,9 @@ ondemand_readahead(struct address_space *mapping,
 		   bool hit_readahead_marker, pgoff_t offset,
 		   unsigned long req_size)
 {
-	int max;	/* max readahead pages */
-	int sequential;
-
-	max = ra->ra_pages;
-	sequential = (offset - ra->prev_index <= 1UL) || (req_size > max);
+	int max = ra->ra_pages;	/* max readahead pages */
+	pgoff_t prev_offset;
+	int sequential;
 
 	/*
 	 * It's the expected callback offset, assume sequential access.
@@ -369,6 +367,9 @@ ondemand_readahead(struct address_space *mapping,
 		goto readit;
 	}
 
+	prev_offset = ra->prev_pos >> PAGE_CACHE_SHIFT;
+	sequential = offset - prev_offset <= 1UL || req_size > max;
+
 	/*
 	 * Standalone, small read.
 	 * Read as is, and do not pollute the readahead state.
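
The two ondemand_readahead() hunks above derive the previous page index from prev_pos instead of reading prev_index directly, and move the sequentiality test below the expected-offset check, presumably so that fast path can skip the extra arithmetic. A stand-alone sketch of the test itself (illustrative user-space C, not the kernel function; PAGE_SHIFT stands in for PAGE_CACHE_SHIFT):

/*
 * Illustration only: a read counts as sequential when it starts on the
 * same page the previous read ended on, or on the page right after it,
 * or when the request itself is larger than the readahead window.
 * Because the offsets are unsigned, backward jumps wrap around and are
 * treated as random.
 */
#include <stdio.h>

#define PAGE_SHIFT	12

static int looks_sequential(long long prev_pos, unsigned long offset,
			    unsigned long req_size, unsigned long max)
{
	unsigned long prev_offset = prev_pos >> PAGE_SHIFT;

	return offset - prev_offset <= 1UL || req_size > max;
}

int main(void)
{
	long long prev_pos = 8191;	/* previous read ended inside page 1 */

	printf("%d\n", looks_sequential(prev_pos, 2, 1, 32));	/* 1: next page */
	printf("%d\n", looks_sequential(prev_pos, 9, 1, 32));	/* 0: random jump */
	printf("%d\n", looks_sequential(prev_pos, 9, 64, 32));	/* 1: large request */
	return 0;
}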