author	Wu Fengguang <fengguang.wu@intel.com>	2009-06-16 18:31:19 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-06-16 22:47:28 -0400
commit	1ebf26a9b338534def47f307c6c8694b6dfc0a79 (patch)
tree	26829a8cc727388c0da8eea6de38d7ba0eda2a47 /include
parent	bb1f17b0372de93758653ca3454bc0df18dc2e5c (diff)
readahead: make mmap_miss an unsigned int
This makes the performance impact of a possible mmap_miss wraparound
temporary and tolerable, i.e. MMAP_LOTSAMISS=100 extra readarounds.
Otherwise, if mmap_miss ever wraps around to a negative value, it takes
INT_MAX cache misses to bring it back to its normal state. During that
time, mmap readaround stays _enabled_ for whatever wild random workload
is running. That is an almost permanent performance impact.
Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
Cc: Ying Han <yinghan@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
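
The argument above is pure integer arithmetic, so a small userspace sketch can make it concrete. The program below is not kernel code: it only assumes the cutoff the commit message describes (readaround is skipped once mmap_miss exceeds MMAP_LOTSAMISS) and compares how many further misses a freshly wrapped signed counter versus a freshly wrapped unsigned counter needs before it crosses that threshold again. The starting values are assumptions chosen to model the moment just after wraparound.

/*
 * Illustrative userspace sketch only -- not kernel code.
 * MMAP_LOTSAMISS mirrors the constant named in the commit message;
 * the "just wrapped" starting values are assumptions for illustration.
 * (In the kernel, signed wraparound behaves as two's complement because
 * it is built with -fno-strict-overflow / -fwrapv semantics.)
 */
#include <limits.h>
#include <stdio.h>

#define MMAP_LOTSAMISS	100

int main(void)
{
	/* A signed counter that has just wrapped around lands at INT_MIN. */
	int signed_after_wrap = INT_MIN;
	/* An unsigned counter that has just wrapped around lands at 0. */
	unsigned int unsigned_after_wrap = 0u;

	/*
	 * Misses needed before "mmap_miss > MMAP_LOTSAMISS" holds again,
	 * i.e. before readaround is switched back off for a workload that
	 * keeps missing the page cache.
	 */
	long long signed_recovery =
		(long long)MMAP_LOTSAMISS + 1 - signed_after_wrap;
	unsigned int unsigned_recovery =
		MMAP_LOTSAMISS + 1 - unsigned_after_wrap;

	printf("signed int:   %lld misses to recover (~INT_MAX)\n",
	       signed_recovery);
	printf("unsigned int: %u misses to recover (MMAP_LOTSAMISS + 1)\n",
	       unsigned_recovery);
	return 0;
}

On a 32-bit int this prints roughly 2.1 billion misses for the signed case and 101 for the unsigned case, which is the "almost permanent" versus "temporary and tolerable" distinction the patch is after.
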
Diffstat (limited to 'include')
-rw-r--r--	include/linux/fs.h	2
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/include/linux/fs.h b/include/linux/fs.h
index ede84fa7da5d..8146e0264ef9 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -879,7 +879,7 @@ struct file_ra_state {
 					   there are only # of pages ahead */
 
 	unsigned int ra_pages;		/* Maximum readahead window */
-	int mmap_miss;			/* Cache miss stat for mmap accesses */
+	unsigned int mmap_miss;		/* Cache miss stat for mmap accesses */
 	loff_t prev_pos;		/* Cache last read() position */
 };
 