author    | Trond Myklebust <Trond.Myklebust@netapp.com> | 2008-07-07 13:26:10 -0400
committer | Trond Myklebust <Trond.Myklebust@netapp.com> | 2008-07-08 15:22:40 -0400
commit    | 2aac05a91971fbd1bf6cbed78b8731eb7454b9b7
tree      | 1bbbb13b7e4353bc729363582eef95925c523b7b
parent    | 9c0fc4e28b57c5a6da7b58d60f71476c64d457a6
NFS: Fix readdir cache invalidation
invalidate_inode_pages2_range() takes page offset arguments, not byte
ranges.
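For reference, the invalidation helper's prototype (as defined in mm/truncate.c in this era; reproduced here as a sketch for illustration) takes page indices, not byte offsets, for both bounds:

	int invalidate_inode_pages2_range(struct address_space *mapping,
					  pgoff_t start, pgoff_t end);

The old call site therefore passed PAGE_CACHE_SIZE (4096 bytes on most configurations) where a page index was expected, so invalidation started at page index 4096, i.e. byte offset 16 MiB, rather than at the page following page 0.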
Also, individual pages might get evicted by VM pressure, in which case we want to re-read not only the evicted page but all subsequent pages too (in case the server returns more or less data per page, so that the alignment of the next entry changes). We should therefore remove the condition that this is only done when page->index == 0.
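Concretely, the call site after this patch reads roughly as below (a sketch of the hunk that follows, with explanatory comments added; the surrounding nfs_readdir_filler() body is elided):

	/*
	 * Whichever readdir page was just (re)filled, drop every cached page
	 * after it: page->index + 1 is the first page index to invalidate,
	 * and -1 (the largest pgoff_t) means "through the end of the mapping".
	 */
	if (invalidate_inode_pages2_range(inode->i_mapping, page->index + 1, -1) < 0) {
		/* Should never happen */
		nfs_zap_mapping(inode, inode->i_mapping);
	}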
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
-rw-r--r-- | fs/nfs/dir.c | 2
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 58d43daec084..982a2064fe4c 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -204,7 +204,7 @@ int nfs_readdir_filler(nfs_readdir_descriptor_t *desc, struct page *page)
 	 * Note: assumes we have exclusive access to this mapping either
 	 * through inode->i_mutex or some other mechanism.
 	 */
-	if (page->index == 0 && invalidate_inode_pages2_range(inode->i_mapping, PAGE_CACHE_SIZE, -1) < 0) {
+	if (invalidate_inode_pages2_range(inode->i_mapping, page->index + 1, -1) < 0) {
 		/* Should never happen */
 		nfs_zap_mapping(inode, inode->i_mapping);
 	}