author     Trond Myklebust <Trond.Myklebust@netapp.com>  2006-07-25 11:28:19 -0400
committer  Trond Myklebust <Trond.Myklebust@netapp.com>  2006-09-22 23:24:29 -0400
commit     979df72e6f963b42ee484f2eca049c3344da0ba7 (patch)
tree       f3ba48a16203c899dbe57482c1ff7f1c02e0c9ca /fs/nfs/dir.c
parent     cfcea3e8c66c2dcde98d5c2693d4bff50b5cac97 (diff)
NFS: Add an ACCESS cache memory shrinker
A pinned inode may in theory end up filling memory with cached ACCESS calls. This patch ensures that the VM may shrink away the cache in these particular cases.

The shrinker works by iterating through the list of inodes on the global nfs_access_lru_list, removing the least recently used access cache entry from each inode it visits, until nr_to_scan entries have been scanned (or the entire cache is empty).

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
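The shrinker callback itself is defined in fs/nfs/dir.c; its registration happens elsewhere and is not part of the diff below. As a rough illustration only, and assuming the pre-2.6.23 set_shrinker()/remove_shrinker() API that matches this callback's (int nr_to_scan, gfp_t gfp_mask) signature, the hook-up would look something like the following sketch. The handle name and the init/exit functions are hypothetical, not taken from this patch.

        /*
         * Illustrative sketch only -- not taken from this patch.  It shows how
         * a shrinker with this signature could be registered through the
         * pre-2.6.23 set_shrinker()/remove_shrinker() API; the real
         * registration for nfs_access_cache_shrinker lives outside
         * fs/nfs/dir.c.
         */
        #include <linux/mm.h>
        #include <linux/errno.h>

        static struct shrinker *nfs_access_shrinker;    /* hypothetical handle */

        static int __init example_register_access_shrinker(void)
        {
                nfs_access_shrinker = set_shrinker(DEFAULT_SEEKS,
                                                   nfs_access_cache_shrinker);
                return nfs_access_shrinker != NULL ? 0 : -ENOMEM;
        }

        static void example_unregister_access_shrinker(void)
        {
                remove_shrinker(nfs_access_shrinker);
        }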
Diffstat (limited to 'fs/nfs/dir.c')
-rw-r--r--  fs/nfs/dir.c  44
1 file changed, 44 insertions(+), 0 deletions(-)
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index bf4f5ffda703..067d144d141b 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1651,6 +1651,50 @@ static void nfs_access_free_entry(struct nfs_access_entry *entry)
         smp_mb__after_atomic_dec();
 }
 
+int nfs_access_cache_shrinker(int nr_to_scan, gfp_t gfp_mask)
+{
+        LIST_HEAD(head);
+        struct nfs_inode *nfsi;
+        struct nfs_access_entry *cache;
+
+        spin_lock(&nfs_access_lru_lock);
+restart:
+        list_for_each_entry(nfsi, &nfs_access_lru_list, access_cache_inode_lru) {
+                struct inode *inode;
+
+                if (nr_to_scan-- == 0)
+                        break;
+                inode = igrab(&nfsi->vfs_inode);
+                if (inode == NULL)
+                        continue;
+                spin_lock(&inode->i_lock);
+                if (list_empty(&nfsi->access_cache_entry_lru))
+                        goto remove_lru_entry;
+                cache = list_entry(nfsi->access_cache_entry_lru.next,
+                                struct nfs_access_entry, lru);
+                list_move(&cache->lru, &head);
+                rb_erase(&cache->rb_node, &nfsi->access_cache);
+                if (!list_empty(&nfsi->access_cache_entry_lru))
+                        list_move_tail(&nfsi->access_cache_inode_lru,
+                                        &nfs_access_lru_list);
+                else {
+remove_lru_entry:
+                        list_del_init(&nfsi->access_cache_inode_lru);
+                        clear_bit(NFS_INO_ACL_LRU_SET, &nfsi->flags);
+                }
+                spin_unlock(&inode->i_lock);
+                iput(inode);
+                goto restart;
+        }
+        spin_unlock(&nfs_access_lru_lock);
+        while (!list_empty(&head)) {
+                cache = list_entry(head.next, struct nfs_access_entry, lru);
+                list_del(&cache->lru);
+                nfs_access_free_entry(cache);
+        }
+        return (atomic_long_read(&nfs_access_nr_entries) / 100) * sysctl_vfs_cache_pressure;
+}
+
 static void __nfs_access_zap_cache(struct inode *inode)
 {
         struct nfs_inode *nfsi = NFS_I(inode);
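For context, the return value at the end of nfs_access_cache_shrinker() follows the shrinker convention of this kernel era: a call with nr_to_scan == 0 frees nothing and simply reports the cache size, scaled by sysctl_vfs_cache_pressure, so the VM can decide how hard to push. A minimal sketch of how the VM side might drive such a callback follows; the caller name, the batch size, and the loop shape are assumptions for illustration, not taken from this patch or from the real shrink_slab() code.

        /*
         * Hedged sketch, not part of this patch: roughly how the VM is
         * expected to drive a shrinker callback such as
         * nfs_access_cache_shrinker().  The real shrink_slab() bookkeeping
         * and batching differ in detail; the batch size here is illustrative.
         */
        static void example_drive_access_shrinker(gfp_t gfp_mask)
        {
                /* nr_to_scan == 0 is a query: the callback frees nothing and
                 * returns its entry count scaled by sysctl_vfs_cache_pressure. */
                int remaining = nfs_access_cache_shrinker(0, gfp_mask);

                /* Under memory pressure the callback is then asked to scan
                 * (and free) entries, a batch at a time. */
                while (remaining > 0) {
                        int batch = remaining < 128 ? remaining : 128;

                        nfs_access_cache_shrinker(batch, gfp_mask);
                        remaining -= batch;
                }
        }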