about summary refs log tree commit diff stats
path: root/fs/nfs/dir.c
diff options
context:
space:
mode:
authorTrond Myklebust <trond.myklebust@primarydata.com>2014-07-21 13:53:48 -0400
committerTrond Myklebust <trond.myklebust@primarydata.com>2014-08-03 17:03:22 -0400
commit3a505845cd58a7ff3bc75f96572045d8de34e34e (patch)
treea2c09c010384269752ed6f7af37861622ec813cc /fs/nfs/dir.c
parentbae6746ff356478e1a2706072edbfb514072e0ff (diff)
NFS: Enforce an upper limit on the number of cached access calls
This may be used to limit the number of cached credentials building up inside the access cache.

Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
Diffstat (limited to 'fs/nfs/dir.c')
-rw-r--r--	fs/nfs/dir.c	42
1 file changed, 35 insertions(+), 7 deletions(-)
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 4a3d4ef76127..7dc88bb4296c 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -2028,6 +2028,10 @@ static DEFINE_SPINLOCK(nfs_access_lru_lock);
 static LIST_HEAD(nfs_access_lru_list);
 static atomic_long_t nfs_access_nr_entries;
 
+static unsigned long nfs_access_max_cachesize = ULONG_MAX;
+module_param(nfs_access_max_cachesize, ulong, 0644);
+MODULE_PARM_DESC(nfs_access_max_cachesize, "NFS access maximum total cache length");
+
 static void nfs_access_free_entry(struct nfs_access_entry *entry)
 {
 	put_rpccred(entry->cred);
@@ -2048,19 +2052,14 @@ static void nfs_access_free_list(struct list_head *head)
 	}
 }
 
-unsigned long
-nfs_access_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
+static unsigned long
+nfs_do_access_cache_scan(unsigned int nr_to_scan)
 {
 	LIST_HEAD(head);
 	struct nfs_inode *nfsi, *next;
 	struct nfs_access_entry *cache;
-	int nr_to_scan = sc->nr_to_scan;
-	gfp_t gfp_mask = sc->gfp_mask;
 	long freed = 0;
 
-	if ((gfp_mask & GFP_KERNEL) != GFP_KERNEL)
-		return SHRINK_STOP;
-
 	spin_lock(&nfs_access_lru_lock);
 	list_for_each_entry_safe(nfsi, next, &nfs_access_lru_list, access_cache_inode_lru) {
 		struct inode *inode;
@@ -2094,11 +2093,39 @@ remove_lru_entry:
 }
 
 unsigned long
+nfs_access_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
+{
+	int nr_to_scan = sc->nr_to_scan;
+	gfp_t gfp_mask = sc->gfp_mask;
+
+	if ((gfp_mask & GFP_KERNEL) != GFP_KERNEL)
+		return SHRINK_STOP;
+	return nfs_do_access_cache_scan(nr_to_scan);
+}
+
+
+unsigned long
 nfs_access_cache_count(struct shrinker *shrink, struct shrink_control *sc)
 {
 	return vfs_pressure_ratio(atomic_long_read(&nfs_access_nr_entries));
 }
 
+static void
+nfs_access_cache_enforce_limit(void)
+{
+	long nr_entries = atomic_long_read(&nfs_access_nr_entries);
+	unsigned long diff;
+	unsigned int nr_to_scan;
+
+	if (nr_entries < 0 || nr_entries <= nfs_access_max_cachesize)
+		return;
+	nr_to_scan = 100;
+	diff = nr_entries - nfs_access_max_cachesize;
+	if (diff < nr_to_scan)
+		nr_to_scan = diff;
+	nfs_do_access_cache_scan(nr_to_scan);
+}
+
 static void __nfs_access_zap_cache(struct nfs_inode *nfsi, struct list_head *head)
 {
 	struct rb_root *root_node = &nfsi->access_cache;
@@ -2244,6 +2271,7 @@ void nfs_access_add_cache(struct inode *inode, struct nfs_access_entry *set)
 			&nfs_access_lru_list);
 		spin_unlock(&nfs_access_lru_lock);
 	}
+	nfs_access_cache_enforce_limit();
}
EXPORT_SYMBOL_GPL(nfs_access_add_cache);
 