aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorGlauber Costa <glommer@openvz.org>2013-08-27 20:17:53 -0400
committerAl Viro <viro@zeniv.linux.org.uk>2013-09-10 18:56:29 -0400
commit55f841ce9395a72c6285fbcc4c403c0c786e1c74 (patch)
treed64933e4976ca3fe5a83e619ba6bdc96c5690438
parent3942c07ccf98e66b8893f396dca98f5b076f905f (diff)
super: fix calculation of shrinkable objects for small numbers
The sysctl knob sysctl_vfs_cache_pressure is used to determine which percentage of the shrinkable objects in our cache we should actively try to shrink. It works great in situations in which we have many objects (at least more than 100), because the approximation errors will be negligible. But if this is not the case, especially when total_objects < 100, we may end up concluding that we have no objects at all (total / 100 = 0, if total < 100). This is certainly not the biggest killer in the world, but may matter in very low kernel memory situations. Signed-off-by: Glauber Costa <glommer@openvz.org> Reviewed-by: Carlos Maiolino <cmaiolino@redhat.com> Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Acked-by: Mel Gorman <mgorman@suse.de> Cc: Dave Chinner <david@fromorbit.com> Cc: Al Viro <viro@zeniv.linux.org.uk> Cc: "Theodore Ts'o" <tytso@mit.edu> Cc: Adrian Hunter <adrian.hunter@intel.com> Cc: Al Viro <viro@zeniv.linux.org.uk> Cc: Artem Bityutskiy <artem.bityutskiy@linux.intel.com> Cc: Arve Hjønnevåg <arve@android.com> Cc: Carlos Maiolino <cmaiolino@redhat.com> Cc: Christoph Hellwig <hch@lst.de> Cc: Chuck Lever <chuck.lever@oracle.com> Cc: Daniel Vetter <daniel.vetter@ffwll.ch> Cc: David Rientjes <rientjes@google.com> Cc: Gleb Natapov <gleb@redhat.com> Cc: Greg Thelen <gthelen@google.com> Cc: J. Bruce Fields <bfields@redhat.com> Cc: Jan Kara <jack@suse.cz> Cc: Jerome Glisse <jglisse@redhat.com> Cc: John Stultz <john.stultz@linaro.org> Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Cc: Kent Overstreet <koverstreet@google.com> Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com> Cc: Marcelo Tosatti <mtosatti@redhat.com> Cc: Mel Gorman <mgorman@suse.de> Cc: Steven Whitehouse <swhiteho@redhat.com> Cc: Thomas Hellstrom <thellstrom@vmware.com> Cc: Trond Myklebust <Trond.Myklebust@netapp.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
-rw-r--r--fs/gfs2/glock.c2
-rw-r--r--fs/gfs2/quota.c2
-rw-r--r--fs/mbcache.c2
-rw-r--r--fs/nfs/dir.c2
-rw-r--r--fs/quota/dquot.c5
-rw-r--r--fs/super.c14
-rw-r--r--fs/xfs/xfs_qm.c2
-rw-r--r--include/linux/dcache.h4
8 files changed, 18 insertions(+), 15 deletions(-)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 722329cac98f..b782bb56085d 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1462,7 +1462,7 @@ static int gfs2_shrink_glock_memory(struct shrinker *shrink,
1462 gfs2_scan_glock_lru(sc->nr_to_scan); 1462 gfs2_scan_glock_lru(sc->nr_to_scan);
1463 } 1463 }
1464 1464
1465 return (atomic_read(&lru_count) / 100) * sysctl_vfs_cache_pressure; 1465 return vfs_pressure_ratio(atomic_read(&lru_count));
1466} 1466}
1467 1467
1468static struct shrinker glock_shrinker = { 1468static struct shrinker glock_shrinker = {
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 3768c2f40e43..d550a5d6a05f 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -114,7 +114,7 @@ int gfs2_shrink_qd_memory(struct shrinker *shrink, struct shrink_control *sc)
114 spin_unlock(&qd_lru_lock); 114 spin_unlock(&qd_lru_lock);
115 115
116out: 116out:
117 return (atomic_read(&qd_lru_count) * sysctl_vfs_cache_pressure) / 100; 117 return vfs_pressure_ratio(atomic_read(&qd_lru_count));
118} 118}
119 119
120static u64 qd2index(struct gfs2_quota_data *qd) 120static u64 qd2index(struct gfs2_quota_data *qd)
diff --git a/fs/mbcache.c b/fs/mbcache.c
index 8c32ef3ba88e..5eb04767cb29 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -189,7 +189,7 @@ mb_cache_shrink_fn(struct shrinker *shrink, struct shrink_control *sc)
189 list_for_each_entry_safe(entry, tmp, &free_list, e_lru_list) { 189 list_for_each_entry_safe(entry, tmp, &free_list, e_lru_list) {
190 __mb_cache_entry_forget(entry, gfp_mask); 190 __mb_cache_entry_forget(entry, gfp_mask);
191 } 191 }
192 return (count / 100) * sysctl_vfs_cache_pressure; 192 return vfs_pressure_ratio(count);
193} 193}
194 194
195 195
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index e79bc6ce828e..813ef2571545 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -2046,7 +2046,7 @@ remove_lru_entry:
2046 } 2046 }
2047 spin_unlock(&nfs_access_lru_lock); 2047 spin_unlock(&nfs_access_lru_lock);
2048 nfs_access_free_list(&head); 2048 nfs_access_free_list(&head);
2049 return (atomic_long_read(&nfs_access_nr_entries) / 100) * sysctl_vfs_cache_pressure; 2049 return vfs_pressure_ratio(atomic_long_read(&nfs_access_nr_entries));
2050} 2050}
2051 2051
2052static void __nfs_access_zap_cache(struct nfs_inode *nfsi, struct list_head *head) 2052static void __nfs_access_zap_cache(struct nfs_inode *nfsi, struct list_head *head)
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 9a702e193538..13eee847605c 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -719,9 +719,8 @@ static int shrink_dqcache_memory(struct shrinker *shrink,
719 prune_dqcache(nr); 719 prune_dqcache(nr);
720 spin_unlock(&dq_list_lock); 720 spin_unlock(&dq_list_lock);
721 } 721 }
722 return ((unsigned) 722 return vfs_pressure_ratio(
723 percpu_counter_read_positive(&dqstats.counter[DQST_FREE_DQUOTS]) 723 percpu_counter_read_positive(&dqstats.counter[DQST_FREE_DQUOTS]));
724 /100) * sysctl_vfs_cache_pressure;
725} 724}
726 725
727static struct shrinker dqcache_shrinker = { 726static struct shrinker dqcache_shrinker = {
diff --git a/fs/super.c b/fs/super.c
index f6961ea84c56..63b6863bac7b 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -82,13 +82,13 @@ static int prune_super(struct shrinker *shrink, struct shrink_control *sc)
82 int inodes; 82 int inodes;
83 83
84 /* proportion the scan between the caches */ 84 /* proportion the scan between the caches */
85 dentries = (sc->nr_to_scan * sb->s_nr_dentry_unused) / 85 dentries = mult_frac(sc->nr_to_scan, sb->s_nr_dentry_unused,
86 total_objects; 86 total_objects);
87 inodes = (sc->nr_to_scan * sb->s_nr_inodes_unused) / 87 inodes = mult_frac(sc->nr_to_scan, sb->s_nr_inodes_unused,
88 total_objects; 88 total_objects);
89 if (fs_objects) 89 if (fs_objects)
90 fs_objects = (sc->nr_to_scan * fs_objects) / 90 fs_objects = mult_frac(sc->nr_to_scan, fs_objects,
91 total_objects; 91 total_objects);
92 /* 92 /*
93 * prune the dcache first as the icache is pinned by it, then 93 * prune the dcache first as the icache is pinned by it, then
94 * prune the icache, followed by the filesystem specific caches 94 * prune the icache, followed by the filesystem specific caches
@@ -104,7 +104,7 @@ static int prune_super(struct shrinker *shrink, struct shrink_control *sc)
104 sb->s_nr_inodes_unused + fs_objects; 104 sb->s_nr_inodes_unused + fs_objects;
105 } 105 }
106 106
107 total_objects = (total_objects / 100) * sysctl_vfs_cache_pressure; 107 total_objects = vfs_pressure_ratio(total_objects);
108 drop_super(sb); 108 drop_super(sb);
109 return total_objects; 109 return total_objects;
110} 110}
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 6218a0aeeeea..956da2e1c7af 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -1722,7 +1722,7 @@ xfs_qm_shake(
1722 } 1722 }
1723 1723
1724out: 1724out:
1725 return (qi->qi_lru_count / 100) * sysctl_vfs_cache_pressure; 1725 return vfs_pressure_ratio(qi->qi_lru_count);
1726} 1726}
1727 1727
1728/* 1728/*
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 844a1ef387e4..59066e0b4ff1 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -395,4 +395,8 @@ static inline bool d_mountpoint(const struct dentry *dentry)
395 395
396extern int sysctl_vfs_cache_pressure; 396extern int sysctl_vfs_cache_pressure;
397 397
398static inline unsigned long vfs_pressure_ratio(unsigned long val)
399{
400 return mult_frac(val, sysctl_vfs_cache_pressure, 100);
401}
398#endif /* __LINUX_DCACHE_H */ 402#endif /* __LINUX_DCACHE_H */