aboutsummaryrefslogtreecommitdiffstats
path: root/fs/ext4
diff options
context:
space:
mode:
authorTheodore Ts'o <tytso@mit.edu>2013-02-28 23:58:56 -0500
committerTheodore Ts'o <tytso@mit.edu>2013-02-28 23:58:56 -0500
commit246307745c406379996e6ed6411f0e20f1ce1449 (patch)
tree71b78d765ba7406c6755cad807944da6a99e5980 /fs/ext4
parent8e919d13048cd5acaadb2b15b48acbfb8832d3c2 (diff)
ext4: optimize ext4_es_shrink()
When the system is under memory pressure, ext4_es_shrink() will get called very often. So optimize returning the number of items in the file system's extent status cache by keeping a per-filesystem count, instead of calculating it each time by scanning all of the inodes in the extent status cache. Also rename the slab used for the extent status cache to be "ext4_extent_status" so it's obvious the slab in question is created by ext4. Signed-off-by: "Theodore Ts'o" <tytso@mit.edu> Cc: Zheng Liu <gnehzuil.liu@gmail.com>
Diffstat (limited to 'fs/ext4')
-rw-r--r--fs/ext4/ext4.h1
-rw-r--r--fs/ext4/extents_status.c39
2 files changed, 14 insertions, 26 deletions
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 6e16c1867959..96c10934bb96 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -1268,6 +1268,7 @@ struct ext4_sb_info {
1268 atomic_t s_mb_preallocated; 1268 atomic_t s_mb_preallocated;
1269 atomic_t s_mb_discarded; 1269 atomic_t s_mb_discarded;
1270 atomic_t s_lock_busy; 1270 atomic_t s_lock_busy;
1271 atomic_t s_extent_cache_cnt;
1271 1272
1272 /* locality groups */ 1273 /* locality groups */
1273 struct ext4_locality_group __percpu *s_locality_groups; 1274 struct ext4_locality_group __percpu *s_locality_groups;
diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
index f768f4a98a2b..27fcdd2b2607 100644
--- a/fs/ext4/extents_status.c
+++ b/fs/ext4/extents_status.c
@@ -147,11 +147,12 @@ static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
147 ext4_lblk_t end); 147 ext4_lblk_t end);
148static int __es_try_to_reclaim_extents(struct ext4_inode_info *ei, 148static int __es_try_to_reclaim_extents(struct ext4_inode_info *ei,
149 int nr_to_scan); 149 int nr_to_scan);
150static int ext4_es_reclaim_extents_count(struct super_block *sb);
151 150
152int __init ext4_init_es(void) 151int __init ext4_init_es(void)
153{ 152{
154 ext4_es_cachep = KMEM_CACHE(extent_status, SLAB_RECLAIM_ACCOUNT); 153 ext4_es_cachep = kmem_cache_create("ext4_extent_status",
154 sizeof(struct extent_status),
155 0, (SLAB_RECLAIM_ACCOUNT), NULL);
155 if (ext4_es_cachep == NULL) 156 if (ext4_es_cachep == NULL)
156 return -ENOMEM; 157 return -ENOMEM;
157 return 0; 158 return 0;
@@ -302,8 +303,10 @@ ext4_es_alloc_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len,
302 /* 303 /*
303 * We don't count delayed extent because we never try to reclaim them 304 * We don't count delayed extent because we never try to reclaim them
304 */ 305 */
305 if (!ext4_es_is_delayed(es)) 306 if (!ext4_es_is_delayed(es)) {
306 EXT4_I(inode)->i_es_lru_nr++; 307 EXT4_I(inode)->i_es_lru_nr++;
308 atomic_inc(&EXT4_SB(inode->i_sb)->s_extent_cache_cnt);
309 }
307 310
308 return es; 311 return es;
309} 312}
@@ -314,6 +317,7 @@ static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
314 if (!ext4_es_is_delayed(es)) { 317 if (!ext4_es_is_delayed(es)) {
315 BUG_ON(EXT4_I(inode)->i_es_lru_nr == 0); 318 BUG_ON(EXT4_I(inode)->i_es_lru_nr == 0);
316 EXT4_I(inode)->i_es_lru_nr--; 319 EXT4_I(inode)->i_es_lru_nr--;
320 atomic_dec(&EXT4_SB(inode->i_sb)->s_extent_cache_cnt);
317 } 321 }
318 322
319 kmem_cache_free(ext4_es_cachep, es); 323 kmem_cache_free(ext4_es_cachep, es);
@@ -674,10 +678,11 @@ static int ext4_es_shrink(struct shrinker *shrink, struct shrink_control *sc)
674 int nr_to_scan = sc->nr_to_scan; 678 int nr_to_scan = sc->nr_to_scan;
675 int ret, nr_shrunk = 0; 679 int ret, nr_shrunk = 0;
676 680
677 trace_ext4_es_shrink_enter(sbi->s_sb, nr_to_scan); 681 ret = atomic_read(&sbi->s_extent_cache_cnt);
682 trace_ext4_es_shrink_enter(sbi->s_sb, nr_to_scan, ret);
678 683
679 if (!nr_to_scan) 684 if (!nr_to_scan)
680 return ext4_es_reclaim_extents_count(sbi->s_sb); 685 return ret;
681 686
682 INIT_LIST_HEAD(&scanned); 687 INIT_LIST_HEAD(&scanned);
683 688
@@ -705,9 +710,10 @@ static int ext4_es_shrink(struct shrinker *shrink, struct shrink_control *sc)
705 } 710 }
706 list_splice_tail(&scanned, &sbi->s_es_lru); 711 list_splice_tail(&scanned, &sbi->s_es_lru);
707 spin_unlock(&sbi->s_es_lru_lock); 712 spin_unlock(&sbi->s_es_lru_lock);
708 trace_ext4_es_shrink_exit(sbi->s_sb, nr_shrunk);
709 713
710 return ext4_es_reclaim_extents_count(sbi->s_sb); 714 ret = atomic_read(&sbi->s_extent_cache_cnt);
715 trace_ext4_es_shrink_exit(sbi->s_sb, nr_shrunk, ret);
716 return ret;
711} 717}
712 718
713void ext4_es_register_shrinker(struct super_block *sb) 719void ext4_es_register_shrinker(struct super_block *sb)
@@ -751,25 +757,6 @@ void ext4_es_lru_del(struct inode *inode)
751 spin_unlock(&sbi->s_es_lru_lock); 757 spin_unlock(&sbi->s_es_lru_lock);
752} 758}
753 759
754static int ext4_es_reclaim_extents_count(struct super_block *sb)
755{
756 struct ext4_sb_info *sbi = EXT4_SB(sb);
757 struct ext4_inode_info *ei;
758 struct list_head *cur;
759 int nr_cached = 0;
760
761 spin_lock(&sbi->s_es_lru_lock);
762 list_for_each(cur, &sbi->s_es_lru) {
763 ei = list_entry(cur, struct ext4_inode_info, i_es_lru);
764 read_lock(&ei->i_es_lock);
765 nr_cached += ei->i_es_lru_nr;
766 read_unlock(&ei->i_es_lock);
767 }
768 spin_unlock(&sbi->s_es_lru_lock);
769 trace_ext4_es_reclaim_extents_count(sb, nr_cached);
770 return nr_cached;
771}
772
773static int __es_try_to_reclaim_extents(struct ext4_inode_info *ei, 760static int __es_try_to_reclaim_extents(struct ext4_inode_info *ei,
774 int nr_to_scan) 761 int nr_to_scan)
775{ 762{