Diffstat (limited to 'fs')
-rw-r--r-- | fs/dcache.c | 276
-rw-r--r-- | fs/drop_caches.c | 1
-rw-r--r-- | fs/ext4/extents_status.c | 33
-rw-r--r-- | fs/gfs2/glock.c | 30
-rw-r--r-- | fs/gfs2/main.c | 3
-rw-r--r-- | fs/gfs2/quota.c | 18
-rw-r--r-- | fs/gfs2/quota.h | 6
-rw-r--r-- | fs/inode.c | 193
-rw-r--r-- | fs/internal.h | 6
-rw-r--r-- | fs/mbcache.c | 49
-rw-r--r-- | fs/namei.c | 49
-rw-r--r-- | fs/nfs/dir.c | 16
-rw-r--r-- | fs/nfs/internal.h | 6
-rw-r--r-- | fs/nfs/super.c | 3
-rw-r--r-- | fs/nfsd/nfscache.c | 32
-rw-r--r-- | fs/quota/dquot.c | 34
-rw-r--r-- | fs/super.c | 111
-rw-r--r-- | fs/ubifs/shrinker.c | 29
-rw-r--r-- | fs/ubifs/super.c | 3
-rw-r--r-- | fs/ubifs/ubifs.h | 5
-rw-r--r-- | fs/xfs/xfs_buf.c | 253
-rw-r--r-- | fs/xfs/xfs_buf.h | 17
-rw-r--r-- | fs/xfs/xfs_dquot.c | 7
-rw-r--r-- | fs/xfs/xfs_icache.c | 4
-rw-r--r-- | fs/xfs/xfs_icache.h | 2
-rw-r--r-- | fs/xfs/xfs_qm.c | 287
-rw-r--r-- | fs/xfs/xfs_qm.h | 4
-rw-r--r-- | fs/xfs/xfs_super.c | 12
28 files changed, 823 insertions, 666 deletions
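Note (not part of the patch itself): the hunks below convert each filesystem from the old single .shrink callback to the split count_objects/scan_objects shrinker API, and move the per-superblock dentry and inode LRUs onto the node-aware list_lru infrastructure. As a minimal, illustrative sketch of the new callback shape (my_obj, my_lru, my_nr and my_cache_* are placeholder names, not identifiers from this patch):

struct my_obj {
	struct list_head lru;
};

static LIST_HEAD(my_lru);
static DEFINE_SPINLOCK(my_lru_lock);
static atomic_t my_nr = ATOMIC_INIT(0);

/* cheap, lock-free estimate of how many objects could be freed */
static unsigned long my_cache_count(struct shrinker *shrink,
				    struct shrink_control *sc)
{
	return vfs_pressure_ratio(atomic_read(&my_nr));
}

/* free up to sc->nr_to_scan objects and report how many were freed */
static unsigned long my_cache_scan(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	unsigned long freed = 0;
	int nr = sc->nr_to_scan;

	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;	/* cannot recurse into the fs */

	spin_lock(&my_lru_lock);
	while (nr-- && !list_empty(&my_lru)) {
		struct my_obj *obj = list_first_entry(&my_lru,
						      struct my_obj, lru);
		list_del_init(&obj->lru);
		atomic_dec(&my_nr);
		kfree(obj);
		freed++;
	}
	spin_unlock(&my_lru_lock);
	return freed;
}

static struct shrinker my_shrinker = {
	.count_objects	= my_cache_count,
	.scan_objects	= my_cache_scan,
	.seeks		= DEFAULT_SEEKS,
};

register_shrinker(&my_shrinker) then wires it up, which is the same count/scan pairing the ext4, gfs2, mbcache and nfs hunks below establish for their own caches.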
diff --git a/fs/dcache.c b/fs/dcache.c
index dddc67fed732..1bd4614ce93b 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -37,6 +37,7 @@ | |||
37 | #include <linux/rculist_bl.h> | 37 | #include <linux/rculist_bl.h> |
38 | #include <linux/prefetch.h> | 38 | #include <linux/prefetch.h> |
39 | #include <linux/ratelimit.h> | 39 | #include <linux/ratelimit.h> |
40 | #include <linux/list_lru.h> | ||
40 | #include "internal.h" | 41 | #include "internal.h" |
41 | #include "mount.h" | 42 | #include "mount.h" |
42 | 43 | ||
@@ -48,7 +49,7 @@ | |||
48 | * - the dcache hash table | 49 | * - the dcache hash table |
49 | * s_anon bl list spinlock protects: | 50 | * s_anon bl list spinlock protects: |
50 | * - the s_anon list (see __d_drop) | 51 | * - the s_anon list (see __d_drop) |
51 | * dcache_lru_lock protects: | 52 | * dentry->d_sb->s_dentry_lru_lock protects: |
52 | * - the dcache lru lists and counters | 53 | * - the dcache lru lists and counters |
53 | * d_lock protects: | 54 | * d_lock protects: |
54 | * - d_flags | 55 | * - d_flags |
@@ -63,7 +64,7 @@ | |||
63 | * Ordering: | 64 | * Ordering: |
64 | * dentry->d_inode->i_lock | 65 | * dentry->d_inode->i_lock |
65 | * dentry->d_lock | 66 | * dentry->d_lock |
66 | * dcache_lru_lock | 67 | * dentry->d_sb->s_dentry_lru_lock |
67 | * dcache_hash_bucket lock | 68 | * dcache_hash_bucket lock |
68 | * s_anon lock | 69 | * s_anon lock |
69 | * | 70 | * |
@@ -81,7 +82,6 @@ | |||
81 | int sysctl_vfs_cache_pressure __read_mostly = 100; | 82 | int sysctl_vfs_cache_pressure __read_mostly = 100; |
82 | EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure); | 83 | EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure); |
83 | 84 | ||
84 | static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lru_lock); | ||
85 | __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock); | 85 | __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock); |
86 | 86 | ||
87 | EXPORT_SYMBOL(rename_lock); | 87 | EXPORT_SYMBOL(rename_lock); |
@@ -146,23 +146,47 @@ struct dentry_stat_t dentry_stat = { | |||
146 | .age_limit = 45, | 146 | .age_limit = 45, |
147 | }; | 147 | }; |
148 | 148 | ||
149 | static DEFINE_PER_CPU(unsigned int, nr_dentry); | 149 | static DEFINE_PER_CPU(long, nr_dentry); |
150 | static DEFINE_PER_CPU(long, nr_dentry_unused); | ||
150 | 151 | ||
151 | #if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS) | 152 | #if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS) |
152 | static int get_nr_dentry(void) | 153 | |
154 | /* | ||
155 | * Here we resort to our own counters instead of using generic per-cpu counters | ||
156 | * for consistency with what the vfs inode code does. We are expected to harvest | ||
157 | * better code and performance by having our own specialized counters. | ||
158 | * | ||
159 | * Please note that the loop is done over all possible CPUs, not over all online | ||
160 | * CPUs. The reason for this is that we don't want to play games with CPUs going | ||
161 | * on and off. If one of them goes off, we will just keep their counters. | ||
162 | * | ||
163 | * glommer: See cffbc8a for details, and if you ever intend to change this, | ||
164 | * please update all vfs counters to match. | ||
165 | */ | ||
166 | static long get_nr_dentry(void) | ||
153 | { | 167 | { |
154 | int i; | 168 | int i; |
155 | int sum = 0; | 169 | long sum = 0; |
156 | for_each_possible_cpu(i) | 170 | for_each_possible_cpu(i) |
157 | sum += per_cpu(nr_dentry, i); | 171 | sum += per_cpu(nr_dentry, i); |
158 | return sum < 0 ? 0 : sum; | 172 | return sum < 0 ? 0 : sum; |
159 | } | 173 | } |
160 | 174 | ||
175 | static long get_nr_dentry_unused(void) | ||
176 | { | ||
177 | int i; | ||
178 | long sum = 0; | ||
179 | for_each_possible_cpu(i) | ||
180 | sum += per_cpu(nr_dentry_unused, i); | ||
181 | return sum < 0 ? 0 : sum; | ||
182 | } | ||
183 | |||
161 | int proc_nr_dentry(ctl_table *table, int write, void __user *buffer, | 184 | int proc_nr_dentry(ctl_table *table, int write, void __user *buffer, |
162 | size_t *lenp, loff_t *ppos) | 185 | size_t *lenp, loff_t *ppos) |
163 | { | 186 | { |
164 | dentry_stat.nr_dentry = get_nr_dentry(); | 187 | dentry_stat.nr_dentry = get_nr_dentry(); |
165 | return proc_dointvec(table, write, buffer, lenp, ppos); | 188 | dentry_stat.nr_unused = get_nr_dentry_unused(); |
189 | return proc_doulongvec_minmax(table, write, buffer, lenp, ppos); | ||
166 | } | 190 | } |
167 | #endif | 191 | #endif |
168 | 192 | ||
@@ -333,52 +357,35 @@ static void dentry_unlink_inode(struct dentry * dentry) | |||
333 | } | 357 | } |
334 | 358 | ||
335 | /* | 359 | /* |
336 | * dentry_lru_(add|del|prune|move_tail) must be called with d_lock held. | 360 | * dentry_lru_(add|del) must be called with d_lock held. |
337 | */ | 361 | */ |
338 | static void dentry_lru_add(struct dentry *dentry) | 362 | static void dentry_lru_add(struct dentry *dentry) |
339 | { | 363 | { |
340 | if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST))) { | 364 | if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST))) { |
341 | spin_lock(&dcache_lru_lock); | 365 | if (list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru)) |
366 | this_cpu_inc(nr_dentry_unused); | ||
342 | dentry->d_flags |= DCACHE_LRU_LIST; | 367 | dentry->d_flags |= DCACHE_LRU_LIST; |
343 | list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru); | ||
344 | dentry->d_sb->s_nr_dentry_unused++; | ||
345 | dentry_stat.nr_unused++; | ||
346 | spin_unlock(&dcache_lru_lock); | ||
347 | } | 368 | } |
348 | } | 369 | } |
349 | 370 | ||
350 | static void __dentry_lru_del(struct dentry *dentry) | ||
351 | { | ||
352 | list_del_init(&dentry->d_lru); | ||
353 | dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST); | ||
354 | dentry->d_sb->s_nr_dentry_unused--; | ||
355 | dentry_stat.nr_unused--; | ||
356 | } | ||
357 | |||
358 | /* | 371 | /* |
359 | * Remove a dentry with references from the LRU. | 372 | * Remove a dentry with references from the LRU. |
373 | * | ||
374 | * If we are on the shrink list, then we can get to try_prune_one_dentry() and | ||
375 | * lose our last reference through the parent walk. In this case, we need to | ||
376 | * remove ourselves from the shrink list, not the LRU. | ||
360 | */ | 377 | */ |
361 | static void dentry_lru_del(struct dentry *dentry) | 378 | static void dentry_lru_del(struct dentry *dentry) |
362 | { | 379 | { |
363 | if (!list_empty(&dentry->d_lru)) { | 380 | if (dentry->d_flags & DCACHE_SHRINK_LIST) { |
364 | spin_lock(&dcache_lru_lock); | 381 | list_del_init(&dentry->d_lru); |
365 | __dentry_lru_del(dentry); | 382 | dentry->d_flags &= ~DCACHE_SHRINK_LIST; |
366 | spin_unlock(&dcache_lru_lock); | 383 | return; |
367 | } | 384 | } |
368 | } | ||
369 | 385 | ||
370 | static void dentry_lru_move_list(struct dentry *dentry, struct list_head *list) | 386 | if (list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru)) |
371 | { | 387 | this_cpu_dec(nr_dentry_unused); |
372 | spin_lock(&dcache_lru_lock); | 388 | dentry->d_flags &= ~DCACHE_LRU_LIST; |
373 | if (list_empty(&dentry->d_lru)) { | ||
374 | dentry->d_flags |= DCACHE_LRU_LIST; | ||
375 | list_add_tail(&dentry->d_lru, list); | ||
376 | dentry->d_sb->s_nr_dentry_unused++; | ||
377 | dentry_stat.nr_unused++; | ||
378 | } else { | ||
379 | list_move_tail(&dentry->d_lru, list); | ||
380 | } | ||
381 | spin_unlock(&dcache_lru_lock); | ||
382 | } | 389 | } |
383 | 390 | ||
384 | /** | 391 | /** |
@@ -474,7 +481,8 @@ EXPORT_SYMBOL(d_drop); | |||
474 | * If ref is non-zero, then decrement the refcount too. | 481 | * If ref is non-zero, then decrement the refcount too. |
475 | * Returns dentry requiring refcount drop, or NULL if we're done. | 482 | * Returns dentry requiring refcount drop, or NULL if we're done. |
476 | */ | 483 | */ |
477 | static inline struct dentry *dentry_kill(struct dentry *dentry) | 484 | static inline struct dentry * |
485 | dentry_kill(struct dentry *dentry, int unlock_on_failure) | ||
478 | __releases(dentry->d_lock) | 486 | __releases(dentry->d_lock) |
479 | { | 487 | { |
480 | struct inode *inode; | 488 | struct inode *inode; |
@@ -483,8 +491,10 @@ static inline struct dentry *dentry_kill(struct dentry *dentry) | |||
483 | inode = dentry->d_inode; | 491 | inode = dentry->d_inode; |
484 | if (inode && !spin_trylock(&inode->i_lock)) { | 492 | if (inode && !spin_trylock(&inode->i_lock)) { |
485 | relock: | 493 | relock: |
486 | spin_unlock(&dentry->d_lock); | 494 | if (unlock_on_failure) { |
487 | cpu_relax(); | 495 | spin_unlock(&dentry->d_lock); |
496 | cpu_relax(); | ||
497 | } | ||
488 | return dentry; /* try again with same dentry */ | 498 | return dentry; /* try again with same dentry */ |
489 | } | 499 | } |
490 | if (IS_ROOT(dentry)) | 500 | if (IS_ROOT(dentry)) |
@@ -567,7 +577,7 @@ repeat: | |||
567 | return; | 577 | return; |
568 | 578 | ||
569 | kill_it: | 579 | kill_it: |
570 | dentry = dentry_kill(dentry); | 580 | dentry = dentry_kill(dentry, 1); |
571 | if (dentry) | 581 | if (dentry) |
572 | goto repeat; | 582 | goto repeat; |
573 | } | 583 | } |
@@ -787,12 +797,12 @@ EXPORT_SYMBOL(d_prune_aliases); | |||
787 | * | 797 | * |
788 | * This may fail if locks cannot be acquired; no problem, just try again. | 798 | * This may fail if locks cannot be acquired; no problem, just try again. |
789 | */ | 799 | */ |
790 | static void try_prune_one_dentry(struct dentry *dentry) | 800 | static struct dentry * try_prune_one_dentry(struct dentry *dentry) |
791 | __releases(dentry->d_lock) | 801 | __releases(dentry->d_lock) |
792 | { | 802 | { |
793 | struct dentry *parent; | 803 | struct dentry *parent; |
794 | 804 | ||
795 | parent = dentry_kill(dentry); | 805 | parent = dentry_kill(dentry, 0); |
796 | /* | 806 | /* |
797 | * If dentry_kill returns NULL, we have nothing more to do. | 807 | * If dentry_kill returns NULL, we have nothing more to do. |
798 | * if it returns the same dentry, trylocks failed. In either | 808 | * if it returns the same dentry, trylocks failed. In either |
@@ -804,17 +814,18 @@ static void try_prune_one_dentry(struct dentry *dentry) | |||
804 | * fragmentation. | 814 | * fragmentation. |
805 | */ | 815 | */ |
806 | if (!parent) | 816 | if (!parent) |
807 | return; | 817 | return NULL; |
808 | if (parent == dentry) | 818 | if (parent == dentry) |
809 | return; | 819 | return dentry; |
810 | 820 | ||
811 | /* Prune ancestors. */ | 821 | /* Prune ancestors. */ |
812 | dentry = parent; | 822 | dentry = parent; |
813 | while (dentry) { | 823 | while (dentry) { |
814 | if (lockref_put_or_lock(&dentry->d_lockref)) | 824 | if (lockref_put_or_lock(&dentry->d_lockref)) |
815 | return; | 825 | return NULL; |
816 | dentry = dentry_kill(dentry); | 826 | dentry = dentry_kill(dentry, 1); |
817 | } | 827 | } |
828 | return NULL; | ||
818 | } | 829 | } |
819 | 830 | ||
820 | static void shrink_dentry_list(struct list_head *list) | 831 | static void shrink_dentry_list(struct list_head *list) |
@@ -833,76 +844,143 @@ static void shrink_dentry_list(struct list_head *list) | |||
833 | } | 844 | } |
834 | 845 | ||
835 | /* | 846 | /* |
847 | * The dispose list is isolated and dentries are not accounted | ||
848 | * to the LRU here, so we can simply remove it from the list | ||
849 | * here regardless of whether it is referenced or not. | ||
850 | */ | ||
851 | list_del_init(&dentry->d_lru); | ||
852 | dentry->d_flags &= ~DCACHE_SHRINK_LIST; | ||
853 | |||
854 | /* | ||
836 | * We found an inuse dentry which was not removed from | 855 | * We found an inuse dentry which was not removed from |
837 | * the LRU because of laziness during lookup. Do not free | 856 | * the LRU because of laziness during lookup. Do not free it. |
838 | * it - just keep it off the LRU list. | ||
839 | */ | 857 | */ |
840 | if (dentry->d_lockref.count) { | 858 | if (dentry->d_lockref.count) { |
841 | dentry_lru_del(dentry); | ||
842 | spin_unlock(&dentry->d_lock); | 859 | spin_unlock(&dentry->d_lock); |
843 | continue; | 860 | continue; |
844 | } | 861 | } |
845 | |||
846 | rcu_read_unlock(); | 862 | rcu_read_unlock(); |
847 | 863 | ||
848 | try_prune_one_dentry(dentry); | 864 | dentry = try_prune_one_dentry(dentry); |
849 | 865 | ||
850 | rcu_read_lock(); | 866 | rcu_read_lock(); |
867 | if (dentry) { | ||
868 | dentry->d_flags |= DCACHE_SHRINK_LIST; | ||
869 | list_add(&dentry->d_lru, list); | ||
870 | spin_unlock(&dentry->d_lock); | ||
871 | } | ||
851 | } | 872 | } |
852 | rcu_read_unlock(); | 873 | rcu_read_unlock(); |
853 | } | 874 | } |
854 | 875 | ||
876 | static enum lru_status | ||
877 | dentry_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg) | ||
878 | { | ||
879 | struct list_head *freeable = arg; | ||
880 | struct dentry *dentry = container_of(item, struct dentry, d_lru); | ||
881 | |||
882 | |||
883 | /* | ||
884 | * we are inverting the lru lock/dentry->d_lock here, | ||
885 | * so use a trylock. If we fail to get the lock, just skip | ||
886 | * it | ||
887 | */ | ||
888 | if (!spin_trylock(&dentry->d_lock)) | ||
889 | return LRU_SKIP; | ||
890 | |||
891 | /* | ||
892 | * Referenced dentries are still in use. If they have active | ||
893 | * counts, just remove them from the LRU. Otherwise give them | ||
894 | * another pass through the LRU. | ||
895 | */ | ||
896 | if (dentry->d_lockref.count) { | ||
897 | list_del_init(&dentry->d_lru); | ||
898 | spin_unlock(&dentry->d_lock); | ||
899 | return LRU_REMOVED; | ||
900 | } | ||
901 | |||
902 | if (dentry->d_flags & DCACHE_REFERENCED) { | ||
903 | dentry->d_flags &= ~DCACHE_REFERENCED; | ||
904 | spin_unlock(&dentry->d_lock); | ||
905 | |||
906 | /* | ||
907 | * The list move itself will be made by the common LRU code. At | ||
908 | * this point, we've dropped the dentry->d_lock but keep the | ||
909 | * lru lock. This is safe to do, since every list movement is | ||
910 | * protected by the lru lock even if both locks are held. | ||
911 | * | ||
912 | * This is guaranteed by the fact that all LRU management | ||
913 | * functions are intermediated by the LRU API calls like | ||
914 | * list_lru_add and list_lru_del. List movement in this file | ||
915 | * only ever occurs through these functions or through callbacks | ||
916 | * like this one, that are called from the LRU API. | ||
917 | * | ||
918 | * The only exceptions to this are functions like | ||
919 | * shrink_dentry_list, and code that first checks for the | ||
920 | * DCACHE_SHRINK_LIST flag. Those are guaranteed to be | ||
921 | * operating only with stack provided lists after they are | ||
922 | * properly isolated from the main list. It is thus, always a | ||
923 | * local access. | ||
924 | */ | ||
925 | return LRU_ROTATE; | ||
926 | } | ||
927 | |||
928 | dentry->d_flags |= DCACHE_SHRINK_LIST; | ||
929 | list_move_tail(&dentry->d_lru, freeable); | ||
930 | this_cpu_dec(nr_dentry_unused); | ||
931 | spin_unlock(&dentry->d_lock); | ||
932 | |||
933 | return LRU_REMOVED; | ||
934 | } | ||
935 | |||
855 | /** | 936 | /** |
856 | * prune_dcache_sb - shrink the dcache | 937 | * prune_dcache_sb - shrink the dcache |
857 | * @sb: superblock | 938 | * @sb: superblock |
858 | * @count: number of entries to try to free | 939 | * @nr_to_scan : number of entries to try to free |
940 | * @nid: which node to scan for freeable entities | ||
859 | * | 941 | * |
860 | * Attempt to shrink the superblock dcache LRU by @count entries. This is | 942 | * Attempt to shrink the superblock dcache LRU by @nr_to_scan entries. This is |
861 | * done when we need more memory and called from the superblock shrinker | 943 | * done when we need more memory and called from the superblock shrinker |
862 | * function. | 944 | * function. |
863 | * | 945 | * |
864 | * This function may fail to free any resources if all the dentries are in | 946 | * This function may fail to free any resources if all the dentries are in |
865 | * use. | 947 | * use. |
866 | */ | 948 | */ |
867 | void prune_dcache_sb(struct super_block *sb, int count) | 949 | long prune_dcache_sb(struct super_block *sb, unsigned long nr_to_scan, |
950 | int nid) | ||
868 | { | 951 | { |
869 | struct dentry *dentry; | 952 | LIST_HEAD(dispose); |
870 | LIST_HEAD(referenced); | 953 | long freed; |
871 | LIST_HEAD(tmp); | ||
872 | 954 | ||
873 | relock: | 955 | freed = list_lru_walk_node(&sb->s_dentry_lru, nid, dentry_lru_isolate, |
874 | spin_lock(&dcache_lru_lock); | 956 | &dispose, &nr_to_scan); |
875 | while (!list_empty(&sb->s_dentry_lru)) { | 957 | shrink_dentry_list(&dispose); |
876 | dentry = list_entry(sb->s_dentry_lru.prev, | 958 | return freed; |
877 | struct dentry, d_lru); | 959 | } |
878 | BUG_ON(dentry->d_sb != sb); | ||
879 | |||
880 | if (!spin_trylock(&dentry->d_lock)) { | ||
881 | spin_unlock(&dcache_lru_lock); | ||
882 | cpu_relax(); | ||
883 | goto relock; | ||
884 | } | ||
885 | 960 | ||
886 | if (dentry->d_flags & DCACHE_REFERENCED) { | 961 | static enum lru_status dentry_lru_isolate_shrink(struct list_head *item, |
887 | dentry->d_flags &= ~DCACHE_REFERENCED; | 962 | spinlock_t *lru_lock, void *arg) |
888 | list_move(&dentry->d_lru, &referenced); | 963 | { |
889 | spin_unlock(&dentry->d_lock); | 964 | struct list_head *freeable = arg; |
890 | } else { | 965 | struct dentry *dentry = container_of(item, struct dentry, d_lru); |
891 | list_move_tail(&dentry->d_lru, &tmp); | ||
892 | dentry->d_flags |= DCACHE_SHRINK_LIST; | ||
893 | spin_unlock(&dentry->d_lock); | ||
894 | if (!--count) | ||
895 | break; | ||
896 | } | ||
897 | cond_resched_lock(&dcache_lru_lock); | ||
898 | } | ||
899 | if (!list_empty(&referenced)) | ||
900 | list_splice(&referenced, &sb->s_dentry_lru); | ||
901 | spin_unlock(&dcache_lru_lock); | ||
902 | 966 | ||
903 | shrink_dentry_list(&tmp); | 967 | /* |
968 | * we are inverting the lru lock/dentry->d_lock here, | ||
969 | * so use a trylock. If we fail to get the lock, just skip | ||
970 | * it | ||
971 | */ | ||
972 | if (!spin_trylock(&dentry->d_lock)) | ||
973 | return LRU_SKIP; | ||
974 | |||
975 | dentry->d_flags |= DCACHE_SHRINK_LIST; | ||
976 | list_move_tail(&dentry->d_lru, freeable); | ||
977 | this_cpu_dec(nr_dentry_unused); | ||
978 | spin_unlock(&dentry->d_lock); | ||
979 | |||
980 | return LRU_REMOVED; | ||
904 | } | 981 | } |
905 | 982 | ||
983 | |||
906 | /** | 984 | /** |
907 | * shrink_dcache_sb - shrink dcache for a superblock | 985 | * shrink_dcache_sb - shrink dcache for a superblock |
908 | * @sb: superblock | 986 | * @sb: superblock |
@@ -912,16 +990,17 @@ relock: | |||
912 | */ | 990 | */ |
913 | void shrink_dcache_sb(struct super_block *sb) | 991 | void shrink_dcache_sb(struct super_block *sb) |
914 | { | 992 | { |
915 | LIST_HEAD(tmp); | 993 | long freed; |
916 | 994 | ||
917 | spin_lock(&dcache_lru_lock); | 995 | do { |
918 | while (!list_empty(&sb->s_dentry_lru)) { | 996 | LIST_HEAD(dispose); |
919 | list_splice_init(&sb->s_dentry_lru, &tmp); | 997 | |
920 | spin_unlock(&dcache_lru_lock); | 998 | freed = list_lru_walk(&sb->s_dentry_lru, |
921 | shrink_dentry_list(&tmp); | 999 | dentry_lru_isolate_shrink, &dispose, UINT_MAX); |
922 | spin_lock(&dcache_lru_lock); | 1000 | |
923 | } | 1001 | this_cpu_sub(nr_dentry_unused, freed); |
924 | spin_unlock(&dcache_lru_lock); | 1002 | shrink_dentry_list(&dispose); |
1003 | } while (freed > 0); | ||
925 | } | 1004 | } |
926 | EXPORT_SYMBOL(shrink_dcache_sb); | 1005 | EXPORT_SYMBOL(shrink_dcache_sb); |
927 | 1006 | ||
@@ -1283,7 +1362,8 @@ static enum d_walk_ret select_collect(void *_data, struct dentry *dentry) | |||
1283 | if (dentry->d_lockref.count) { | 1362 | if (dentry->d_lockref.count) { |
1284 | dentry_lru_del(dentry); | 1363 | dentry_lru_del(dentry); |
1285 | } else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) { | 1364 | } else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) { |
1286 | dentry_lru_move_list(dentry, &data->dispose); | 1365 | dentry_lru_del(dentry); |
1366 | list_add_tail(&dentry->d_lru, &data->dispose); | ||
1287 | dentry->d_flags |= DCACHE_SHRINK_LIST; | 1367 | dentry->d_flags |= DCACHE_SHRINK_LIST; |
1288 | data->found++; | 1368 | data->found++; |
1289 | ret = D_WALK_NORETRY; | 1369 | ret = D_WALK_NORETRY; |
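Note (not part of the patch itself): prune_dcache_sb() above is the first user of the list_lru walk API in this series; the same isolate-callback pattern recurs for inodes and other caches below. A sketch of the contract under those assumptions (my_isolate/my_prune are placeholder names):

static enum lru_status my_isolate(struct list_head *item,
				  spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;

	/*
	 * The walk holds the lru lock while the callback needs the object's
	 * own lock, inverting the usual order; real callbacks therefore
	 * trylock and return LRU_SKIP on failure, return LRU_ROTATE for
	 * recently referenced objects, and LRU_RETRY if they had to drop
	 * lru_lock to do blocking work.
	 */
	list_move_tail(item, freeable);
	return LRU_REMOVED;	/* caller disposes of the isolated objects */
}

static long my_prune(struct list_lru *lru, unsigned long nr_to_scan, int nid)
{
	LIST_HEAD(dispose);
	long freed;

	freed = list_lru_walk_node(lru, nid, my_isolate, &dispose, &nr_to_scan);
	/* free everything on "dispose" outside the lru lock */
	return freed;
}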
diff --git a/fs/drop_caches.c b/fs/drop_caches.c
index c00e055b6282..9fd702f5bfb2 100644
--- a/fs/drop_caches.c
+++ b/fs/drop_caches.c
@@ -44,6 +44,7 @@ static void drop_slab(void) | |||
44 | .gfp_mask = GFP_KERNEL, | 44 | .gfp_mask = GFP_KERNEL, |
45 | }; | 45 | }; |
46 | 46 | ||
47 | nodes_setall(shrink.nodes_to_scan); | ||
47 | do { | 48 | do { |
48 | nr_objects = shrink_slab(&shrink, 1000, 1000); | 49 | nr_objects = shrink_slab(&shrink, 1000, 1000); |
49 | } while (nr_objects > 10); | 50 | } while (nr_objects > 10); |
diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
index 2d1bdbe78c04..3981ff783950 100644
--- a/fs/ext4/extents_status.c
+++ b/fs/ext4/extents_status.c
@@ -931,13 +931,15 @@ static int __ext4_es_shrink(struct ext4_sb_info *sbi, int nr_to_scan, | |||
931 | struct ext4_inode_info *ei; | 931 | struct ext4_inode_info *ei; |
932 | struct list_head *cur, *tmp; | 932 | struct list_head *cur, *tmp; |
933 | LIST_HEAD(skipped); | 933 | LIST_HEAD(skipped); |
934 | int ret, nr_shrunk = 0; | 934 | int nr_shrunk = 0; |
935 | int retried = 0, skip_precached = 1, nr_skipped = 0; | 935 | int retried = 0, skip_precached = 1, nr_skipped = 0; |
936 | 936 | ||
937 | spin_lock(&sbi->s_es_lru_lock); | 937 | spin_lock(&sbi->s_es_lru_lock); |
938 | 938 | ||
939 | retry: | 939 | retry: |
940 | list_for_each_safe(cur, tmp, &sbi->s_es_lru) { | 940 | list_for_each_safe(cur, tmp, &sbi->s_es_lru) { |
941 | int shrunk; | ||
942 | |||
941 | /* | 943 | /* |
942 | * If we have already reclaimed all extents from extent | 944 | * If we have already reclaimed all extents from extent |
943 | * status tree, just stop the loop immediately. | 945 | * status tree, just stop the loop immediately. |
@@ -964,13 +966,13 @@ retry: | |||
964 | continue; | 966 | continue; |
965 | 967 | ||
966 | write_lock(&ei->i_es_lock); | 968 | write_lock(&ei->i_es_lock); |
967 | ret = __es_try_to_reclaim_extents(ei, nr_to_scan); | 969 | shrunk = __es_try_to_reclaim_extents(ei, nr_to_scan); |
968 | if (ei->i_es_lru_nr == 0) | 970 | if (ei->i_es_lru_nr == 0) |
969 | list_del_init(&ei->i_es_lru); | 971 | list_del_init(&ei->i_es_lru); |
970 | write_unlock(&ei->i_es_lock); | 972 | write_unlock(&ei->i_es_lock); |
971 | 973 | ||
972 | nr_shrunk += ret; | 974 | nr_shrunk += shrunk; |
973 | nr_to_scan -= ret; | 975 | nr_to_scan -= shrunk; |
974 | if (nr_to_scan == 0) | 976 | if (nr_to_scan == 0) |
975 | break; | 977 | break; |
976 | } | 978 | } |
@@ -1007,7 +1009,20 @@ retry: | |||
1007 | return nr_shrunk; | 1009 | return nr_shrunk; |
1008 | } | 1010 | } |
1009 | 1011 | ||
1010 | static int ext4_es_shrink(struct shrinker *shrink, struct shrink_control *sc) | 1012 | static unsigned long ext4_es_count(struct shrinker *shrink, |
1013 | struct shrink_control *sc) | ||
1014 | { | ||
1015 | unsigned long nr; | ||
1016 | struct ext4_sb_info *sbi; | ||
1017 | |||
1018 | sbi = container_of(shrink, struct ext4_sb_info, s_es_shrinker); | ||
1019 | nr = percpu_counter_read_positive(&sbi->s_extent_cache_cnt); | ||
1020 | trace_ext4_es_shrink_enter(sbi->s_sb, sc->nr_to_scan, nr); | ||
1021 | return nr; | ||
1022 | } | ||
1023 | |||
1024 | static unsigned long ext4_es_scan(struct shrinker *shrink, | ||
1025 | struct shrink_control *sc) | ||
1011 | { | 1026 | { |
1012 | struct ext4_sb_info *sbi = container_of(shrink, | 1027 | struct ext4_sb_info *sbi = container_of(shrink, |
1013 | struct ext4_sb_info, s_es_shrinker); | 1028 | struct ext4_sb_info, s_es_shrinker); |
@@ -1022,9 +1037,8 @@ static int ext4_es_shrink(struct shrinker *shrink, struct shrink_control *sc) | |||
1022 | 1037 | ||
1023 | nr_shrunk = __ext4_es_shrink(sbi, nr_to_scan, NULL); | 1038 | nr_shrunk = __ext4_es_shrink(sbi, nr_to_scan, NULL); |
1024 | 1039 | ||
1025 | ret = percpu_counter_read_positive(&sbi->s_extent_cache_cnt); | ||
1026 | trace_ext4_es_shrink_exit(sbi->s_sb, nr_shrunk, ret); | 1040 | trace_ext4_es_shrink_exit(sbi->s_sb, nr_shrunk, ret); |
1027 | return ret; | 1041 | return nr_shrunk; |
1028 | } | 1042 | } |
1029 | 1043 | ||
1030 | void ext4_es_register_shrinker(struct ext4_sb_info *sbi) | 1044 | void ext4_es_register_shrinker(struct ext4_sb_info *sbi) |
@@ -1032,7 +1046,8 @@ void ext4_es_register_shrinker(struct ext4_sb_info *sbi) | |||
1032 | INIT_LIST_HEAD(&sbi->s_es_lru); | 1046 | INIT_LIST_HEAD(&sbi->s_es_lru); |
1033 | spin_lock_init(&sbi->s_es_lru_lock); | 1047 | spin_lock_init(&sbi->s_es_lru_lock); |
1034 | sbi->s_es_last_sorted = 0; | 1048 | sbi->s_es_last_sorted = 0; |
1035 | sbi->s_es_shrinker.shrink = ext4_es_shrink; | 1049 | sbi->s_es_shrinker.scan_objects = ext4_es_scan; |
1050 | sbi->s_es_shrinker.count_objects = ext4_es_count; | ||
1036 | sbi->s_es_shrinker.seeks = DEFAULT_SEEKS; | 1051 | sbi->s_es_shrinker.seeks = DEFAULT_SEEKS; |
1037 | register_shrinker(&sbi->s_es_shrinker); | 1052 | register_shrinker(&sbi->s_es_shrinker); |
1038 | } | 1053 | } |
@@ -1076,7 +1091,7 @@ static int __es_try_to_reclaim_extents(struct ext4_inode_info *ei, | |||
1076 | struct ext4_es_tree *tree = &ei->i_es_tree; | 1091 | struct ext4_es_tree *tree = &ei->i_es_tree; |
1077 | struct rb_node *node; | 1092 | struct rb_node *node; |
1078 | struct extent_status *es; | 1093 | struct extent_status *es; |
1079 | int nr_shrunk = 0; | 1094 | unsigned long nr_shrunk = 0; |
1080 | static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL, | 1095 | static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL, |
1081 | DEFAULT_RATELIMIT_BURST); | 1096 | DEFAULT_RATELIMIT_BURST); |
1082 | 1097 | ||
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 722329cac98f..c2f41b4d00b9 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1427,21 +1427,22 @@ __acquires(&lru_lock) | |||
1427 | * gfs2_dispose_glock_lru() above. | 1427 | * gfs2_dispose_glock_lru() above. |
1428 | */ | 1428 | */ |
1429 | 1429 | ||
1430 | static void gfs2_scan_glock_lru(int nr) | 1430 | static long gfs2_scan_glock_lru(int nr) |
1431 | { | 1431 | { |
1432 | struct gfs2_glock *gl; | 1432 | struct gfs2_glock *gl; |
1433 | LIST_HEAD(skipped); | 1433 | LIST_HEAD(skipped); |
1434 | LIST_HEAD(dispose); | 1434 | LIST_HEAD(dispose); |
1435 | long freed = 0; | ||
1435 | 1436 | ||
1436 | spin_lock(&lru_lock); | 1437 | spin_lock(&lru_lock); |
1437 | while(nr && !list_empty(&lru_list)) { | 1438 | while ((nr-- >= 0) && !list_empty(&lru_list)) { |
1438 | gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru); | 1439 | gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru); |
1439 | 1440 | ||
1440 | /* Test for being demotable */ | 1441 | /* Test for being demotable */ |
1441 | if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) { | 1442 | if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) { |
1442 | list_move(&gl->gl_lru, &dispose); | 1443 | list_move(&gl->gl_lru, &dispose); |
1443 | atomic_dec(&lru_count); | 1444 | atomic_dec(&lru_count); |
1444 | nr--; | 1445 | freed++; |
1445 | continue; | 1446 | continue; |
1446 | } | 1447 | } |
1447 | 1448 | ||
@@ -1451,23 +1452,28 @@ static void gfs2_scan_glock_lru(int nr) | |||
1451 | if (!list_empty(&dispose)) | 1452 | if (!list_empty(&dispose)) |
1452 | gfs2_dispose_glock_lru(&dispose); | 1453 | gfs2_dispose_glock_lru(&dispose); |
1453 | spin_unlock(&lru_lock); | 1454 | spin_unlock(&lru_lock); |
1455 | |||
1456 | return freed; | ||
1454 | } | 1457 | } |
1455 | 1458 | ||
1456 | static int gfs2_shrink_glock_memory(struct shrinker *shrink, | 1459 | static unsigned long gfs2_glock_shrink_scan(struct shrinker *shrink, |
1457 | struct shrink_control *sc) | 1460 | struct shrink_control *sc) |
1458 | { | 1461 | { |
1459 | if (sc->nr_to_scan) { | 1462 | if (!(sc->gfp_mask & __GFP_FS)) |
1460 | if (!(sc->gfp_mask & __GFP_FS)) | 1463 | return SHRINK_STOP; |
1461 | return -1; | 1464 | return gfs2_scan_glock_lru(sc->nr_to_scan); |
1462 | gfs2_scan_glock_lru(sc->nr_to_scan); | 1465 | } |
1463 | } | ||
1464 | 1466 | ||
1465 | return (atomic_read(&lru_count) / 100) * sysctl_vfs_cache_pressure; | 1467 | static unsigned long gfs2_glock_shrink_count(struct shrinker *shrink, |
1468 | struct shrink_control *sc) | ||
1469 | { | ||
1470 | return vfs_pressure_ratio(atomic_read(&lru_count)); | ||
1466 | } | 1471 | } |
1467 | 1472 | ||
1468 | static struct shrinker glock_shrinker = { | 1473 | static struct shrinker glock_shrinker = { |
1469 | .shrink = gfs2_shrink_glock_memory, | ||
1470 | .seeks = DEFAULT_SEEKS, | 1474 | .seeks = DEFAULT_SEEKS, |
1475 | .count_objects = gfs2_glock_shrink_count, | ||
1476 | .scan_objects = gfs2_glock_shrink_scan, | ||
1471 | }; | 1477 | }; |
1472 | 1478 | ||
1473 | /** | 1479 | /** |
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index 7b0f5043cf24..351586e24e30 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -32,7 +32,8 @@ | |||
32 | struct workqueue_struct *gfs2_control_wq; | 32 | struct workqueue_struct *gfs2_control_wq; |
33 | 33 | ||
34 | static struct shrinker qd_shrinker = { | 34 | static struct shrinker qd_shrinker = { |
35 | .shrink = gfs2_shrink_qd_memory, | 35 | .count_objects = gfs2_qd_shrink_count, |
36 | .scan_objects = gfs2_qd_shrink_scan, | ||
36 | .seeks = DEFAULT_SEEKS, | 37 | .seeks = DEFAULT_SEEKS, |
37 | }; | 38 | }; |
38 | 39 | ||
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 3768c2f40e43..db441359ee8c 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -75,17 +75,16 @@ static LIST_HEAD(qd_lru_list); | |||
75 | static atomic_t qd_lru_count = ATOMIC_INIT(0); | 75 | static atomic_t qd_lru_count = ATOMIC_INIT(0); |
76 | static DEFINE_SPINLOCK(qd_lru_lock); | 76 | static DEFINE_SPINLOCK(qd_lru_lock); |
77 | 77 | ||
78 | int gfs2_shrink_qd_memory(struct shrinker *shrink, struct shrink_control *sc) | 78 | unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink, |
79 | struct shrink_control *sc) | ||
79 | { | 80 | { |
80 | struct gfs2_quota_data *qd; | 81 | struct gfs2_quota_data *qd; |
81 | struct gfs2_sbd *sdp; | 82 | struct gfs2_sbd *sdp; |
82 | int nr_to_scan = sc->nr_to_scan; | 83 | int nr_to_scan = sc->nr_to_scan; |
83 | 84 | long freed = 0; | |
84 | if (nr_to_scan == 0) | ||
85 | goto out; | ||
86 | 85 | ||
87 | if (!(sc->gfp_mask & __GFP_FS)) | 86 | if (!(sc->gfp_mask & __GFP_FS)) |
88 | return -1; | 87 | return SHRINK_STOP; |
89 | 88 | ||
90 | spin_lock(&qd_lru_lock); | 89 | spin_lock(&qd_lru_lock); |
91 | while (nr_to_scan && !list_empty(&qd_lru_list)) { | 90 | while (nr_to_scan && !list_empty(&qd_lru_list)) { |
@@ -110,11 +109,16 @@ int gfs2_shrink_qd_memory(struct shrinker *shrink, struct shrink_control *sc) | |||
110 | kmem_cache_free(gfs2_quotad_cachep, qd); | 109 | kmem_cache_free(gfs2_quotad_cachep, qd); |
111 | spin_lock(&qd_lru_lock); | 110 | spin_lock(&qd_lru_lock); |
112 | nr_to_scan--; | 111 | nr_to_scan--; |
112 | freed++; | ||
113 | } | 113 | } |
114 | spin_unlock(&qd_lru_lock); | 114 | spin_unlock(&qd_lru_lock); |
115 | return freed; | ||
116 | } | ||
115 | 117 | ||
116 | out: | 118 | unsigned long gfs2_qd_shrink_count(struct shrinker *shrink, |
117 | return (atomic_read(&qd_lru_count) * sysctl_vfs_cache_pressure) / 100; | 119 | struct shrink_control *sc) |
120 | { | ||
121 | return vfs_pressure_ratio(atomic_read(&qd_lru_count)); | ||
118 | } | 122 | } |
119 | 123 | ||
120 | static u64 qd2index(struct gfs2_quota_data *qd) | 124 | static u64 qd2index(struct gfs2_quota_data *qd) |
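Note (not part of the patch itself): vfs_pressure_ratio(), used by the new count callbacks here and in glock.c and mbcache.c, replaces the open-coded "(count * sysctl_vfs_cache_pressure) / 100" expressions removed above. Judging only from those replaced expressions, it is assumed to be equivalent to something like:

/* assumption: encapsulates the open-coded scaling it replaces above */
static inline unsigned long vfs_pressure_ratio(unsigned long val)
{
	return (val * sysctl_vfs_cache_pressure) / 100;
}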
diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h
index 4f5e6e44ed83..0f64d9deb1b0 100644
--- a/fs/gfs2/quota.h
+++ b/fs/gfs2/quota.h
@@ -53,8 +53,10 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip) | |||
53 | return ret; | 53 | return ret; |
54 | } | 54 | } |
55 | 55 | ||
56 | extern int gfs2_shrink_qd_memory(struct shrinker *shrink, | 56 | extern unsigned long gfs2_qd_shrink_count(struct shrinker *shrink, |
57 | struct shrink_control *sc); | 57 | struct shrink_control *sc); |
58 | extern unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink, | ||
59 | struct shrink_control *sc); | ||
58 | extern const struct quotactl_ops gfs2_quotactl_ops; | 60 | extern const struct quotactl_ops gfs2_quotactl_ops; |
59 | 61 | ||
60 | #endif /* __QUOTA_DOT_H__ */ | 62 | #endif /* __QUOTA_DOT_H__ */ |
diff --git a/fs/inode.c b/fs/inode.c
index 93a0625b46e4..b33ba8e021cc 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/prefetch.h> | 17 | #include <linux/prefetch.h> |
18 | #include <linux/buffer_head.h> /* for inode_has_buffers */ | 18 | #include <linux/buffer_head.h> /* for inode_has_buffers */ |
19 | #include <linux/ratelimit.h> | 19 | #include <linux/ratelimit.h> |
20 | #include <linux/list_lru.h> | ||
20 | #include "internal.h" | 21 | #include "internal.h" |
21 | 22 | ||
22 | /* | 23 | /* |
@@ -24,7 +25,7 @@ | |||
24 | * | 25 | * |
25 | * inode->i_lock protects: | 26 | * inode->i_lock protects: |
26 | * inode->i_state, inode->i_hash, __iget() | 27 | * inode->i_state, inode->i_hash, __iget() |
27 | * inode->i_sb->s_inode_lru_lock protects: | 28 | * Inode LRU list locks protect: |
28 | * inode->i_sb->s_inode_lru, inode->i_lru | 29 | * inode->i_sb->s_inode_lru, inode->i_lru |
29 | * inode_sb_list_lock protects: | 30 | * inode_sb_list_lock protects: |
30 | * sb->s_inodes, inode->i_sb_list | 31 | * sb->s_inodes, inode->i_sb_list |
@@ -37,7 +38,7 @@ | |||
37 | * | 38 | * |
38 | * inode_sb_list_lock | 39 | * inode_sb_list_lock |
39 | * inode->i_lock | 40 | * inode->i_lock |
40 | * inode->i_sb->s_inode_lru_lock | 41 | * Inode LRU list locks |
41 | * | 42 | * |
42 | * bdi->wb.list_lock | 43 | * bdi->wb.list_lock |
43 | * inode->i_lock | 44 | * inode->i_lock |
@@ -70,33 +71,33 @@ EXPORT_SYMBOL(empty_aops); | |||
70 | */ | 71 | */ |
71 | struct inodes_stat_t inodes_stat; | 72 | struct inodes_stat_t inodes_stat; |
72 | 73 | ||
73 | static DEFINE_PER_CPU(unsigned int, nr_inodes); | 74 | static DEFINE_PER_CPU(unsigned long, nr_inodes); |
74 | static DEFINE_PER_CPU(unsigned int, nr_unused); | 75 | static DEFINE_PER_CPU(unsigned long, nr_unused); |
75 | 76 | ||
76 | static struct kmem_cache *inode_cachep __read_mostly; | 77 | static struct kmem_cache *inode_cachep __read_mostly; |
77 | 78 | ||
78 | static int get_nr_inodes(void) | 79 | static long get_nr_inodes(void) |
79 | { | 80 | { |
80 | int i; | 81 | int i; |
81 | int sum = 0; | 82 | long sum = 0; |
82 | for_each_possible_cpu(i) | 83 | for_each_possible_cpu(i) |
83 | sum += per_cpu(nr_inodes, i); | 84 | sum += per_cpu(nr_inodes, i); |
84 | return sum < 0 ? 0 : sum; | 85 | return sum < 0 ? 0 : sum; |
85 | } | 86 | } |
86 | 87 | ||
87 | static inline int get_nr_inodes_unused(void) | 88 | static inline long get_nr_inodes_unused(void) |
88 | { | 89 | { |
89 | int i; | 90 | int i; |
90 | int sum = 0; | 91 | long sum = 0; |
91 | for_each_possible_cpu(i) | 92 | for_each_possible_cpu(i) |
92 | sum += per_cpu(nr_unused, i); | 93 | sum += per_cpu(nr_unused, i); |
93 | return sum < 0 ? 0 : sum; | 94 | return sum < 0 ? 0 : sum; |
94 | } | 95 | } |
95 | 96 | ||
96 | int get_nr_dirty_inodes(void) | 97 | long get_nr_dirty_inodes(void) |
97 | { | 98 | { |
98 | /* not actually dirty inodes, but a wild approximation */ | 99 | /* not actually dirty inodes, but a wild approximation */ |
99 | int nr_dirty = get_nr_inodes() - get_nr_inodes_unused(); | 100 | long nr_dirty = get_nr_inodes() - get_nr_inodes_unused(); |
100 | return nr_dirty > 0 ? nr_dirty : 0; | 101 | return nr_dirty > 0 ? nr_dirty : 0; |
101 | } | 102 | } |
102 | 103 | ||
@@ -109,7 +110,7 @@ int proc_nr_inodes(ctl_table *table, int write, | |||
109 | { | 110 | { |
110 | inodes_stat.nr_inodes = get_nr_inodes(); | 111 | inodes_stat.nr_inodes = get_nr_inodes(); |
111 | inodes_stat.nr_unused = get_nr_inodes_unused(); | 112 | inodes_stat.nr_unused = get_nr_inodes_unused(); |
112 | return proc_dointvec(table, write, buffer, lenp, ppos); | 113 | return proc_doulongvec_minmax(table, write, buffer, lenp, ppos); |
113 | } | 114 | } |
114 | #endif | 115 | #endif |
115 | 116 | ||
@@ -401,13 +402,8 @@ EXPORT_SYMBOL(ihold); | |||
401 | 402 | ||
402 | static void inode_lru_list_add(struct inode *inode) | 403 | static void inode_lru_list_add(struct inode *inode) |
403 | { | 404 | { |
404 | spin_lock(&inode->i_sb->s_inode_lru_lock); | 405 | if (list_lru_add(&inode->i_sb->s_inode_lru, &inode->i_lru)) |
405 | if (list_empty(&inode->i_lru)) { | ||
406 | list_add(&inode->i_lru, &inode->i_sb->s_inode_lru); | ||
407 | inode->i_sb->s_nr_inodes_unused++; | ||
408 | this_cpu_inc(nr_unused); | 406 | this_cpu_inc(nr_unused); |
409 | } | ||
410 | spin_unlock(&inode->i_sb->s_inode_lru_lock); | ||
411 | } | 407 | } |
412 | 408 | ||
413 | /* | 409 | /* |
@@ -425,13 +421,9 @@ void inode_add_lru(struct inode *inode) | |||
425 | 421 | ||
426 | static void inode_lru_list_del(struct inode *inode) | 422 | static void inode_lru_list_del(struct inode *inode) |
427 | { | 423 | { |
428 | spin_lock(&inode->i_sb->s_inode_lru_lock); | 424 | |
429 | if (!list_empty(&inode->i_lru)) { | 425 | if (list_lru_del(&inode->i_sb->s_inode_lru, &inode->i_lru)) |
430 | list_del_init(&inode->i_lru); | ||
431 | inode->i_sb->s_nr_inodes_unused--; | ||
432 | this_cpu_dec(nr_unused); | 426 | this_cpu_dec(nr_unused); |
433 | } | ||
434 | spin_unlock(&inode->i_sb->s_inode_lru_lock); | ||
435 | } | 427 | } |
436 | 428 | ||
437 | /** | 429 | /** |
@@ -675,24 +667,8 @@ int invalidate_inodes(struct super_block *sb, bool kill_dirty) | |||
675 | return busy; | 667 | return busy; |
676 | } | 668 | } |
677 | 669 | ||
678 | static int can_unuse(struct inode *inode) | ||
679 | { | ||
680 | if (inode->i_state & ~I_REFERENCED) | ||
681 | return 0; | ||
682 | if (inode_has_buffers(inode)) | ||
683 | return 0; | ||
684 | if (atomic_read(&inode->i_count)) | ||
685 | return 0; | ||
686 | if (inode->i_data.nrpages) | ||
687 | return 0; | ||
688 | return 1; | ||
689 | } | ||
690 | |||
691 | /* | 670 | /* |
692 | * Walk the superblock inode LRU for freeable inodes and attempt to free them. | 671 | * Isolate the inode from the LRU in preparation for freeing it. |
693 | * This is called from the superblock shrinker function with a number of inodes | ||
694 | * to trim from the LRU. Inodes to be freed are moved to a temporary list and | ||
695 | * then are freed outside inode_lock by dispose_list(). | ||
696 | * | 672 | * |
697 | * Any inodes which are pinned purely because of attached pagecache have their | 673 | * Any inodes which are pinned purely because of attached pagecache have their |
698 | * pagecache removed. If the inode has metadata buffers attached to | 674 | * pagecache removed. If the inode has metadata buffers attached to |
@@ -706,89 +682,82 @@ static int can_unuse(struct inode *inode) | |||
706 | * LRU does not have strict ordering. Hence we don't want to reclaim inodes | 682 | * LRU does not have strict ordering. Hence we don't want to reclaim inodes |
707 | * with this flag set because they are the inodes that are out of order. | 683 | * with this flag set because they are the inodes that are out of order. |
708 | */ | 684 | */ |
709 | void prune_icache_sb(struct super_block *sb, int nr_to_scan) | 685 | static enum lru_status |
686 | inode_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg) | ||
710 | { | 687 | { |
711 | LIST_HEAD(freeable); | 688 | struct list_head *freeable = arg; |
712 | int nr_scanned; | 689 | struct inode *inode = container_of(item, struct inode, i_lru); |
713 | unsigned long reap = 0; | ||
714 | 690 | ||
715 | spin_lock(&sb->s_inode_lru_lock); | 691 | /* |
716 | for (nr_scanned = nr_to_scan; nr_scanned >= 0; nr_scanned--) { | 692 | * we are inverting the lru lock/inode->i_lock here, so use a trylock. |
717 | struct inode *inode; | 693 | * If we fail to get the lock, just skip it. |
694 | */ | ||
695 | if (!spin_trylock(&inode->i_lock)) | ||
696 | return LRU_SKIP; | ||
718 | 697 | ||
719 | if (list_empty(&sb->s_inode_lru)) | 698 | /* |
720 | break; | 699 | * Referenced or dirty inodes are still in use. Give them another pass |
700 | * through the LRU as we canot reclaim them now. | ||
701 | */ | ||
702 | if (atomic_read(&inode->i_count) || | ||
703 | (inode->i_state & ~I_REFERENCED)) { | ||
704 | list_del_init(&inode->i_lru); | ||
705 | spin_unlock(&inode->i_lock); | ||
706 | this_cpu_dec(nr_unused); | ||
707 | return LRU_REMOVED; | ||
708 | } | ||
721 | 709 | ||
722 | inode = list_entry(sb->s_inode_lru.prev, struct inode, i_lru); | 710 | /* recently referenced inodes get one more pass */ |
711 | if (inode->i_state & I_REFERENCED) { | ||
712 | inode->i_state &= ~I_REFERENCED; | ||
713 | spin_unlock(&inode->i_lock); | ||
714 | return LRU_ROTATE; | ||
715 | } | ||
723 | 716 | ||
724 | /* | 717 | if (inode_has_buffers(inode) || inode->i_data.nrpages) { |
725 | * we are inverting the sb->s_inode_lru_lock/inode->i_lock here, | 718 | __iget(inode); |
726 | * so use a trylock. If we fail to get the lock, just move the | 719 | spin_unlock(&inode->i_lock); |
727 | * inode to the back of the list so we don't spin on it. | 720 | spin_unlock(lru_lock); |
728 | */ | 721 | if (remove_inode_buffers(inode)) { |
729 | if (!spin_trylock(&inode->i_lock)) { | 722 | unsigned long reap; |
730 | list_move(&inode->i_lru, &sb->s_inode_lru); | 723 | reap = invalidate_mapping_pages(&inode->i_data, 0, -1); |
731 | continue; | 724 | if (current_is_kswapd()) |
725 | __count_vm_events(KSWAPD_INODESTEAL, reap); | ||
726 | else | ||
727 | __count_vm_events(PGINODESTEAL, reap); | ||
728 | if (current->reclaim_state) | ||
729 | current->reclaim_state->reclaimed_slab += reap; | ||
732 | } | 730 | } |
731 | iput(inode); | ||
732 | spin_lock(lru_lock); | ||
733 | return LRU_RETRY; | ||
734 | } | ||
733 | 735 | ||
734 | /* | 736 | WARN_ON(inode->i_state & I_NEW); |
735 | * Referenced or dirty inodes are still in use. Give them | 737 | inode->i_state |= I_FREEING; |
736 | * another pass through the LRU as we canot reclaim them now. | 738 | list_move(&inode->i_lru, freeable); |
737 | */ | 739 | spin_unlock(&inode->i_lock); |
738 | if (atomic_read(&inode->i_count) || | ||
739 | (inode->i_state & ~I_REFERENCED)) { | ||
740 | list_del_init(&inode->i_lru); | ||
741 | spin_unlock(&inode->i_lock); | ||
742 | sb->s_nr_inodes_unused--; | ||
743 | this_cpu_dec(nr_unused); | ||
744 | continue; | ||
745 | } | ||
746 | 740 | ||
747 | /* recently referenced inodes get one more pass */ | 741 | this_cpu_dec(nr_unused); |
748 | if (inode->i_state & I_REFERENCED) { | 742 | return LRU_REMOVED; |
749 | inode->i_state &= ~I_REFERENCED; | 743 | } |
750 | list_move(&inode->i_lru, &sb->s_inode_lru); | ||
751 | spin_unlock(&inode->i_lock); | ||
752 | continue; | ||
753 | } | ||
754 | if (inode_has_buffers(inode) || inode->i_data.nrpages) { | ||
755 | __iget(inode); | ||
756 | spin_unlock(&inode->i_lock); | ||
757 | spin_unlock(&sb->s_inode_lru_lock); | ||
758 | if (remove_inode_buffers(inode)) | ||
759 | reap += invalidate_mapping_pages(&inode->i_data, | ||
760 | 0, -1); | ||
761 | iput(inode); | ||
762 | spin_lock(&sb->s_inode_lru_lock); | ||
763 | |||
764 | if (inode != list_entry(sb->s_inode_lru.next, | ||
765 | struct inode, i_lru)) | ||
766 | continue; /* wrong inode or list_empty */ | ||
767 | /* avoid lock inversions with trylock */ | ||
768 | if (!spin_trylock(&inode->i_lock)) | ||
769 | continue; | ||
770 | if (!can_unuse(inode)) { | ||
771 | spin_unlock(&inode->i_lock); | ||
772 | continue; | ||
773 | } | ||
774 | } | ||
775 | WARN_ON(inode->i_state & I_NEW); | ||
776 | inode->i_state |= I_FREEING; | ||
777 | spin_unlock(&inode->i_lock); | ||
778 | 744 | ||
779 | list_move(&inode->i_lru, &freeable); | 745 | /* |
780 | sb->s_nr_inodes_unused--; | 746 | * Walk the superblock inode LRU for freeable inodes and attempt to free them. |
781 | this_cpu_dec(nr_unused); | 747 | * This is called from the superblock shrinker function with a number of inodes |
782 | } | 748 | * to trim from the LRU. Inodes to be freed are moved to a temporary list and |
783 | if (current_is_kswapd()) | 749 | * then are freed outside inode_lock by dispose_list(). |
784 | __count_vm_events(KSWAPD_INODESTEAL, reap); | 750 | */ |
785 | else | 751 | long prune_icache_sb(struct super_block *sb, unsigned long nr_to_scan, |
786 | __count_vm_events(PGINODESTEAL, reap); | 752 | int nid) |
787 | spin_unlock(&sb->s_inode_lru_lock); | 753 | { |
788 | if (current->reclaim_state) | 754 | LIST_HEAD(freeable); |
789 | current->reclaim_state->reclaimed_slab += reap; | 755 | long freed; |
790 | 756 | ||
757 | freed = list_lru_walk_node(&sb->s_inode_lru, nid, inode_lru_isolate, | ||
758 | &freeable, &nr_to_scan); | ||
791 | dispose_list(&freeable); | 759 | dispose_list(&freeable); |
760 | return freed; | ||
792 | } | 761 | } |
793 | 762 | ||
794 | static void __wait_on_freeing_inode(struct inode *inode); | 763 | static void __wait_on_freeing_inode(struct inode *inode); |
diff --git a/fs/internal.h b/fs/internal.h
index 2be46ea5dd0b..513e0d859a6c 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -114,6 +114,8 @@ extern int open_check_o_direct(struct file *f); | |||
114 | * inode.c | 114 | * inode.c |
115 | */ | 115 | */ |
116 | extern spinlock_t inode_sb_list_lock; | 116 | extern spinlock_t inode_sb_list_lock; |
117 | extern long prune_icache_sb(struct super_block *sb, unsigned long nr_to_scan, | ||
118 | int nid); | ||
117 | extern void inode_add_lru(struct inode *inode); | 119 | extern void inode_add_lru(struct inode *inode); |
118 | 120 | ||
119 | /* | 121 | /* |
@@ -121,7 +123,7 @@ extern void inode_add_lru(struct inode *inode); | |||
121 | */ | 123 | */ |
122 | extern void inode_wb_list_del(struct inode *inode); | 124 | extern void inode_wb_list_del(struct inode *inode); |
123 | 125 | ||
124 | extern int get_nr_dirty_inodes(void); | 126 | extern long get_nr_dirty_inodes(void); |
125 | extern void evict_inodes(struct super_block *); | 127 | extern void evict_inodes(struct super_block *); |
126 | extern int invalidate_inodes(struct super_block *, bool); | 128 | extern int invalidate_inodes(struct super_block *, bool); |
127 | 129 | ||
@@ -130,6 +132,8 @@ extern int invalidate_inodes(struct super_block *, bool); | |||
130 | */ | 132 | */ |
131 | extern struct dentry *__d_alloc(struct super_block *, const struct qstr *); | 133 | extern struct dentry *__d_alloc(struct super_block *, const struct qstr *); |
132 | extern int d_set_mounted(struct dentry *dentry); | 134 | extern int d_set_mounted(struct dentry *dentry); |
135 | extern long prune_dcache_sb(struct super_block *sb, unsigned long nr_to_scan, | ||
136 | int nid); | ||
133 | 137 | ||
134 | /* | 138 | /* |
135 | * read_write.c | 139 | * read_write.c |
diff --git a/fs/mbcache.c b/fs/mbcache.c
index 8c32ef3ba88e..e519e45bf673 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -86,18 +86,6 @@ static LIST_HEAD(mb_cache_list); | |||
86 | static LIST_HEAD(mb_cache_lru_list); | 86 | static LIST_HEAD(mb_cache_lru_list); |
87 | static DEFINE_SPINLOCK(mb_cache_spinlock); | 87 | static DEFINE_SPINLOCK(mb_cache_spinlock); |
88 | 88 | ||
89 | /* | ||
90 | * What the mbcache registers as to get shrunk dynamically. | ||
91 | */ | ||
92 | |||
93 | static int mb_cache_shrink_fn(struct shrinker *shrink, | ||
94 | struct shrink_control *sc); | ||
95 | |||
96 | static struct shrinker mb_cache_shrinker = { | ||
97 | .shrink = mb_cache_shrink_fn, | ||
98 | .seeks = DEFAULT_SEEKS, | ||
99 | }; | ||
100 | |||
101 | static inline int | 89 | static inline int |
102 | __mb_cache_entry_is_hashed(struct mb_cache_entry *ce) | 90 | __mb_cache_entry_is_hashed(struct mb_cache_entry *ce) |
103 | { | 91 | { |
@@ -151,7 +139,7 @@ forget: | |||
151 | 139 | ||
152 | 140 | ||
153 | /* | 141 | /* |
154 | * mb_cache_shrink_fn() memory pressure callback | 142 | * mb_cache_shrink_scan() memory pressure callback |
155 | * | 143 | * |
156 | * This function is called by the kernel memory management when memory | 144 | * This function is called by the kernel memory management when memory |
157 | * gets low. | 145 | * gets low. |
@@ -159,17 +147,16 @@ forget: | |||
159 | * @shrink: (ignored) | 147 | * @shrink: (ignored) |
160 | * @sc: shrink_control passed from reclaim | 148 | * @sc: shrink_control passed from reclaim |
161 | * | 149 | * |
162 | * Returns the number of objects which are present in the cache. | 150 | * Returns the number of objects freed. |
163 | */ | 151 | */ |
164 | static int | 152 | static unsigned long |
165 | mb_cache_shrink_fn(struct shrinker *shrink, struct shrink_control *sc) | 153 | mb_cache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) |
166 | { | 154 | { |
167 | LIST_HEAD(free_list); | 155 | LIST_HEAD(free_list); |
168 | struct mb_cache *cache; | ||
169 | struct mb_cache_entry *entry, *tmp; | 156 | struct mb_cache_entry *entry, *tmp; |
170 | int count = 0; | ||
171 | int nr_to_scan = sc->nr_to_scan; | 157 | int nr_to_scan = sc->nr_to_scan; |
172 | gfp_t gfp_mask = sc->gfp_mask; | 158 | gfp_t gfp_mask = sc->gfp_mask; |
159 | unsigned long freed = 0; | ||
173 | 160 | ||
174 | mb_debug("trying to free %d entries", nr_to_scan); | 161 | mb_debug("trying to free %d entries", nr_to_scan); |
175 | spin_lock(&mb_cache_spinlock); | 162 | spin_lock(&mb_cache_spinlock); |
@@ -179,19 +166,37 @@ mb_cache_shrink_fn(struct shrinker *shrink, struct shrink_control *sc) | |||
179 | struct mb_cache_entry, e_lru_list); | 166 | struct mb_cache_entry, e_lru_list); |
180 | list_move_tail(&ce->e_lru_list, &free_list); | 167 | list_move_tail(&ce->e_lru_list, &free_list); |
181 | __mb_cache_entry_unhash(ce); | 168 | __mb_cache_entry_unhash(ce); |
169 | freed++; | ||
170 | } | ||
171 | spin_unlock(&mb_cache_spinlock); | ||
172 | list_for_each_entry_safe(entry, tmp, &free_list, e_lru_list) { | ||
173 | __mb_cache_entry_forget(entry, gfp_mask); | ||
182 | } | 174 | } |
175 | return freed; | ||
176 | } | ||
177 | |||
178 | static unsigned long | ||
179 | mb_cache_shrink_count(struct shrinker *shrink, struct shrink_control *sc) | ||
180 | { | ||
181 | struct mb_cache *cache; | ||
182 | unsigned long count = 0; | ||
183 | |||
184 | spin_lock(&mb_cache_spinlock); | ||
183 | list_for_each_entry(cache, &mb_cache_list, c_cache_list) { | 185 | list_for_each_entry(cache, &mb_cache_list, c_cache_list) { |
184 | mb_debug("cache %s (%d)", cache->c_name, | 186 | mb_debug("cache %s (%d)", cache->c_name, |
185 | atomic_read(&cache->c_entry_count)); | 187 | atomic_read(&cache->c_entry_count)); |
186 | count += atomic_read(&cache->c_entry_count); | 188 | count += atomic_read(&cache->c_entry_count); |
187 | } | 189 | } |
188 | spin_unlock(&mb_cache_spinlock); | 190 | spin_unlock(&mb_cache_spinlock); |
189 | list_for_each_entry_safe(entry, tmp, &free_list, e_lru_list) { | 191 | |
190 | __mb_cache_entry_forget(entry, gfp_mask); | 192 | return vfs_pressure_ratio(count); |
191 | } | ||
192 | return (count / 100) * sysctl_vfs_cache_pressure; | ||
193 | } | 193 | } |
194 | 194 | ||
195 | static struct shrinker mb_cache_shrinker = { | ||
196 | .count_objects = mb_cache_shrink_count, | ||
197 | .scan_objects = mb_cache_shrink_scan, | ||
198 | .seeks = DEFAULT_SEEKS, | ||
199 | }; | ||
195 | 200 | ||
196 | /* | 201 | /* |
197 | * mb_cache_create() create a new cache | 202 | * mb_cache_create() create a new cache |
diff --git a/fs/namei.c b/fs/namei.c
index 409a441ba2ae..0dc4cbf21f37 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -660,29 +660,6 @@ static __always_inline void set_root_rcu(struct nameidata *nd) | |||
660 | } | 660 | } |
661 | } | 661 | } |
662 | 662 | ||
663 | static __always_inline int __vfs_follow_link(struct nameidata *nd, const char *link) | ||
664 | { | ||
665 | int ret; | ||
666 | |||
667 | if (IS_ERR(link)) | ||
668 | goto fail; | ||
669 | |||
670 | if (*link == '/') { | ||
671 | set_root(nd); | ||
672 | path_put(&nd->path); | ||
673 | nd->path = nd->root; | ||
674 | path_get(&nd->root); | ||
675 | nd->flags |= LOOKUP_JUMPED; | ||
676 | } | ||
677 | nd->inode = nd->path.dentry->d_inode; | ||
678 | |||
679 | ret = link_path_walk(link, nd); | ||
680 | return ret; | ||
681 | fail: | ||
682 | path_put(&nd->path); | ||
683 | return PTR_ERR(link); | ||
684 | } | ||
685 | |||
686 | static void path_put_conditional(struct path *path, struct nameidata *nd) | 663 | static void path_put_conditional(struct path *path, struct nameidata *nd) |
687 | { | 664 | { |
688 | dput(path->dentry); | 665 | dput(path->dentry); |
@@ -874,7 +851,20 @@ follow_link(struct path *link, struct nameidata *nd, void **p) | |||
874 | error = 0; | 851 | error = 0; |
875 | s = nd_get_link(nd); | 852 | s = nd_get_link(nd); |
876 | if (s) { | 853 | if (s) { |
877 | error = __vfs_follow_link(nd, s); | 854 | if (unlikely(IS_ERR(s))) { |
855 | path_put(&nd->path); | ||
856 | put_link(nd, link, *p); | ||
857 | return PTR_ERR(s); | ||
858 | } | ||
859 | if (*s == '/') { | ||
860 | set_root(nd); | ||
861 | path_put(&nd->path); | ||
862 | nd->path = nd->root; | ||
863 | path_get(&nd->root); | ||
864 | nd->flags |= LOOKUP_JUMPED; | ||
865 | } | ||
866 | nd->inode = nd->path.dentry->d_inode; | ||
867 | error = link_path_walk(s, nd); | ||
878 | if (unlikely(error)) | 868 | if (unlikely(error)) |
879 | put_link(nd, link, *p); | 869 | put_link(nd, link, *p); |
880 | } | 870 | } |
@@ -2271,12 +2261,15 @@ mountpoint_last(struct nameidata *nd, struct path *path) | |||
2271 | dentry = d_alloc(dir, &nd->last); | 2261 | dentry = d_alloc(dir, &nd->last); |
2272 | if (!dentry) { | 2262 | if (!dentry) { |
2273 | error = -ENOMEM; | 2263 | error = -ENOMEM; |
2264 | mutex_unlock(&dir->d_inode->i_mutex); | ||
2274 | goto out; | 2265 | goto out; |
2275 | } | 2266 | } |
2276 | dentry = lookup_real(dir->d_inode, dentry, nd->flags); | 2267 | dentry = lookup_real(dir->d_inode, dentry, nd->flags); |
2277 | error = PTR_ERR(dentry); | 2268 | error = PTR_ERR(dentry); |
2278 | if (IS_ERR(dentry)) | 2269 | if (IS_ERR(dentry)) { |
2270 | mutex_unlock(&dir->d_inode->i_mutex); | ||
2279 | goto out; | 2271 | goto out; |
2272 | } | ||
2280 | } | 2273 | } |
2281 | mutex_unlock(&dir->d_inode->i_mutex); | 2274 | mutex_unlock(&dir->d_inode->i_mutex); |
2282 | 2275 | ||
@@ -4236,11 +4229,6 @@ int generic_readlink(struct dentry *dentry, char __user *buffer, int buflen) | |||
4236 | return res; | 4229 | return res; |
4237 | } | 4230 | } |
4238 | 4231 | ||
4239 | int vfs_follow_link(struct nameidata *nd, const char *link) | ||
4240 | { | ||
4241 | return __vfs_follow_link(nd, link); | ||
4242 | } | ||
4243 | |||
4244 | /* get the link contents into pagecache */ | 4232 | /* get the link contents into pagecache */ |
4245 | static char *page_getlink(struct dentry * dentry, struct page **ppage) | 4233 | static char *page_getlink(struct dentry * dentry, struct page **ppage) |
4246 | { | 4234 | { |
@@ -4352,7 +4340,6 @@ EXPORT_SYMBOL(vfs_path_lookup); | |||
4352 | EXPORT_SYMBOL(inode_permission); | 4340 | EXPORT_SYMBOL(inode_permission); |
4353 | EXPORT_SYMBOL(unlock_rename); | 4341 | EXPORT_SYMBOL(unlock_rename); |
4354 | EXPORT_SYMBOL(vfs_create); | 4342 | EXPORT_SYMBOL(vfs_create); |
4355 | EXPORT_SYMBOL(vfs_follow_link); | ||
4356 | EXPORT_SYMBOL(vfs_link); | 4343 | EXPORT_SYMBOL(vfs_link); |
4357 | EXPORT_SYMBOL(vfs_mkdir); | 4344 | EXPORT_SYMBOL(vfs_mkdir); |
4358 | EXPORT_SYMBOL(vfs_mknod); | 4345 | EXPORT_SYMBOL(vfs_mknod); |
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index e79bc6ce828e..de434f309af0 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -2006,17 +2006,18 @@ static void nfs_access_free_list(struct list_head *head) | |||
2006 | } | 2006 | } |
2007 | } | 2007 | } |
2008 | 2008 | ||
2009 | int nfs_access_cache_shrinker(struct shrinker *shrink, | 2009 | unsigned long |
2010 | struct shrink_control *sc) | 2010 | nfs_access_cache_scan(struct shrinker *shrink, struct shrink_control *sc) |
2011 | { | 2011 | { |
2012 | LIST_HEAD(head); | 2012 | LIST_HEAD(head); |
2013 | struct nfs_inode *nfsi, *next; | 2013 | struct nfs_inode *nfsi, *next; |
2014 | struct nfs_access_entry *cache; | 2014 | struct nfs_access_entry *cache; |
2015 | int nr_to_scan = sc->nr_to_scan; | 2015 | int nr_to_scan = sc->nr_to_scan; |
2016 | gfp_t gfp_mask = sc->gfp_mask; | 2016 | gfp_t gfp_mask = sc->gfp_mask; |
2017 | long freed = 0; | ||
2017 | 2018 | ||
2018 | if ((gfp_mask & GFP_KERNEL) != GFP_KERNEL) | 2019 | if ((gfp_mask & GFP_KERNEL) != GFP_KERNEL) |
2019 | return (nr_to_scan == 0) ? 0 : -1; | 2020 | return SHRINK_STOP; |
2020 | 2021 | ||
2021 | spin_lock(&nfs_access_lru_lock); | 2022 | spin_lock(&nfs_access_lru_lock); |
2022 | list_for_each_entry_safe(nfsi, next, &nfs_access_lru_list, access_cache_inode_lru) { | 2023 | list_for_each_entry_safe(nfsi, next, &nfs_access_lru_list, access_cache_inode_lru) { |
@@ -2032,6 +2033,7 @@ int nfs_access_cache_shrinker(struct shrinker *shrink, | |||
2032 | struct nfs_access_entry, lru); | 2033 | struct nfs_access_entry, lru); |
2033 | list_move(&cache->lru, &head); | 2034 | list_move(&cache->lru, &head); |
2034 | rb_erase(&cache->rb_node, &nfsi->access_cache); | 2035 | rb_erase(&cache->rb_node, &nfsi->access_cache); |
2036 | freed++; | ||
2035 | if (!list_empty(&nfsi->access_cache_entry_lru)) | 2037 | if (!list_empty(&nfsi->access_cache_entry_lru)) |
2036 | list_move_tail(&nfsi->access_cache_inode_lru, | 2038 | list_move_tail(&nfsi->access_cache_inode_lru, |
2037 | &nfs_access_lru_list); | 2039 | &nfs_access_lru_list); |
@@ -2046,7 +2048,13 @@ remove_lru_entry: | |||
2046 | } | 2048 | } |
2047 | spin_unlock(&nfs_access_lru_lock); | 2049 | spin_unlock(&nfs_access_lru_lock); |
2048 | nfs_access_free_list(&head); | 2050 | nfs_access_free_list(&head); |
2049 | return (atomic_long_read(&nfs_access_nr_entries) / 100) * sysctl_vfs_cache_pressure; | 2051 | return freed; |
2052 | } | ||
2053 | |||
2054 | unsigned long | ||
2055 | nfs_access_cache_count(struct shrinker *shrink, struct shrink_control *sc) | ||
2056 | { | ||
2057 | return vfs_pressure_ratio(atomic_long_read(&nfs_access_nr_entries)); | ||
2050 | } | 2058 | } |
2051 | 2059 | ||
2052 | static void __nfs_access_zap_cache(struct nfs_inode *nfsi, struct list_head *head) | 2060 | static void __nfs_access_zap_cache(struct nfs_inode *nfsi, struct list_head *head) |
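
nfs_access_cache_count() above replaces the open-coded (count / 100) * sysctl_vfs_cache_pressure calculation with vfs_pressure_ratio(), the same helper the later hunks in this series use. Its definition is not in the hunks shown here, but given the mult_frac() style used in fs/super.c below it presumably reduces to something like the following (an assumption, not quoted from the patch), which also avoids throwing away the low two digits of the count before scaling:

/* presumed shape of the helper; not taken from this patch */
static inline unsigned long vfs_pressure_ratio(unsigned long val)
{
        return mult_frac(val, sysctl_vfs_cache_pressure, 100);
}
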
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index d388302c005f..38da8c2b81ac 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h | |||
@@ -273,8 +273,10 @@ extern struct nfs_client *nfs_init_client(struct nfs_client *clp, | |||
273 | const char *ip_addr); | 273 | const char *ip_addr); |
274 | 274 | ||
275 | /* dir.c */ | 275 | /* dir.c */ |
276 | extern int nfs_access_cache_shrinker(struct shrinker *shrink, | 276 | extern unsigned long nfs_access_cache_count(struct shrinker *shrink, |
277 | struct shrink_control *sc); | 277 | struct shrink_control *sc); |
278 | extern unsigned long nfs_access_cache_scan(struct shrinker *shrink, | ||
279 | struct shrink_control *sc); | ||
278 | struct dentry *nfs_lookup(struct inode *, struct dentry *, unsigned int); | 280 | struct dentry *nfs_lookup(struct inode *, struct dentry *, unsigned int); |
279 | int nfs_create(struct inode *, struct dentry *, umode_t, bool); | 281 | int nfs_create(struct inode *, struct dentry *, umode_t, bool); |
280 | int nfs_mkdir(struct inode *, struct dentry *, umode_t); | 282 | int nfs_mkdir(struct inode *, struct dentry *, umode_t); |
diff --git a/fs/nfs/super.c b/fs/nfs/super.c index 5793f24613c8..a03b9c6f9489 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c | |||
@@ -360,7 +360,8 @@ static void unregister_nfs4_fs(void) | |||
360 | #endif | 360 | #endif |
361 | 361 | ||
362 | static struct shrinker acl_shrinker = { | 362 | static struct shrinker acl_shrinker = { |
363 | .shrink = nfs_access_cache_shrinker, | 363 | .count_objects = nfs_access_cache_count, |
364 | .scan_objects = nfs_access_cache_scan, | ||
364 | .seeks = DEFAULT_SEEKS, | 365 | .seeks = DEFAULT_SEEKS, |
365 | }; | 366 | }; |
366 | 367 | ||
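
The acl_shrinker update above shows the shape every conversion in this series follows: the single ->shrink callback is split into ->count_objects, which only reports how many objects could be freed, and ->scan_objects, which frees up to sc->nr_to_scan of them and returns the number actually freed, with SHRINK_STOP replacing the old convention of returning -1 when no progress can be made in the current context. A minimal sketch of that contract (demo_nr_cached and demo_prune() are placeholders, not taken from this patch):

static unsigned long demo_nr_cached;            /* placeholder object counter */
unsigned long demo_prune(unsigned long nr);     /* placeholder reclaimer, returns freed */

static unsigned long demo_cache_count(struct shrinker *shrink,
                                      struct shrink_control *sc)
{
        /* cheap, lock-light estimate of what could be reclaimed */
        return demo_nr_cached;
}

static unsigned long demo_cache_scan(struct shrinker *shrink,
                                     struct shrink_control *sc)
{
        if (!(sc->gfp_mask & __GFP_FS))
                return SHRINK_STOP;             /* cannot reclaim in this context */

        return demo_prune(sc->nr_to_scan);      /* objects actually freed */
}

static struct shrinker demo_shrinker = {
        .count_objects  = demo_cache_count,
        .scan_objects   = demo_cache_scan,
        .seeks          = DEFAULT_SEEKS,
};

Registration itself is unchanged: register_shrinker(&demo_shrinker) at init and unregister_shrinker(&demo_shrinker) at teardown, just as the hunks above keep doing.
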
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c index e76244edd748..9186c7ce0b14 100644 --- a/fs/nfsd/nfscache.c +++ b/fs/nfsd/nfscache.c | |||
@@ -59,11 +59,14 @@ static unsigned int longest_chain_cachesize; | |||
59 | 59 | ||
60 | static int nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec); | 60 | static int nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec); |
61 | static void cache_cleaner_func(struct work_struct *unused); | 61 | static void cache_cleaner_func(struct work_struct *unused); |
62 | static int nfsd_reply_cache_shrink(struct shrinker *shrink, | 62 | static unsigned long nfsd_reply_cache_count(struct shrinker *shrink, |
63 | struct shrink_control *sc); | 63 | struct shrink_control *sc); |
64 | static unsigned long nfsd_reply_cache_scan(struct shrinker *shrink, | ||
65 | struct shrink_control *sc); | ||
64 | 66 | ||
65 | static struct shrinker nfsd_reply_cache_shrinker = { | 67 | static struct shrinker nfsd_reply_cache_shrinker = { |
66 | .shrink = nfsd_reply_cache_shrink, | 68 | .scan_objects = nfsd_reply_cache_scan, |
69 | .count_objects = nfsd_reply_cache_count, | ||
67 | .seeks = 1, | 70 | .seeks = 1, |
68 | }; | 71 | }; |
69 | 72 | ||
@@ -232,16 +235,18 @@ nfsd_cache_entry_expired(struct svc_cacherep *rp) | |||
232 | * Walk the LRU list and prune off entries that are older than RC_EXPIRE. | 235 | * Walk the LRU list and prune off entries that are older than RC_EXPIRE. |
233 | * Also prune the oldest ones when the total exceeds the max number of entries. | 236 | * Also prune the oldest ones when the total exceeds the max number of entries. |
234 | */ | 237 | */ |
235 | static void | 238 | static long |
236 | prune_cache_entries(void) | 239 | prune_cache_entries(void) |
237 | { | 240 | { |
238 | struct svc_cacherep *rp, *tmp; | 241 | struct svc_cacherep *rp, *tmp; |
242 | long freed = 0; | ||
239 | 243 | ||
240 | list_for_each_entry_safe(rp, tmp, &lru_head, c_lru) { | 244 | list_for_each_entry_safe(rp, tmp, &lru_head, c_lru) { |
241 | if (!nfsd_cache_entry_expired(rp) && | 245 | if (!nfsd_cache_entry_expired(rp) && |
242 | num_drc_entries <= max_drc_entries) | 246 | num_drc_entries <= max_drc_entries) |
243 | break; | 247 | break; |
244 | nfsd_reply_cache_free_locked(rp); | 248 | nfsd_reply_cache_free_locked(rp); |
249 | freed++; | ||
245 | } | 250 | } |
246 | 251 | ||
247 | /* | 252 | /* |
@@ -254,6 +259,7 @@ prune_cache_entries(void) | |||
254 | cancel_delayed_work(&cache_cleaner); | 259 | cancel_delayed_work(&cache_cleaner); |
255 | else | 260 | else |
256 | mod_delayed_work(system_wq, &cache_cleaner, RC_EXPIRE); | 261 | mod_delayed_work(system_wq, &cache_cleaner, RC_EXPIRE); |
262 | return freed; | ||
257 | } | 263 | } |
258 | 264 | ||
259 | static void | 265 | static void |
@@ -264,20 +270,28 @@ cache_cleaner_func(struct work_struct *unused) | |||
264 | spin_unlock(&cache_lock); | 270 | spin_unlock(&cache_lock); |
265 | } | 271 | } |
266 | 272 | ||
267 | static int | 273 | static unsigned long |
268 | nfsd_reply_cache_shrink(struct shrinker *shrink, struct shrink_control *sc) | 274 | nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc) |
269 | { | 275 | { |
270 | unsigned int num; | 276 | unsigned long num; |
271 | 277 | ||
272 | spin_lock(&cache_lock); | 278 | spin_lock(&cache_lock); |
273 | if (sc->nr_to_scan) | ||
274 | prune_cache_entries(); | ||
275 | num = num_drc_entries; | 279 | num = num_drc_entries; |
276 | spin_unlock(&cache_lock); | 280 | spin_unlock(&cache_lock); |
277 | 281 | ||
278 | return num; | 282 | return num; |
279 | } | 283 | } |
280 | 284 | ||
285 | static unsigned long | ||
286 | nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc) | ||
287 | { | ||
288 | unsigned long freed; | ||
289 | |||
290 | spin_lock(&cache_lock); | ||
291 | freed = prune_cache_entries(); | ||
292 | spin_unlock(&cache_lock); | ||
293 | return freed; | ||
294 | } | ||
281 | /* | 295 | /* |
282 | * Walk an xdr_buf and get a CRC for at most the first RC_CSUMLEN bytes | 296 | * Walk an xdr_buf and get a CRC for at most the first RC_CSUMLEN bytes |
283 | */ | 297 | */ |
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 9a702e193538..831d49a4111f 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c | |||
@@ -687,45 +687,37 @@ int dquot_quota_sync(struct super_block *sb, int type) | |||
687 | } | 687 | } |
688 | EXPORT_SYMBOL(dquot_quota_sync); | 688 | EXPORT_SYMBOL(dquot_quota_sync); |
689 | 689 | ||
690 | /* Free unused dquots from cache */ | 690 | static unsigned long |
691 | static void prune_dqcache(int count) | 691 | dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) |
692 | { | 692 | { |
693 | struct list_head *head; | 693 | struct list_head *head; |
694 | struct dquot *dquot; | 694 | struct dquot *dquot; |
695 | unsigned long freed = 0; | ||
695 | 696 | ||
696 | head = free_dquots.prev; | 697 | head = free_dquots.prev; |
697 | while (head != &free_dquots && count) { | 698 | while (head != &free_dquots && sc->nr_to_scan) { |
698 | dquot = list_entry(head, struct dquot, dq_free); | 699 | dquot = list_entry(head, struct dquot, dq_free); |
699 | remove_dquot_hash(dquot); | 700 | remove_dquot_hash(dquot); |
700 | remove_free_dquot(dquot); | 701 | remove_free_dquot(dquot); |
701 | remove_inuse(dquot); | 702 | remove_inuse(dquot); |
702 | do_destroy_dquot(dquot); | 703 | do_destroy_dquot(dquot); |
703 | count--; | 704 | sc->nr_to_scan--; |
705 | freed++; | ||
704 | head = free_dquots.prev; | 706 | head = free_dquots.prev; |
705 | } | 707 | } |
708 | return freed; | ||
706 | } | 709 | } |
707 | 710 | ||
708 | /* | 711 | static unsigned long |
709 | * This is called from kswapd when we think we need some | 712 | dqcache_shrink_count(struct shrinker *shrink, struct shrink_control *sc) |
710 | * more memory | ||
711 | */ | ||
712 | static int shrink_dqcache_memory(struct shrinker *shrink, | ||
713 | struct shrink_control *sc) | ||
714 | { | 713 | { |
715 | int nr = sc->nr_to_scan; | 714 | return vfs_pressure_ratio( |
716 | 715 | percpu_counter_read_positive(&dqstats.counter[DQST_FREE_DQUOTS])); | |
717 | if (nr) { | ||
718 | spin_lock(&dq_list_lock); | ||
719 | prune_dqcache(nr); | ||
720 | spin_unlock(&dq_list_lock); | ||
721 | } | ||
722 | return ((unsigned) | ||
723 | percpu_counter_read_positive(&dqstats.counter[DQST_FREE_DQUOTS]) | ||
724 | /100) * sysctl_vfs_cache_pressure; | ||
725 | } | 716 | } |
726 | 717 | ||
727 | static struct shrinker dqcache_shrinker = { | 718 | static struct shrinker dqcache_shrinker = { |
728 | .shrink = shrink_dqcache_memory, | 719 | .count_objects = dqcache_shrink_count, |
720 | .scan_objects = dqcache_shrink_scan, | ||
729 | .seeks = DEFAULT_SEEKS, | 721 | .seeks = DEFAULT_SEEKS, |
730 | }; | 722 | }; |
731 | 723 | ||
diff --git a/fs/super.c b/fs/super.c index f6961ea84c56..3a96c9783a8b 100644 --- a/fs/super.c +++ b/fs/super.c | |||
@@ -53,11 +53,15 @@ static char *sb_writers_name[SB_FREEZE_LEVELS] = { | |||
53 | * shrinker path and that leads to deadlock on the shrinker_rwsem. Hence we | 53 | * shrinker path and that leads to deadlock on the shrinker_rwsem. Hence we |
54 | * take a passive reference to the superblock to avoid this from occurring. | 54 | * take a passive reference to the superblock to avoid this from occurring. |
55 | */ | 55 | */ |
56 | static int prune_super(struct shrinker *shrink, struct shrink_control *sc) | 56 | static unsigned long super_cache_scan(struct shrinker *shrink, |
57 | struct shrink_control *sc) | ||
57 | { | 58 | { |
58 | struct super_block *sb; | 59 | struct super_block *sb; |
59 | int fs_objects = 0; | 60 | long fs_objects = 0; |
60 | int total_objects; | 61 | long total_objects; |
62 | long freed = 0; | ||
63 | long dentries; | ||
64 | long inodes; | ||
61 | 65 | ||
62 | sb = container_of(shrink, struct super_block, s_shrink); | 66 | sb = container_of(shrink, struct super_block, s_shrink); |
63 | 67 | ||
@@ -65,46 +69,62 @@ static int prune_super(struct shrinker *shrink, struct shrink_control *sc) | |||
65 | * Deadlock avoidance. We may hold various FS locks, and we don't want | 69 | * Deadlock avoidance. We may hold various FS locks, and we don't want |
66 | * to recurse into the FS that called us in clear_inode() and friends.. | 70 | * to recurse into the FS that called us in clear_inode() and friends.. |
67 | */ | 71 | */ |
68 | if (sc->nr_to_scan && !(sc->gfp_mask & __GFP_FS)) | 72 | if (!(sc->gfp_mask & __GFP_FS)) |
69 | return -1; | 73 | return SHRINK_STOP; |
70 | 74 | ||
71 | if (!grab_super_passive(sb)) | 75 | if (!grab_super_passive(sb)) |
72 | return -1; | 76 | return SHRINK_STOP; |
73 | 77 | ||
74 | if (sb->s_op->nr_cached_objects) | 78 | if (sb->s_op->nr_cached_objects) |
75 | fs_objects = sb->s_op->nr_cached_objects(sb); | 79 | fs_objects = sb->s_op->nr_cached_objects(sb, sc->nid); |
76 | |||
77 | total_objects = sb->s_nr_dentry_unused + | ||
78 | sb->s_nr_inodes_unused + fs_objects + 1; | ||
79 | |||
80 | if (sc->nr_to_scan) { | ||
81 | int dentries; | ||
82 | int inodes; | ||
83 | |||
84 | /* proportion the scan between the caches */ | ||
85 | dentries = (sc->nr_to_scan * sb->s_nr_dentry_unused) / | ||
86 | total_objects; | ||
87 | inodes = (sc->nr_to_scan * sb->s_nr_inodes_unused) / | ||
88 | total_objects; | ||
89 | if (fs_objects) | ||
90 | fs_objects = (sc->nr_to_scan * fs_objects) / | ||
91 | total_objects; | ||
92 | /* | ||
93 | * prune the dcache first as the icache is pinned by it, then | ||
94 | * prune the icache, followed by the filesystem specific caches | ||
95 | */ | ||
96 | prune_dcache_sb(sb, dentries); | ||
97 | prune_icache_sb(sb, inodes); | ||
98 | 80 | ||
99 | if (fs_objects && sb->s_op->free_cached_objects) { | 81 | inodes = list_lru_count_node(&sb->s_inode_lru, sc->nid); |
100 | sb->s_op->free_cached_objects(sb, fs_objects); | 82 | dentries = list_lru_count_node(&sb->s_dentry_lru, sc->nid); |
101 | fs_objects = sb->s_op->nr_cached_objects(sb); | 83 | total_objects = dentries + inodes + fs_objects + 1; |
102 | } | 84 | |
103 | total_objects = sb->s_nr_dentry_unused + | 85 | /* proportion the scan between the caches */ |
104 | sb->s_nr_inodes_unused + fs_objects; | 86 | dentries = mult_frac(sc->nr_to_scan, dentries, total_objects); |
87 | inodes = mult_frac(sc->nr_to_scan, inodes, total_objects); | ||
88 | |||
89 | /* | ||
90 | * prune the dcache first as the icache is pinned by it, then | ||
91 | * prune the icache, followed by the filesystem specific caches | ||
92 | */ | ||
93 | freed = prune_dcache_sb(sb, dentries, sc->nid); | ||
94 | freed += prune_icache_sb(sb, inodes, sc->nid); | ||
95 | |||
96 | if (fs_objects) { | ||
97 | fs_objects = mult_frac(sc->nr_to_scan, fs_objects, | ||
98 | total_objects); | ||
99 | freed += sb->s_op->free_cached_objects(sb, fs_objects, | ||
100 | sc->nid); | ||
105 | } | 101 | } |
106 | 102 | ||
107 | total_objects = (total_objects / 100) * sysctl_vfs_cache_pressure; | 103 | drop_super(sb); |
104 | return freed; | ||
105 | } | ||
106 | |||
107 | static unsigned long super_cache_count(struct shrinker *shrink, | ||
108 | struct shrink_control *sc) | ||
109 | { | ||
110 | struct super_block *sb; | ||
111 | long total_objects = 0; | ||
112 | |||
113 | sb = container_of(shrink, struct super_block, s_shrink); | ||
114 | |||
115 | if (!grab_super_passive(sb)) | ||
116 | return 0; | ||
117 | |||
118 | if (sb->s_op && sb->s_op->nr_cached_objects) | ||
119 | total_objects = sb->s_op->nr_cached_objects(sb, | ||
120 | sc->nid); | ||
121 | |||
122 | total_objects += list_lru_count_node(&sb->s_dentry_lru, | ||
123 | sc->nid); | ||
124 | total_objects += list_lru_count_node(&sb->s_inode_lru, | ||
125 | sc->nid); | ||
126 | |||
127 | total_objects = vfs_pressure_ratio(total_objects); | ||
108 | drop_super(sb); | 128 | drop_super(sb); |
109 | return total_objects; | 129 | return total_objects; |
110 | } | 130 | } |
@@ -175,9 +195,12 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags) | |||
175 | INIT_HLIST_NODE(&s->s_instances); | 195 | INIT_HLIST_NODE(&s->s_instances); |
176 | INIT_HLIST_BL_HEAD(&s->s_anon); | 196 | INIT_HLIST_BL_HEAD(&s->s_anon); |
177 | INIT_LIST_HEAD(&s->s_inodes); | 197 | INIT_LIST_HEAD(&s->s_inodes); |
178 | INIT_LIST_HEAD(&s->s_dentry_lru); | 198 | |
179 | INIT_LIST_HEAD(&s->s_inode_lru); | 199 | if (list_lru_init(&s->s_dentry_lru)) |
180 | spin_lock_init(&s->s_inode_lru_lock); | 200 | goto err_out; |
201 | if (list_lru_init(&s->s_inode_lru)) | ||
202 | goto err_out_dentry_lru; | ||
203 | |||
181 | INIT_LIST_HEAD(&s->s_mounts); | 204 | INIT_LIST_HEAD(&s->s_mounts); |
182 | init_rwsem(&s->s_umount); | 205 | init_rwsem(&s->s_umount); |
183 | lockdep_set_class(&s->s_umount, &type->s_umount_key); | 206 | lockdep_set_class(&s->s_umount, &type->s_umount_key); |
@@ -210,11 +233,16 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags) | |||
210 | s->cleancache_poolid = -1; | 233 | s->cleancache_poolid = -1; |
211 | 234 | ||
212 | s->s_shrink.seeks = DEFAULT_SEEKS; | 235 | s->s_shrink.seeks = DEFAULT_SEEKS; |
213 | s->s_shrink.shrink = prune_super; | 236 | s->s_shrink.scan_objects = super_cache_scan; |
237 | s->s_shrink.count_objects = super_cache_count; | ||
214 | s->s_shrink.batch = 1024; | 238 | s->s_shrink.batch = 1024; |
239 | s->s_shrink.flags = SHRINKER_NUMA_AWARE; | ||
215 | } | 240 | } |
216 | out: | 241 | out: |
217 | return s; | 242 | return s; |
243 | |||
244 | err_out_dentry_lru: | ||
245 | list_lru_destroy(&s->s_dentry_lru); | ||
218 | err_out: | 246 | err_out: |
219 | security_sb_free(s); | 247 | security_sb_free(s); |
220 | #ifdef CONFIG_SMP | 248 | #ifdef CONFIG_SMP |
@@ -295,6 +323,9 @@ void deactivate_locked_super(struct super_block *s) | |||
295 | 323 | ||
296 | /* caches are now gone, we can safely kill the shrinker now */ | 324 | /* caches are now gone, we can safely kill the shrinker now */ |
297 | unregister_shrinker(&s->s_shrink); | 325 | unregister_shrinker(&s->s_shrink); |
326 | list_lru_destroy(&s->s_dentry_lru); | ||
327 | list_lru_destroy(&s->s_inode_lru); | ||
328 | |||
298 | put_filesystem(fs); | 329 | put_filesystem(fs); |
299 | put_super(s); | 330 | put_super(s); |
300 | } else { | 331 | } else { |
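
With SHRINKER_NUMA_AWARE set on s_shrink, super_cache_count() and super_cache_scan() above work on the per-node LRU counts for sc->nid, and the scan splits sc->nr_to_scan across the dentry, inode and filesystem-private caches with mult_frac() rather than the old truncating integer divisions. Illustrative numbers only (nothing in the patch):

/*
 * nr_to_scan = 128, per-node counts: dentries = 600, inodes = 300,
 * fs_objects = 100, so total_objects = 600 + 300 + 100 + 1 = 1001.
 *
 *      dentries   = mult_frac(128, 600, 1001) = 76
 *      inodes     = mult_frac(128, 300, 1001) = 38
 *      fs_objects = mult_frac(128, 100, 1001) = 12
 *
 * The dcache share is pruned first (dentries pin inodes), then the
 * icache, and ->free_cached_objects() gets its share last.
 */
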
diff --git a/fs/ubifs/shrinker.c b/fs/ubifs/shrinker.c index 9e1d05666fed..f35135e28e96 100644 --- a/fs/ubifs/shrinker.c +++ b/fs/ubifs/shrinker.c | |||
@@ -277,18 +277,25 @@ static int kick_a_thread(void) | |||
277 | return 0; | 277 | return 0; |
278 | } | 278 | } |
279 | 279 | ||
280 | int ubifs_shrinker(struct shrinker *shrink, struct shrink_control *sc) | 280 | unsigned long ubifs_shrink_count(struct shrinker *shrink, |
281 | struct shrink_control *sc) | ||
281 | { | 282 | { |
282 | int nr = sc->nr_to_scan; | ||
283 | int freed, contention = 0; | ||
284 | long clean_zn_cnt = atomic_long_read(&ubifs_clean_zn_cnt); | 283 | long clean_zn_cnt = atomic_long_read(&ubifs_clean_zn_cnt); |
285 | 284 | ||
286 | if (nr == 0) | 285 | /* |
287 | /* | 286 | * Due to the way UBIFS updates the clean znode counter it may |
288 | * Due to the way UBIFS updates the clean znode counter it may | 287 | * temporarily be negative. |
289 | * temporarily be negative. | 288 | */ |
290 | */ | 289 | return clean_zn_cnt >= 0 ? clean_zn_cnt : 1; |
291 | return clean_zn_cnt >= 0 ? clean_zn_cnt : 1; | 290 | } |
291 | |||
292 | unsigned long ubifs_shrink_scan(struct shrinker *shrink, | ||
293 | struct shrink_control *sc) | ||
294 | { | ||
295 | unsigned long nr = sc->nr_to_scan; | ||
296 | int contention = 0; | ||
297 | unsigned long freed; | ||
298 | long clean_zn_cnt = atomic_long_read(&ubifs_clean_zn_cnt); | ||
292 | 299 | ||
293 | if (!clean_zn_cnt) { | 300 | if (!clean_zn_cnt) { |
294 | /* | 301 | /* |
@@ -316,10 +323,10 @@ int ubifs_shrinker(struct shrinker *shrink, struct shrink_control *sc) | |||
316 | 323 | ||
317 | if (!freed && contention) { | 324 | if (!freed && contention) { |
318 | dbg_tnc("freed nothing, but contention"); | 325 | dbg_tnc("freed nothing, but contention"); |
319 | return -1; | 326 | return SHRINK_STOP; |
320 | } | 327 | } |
321 | 328 | ||
322 | out: | 329 | out: |
323 | dbg_tnc("%d znodes were freed, requested %d", freed, nr); | 330 | dbg_tnc("%lu znodes were freed, requested %lu", freed, nr); |
324 | return freed; | 331 | return freed; |
325 | } | 332 | } |
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 879b9976c12b..3e4aa7281e04 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c | |||
@@ -49,7 +49,8 @@ struct kmem_cache *ubifs_inode_slab; | |||
49 | 49 | ||
50 | /* UBIFS TNC shrinker description */ | 50 | /* UBIFS TNC shrinker description */ |
51 | static struct shrinker ubifs_shrinker_info = { | 51 | static struct shrinker ubifs_shrinker_info = { |
52 | .shrink = ubifs_shrinker, | 52 | .scan_objects = ubifs_shrink_scan, |
53 | .count_objects = ubifs_shrink_count, | ||
53 | .seeks = DEFAULT_SEEKS, | 54 | .seeks = DEFAULT_SEEKS, |
54 | }; | 55 | }; |
55 | 56 | ||
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index b2babce4d70f..e8c8cfe1435c 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h | |||
@@ -1624,7 +1624,10 @@ int ubifs_tnc_start_commit(struct ubifs_info *c, struct ubifs_zbranch *zroot); | |||
1624 | int ubifs_tnc_end_commit(struct ubifs_info *c); | 1624 | int ubifs_tnc_end_commit(struct ubifs_info *c); |
1625 | 1625 | ||
1626 | /* shrinker.c */ | 1626 | /* shrinker.c */ |
1627 | int ubifs_shrinker(struct shrinker *shrink, struct shrink_control *sc); | 1627 | unsigned long ubifs_shrink_scan(struct shrinker *shrink, |
1628 | struct shrink_control *sc); | ||
1629 | unsigned long ubifs_shrink_count(struct shrinker *shrink, | ||
1630 | struct shrink_control *sc); | ||
1628 | 1631 | ||
1629 | /* commit.c */ | 1632 | /* commit.c */ |
1630 | int ubifs_bg_thread(void *info); | 1633 | int ubifs_bg_thread(void *info); |
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index c06823fe10d3..263470075ea2 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c | |||
@@ -81,54 +81,6 @@ xfs_buf_vmap_len( | |||
81 | } | 81 | } |
82 | 82 | ||
83 | /* | 83 | /* |
84 | * xfs_buf_lru_add - add a buffer to the LRU. | ||
85 | * | ||
86 | * The LRU takes a new reference to the buffer so that it will only be freed | ||
87 | * once the shrinker takes the buffer off the LRU. | ||
88 | */ | ||
89 | STATIC void | ||
90 | xfs_buf_lru_add( | ||
91 | struct xfs_buf *bp) | ||
92 | { | ||
93 | struct xfs_buftarg *btp = bp->b_target; | ||
94 | |||
95 | spin_lock(&btp->bt_lru_lock); | ||
96 | if (list_empty(&bp->b_lru)) { | ||
97 | atomic_inc(&bp->b_hold); | ||
98 | list_add_tail(&bp->b_lru, &btp->bt_lru); | ||
99 | btp->bt_lru_nr++; | ||
100 | bp->b_lru_flags &= ~_XBF_LRU_DISPOSE; | ||
101 | } | ||
102 | spin_unlock(&btp->bt_lru_lock); | ||
103 | } | ||
104 | |||
105 | /* | ||
106 | * xfs_buf_lru_del - remove a buffer from the LRU | ||
107 | * | ||
108 | * The unlocked check is safe here because it only occurs when there are not | ||
109 | * b_lru_ref counts left on the inode under the pag->pag_buf_lock. it is there | ||
110 | * to optimise the shrinker removing the buffer from the LRU and calling | ||
111 | * xfs_buf_free(). i.e. it removes an unnecessary round trip on the | ||
112 | * bt_lru_lock. | ||
113 | */ | ||
114 | STATIC void | ||
115 | xfs_buf_lru_del( | ||
116 | struct xfs_buf *bp) | ||
117 | { | ||
118 | struct xfs_buftarg *btp = bp->b_target; | ||
119 | |||
120 | if (list_empty(&bp->b_lru)) | ||
121 | return; | ||
122 | |||
123 | spin_lock(&btp->bt_lru_lock); | ||
124 | if (!list_empty(&bp->b_lru)) { | ||
125 | list_del_init(&bp->b_lru); | ||
126 | btp->bt_lru_nr--; | ||
127 | } | ||
128 | spin_unlock(&btp->bt_lru_lock); | ||
129 | } | ||
130 | |||
131 | /* | ||
132 | * When we mark a buffer stale, we remove the buffer from the LRU and clear the | 84 | * When we mark a buffer stale, we remove the buffer from the LRU and clear the |
133 | * b_lru_ref count so that the buffer is freed immediately when the buffer | 85 | * b_lru_ref count so that the buffer is freed immediately when the buffer |
134 | * reference count falls to zero. If the buffer is already on the LRU, we need | 86 | * reference count falls to zero. If the buffer is already on the LRU, we need |
@@ -151,20 +103,14 @@ xfs_buf_stale( | |||
151 | */ | 103 | */ |
152 | bp->b_flags &= ~_XBF_DELWRI_Q; | 104 | bp->b_flags &= ~_XBF_DELWRI_Q; |
153 | 105 | ||
154 | atomic_set(&(bp)->b_lru_ref, 0); | 106 | spin_lock(&bp->b_lock); |
155 | if (!list_empty(&bp->b_lru)) { | 107 | atomic_set(&bp->b_lru_ref, 0); |
156 | struct xfs_buftarg *btp = bp->b_target; | 108 | if (!(bp->b_state & XFS_BSTATE_DISPOSE) && |
109 | (list_lru_del(&bp->b_target->bt_lru, &bp->b_lru))) | ||
110 | atomic_dec(&bp->b_hold); | ||
157 | 111 | ||
158 | spin_lock(&btp->bt_lru_lock); | ||
159 | if (!list_empty(&bp->b_lru) && | ||
160 | !(bp->b_lru_flags & _XBF_LRU_DISPOSE)) { | ||
161 | list_del_init(&bp->b_lru); | ||
162 | btp->bt_lru_nr--; | ||
163 | atomic_dec(&bp->b_hold); | ||
164 | } | ||
165 | spin_unlock(&btp->bt_lru_lock); | ||
166 | } | ||
167 | ASSERT(atomic_read(&bp->b_hold) >= 1); | 112 | ASSERT(atomic_read(&bp->b_hold) >= 1); |
113 | spin_unlock(&bp->b_lock); | ||
168 | } | 114 | } |
169 | 115 | ||
170 | static int | 116 | static int |
@@ -228,6 +174,7 @@ _xfs_buf_alloc( | |||
228 | INIT_LIST_HEAD(&bp->b_list); | 174 | INIT_LIST_HEAD(&bp->b_list); |
229 | RB_CLEAR_NODE(&bp->b_rbnode); | 175 | RB_CLEAR_NODE(&bp->b_rbnode); |
230 | sema_init(&bp->b_sema, 0); /* held, no waiters */ | 176 | sema_init(&bp->b_sema, 0); /* held, no waiters */ |
177 | spin_lock_init(&bp->b_lock); | ||
231 | XB_SET_OWNER(bp); | 178 | XB_SET_OWNER(bp); |
232 | bp->b_target = target; | 179 | bp->b_target = target; |
233 | bp->b_flags = flags; | 180 | bp->b_flags = flags; |
@@ -917,12 +864,33 @@ xfs_buf_rele( | |||
917 | 864 | ||
918 | ASSERT(atomic_read(&bp->b_hold) > 0); | 865 | ASSERT(atomic_read(&bp->b_hold) > 0); |
919 | if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) { | 866 | if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) { |
920 | if (!(bp->b_flags & XBF_STALE) && | 867 | spin_lock(&bp->b_lock); |
921 | atomic_read(&bp->b_lru_ref)) { | 868 | if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) { |
922 | xfs_buf_lru_add(bp); | 869 | /* |
870 | * If the buffer is added to the LRU take a new | ||
871 | * reference to the buffer for the LRU and clear the | ||
872 | * (now stale) dispose list state flag | ||
873 | */ | ||
874 | if (list_lru_add(&bp->b_target->bt_lru, &bp->b_lru)) { | ||
875 | bp->b_state &= ~XFS_BSTATE_DISPOSE; | ||
876 | atomic_inc(&bp->b_hold); | ||
877 | } | ||
878 | spin_unlock(&bp->b_lock); | ||
923 | spin_unlock(&pag->pag_buf_lock); | 879 | spin_unlock(&pag->pag_buf_lock); |
924 | } else { | 880 | } else { |
925 | xfs_buf_lru_del(bp); | 881 | /* |
882 | * most of the time buffers will already be removed from | ||
883 | * the LRU, so optimise that case by checking for the | ||
884 | * XFS_BSTATE_DISPOSE flag indicating the last list the | ||
885 | * buffer was on was the disposal list | ||
886 | */ | ||
887 | if (!(bp->b_state & XFS_BSTATE_DISPOSE)) { | ||
888 | list_lru_del(&bp->b_target->bt_lru, &bp->b_lru); | ||
889 | } else { | ||
890 | ASSERT(list_empty(&bp->b_lru)); | ||
891 | } | ||
892 | spin_unlock(&bp->b_lock); | ||
893 | |||
926 | ASSERT(!(bp->b_flags & _XBF_DELWRI_Q)); | 894 | ASSERT(!(bp->b_flags & _XBF_DELWRI_Q)); |
927 | rb_erase(&bp->b_rbnode, &pag->pag_buf_tree); | 895 | rb_erase(&bp->b_rbnode, &pag->pag_buf_tree); |
928 | spin_unlock(&pag->pag_buf_lock); | 896 | spin_unlock(&pag->pag_buf_lock); |
@@ -1502,83 +1470,121 @@ xfs_buf_iomove( | |||
1502 | * returned. These buffers will have an elevated hold count, so wait on those | 1470 | * returned. These buffers will have an elevated hold count, so wait on those |
1503 | * while freeing all the buffers only held by the LRU. | 1471 | * while freeing all the buffers only held by the LRU. |
1504 | */ | 1472 | */ |
1473 | static enum lru_status | ||
1474 | xfs_buftarg_wait_rele( | ||
1475 | struct list_head *item, | ||
1476 | spinlock_t *lru_lock, | ||
1477 | void *arg) | ||
1478 | |||
1479 | { | ||
1480 | struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru); | ||
1481 | struct list_head *dispose = arg; | ||
1482 | |||
1483 | if (atomic_read(&bp->b_hold) > 1) { | ||
1484 | /* need to wait, so skip it this pass */ | ||
1485 | trace_xfs_buf_wait_buftarg(bp, _RET_IP_); | ||
1486 | return LRU_SKIP; | ||
1487 | } | ||
1488 | if (!spin_trylock(&bp->b_lock)) | ||
1489 | return LRU_SKIP; | ||
1490 | |||
1491 | /* | ||
1492 | * clear the LRU reference count so the buffer doesn't get | ||
1493 | * ignored in xfs_buf_rele(). | ||
1494 | */ | ||
1495 | atomic_set(&bp->b_lru_ref, 0); | ||
1496 | bp->b_state |= XFS_BSTATE_DISPOSE; | ||
1497 | list_move(item, dispose); | ||
1498 | spin_unlock(&bp->b_lock); | ||
1499 | return LRU_REMOVED; | ||
1500 | } | ||
1501 | |||
1505 | void | 1502 | void |
1506 | xfs_wait_buftarg( | 1503 | xfs_wait_buftarg( |
1507 | struct xfs_buftarg *btp) | 1504 | struct xfs_buftarg *btp) |
1508 | { | 1505 | { |
1509 | struct xfs_buf *bp; | 1506 | LIST_HEAD(dispose); |
1507 | int loop = 0; | ||
1510 | 1508 | ||
1511 | restart: | 1509 | /* loop until there is nothing left on the lru list. */ |
1512 | spin_lock(&btp->bt_lru_lock); | 1510 | while (list_lru_count(&btp->bt_lru)) { |
1513 | while (!list_empty(&btp->bt_lru)) { | 1511 | list_lru_walk(&btp->bt_lru, xfs_buftarg_wait_rele, |
1514 | bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru); | 1512 | &dispose, LONG_MAX); |
1515 | if (atomic_read(&bp->b_hold) > 1) { | 1513 | |
1516 | trace_xfs_buf_wait_buftarg(bp, _RET_IP_); | 1514 | while (!list_empty(&dispose)) { |
1517 | list_move_tail(&bp->b_lru, &btp->bt_lru); | 1515 | struct xfs_buf *bp; |
1518 | spin_unlock(&btp->bt_lru_lock); | 1516 | bp = list_first_entry(&dispose, struct xfs_buf, b_lru); |
1519 | delay(100); | 1517 | list_del_init(&bp->b_lru); |
1520 | goto restart; | 1518 | xfs_buf_rele(bp); |
1521 | } | 1519 | } |
1522 | /* | 1520 | if (loop++ != 0) |
1523 | * clear the LRU reference count so the buffer doesn't get | 1521 | delay(100); |
1524 | * ignored in xfs_buf_rele(). | ||
1525 | */ | ||
1526 | atomic_set(&bp->b_lru_ref, 0); | ||
1527 | spin_unlock(&btp->bt_lru_lock); | ||
1528 | xfs_buf_rele(bp); | ||
1529 | spin_lock(&btp->bt_lru_lock); | ||
1530 | } | 1522 | } |
1531 | spin_unlock(&btp->bt_lru_lock); | ||
1532 | } | 1523 | } |
1533 | 1524 | ||
1534 | int | 1525 | static enum lru_status |
1535 | xfs_buftarg_shrink( | 1526 | xfs_buftarg_isolate( |
1527 | struct list_head *item, | ||
1528 | spinlock_t *lru_lock, | ||
1529 | void *arg) | ||
1530 | { | ||
1531 | struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru); | ||
1532 | struct list_head *dispose = arg; | ||
1533 | |||
1534 | /* | ||
1535 | * we are inverting the lru lock/bp->b_lock here, so use a trylock. | ||
1536 | * If we fail to get the lock, just skip it. | ||
1537 | */ | ||
1538 | if (!spin_trylock(&bp->b_lock)) | ||
1539 | return LRU_SKIP; | ||
1540 | /* | ||
1541 | * Decrement the b_lru_ref count unless the value is already | ||
1542 | * zero. If the value is already zero, we need to reclaim the | ||
1543 | * buffer, otherwise it gets another trip through the LRU. | ||
1544 | */ | ||
1545 | if (!atomic_add_unless(&bp->b_lru_ref, -1, 0)) { | ||
1546 | spin_unlock(&bp->b_lock); | ||
1547 | return LRU_ROTATE; | ||
1548 | } | ||
1549 | |||
1550 | bp->b_state |= XFS_BSTATE_DISPOSE; | ||
1551 | list_move(item, dispose); | ||
1552 | spin_unlock(&bp->b_lock); | ||
1553 | return LRU_REMOVED; | ||
1554 | } | ||
1555 | |||
1556 | static unsigned long | ||
1557 | xfs_buftarg_shrink_scan( | ||
1536 | struct shrinker *shrink, | 1558 | struct shrinker *shrink, |
1537 | struct shrink_control *sc) | 1559 | struct shrink_control *sc) |
1538 | { | 1560 | { |
1539 | struct xfs_buftarg *btp = container_of(shrink, | 1561 | struct xfs_buftarg *btp = container_of(shrink, |
1540 | struct xfs_buftarg, bt_shrinker); | 1562 | struct xfs_buftarg, bt_shrinker); |
1541 | struct xfs_buf *bp; | ||
1542 | int nr_to_scan = sc->nr_to_scan; | ||
1543 | LIST_HEAD(dispose); | 1563 | LIST_HEAD(dispose); |
1564 | unsigned long freed; | ||
1565 | unsigned long nr_to_scan = sc->nr_to_scan; | ||
1544 | 1566 | ||
1545 | if (!nr_to_scan) | 1567 | freed = list_lru_walk_node(&btp->bt_lru, sc->nid, xfs_buftarg_isolate, |
1546 | return btp->bt_lru_nr; | 1568 | &dispose, &nr_to_scan); |
1547 | |||
1548 | spin_lock(&btp->bt_lru_lock); | ||
1549 | while (!list_empty(&btp->bt_lru)) { | ||
1550 | if (nr_to_scan-- <= 0) | ||
1551 | break; | ||
1552 | |||
1553 | bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru); | ||
1554 | |||
1555 | /* | ||
1556 | * Decrement the b_lru_ref count unless the value is already | ||
1557 | * zero. If the value is already zero, we need to reclaim the | ||
1558 | * buffer, otherwise it gets another trip through the LRU. | ||
1559 | */ | ||
1560 | if (!atomic_add_unless(&bp->b_lru_ref, -1, 0)) { | ||
1561 | list_move_tail(&bp->b_lru, &btp->bt_lru); | ||
1562 | continue; | ||
1563 | } | ||
1564 | |||
1565 | /* | ||
1566 | * remove the buffer from the LRU now to avoid needing another | ||
1567 | * lock round trip inside xfs_buf_rele(). | ||
1568 | */ | ||
1569 | list_move(&bp->b_lru, &dispose); | ||
1570 | btp->bt_lru_nr--; | ||
1571 | bp->b_lru_flags |= _XBF_LRU_DISPOSE; | ||
1572 | } | ||
1573 | spin_unlock(&btp->bt_lru_lock); | ||
1574 | 1569 | ||
1575 | while (!list_empty(&dispose)) { | 1570 | while (!list_empty(&dispose)) { |
1571 | struct xfs_buf *bp; | ||
1576 | bp = list_first_entry(&dispose, struct xfs_buf, b_lru); | 1572 | bp = list_first_entry(&dispose, struct xfs_buf, b_lru); |
1577 | list_del_init(&bp->b_lru); | 1573 | list_del_init(&bp->b_lru); |
1578 | xfs_buf_rele(bp); | 1574 | xfs_buf_rele(bp); |
1579 | } | 1575 | } |
1580 | 1576 | ||
1581 | return btp->bt_lru_nr; | 1577 | return freed; |
1578 | } | ||
1579 | |||
1580 | static unsigned long | ||
1581 | xfs_buftarg_shrink_count( | ||
1582 | struct shrinker *shrink, | ||
1583 | struct shrink_control *sc) | ||
1584 | { | ||
1585 | struct xfs_buftarg *btp = container_of(shrink, | ||
1586 | struct xfs_buftarg, bt_shrinker); | ||
1587 | return list_lru_count_node(&btp->bt_lru, sc->nid); | ||
1582 | } | 1588 | } |
1583 | 1589 | ||
1584 | void | 1590 | void |
@@ -1587,6 +1593,7 @@ xfs_free_buftarg( | |||
1587 | struct xfs_buftarg *btp) | 1593 | struct xfs_buftarg *btp) |
1588 | { | 1594 | { |
1589 | unregister_shrinker(&btp->bt_shrinker); | 1595 | unregister_shrinker(&btp->bt_shrinker); |
1596 | list_lru_destroy(&btp->bt_lru); | ||
1590 | 1597 | ||
1591 | if (mp->m_flags & XFS_MOUNT_BARRIER) | 1598 | if (mp->m_flags & XFS_MOUNT_BARRIER) |
1592 | xfs_blkdev_issue_flush(btp); | 1599 | xfs_blkdev_issue_flush(btp); |
@@ -1660,12 +1667,16 @@ xfs_alloc_buftarg( | |||
1660 | if (!btp->bt_bdi) | 1667 | if (!btp->bt_bdi) |
1661 | goto error; | 1668 | goto error; |
1662 | 1669 | ||
1663 | INIT_LIST_HEAD(&btp->bt_lru); | ||
1664 | spin_lock_init(&btp->bt_lru_lock); | ||
1665 | if (xfs_setsize_buftarg_early(btp, bdev)) | 1670 | if (xfs_setsize_buftarg_early(btp, bdev)) |
1666 | goto error; | 1671 | goto error; |
1667 | btp->bt_shrinker.shrink = xfs_buftarg_shrink; | 1672 | |
1673 | if (list_lru_init(&btp->bt_lru)) | ||
1674 | goto error; | ||
1675 | |||
1676 | btp->bt_shrinker.count_objects = xfs_buftarg_shrink_count; | ||
1677 | btp->bt_shrinker.scan_objects = xfs_buftarg_shrink_scan; | ||
1668 | btp->bt_shrinker.seeks = DEFAULT_SEEKS; | 1678 | btp->bt_shrinker.seeks = DEFAULT_SEEKS; |
1679 | btp->bt_shrinker.flags = SHRINKER_NUMA_AWARE; | ||
1669 | register_shrinker(&btp->bt_shrinker); | 1680 | register_shrinker(&btp->bt_shrinker); |
1670 | return btp; | 1681 | return btp; |
1671 | 1682 | ||
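
The buffer cache conversion above is the clearest example of the two-phase list_lru pattern used throughout: the isolate callback runs with the internal lru lock held and only classifies the item (LRU_REMOVED after moving it to a private dispose list, LRU_ROTATE, or LRU_SKIP), while the expensive teardown, here xfs_buf_rele(), happens afterwards with no lru lock held. A stripped-down sketch of the caller side, with demo_obj and demo_free() as placeholders:

struct demo_obj {
        struct list_head lru;
};
void demo_free(struct demo_obj *obj);           /* placeholder destructor */

static enum lru_status demo_isolate(struct list_head *item,
                                    spinlock_t *lru_lock, void *arg)
{
        list_move(item, arg);                   /* arg is the private dispose list */
        return LRU_REMOVED;
}

static unsigned long demo_scan_node(struct list_lru *lru, int nid,
                                    unsigned long nr_to_scan)
{
        LIST_HEAD(dispose);
        unsigned long freed;

        /* phase 1: isolate up to nr_to_scan victims under the lru lock */
        freed = list_lru_walk_node(lru, nid, demo_isolate, &dispose,
                                   &nr_to_scan);

        /* phase 2: free them with no lru lock held */
        while (!list_empty(&dispose)) {
                struct demo_obj *obj;

                obj = list_first_entry(&dispose, struct demo_obj, lru);
                list_del_init(&obj->lru);
                demo_free(obj);
        }
        return freed;
}
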
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h index 433a12ed7b17..e65683361017 100644 --- a/fs/xfs/xfs_buf.h +++ b/fs/xfs/xfs_buf.h | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/fs.h> | 25 | #include <linux/fs.h> |
26 | #include <linux/buffer_head.h> | 26 | #include <linux/buffer_head.h> |
27 | #include <linux/uio.h> | 27 | #include <linux/uio.h> |
28 | #include <linux/list_lru.h> | ||
28 | 29 | ||
29 | /* | 30 | /* |
30 | * Base types | 31 | * Base types |
@@ -59,7 +60,6 @@ typedef enum { | |||
59 | #define _XBF_KMEM (1 << 21)/* backed by heap memory */ | 60 | #define _XBF_KMEM (1 << 21)/* backed by heap memory */ |
60 | #define _XBF_DELWRI_Q (1 << 22)/* buffer on a delwri queue */ | 61 | #define _XBF_DELWRI_Q (1 << 22)/* buffer on a delwri queue */ |
61 | #define _XBF_COMPOUND (1 << 23)/* compound buffer */ | 62 | #define _XBF_COMPOUND (1 << 23)/* compound buffer */ |
62 | #define _XBF_LRU_DISPOSE (1 << 24)/* buffer being discarded */ | ||
63 | 63 | ||
64 | typedef unsigned int xfs_buf_flags_t; | 64 | typedef unsigned int xfs_buf_flags_t; |
65 | 65 | ||
@@ -78,8 +78,12 @@ typedef unsigned int xfs_buf_flags_t; | |||
78 | { _XBF_PAGES, "PAGES" }, \ | 78 | { _XBF_PAGES, "PAGES" }, \ |
79 | { _XBF_KMEM, "KMEM" }, \ | 79 | { _XBF_KMEM, "KMEM" }, \ |
80 | { _XBF_DELWRI_Q, "DELWRI_Q" }, \ | 80 | { _XBF_DELWRI_Q, "DELWRI_Q" }, \ |
81 | { _XBF_COMPOUND, "COMPOUND" }, \ | 81 | { _XBF_COMPOUND, "COMPOUND" } |
82 | { _XBF_LRU_DISPOSE, "LRU_DISPOSE" } | 82 | |
83 | /* | ||
84 | * Internal state flags. | ||
85 | */ | ||
86 | #define XFS_BSTATE_DISPOSE (1 << 0) /* buffer being discarded */ | ||
83 | 87 | ||
84 | typedef struct xfs_buftarg { | 88 | typedef struct xfs_buftarg { |
85 | dev_t bt_dev; | 89 | dev_t bt_dev; |
@@ -92,9 +96,7 @@ typedef struct xfs_buftarg { | |||
92 | 96 | ||
93 | /* LRU control structures */ | 97 | /* LRU control structures */ |
94 | struct shrinker bt_shrinker; | 98 | struct shrinker bt_shrinker; |
95 | struct list_head bt_lru; | 99 | struct list_lru bt_lru; |
96 | spinlock_t bt_lru_lock; | ||
97 | unsigned int bt_lru_nr; | ||
98 | } xfs_buftarg_t; | 100 | } xfs_buftarg_t; |
99 | 101 | ||
100 | struct xfs_buf; | 102 | struct xfs_buf; |
@@ -137,7 +139,8 @@ typedef struct xfs_buf { | |||
137 | * bt_lru_lock and not by b_sema | 139 | * bt_lru_lock and not by b_sema |
138 | */ | 140 | */ |
139 | struct list_head b_lru; /* lru list */ | 141 | struct list_head b_lru; /* lru list */ |
140 | xfs_buf_flags_t b_lru_flags; /* internal lru status flags */ | 142 | spinlock_t b_lock; /* internal state lock */ |
143 | unsigned int b_state; /* internal state flags */ | ||
141 | wait_queue_head_t b_waiters; /* unpin waiters */ | 144 | wait_queue_head_t b_waiters; /* unpin waiters */ |
142 | struct list_head b_list; | 145 | struct list_head b_list; |
143 | struct xfs_perag *b_pag; /* contains rbtree root */ | 146 | struct xfs_perag *b_pag; /* contains rbtree root */ |
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c index 251c66632e5e..71520e6e5d65 100644 --- a/fs/xfs/xfs_dquot.c +++ b/fs/xfs/xfs_dquot.c | |||
@@ -940,13 +940,8 @@ xfs_qm_dqput_final( | |||
940 | 940 | ||
941 | trace_xfs_dqput_free(dqp); | 941 | trace_xfs_dqput_free(dqp); |
942 | 942 | ||
943 | mutex_lock(&qi->qi_lru_lock); | 943 | if (list_lru_add(&qi->qi_lru, &dqp->q_lru)) |
944 | if (list_empty(&dqp->q_lru)) { | ||
945 | list_add_tail(&dqp->q_lru, &qi->qi_lru_list); | ||
946 | qi->qi_lru_count++; | ||
947 | XFS_STATS_INC(xs_qm_dquot_unused); | 944 | XFS_STATS_INC(xs_qm_dquot_unused); |
948 | } | ||
949 | mutex_unlock(&qi->qi_lru_lock); | ||
950 | 945 | ||
951 | /* | 946 | /* |
952 | * If we just added a udquot to the freelist, then we want to release | 947 | * If we just added a udquot to the freelist, then we want to release |
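
As used in xfs_qm_dqput_final() above and in xfs_buf_rele() earlier, list_lru_add() evidently returns true only when the item was actually inserted (it was not already on a list) and list_lru_del() returns true only when it actually removed something, which lets callers keep hold counts and statistics in step with LRU membership without re-checking list_empty() under a private lock. A small sketch of that idiom (all demo_* names are placeholders):

struct demo_obj {
        struct list_head        lru;
        atomic_t                hold;
};

static struct list_lru demo_lru;        /* list_lru_init(&demo_lru) at setup time */

static void demo_lru_insert(struct demo_obj *obj)
{
        if (list_lru_add(&demo_lru, &obj->lru)) /* newly added to the LRU? */
                atomic_inc(&obj->hold);         /* the LRU now owns a reference */
}

static void demo_lru_remove(struct demo_obj *obj)
{
        if (list_lru_del(&demo_lru, &obj->lru)) /* actually taken off the LRU? */
                atomic_dec(&obj->hold);
}
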
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c index 16219b9c6790..73b62a24ceac 100644 --- a/fs/xfs/xfs_icache.c +++ b/fs/xfs/xfs_icache.c | |||
@@ -1167,7 +1167,7 @@ xfs_reclaim_inodes( | |||
1167 | * them to be cleaned, which we hope will not be very long due to the | 1167 | * them to be cleaned, which we hope will not be very long due to the |
1168 | * background walker having already kicked the IO off on those dirty inodes. | 1168 | * background walker having already kicked the IO off on those dirty inodes. |
1169 | */ | 1169 | */ |
1170 | void | 1170 | long |
1171 | xfs_reclaim_inodes_nr( | 1171 | xfs_reclaim_inodes_nr( |
1172 | struct xfs_mount *mp, | 1172 | struct xfs_mount *mp, |
1173 | int nr_to_scan) | 1173 | int nr_to_scan) |
@@ -1176,7 +1176,7 @@ xfs_reclaim_inodes_nr( | |||
1176 | xfs_reclaim_work_queue(mp); | 1176 | xfs_reclaim_work_queue(mp); |
1177 | xfs_ail_push_all(mp->m_ail); | 1177 | xfs_ail_push_all(mp->m_ail); |
1178 | 1178 | ||
1179 | xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT, &nr_to_scan); | 1179 | return xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT, &nr_to_scan); |
1180 | } | 1180 | } |
1181 | 1181 | ||
1182 | /* | 1182 | /* |
diff --git a/fs/xfs/xfs_icache.h b/fs/xfs/xfs_icache.h index 8a89f7d791bd..456f0144e1b6 100644 --- a/fs/xfs/xfs_icache.h +++ b/fs/xfs/xfs_icache.h | |||
@@ -46,7 +46,7 @@ void xfs_reclaim_worker(struct work_struct *work); | |||
46 | 46 | ||
47 | int xfs_reclaim_inodes(struct xfs_mount *mp, int mode); | 47 | int xfs_reclaim_inodes(struct xfs_mount *mp, int mode); |
48 | int xfs_reclaim_inodes_count(struct xfs_mount *mp); | 48 | int xfs_reclaim_inodes_count(struct xfs_mount *mp); |
49 | void xfs_reclaim_inodes_nr(struct xfs_mount *mp, int nr_to_scan); | 49 | long xfs_reclaim_inodes_nr(struct xfs_mount *mp, int nr_to_scan); |
50 | 50 | ||
51 | void xfs_inode_set_reclaim_tag(struct xfs_inode *ip); | 51 | void xfs_inode_set_reclaim_tag(struct xfs_inode *ip); |
52 | 52 | ||
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c index 6218a0aeeeea..3e6c2e6c9cd2 100644 --- a/fs/xfs/xfs_qm.c +++ b/fs/xfs/xfs_qm.c | |||
@@ -51,8 +51,9 @@ | |||
51 | */ | 51 | */ |
52 | STATIC int xfs_qm_init_quotainos(xfs_mount_t *); | 52 | STATIC int xfs_qm_init_quotainos(xfs_mount_t *); |
53 | STATIC int xfs_qm_init_quotainfo(xfs_mount_t *); | 53 | STATIC int xfs_qm_init_quotainfo(xfs_mount_t *); |
54 | STATIC int xfs_qm_shake(struct shrinker *, struct shrink_control *); | ||
55 | 54 | ||
55 | |||
56 | STATIC void xfs_qm_dqfree_one(struct xfs_dquot *dqp); | ||
56 | /* | 57 | /* |
57 | * We use the batch lookup interface to iterate over the dquots as it | 58 | * We use the batch lookup interface to iterate over the dquots as it |
58 | * currently is the only interface into the radix tree code that allows | 59 | * currently is the only interface into the radix tree code that allows |
@@ -203,12 +204,9 @@ xfs_qm_dqpurge( | |||
203 | * We move dquots to the freelist as soon as their reference count | 204 | * We move dquots to the freelist as soon as their reference count |
204 | * hits zero, so it really should be on the freelist here. | 205 | * hits zero, so it really should be on the freelist here. |
205 | */ | 206 | */ |
206 | mutex_lock(&qi->qi_lru_lock); | ||
207 | ASSERT(!list_empty(&dqp->q_lru)); | 207 | ASSERT(!list_empty(&dqp->q_lru)); |
208 | list_del_init(&dqp->q_lru); | 208 | list_lru_del(&qi->qi_lru, &dqp->q_lru); |
209 | qi->qi_lru_count--; | ||
210 | XFS_STATS_DEC(xs_qm_dquot_unused); | 209 | XFS_STATS_DEC(xs_qm_dquot_unused); |
211 | mutex_unlock(&qi->qi_lru_lock); | ||
212 | 210 | ||
213 | xfs_qm_dqdestroy(dqp); | 211 | xfs_qm_dqdestroy(dqp); |
214 | 212 | ||
@@ -680,6 +678,143 @@ xfs_qm_calc_dquots_per_chunk( | |||
680 | return ndquots; | 678 | return ndquots; |
681 | } | 679 | } |
682 | 680 | ||
681 | struct xfs_qm_isolate { | ||
682 | struct list_head buffers; | ||
683 | struct list_head dispose; | ||
684 | }; | ||
685 | |||
686 | static enum lru_status | ||
687 | xfs_qm_dquot_isolate( | ||
688 | struct list_head *item, | ||
689 | spinlock_t *lru_lock, | ||
690 | void *arg) | ||
691 | { | ||
692 | struct xfs_dquot *dqp = container_of(item, | ||
693 | struct xfs_dquot, q_lru); | ||
694 | struct xfs_qm_isolate *isol = arg; | ||
695 | |||
696 | if (!xfs_dqlock_nowait(dqp)) | ||
697 | goto out_miss_busy; | ||
698 | |||
699 | /* | ||
700 | * This dquot has acquired a reference in the meantime remove it from | ||
701 | * the freelist and try again. | ||
702 | */ | ||
703 | if (dqp->q_nrefs) { | ||
704 | xfs_dqunlock(dqp); | ||
705 | XFS_STATS_INC(xs_qm_dqwants); | ||
706 | |||
707 | trace_xfs_dqreclaim_want(dqp); | ||
708 | list_del_init(&dqp->q_lru); | ||
709 | XFS_STATS_DEC(xs_qm_dquot_unused); | ||
710 | return LRU_REMOVED; | ||
711 | } | ||
712 | |||
713 | /* | ||
714 | * If the dquot is dirty, flush it. If it's already being flushed, just | ||
715 | * skip it so there is time for the IO to complete before we try to | ||
716 | * reclaim it again on the next LRU pass. | ||
717 | */ | ||
718 | if (!xfs_dqflock_nowait(dqp)) { | ||
719 | xfs_dqunlock(dqp); | ||
720 | goto out_miss_busy; | ||
721 | } | ||
722 | |||
723 | if (XFS_DQ_IS_DIRTY(dqp)) { | ||
724 | struct xfs_buf *bp = NULL; | ||
725 | int error; | ||
726 | |||
727 | trace_xfs_dqreclaim_dirty(dqp); | ||
728 | |||
729 | /* we have to drop the LRU lock to flush the dquot */ | ||
730 | spin_unlock(lru_lock); | ||
731 | |||
732 | error = xfs_qm_dqflush(dqp, &bp); | ||
733 | if (error) { | ||
734 | xfs_warn(dqp->q_mount, "%s: dquot %p flush failed", | ||
735 | __func__, dqp); | ||
736 | goto out_unlock_dirty; | ||
737 | } | ||
738 | |||
739 | xfs_buf_delwri_queue(bp, &isol->buffers); | ||
740 | xfs_buf_relse(bp); | ||
741 | goto out_unlock_dirty; | ||
742 | } | ||
743 | xfs_dqfunlock(dqp); | ||
744 | |||
745 | /* | ||
746 | * Prevent lookups now that we are past the point of no return. | ||
747 | */ | ||
748 | dqp->dq_flags |= XFS_DQ_FREEING; | ||
749 | xfs_dqunlock(dqp); | ||
750 | |||
751 | ASSERT(dqp->q_nrefs == 0); | ||
752 | list_move_tail(&dqp->q_lru, &isol->dispose); | ||
753 | XFS_STATS_DEC(xs_qm_dquot_unused); | ||
754 | trace_xfs_dqreclaim_done(dqp); | ||
755 | XFS_STATS_INC(xs_qm_dqreclaims); | ||
756 | return LRU_REMOVED; | ||
757 | |||
758 | out_miss_busy: | ||
759 | trace_xfs_dqreclaim_busy(dqp); | ||
760 | XFS_STATS_INC(xs_qm_dqreclaim_misses); | ||
761 | return LRU_SKIP; | ||
762 | |||
763 | out_unlock_dirty: | ||
764 | trace_xfs_dqreclaim_busy(dqp); | ||
765 | XFS_STATS_INC(xs_qm_dqreclaim_misses); | ||
766 | xfs_dqunlock(dqp); | ||
767 | spin_lock(lru_lock); | ||
768 | return LRU_RETRY; | ||
769 | } | ||
770 | |||
771 | static unsigned long | ||
772 | xfs_qm_shrink_scan( | ||
773 | struct shrinker *shrink, | ||
774 | struct shrink_control *sc) | ||
775 | { | ||
776 | struct xfs_quotainfo *qi = container_of(shrink, | ||
777 | struct xfs_quotainfo, qi_shrinker); | ||
778 | struct xfs_qm_isolate isol; | ||
779 | unsigned long freed; | ||
780 | int error; | ||
781 | unsigned long nr_to_scan = sc->nr_to_scan; | ||
782 | |||
783 | if ((sc->gfp_mask & (__GFP_FS|__GFP_WAIT)) != (__GFP_FS|__GFP_WAIT)) | ||
784 | return 0; | ||
785 | |||
786 | INIT_LIST_HEAD(&isol.buffers); | ||
787 | INIT_LIST_HEAD(&isol.dispose); | ||
788 | |||
789 | freed = list_lru_walk_node(&qi->qi_lru, sc->nid, xfs_qm_dquot_isolate, &isol, | ||
790 | &nr_to_scan); | ||
791 | |||
792 | error = xfs_buf_delwri_submit(&isol.buffers); | ||
793 | if (error) | ||
794 | xfs_warn(NULL, "%s: dquot reclaim failed", __func__); | ||
795 | |||
796 | while (!list_empty(&isol.dispose)) { | ||
797 | struct xfs_dquot *dqp; | ||
798 | |||
799 | dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru); | ||
800 | list_del_init(&dqp->q_lru); | ||
801 | xfs_qm_dqfree_one(dqp); | ||
802 | } | ||
803 | |||
804 | return freed; | ||
805 | } | ||
806 | |||
807 | static unsigned long | ||
808 | xfs_qm_shrink_count( | ||
809 | struct shrinker *shrink, | ||
810 | struct shrink_control *sc) | ||
811 | { | ||
812 | struct xfs_quotainfo *qi = container_of(shrink, | ||
813 | struct xfs_quotainfo, qi_shrinker); | ||
814 | |||
815 | return list_lru_count_node(&qi->qi_lru, sc->nid); | ||
816 | } | ||
817 | |||
683 | /* | 818 | /* |
684 | * This initializes all the quota information that's kept in the | 819 | * This initializes all the quota information that's kept in the |
685 | * mount structure | 820 | * mount structure |
@@ -696,11 +831,18 @@ xfs_qm_init_quotainfo( | |||
696 | 831 | ||
697 | qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP); | 832 | qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP); |
698 | 833 | ||
834 | if ((error = list_lru_init(&qinf->qi_lru))) { | ||
835 | kmem_free(qinf); | ||
836 | mp->m_quotainfo = NULL; | ||
837 | return error; | ||
838 | } | ||
839 | |||
699 | /* | 840 | /* |
700 | * See if quotainodes are setup, and if not, allocate them, | 841 | * See if quotainodes are setup, and if not, allocate them, |
701 | * and change the superblock accordingly. | 842 | * and change the superblock accordingly. |
702 | */ | 843 | */ |
703 | if ((error = xfs_qm_init_quotainos(mp))) { | 844 | if ((error = xfs_qm_init_quotainos(mp))) { |
845 | list_lru_destroy(&qinf->qi_lru); | ||
704 | kmem_free(qinf); | 846 | kmem_free(qinf); |
705 | mp->m_quotainfo = NULL; | 847 | mp->m_quotainfo = NULL; |
706 | return error; | 848 | return error; |
@@ -711,10 +853,6 @@ xfs_qm_init_quotainfo( | |||
711 | INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS); | 853 | INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS); |
712 | mutex_init(&qinf->qi_tree_lock); | 854 | mutex_init(&qinf->qi_tree_lock); |
713 | 855 | ||
714 | INIT_LIST_HEAD(&qinf->qi_lru_list); | ||
715 | qinf->qi_lru_count = 0; | ||
716 | mutex_init(&qinf->qi_lru_lock); | ||
717 | |||
718 | /* mutex used to serialize quotaoffs */ | 856 | /* mutex used to serialize quotaoffs */ |
719 | mutex_init(&qinf->qi_quotaofflock); | 857 | mutex_init(&qinf->qi_quotaofflock); |
720 | 858 | ||
@@ -779,8 +917,10 @@ xfs_qm_init_quotainfo( | |||
779 | qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT; | 917 | qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT; |
780 | } | 918 | } |
781 | 919 | ||
782 | qinf->qi_shrinker.shrink = xfs_qm_shake; | 920 | qinf->qi_shrinker.count_objects = xfs_qm_shrink_count; |
921 | qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan; | ||
783 | qinf->qi_shrinker.seeks = DEFAULT_SEEKS; | 922 | qinf->qi_shrinker.seeks = DEFAULT_SEEKS; |
923 | qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE; | ||
784 | register_shrinker(&qinf->qi_shrinker); | 924 | register_shrinker(&qinf->qi_shrinker); |
785 | return 0; | 925 | return 0; |
786 | } | 926 | } |
@@ -801,6 +941,7 @@ xfs_qm_destroy_quotainfo( | |||
801 | ASSERT(qi != NULL); | 941 | ASSERT(qi != NULL); |
802 | 942 | ||
803 | unregister_shrinker(&qi->qi_shrinker); | 943 | unregister_shrinker(&qi->qi_shrinker); |
944 | list_lru_destroy(&qi->qi_lru); | ||
804 | 945 | ||
805 | if (qi->qi_uquotaip) { | 946 | if (qi->qi_uquotaip) { |
806 | IRELE(qi->qi_uquotaip); | 947 | IRELE(qi->qi_uquotaip); |
@@ -1599,132 +1740,6 @@ xfs_qm_dqfree_one( | |||
1599 | xfs_qm_dqdestroy(dqp); | 1740 | xfs_qm_dqdestroy(dqp); |
1600 | } | 1741 | } |
1601 | 1742 | ||
1602 | STATIC void | ||
1603 | xfs_qm_dqreclaim_one( | ||
1604 | struct xfs_dquot *dqp, | ||
1605 | struct list_head *buffer_list, | ||
1606 | struct list_head *dispose_list) | ||
1607 | { | ||
1608 | struct xfs_mount *mp = dqp->q_mount; | ||
1609 | struct xfs_quotainfo *qi = mp->m_quotainfo; | ||
1610 | int error; | ||
1611 | |||
1612 | if (!xfs_dqlock_nowait(dqp)) | ||
1613 | goto out_move_tail; | ||
1614 | |||
1615 | /* | ||
1616 | * This dquot has acquired a reference in the meantime remove it from | ||
1617 | * the freelist and try again. | ||
1618 | */ | ||
1619 | if (dqp->q_nrefs) { | ||
1620 | xfs_dqunlock(dqp); | ||
1621 | |||
1622 | trace_xfs_dqreclaim_want(dqp); | ||
1623 | XFS_STATS_INC(xs_qm_dqwants); | ||
1624 | |||
1625 | list_del_init(&dqp->q_lru); | ||
1626 | qi->qi_lru_count--; | ||
1627 | XFS_STATS_DEC(xs_qm_dquot_unused); | ||
1628 | return; | ||
1629 | } | ||
1630 | |||
1631 | /* | ||
1632 | * Try to grab the flush lock. If this dquot is in the process of | ||
1633 | * getting flushed to disk, we don't want to reclaim it. | ||
1634 | */ | ||
1635 | if (!xfs_dqflock_nowait(dqp)) | ||
1636 | goto out_unlock_move_tail; | ||
1637 | |||
1638 | if (XFS_DQ_IS_DIRTY(dqp)) { | ||
1639 | struct xfs_buf *bp = NULL; | ||
1640 | |||
1641 | trace_xfs_dqreclaim_dirty(dqp); | ||
1642 | |||
1643 | error = xfs_qm_dqflush(dqp, &bp); | ||
1644 | if (error) { | ||
1645 | xfs_warn(mp, "%s: dquot %p flush failed", | ||
1646 | __func__, dqp); | ||
1647 | goto out_unlock_move_tail; | ||
1648 | } | ||
1649 | |||
1650 | xfs_buf_delwri_queue(bp, buffer_list); | ||
1651 | xfs_buf_relse(bp); | ||
1652 | /* | ||
1653 | * Give the dquot another try on the freelist, as the | ||
1654 | * flushing will take some time. | ||
1655 | */ | ||
1656 | goto out_unlock_move_tail; | ||
1657 | } | ||
1658 | xfs_dqfunlock(dqp); | ||
1659 | |||
1660 | /* | ||
1661 | * Prevent lookups now that we are past the point of no return. | ||
1662 | */ | ||
1663 | dqp->dq_flags |= XFS_DQ_FREEING; | ||
1664 | xfs_dqunlock(dqp); | ||
1665 | |||
1666 | ASSERT(dqp->q_nrefs == 0); | ||
1667 | list_move_tail(&dqp->q_lru, dispose_list); | ||
1668 | qi->qi_lru_count--; | ||
1669 | XFS_STATS_DEC(xs_qm_dquot_unused); | ||
1670 | |||
1671 | trace_xfs_dqreclaim_done(dqp); | ||
1672 | XFS_STATS_INC(xs_qm_dqreclaims); | ||
1673 | return; | ||
1674 | |||
1675 | /* | ||
1676 | * Move the dquot to the tail of the list so that we don't spin on it. | ||
1677 | */ | ||
1678 | out_unlock_move_tail: | ||
1679 | xfs_dqunlock(dqp); | ||
1680 | out_move_tail: | ||
1681 | list_move_tail(&dqp->q_lru, &qi->qi_lru_list); | ||
1682 | trace_xfs_dqreclaim_busy(dqp); | ||
1683 | XFS_STATS_INC(xs_qm_dqreclaim_misses); | ||
1684 | } | ||
1685 | |||
1686 | STATIC int | ||
1687 | xfs_qm_shake( | ||
1688 | struct shrinker *shrink, | ||
1689 | struct shrink_control *sc) | ||
1690 | { | ||
1691 | struct xfs_quotainfo *qi = | ||
1692 | container_of(shrink, struct xfs_quotainfo, qi_shrinker); | ||
1693 | int nr_to_scan = sc->nr_to_scan; | ||
1694 | LIST_HEAD (buffer_list); | ||
1695 | LIST_HEAD (dispose_list); | ||
1696 | struct xfs_dquot *dqp; | ||
1697 | int error; | ||
1698 | |||
1699 | if ((sc->gfp_mask & (__GFP_FS|__GFP_WAIT)) != (__GFP_FS|__GFP_WAIT)) | ||
1700 | return 0; | ||
1701 | if (!nr_to_scan) | ||
1702 | goto out; | ||
1703 | |||
1704 | mutex_lock(&qi->qi_lru_lock); | ||
1705 | while (!list_empty(&qi->qi_lru_list)) { | ||
1706 | if (nr_to_scan-- <= 0) | ||
1707 | break; | ||
1708 | dqp = list_first_entry(&qi->qi_lru_list, struct xfs_dquot, | ||
1709 | q_lru); | ||
1710 | xfs_qm_dqreclaim_one(dqp, &buffer_list, &dispose_list); | ||
1711 | } | ||
1712 | mutex_unlock(&qi->qi_lru_lock); | ||
1713 | |||
1714 | error = xfs_buf_delwri_submit(&buffer_list); | ||
1715 | if (error) | ||
1716 | xfs_warn(NULL, "%s: dquot reclaim failed", __func__); | ||
1717 | |||
1718 | while (!list_empty(&dispose_list)) { | ||
1719 | dqp = list_first_entry(&dispose_list, struct xfs_dquot, q_lru); | ||
1720 | list_del_init(&dqp->q_lru); | ||
1721 | xfs_qm_dqfree_one(dqp); | ||
1722 | } | ||
1723 | |||
1724 | out: | ||
1725 | return (qi->qi_lru_count / 100) * sysctl_vfs_cache_pressure; | ||
1726 | } | ||
1727 | |||
1728 | /* | 1743 | /* |
1729 | * Start a transaction and write the incore superblock changes to | 1744 | * Start a transaction and write the incore superblock changes to |
1730 | * disk. flags parameter indicates which fields have changed. | 1745 | * disk. flags parameter indicates which fields have changed. |
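
xfs_qm_dquot_isolate() above also demonstrates the heavier parts of the isolate contract: LRU_SKIP when a trylock fails, and dropping the lru lock for blocking work (the dquot flush) before re-taking it and returning LRU_RETRY, which presumably makes the walker revalidate its position instead of trusting its cursor. A compressed sketch of that branch structure (all demo_* names are placeholders, not from this patch):

struct demo_obj {
        struct list_head lru;
};
bool demo_trylock(struct demo_obj *obj);        /* placeholder helpers */
void demo_unlock(struct demo_obj *obj);
bool demo_is_dirty(struct demo_obj *obj);
void demo_start_writeback(struct demo_obj *obj);

static enum lru_status demo_isolate(struct list_head *item,
                                    spinlock_t *lru_lock, void *arg)
{
        struct demo_obj *obj = container_of(item, struct demo_obj, lru);

        if (!demo_trylock(obj))
                return LRU_SKIP;                /* busy; try again on a later pass */

        if (demo_is_dirty(obj)) {
                spin_unlock(lru_lock);          /* blocking work is allowed here... */
                demo_start_writeback(obj);
                demo_unlock(obj);
                spin_lock(lru_lock);            /* ...but the lock must be re-taken */
                return LRU_RETRY;               /* not disposed of on this pass */
        }

        demo_unlock(obj);
        list_move(item, arg);                   /* arg is the dispose list */
        return LRU_REMOVED;
}
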
diff --git a/fs/xfs/xfs_qm.h b/fs/xfs/xfs_qm.h index 670cd4464070..2b602df9c242 100644 --- a/fs/xfs/xfs_qm.h +++ b/fs/xfs/xfs_qm.h | |||
@@ -49,9 +49,7 @@ typedef struct xfs_quotainfo { | |||
49 | struct xfs_inode *qi_uquotaip; /* user quota inode */ | 49 | struct xfs_inode *qi_uquotaip; /* user quota inode */ |
50 | struct xfs_inode *qi_gquotaip; /* group quota inode */ | 50 | struct xfs_inode *qi_gquotaip; /* group quota inode */ |
51 | struct xfs_inode *qi_pquotaip; /* project quota inode */ | 51 | struct xfs_inode *qi_pquotaip; /* project quota inode */ |
52 | struct list_head qi_lru_list; | 52 | struct list_lru qi_lru; |
53 | struct mutex qi_lru_lock; | ||
54 | int qi_lru_count; | ||
55 | int qi_dquots; | 53 | int qi_dquots; |
56 | time_t qi_btimelimit; /* limit for blks timer */ | 54 | time_t qi_btimelimit; /* limit for blks timer */ |
57 | time_t qi_itimelimit; /* limit for inodes timer */ | 55 | time_t qi_itimelimit; /* limit for inodes timer */ |
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c index 979a77d4b87d..15188cc99449 100644 --- a/fs/xfs/xfs_super.c +++ b/fs/xfs/xfs_super.c | |||
@@ -1535,19 +1535,21 @@ xfs_fs_mount( | |||
1535 | return mount_bdev(fs_type, flags, dev_name, data, xfs_fs_fill_super); | 1535 | return mount_bdev(fs_type, flags, dev_name, data, xfs_fs_fill_super); |
1536 | } | 1536 | } |
1537 | 1537 | ||
1538 | static int | 1538 | static long |
1539 | xfs_fs_nr_cached_objects( | 1539 | xfs_fs_nr_cached_objects( |
1540 | struct super_block *sb) | 1540 | struct super_block *sb, |
1541 | int nid) | ||
1541 | { | 1542 | { |
1542 | return xfs_reclaim_inodes_count(XFS_M(sb)); | 1543 | return xfs_reclaim_inodes_count(XFS_M(sb)); |
1543 | } | 1544 | } |
1544 | 1545 | ||
1545 | static void | 1546 | static long |
1546 | xfs_fs_free_cached_objects( | 1547 | xfs_fs_free_cached_objects( |
1547 | struct super_block *sb, | 1548 | struct super_block *sb, |
1548 | int nr_to_scan) | 1549 | long nr_to_scan, |
1550 | int nid) | ||
1549 | { | 1551 | { |
1550 | xfs_reclaim_inodes_nr(XFS_M(sb), nr_to_scan); | 1552 | return xfs_reclaim_inodes_nr(XFS_M(sb), nr_to_scan); |
1551 | } | 1553 | } |
1552 | 1554 | ||
1553 | static const struct super_operations xfs_super_operations = { | 1555 | static const struct super_operations xfs_super_operations = { |
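
The xfs_super.c hunk shows the new shape of the per-superblock cache hooks that super_cache_count() and super_cache_scan() call: both now take a NUMA node id, and free_cached_objects() reports how many objects it freed so the total can be handed back to the shrinker core. The matching header change is not part of the hunks shown here, but the prototypes implied by the callers in fs/super.c and by the XFS implementation above are presumably:

/* presumed super_operations members after this series; not quoted from it */
struct super_operations {
        /* ... */
        long (*nr_cached_objects)(struct super_block *sb, int nid);
        long (*free_cached_objects)(struct super_block *sb,
                                    long nr_to_scan, int nid);
        /* ... */
};

Filesystems that do not implement these are unaffected: super_cache_scan() only calls free_cached_objects() when nr_cached_objects() reported a non-zero count.
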