-rw-r--r--  fs/dcache.c                 |  7
-rw-r--r--  fs/dquot.c                  |  7
-rw-r--r--  fs/inode.c                  |  7
-rw-r--r--  fs/mbcache.c                |  9
-rw-r--r--  fs/nfs/super.c              | 10
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.c  | 14
-rw-r--r--  fs/xfs/quota/xfs_qm.c       | 10
-rw-r--r--  include/linux/mm.h          | 38
-rw-r--r--  mm/vmscan.c                 | 42
-rw-r--r--  net/sunrpc/auth.c           | 11
10 files changed, 81 insertions, 74 deletions
diff --git a/fs/dcache.c b/fs/dcache.c
index 0e73aa0a0e8b..cb9d05056b54 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -883,6 +883,11 @@ static int shrink_dcache_memory(int nr, gfp_t gfp_mask)
         return (dentry_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
 }
 
+static struct shrinker dcache_shrinker = {
+        .shrink = shrink_dcache_memory,
+        .seeks = DEFAULT_SEEKS,
+};
+
 /**
  * d_alloc - allocate a dcache entry
  * @parent: parent of entry to allocate
@@ -2115,7 +2120,7 @@ static void __init dcache_init(unsigned long mempages)
         dentry_cache = KMEM_CACHE(dentry,
                 SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);
 
-        set_shrinker(DEFAULT_SEEKS, shrink_dcache_memory);
+        register_shrinker(&dcache_shrinker);
 
         /* Hash may have been set up in dcache_init_early */
         if (!hashdist)
diff --git a/fs/dquot.c b/fs/dquot.c
index 8819d281500c..7e273151f589 100644
--- a/fs/dquot.c
+++ b/fs/dquot.c
@@ -538,6 +538,11 @@ static int shrink_dqcache_memory(int nr, gfp_t gfp_mask)
         return (dqstats.free_dquots / 100) * sysctl_vfs_cache_pressure;
 }
 
+static struct shrinker dqcache_shrinker = {
+        .shrink = shrink_dqcache_memory,
+        .seeks = DEFAULT_SEEKS,
+};
+
 /*
  * Put reference to dquot
  * NOTE: If you change this function please check whether dqput_blocks() works right...
@@ -1870,7 +1875,7 @@ static int __init dquot_init(void)
         printk("Dquot-cache hash table entries: %ld (order %ld, %ld bytes)\n",
                         nr_hash, order, (PAGE_SIZE << order));
 
-        set_shrinker(DEFAULT_SEEKS, shrink_dqcache_memory);
+        register_shrinker(&dqcache_shrinker);
 
         return 0;
 }
diff --git a/fs/inode.c b/fs/inode.c
index 47b87b071de3..320e088d0b28 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -462,6 +462,11 @@ static int shrink_icache_memory(int nr, gfp_t gfp_mask)
         return (inodes_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
 }
 
+static struct shrinker icache_shrinker = {
+        .shrink = shrink_icache_memory,
+        .seeks = DEFAULT_SEEKS,
+};
+
 static void __wait_on_freeing_inode(struct inode *inode);
 /*
  * Called with the inode lock held.
@@ -1385,7 +1390,7 @@ void __init inode_init(unsigned long mempages)
                          SLAB_MEM_SPREAD),
                          init_once,
                          NULL);
-        set_shrinker(DEFAULT_SEEKS, shrink_icache_memory);
+        register_shrinker(&icache_shrinker);
 
         /* Hash may have been set up in inode_init_early */
         if (!hashdist)
diff --git a/fs/mbcache.c b/fs/mbcache.c
index deeb9dc062d9..fbb1d02f8791 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -100,7 +100,6 @@ struct mb_cache {
 static LIST_HEAD(mb_cache_list);
 static LIST_HEAD(mb_cache_lru_list);
 static DEFINE_SPINLOCK(mb_cache_spinlock);
-static struct shrinker *mb_shrinker;
 
 static inline int
 mb_cache_indexes(struct mb_cache *cache)
@@ -118,6 +117,10 @@ mb_cache_indexes(struct mb_cache *cache)
 
 static int mb_cache_shrink_fn(int nr_to_scan, gfp_t gfp_mask);
 
+static struct shrinker mb_cache_shrinker = {
+        .shrink = mb_cache_shrink_fn,
+        .seeks = DEFAULT_SEEKS,
+};
 
 static inline int
 __mb_cache_entry_is_hashed(struct mb_cache_entry *ce)
@@ -662,13 +665,13 @@ mb_cache_entry_find_next(struct mb_cache_entry *prev, int index,
 
 static int __init init_mbcache(void)
 {
-        mb_shrinker = set_shrinker(DEFAULT_SEEKS, mb_cache_shrink_fn);
+        register_shrinker(&mb_cache_shrinker);
         return 0;
 }
 
 static void __exit exit_mbcache(void)
 {
-        remove_shrinker(mb_shrinker);
+        unregister_shrinker(&mb_cache_shrinker);
 }
 
 module_init(init_mbcache)
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index a2b1af89ca1a..adffe1615c51 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -300,7 +300,10 @@ static const struct super_operations nfs4_sops = {
 };
 #endif
 
-static struct shrinker *acl_shrinker;
+static struct shrinker acl_shrinker = {
+        .shrink = nfs_access_cache_shrinker,
+        .seeks = DEFAULT_SEEKS,
+};
 
 /*
  * Register the NFS filesystems
@@ -321,7 +324,7 @@ int __init register_nfs_fs(void)
         if (ret < 0)
                 goto error_2;
 #endif
-        acl_shrinker = set_shrinker(DEFAULT_SEEKS, nfs_access_cache_shrinker);
+        register_shrinker(&acl_shrinker);
         return 0;
 
 #ifdef CONFIG_NFS_V4
@@ -339,8 +342,7 @@ error_0:
  */
 void __exit unregister_nfs_fs(void)
 {
-        if (acl_shrinker != NULL)
-                remove_shrinker(acl_shrinker);
+        unregister_shrinker(&acl_shrinker);
 #ifdef CONFIG_NFS_V4
         unregister_filesystem(&nfs4_fs_type);
         nfs_unregister_sysctl();
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 2df63622354e..b0f0e58866de 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -35,10 +35,13 @@
 #include <linux/freezer.h>
 
 static kmem_zone_t *xfs_buf_zone;
-static struct shrinker *xfs_buf_shake;
 STATIC int xfsbufd(void *);
 STATIC int xfsbufd_wakeup(int, gfp_t);
 STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);
+static struct shrinker xfs_buf_shake = {
+        .shrink = xfsbufd_wakeup,
+        .seeks = DEFAULT_SEEKS,
+};
 
 static struct workqueue_struct *xfslogd_workqueue;
 struct workqueue_struct *xfsdatad_workqueue;
@@ -1832,14 +1835,9 @@ xfs_buf_init(void)
         if (!xfsdatad_workqueue)
                 goto out_destroy_xfslogd_workqueue;
 
-        xfs_buf_shake = set_shrinker(DEFAULT_SEEKS, xfsbufd_wakeup);
-        if (!xfs_buf_shake)
-                goto out_destroy_xfsdatad_workqueue;
-
+        register_shrinker(&xfs_buf_shake);
         return 0;
 
- out_destroy_xfsdatad_workqueue:
-        destroy_workqueue(xfsdatad_workqueue);
 out_destroy_xfslogd_workqueue:
         destroy_workqueue(xfslogd_workqueue);
 out_free_buf_zone:
@@ -1854,7 +1852,7 @@ xfs_buf_init(void)
 void
 xfs_buf_terminate(void)
 {
-        remove_shrinker(xfs_buf_shake);
+        unregister_shrinker(&xfs_buf_shake);
         destroy_workqueue(xfsdatad_workqueue);
         destroy_workqueue(xfslogd_workqueue);
         kmem_zone_destroy(xfs_buf_zone);
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index 7def4c699343..2d274b23ade5 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -62,7 +62,6 @@ uint ndquot;
 
 kmem_zone_t *qm_dqzone;
 kmem_zone_t *qm_dqtrxzone;
-static struct shrinker *xfs_qm_shaker;
 
 static cred_t xfs_zerocr;
 
@@ -78,6 +77,11 @@ STATIC int xfs_qm_init_quotainos(xfs_mount_t *);
 STATIC int xfs_qm_init_quotainfo(xfs_mount_t *);
 STATIC int xfs_qm_shake(int, gfp_t);
 
+static struct shrinker xfs_qm_shaker = {
+        .shrink = xfs_qm_shake,
+        .seeks = DEFAULT_SEEKS,
+};
+
 #ifdef DEBUG
 extern mutex_t qcheck_lock;
 #endif
@@ -149,7 +153,7 @@ xfs_Gqm_init(void)
         } else
                 xqm->qm_dqzone = qm_dqzone;
 
-        xfs_qm_shaker = set_shrinker(DEFAULT_SEEKS, xfs_qm_shake);
+        register_shrinker(&xfs_qm_shaker);
 
         /*
          * The t_dqinfo portion of transactions.
@@ -181,7 +185,7 @@ xfs_qm_destroy(
 
         ASSERT(xqm != NULL);
         ASSERT(xqm->qm_nrefs == 0);
-        remove_shrinker(xfs_qm_shaker);
+        unregister_shrinker(&xfs_qm_shaker);
         hsize = xqm->qm_dqhashmask + 1;
         for (i = 0; i < hsize; i++) {
                 xfs_qm_list_destroy(&(xqm->qm_usr_dqhtable[i]));
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 97d0cddfd223..4c482a3ee870 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -810,27 +810,31 @@ extern unsigned long do_mremap(unsigned long addr,
                                unsigned long flags, unsigned long new_addr);
 
 /*
- * Prototype to add a shrinker callback for ageable caches.
- *
- * These functions are passed a count `nr_to_scan' and a gfpmask. They should
- * scan `nr_to_scan' objects, attempting to free them.
+ * A callback you can register to apply pressure to ageable caches.
  *
- * The callback must return the number of objects which remain in the cache.
+ * 'shrink' is passed a count 'nr_to_scan' and a 'gfpmask'. It should
+ * look through the least-recently-used 'nr_to_scan' entries and
+ * attempt to free them up. It should return the number of objects
+ * which remain in the cache. If it returns -1, it means it cannot do
+ * any scanning at this time (eg. there is a risk of deadlock).
  *
- * The callback will be passed nr_to_scan == 0 when the VM is querying the
- * cache size, so a fastpath for that case is appropriate.
- */
-typedef int (*shrinker_t)(int nr_to_scan, gfp_t gfp_mask);
-
-/*
- * Add an aging callback.  The int is the number of 'seeks' it takes
- * to recreate one of the objects that these functions age.
+ * The 'gfpmask' refers to the allocation we are currently trying to
+ * fulfil.
+ *
+ * Note that 'shrink' will be passed nr_to_scan == 0 when the VM is
+ * querying the cache size, so a fastpath for that case is appropriate.
 */
+struct shrinker {
+        int (*shrink)(int nr_to_scan, gfp_t gfp_mask);
+        int seeks;      /* seeks to recreate an obj */
 
-#define DEFAULT_SEEKS 2
-struct shrinker;
-extern struct shrinker *set_shrinker(int, shrinker_t);
-extern void remove_shrinker(struct shrinker *shrinker);
+        /* These are for internal use */
+        struct list_head list;
+        long nr;        /* objs pending delete */
+};
+#define DEFAULT_SEEKS 2 /* A good number if you don't know better. */
+extern void register_shrinker(struct shrinker *);
+extern void unregister_shrinker(struct shrinker *);
 
 /*
  * Some shared mappigns will want the pages marked read-only
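The include/linux/mm.h hunk above is the heart of the change: the shrinker_t typedef and the set_shrinker()/remove_shrinker() pair are replaced by a caller-owned struct shrinker registered with register_shrinker()/unregister_shrinker(). As a rough illustration of the resulting usage (a sketch, not part of the patch; the my_cache_* names are hypothetical), a module-level cache now looks much like the mbcache conversion earlier in this diff:

#include <linux/mm.h>           /* struct shrinker, DEFAULT_SEEKS, register_shrinker() */
#include <linux/module.h>

/* Hypothetical bookkeeping for the sketch. */
static unsigned long my_cache_nr_objects;

static int my_cache_shrink(int nr_to_scan, gfp_t gfp_mask)
{
        if (nr_to_scan) {
                /* ... walk the LRU and free up to nr_to_scan objects ... */
        }
        /* Always report how many objects remain (see the comment in mm.h). */
        return my_cache_nr_objects;
}

/* The shrinker is now static data owned by the caller, not kmalloc()ed by the VM. */
static struct shrinker my_cache_shrinker = {
        .shrink = my_cache_shrink,
        .seeks  = DEFAULT_SEEKS,
};

static int __init my_cache_init(void)
{
        register_shrinker(&my_cache_shrinker);  /* returns void: registration cannot fail */
        return 0;
}

static void __exit my_cache_exit(void)
{
        unregister_shrinker(&my_cache_shrinker);
}

module_init(my_cache_init);
module_exit(my_cache_exit);

Because registration can no longer fail, the conversions also drop their error handling, as in the removal of the out_destroy_xfsdatad_workqueue path in fs/xfs/linux-2.6/xfs_buf.c above.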
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 1d9971d8924b..2225b7c9df85 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -70,17 +70,6 @@ struct scan_control {
         int order;
 };
 
-/*
- * The list of shrinker callbacks used by to apply pressure to
- * ageable caches.
- */
-struct shrinker {
-        shrinker_t shrinker;
-        struct list_head list;
-        int seeks;      /* seeks to recreate an obj */
-        long nr;        /* objs pending delete */
-};
-
 #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
 
 #ifdef ARCH_HAS_PREFETCH
@@ -123,34 +112,25 @@ static DECLARE_RWSEM(shrinker_rwsem);
 /*
  * Add a shrinker callback to be called from the vm
  */
-struct shrinker *set_shrinker(int seeks, shrinker_t theshrinker)
+void register_shrinker(struct shrinker *shrinker)
 {
-        struct shrinker *shrinker;
-
-        shrinker = kmalloc(sizeof(*shrinker), GFP_KERNEL);
-        if (shrinker) {
-                shrinker->shrinker = theshrinker;
-                shrinker->seeks = seeks;
-                shrinker->nr = 0;
-                down_write(&shrinker_rwsem);
-                list_add_tail(&shrinker->list, &shrinker_list);
-                up_write(&shrinker_rwsem);
-        }
-        return shrinker;
+        shrinker->nr = 0;
+        down_write(&shrinker_rwsem);
+        list_add_tail(&shrinker->list, &shrinker_list);
+        up_write(&shrinker_rwsem);
 }
-EXPORT_SYMBOL(set_shrinker);
+EXPORT_SYMBOL(register_shrinker);
 
 /*
  * Remove one
  */
-void remove_shrinker(struct shrinker *shrinker)
+void unregister_shrinker(struct shrinker *shrinker)
 {
         down_write(&shrinker_rwsem);
         list_del(&shrinker->list);
         up_write(&shrinker_rwsem);
-        kfree(shrinker);
 }
-EXPORT_SYMBOL(remove_shrinker);
+EXPORT_SYMBOL(unregister_shrinker);
 
 #define SHRINK_BATCH 128
 /*
@@ -187,7 +167,7 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
         list_for_each_entry(shrinker, &shrinker_list, list) {
                 unsigned long long delta;
                 unsigned long total_scan;
-                unsigned long max_pass = (*shrinker->shrinker)(0, gfp_mask);
+                unsigned long max_pass = (*shrinker->shrink)(0, gfp_mask);
 
                 delta = (4 * scanned) / shrinker->seeks;
                 delta *= max_pass;
@@ -215,8 +195,8 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
                         int shrink_ret;
                         int nr_before;
 
-                        nr_before = (*shrinker->shrinker)(0, gfp_mask);
-                        shrink_ret = (*shrinker->shrinker)(this_scan, gfp_mask);
+                        nr_before = (*shrinker->shrink)(0, gfp_mask);
+                        shrink_ret = (*shrinker->shrink)(this_scan, gfp_mask);
                         if (shrink_ret == -1)
                                 break;
                         if (shrink_ret < nr_before)
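shrink_slab() above uses each shrinker's ->shrink in two ways: with nr_to_scan == 0 to query the cache size (max_pass, nr_before), and with a real batch count (bounded by SHRINK_BATCH) to do the scanning, stopping that shrinker when the callback returns -1. A callback therefore needs roughly the following shape; this is an illustrative sketch under those assumptions, not code from the patch, and the my_lru_* names are hypothetical:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>

static DEFINE_SPINLOCK(my_lru_lock);
static LIST_HEAD(my_lru_list);
static unsigned long my_lru_count;      /* objects currently on my_lru_list */

static int my_lru_shrink(int nr_to_scan, gfp_t gfp_mask)
{
        /* Fast path: nr_to_scan == 0 only asks for the current size. */
        if (nr_to_scan == 0)
                return my_lru_count;

        /*
         * If freeing objects could recurse into the filesystem, refuse to
         * scan on allocations that must not re-enter it; returning -1
         * makes shrink_slab() leave this shrinker alone for now.
         */
        if (!(gfp_mask & __GFP_FS))
                return -1;

        spin_lock(&my_lru_lock);
        while (nr_to_scan-- > 0 && !list_empty(&my_lru_list)) {
                struct list_head *lru = my_lru_list.prev;

                list_del_init(lru);
                my_lru_count--;
                /* ... container_of(lru, ...) and free the object here ... */
        }
        spin_unlock(&my_lru_lock);

        return my_lru_count;    /* objects remaining after this scan */
}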
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index aa55d0a03e6f..29a8ecc60928 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -543,17 +543,18 @@ rpcauth_uptodatecred(struct rpc_task *task)
                 test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) != 0;
 }
 
-
-static struct shrinker *rpc_cred_shrinker;
+static struct shrinker rpc_cred_shrinker = {
+        .shrink = rpcauth_cache_shrinker,
+        .seeks = DEFAULT_SEEKS,
+};
 
 void __init rpcauth_init_module(void)
 {
         rpc_init_authunix();
-        rpc_cred_shrinker = set_shrinker(DEFAULT_SEEKS, rpcauth_cache_shrinker);
+        register_shrinker(&rpc_cred_shrinker);
 }
 
 void __exit rpcauth_remove_module(void)
 {
-        if (rpc_cred_shrinker != NULL)
-                remove_shrinker(rpc_cred_shrinker);
+        unregister_shrinker(&rpc_cred_shrinker);
 }