 include/linux/fs.h       |  2 +-
 include/linux/mm.h       |  1 +
 include/linux/shrinker.h |  2 +-
 mm/vmscan.c              | 17 +++++++----------
 4 files changed, 10 insertions(+), 12 deletions(-)
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 019dc558df1a..e0bc4ffb8e7f 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -393,8 +393,8 @@ struct inodes_stat_t {
 #include <linux/semaphore.h>
 #include <linux/fiemap.h>
 #include <linux/rculist_bl.h>
-#include <linux/shrinker.h>
 #include <linux/atomic.h>
+#include <linux/shrinker.h>
 
 #include <asm/byteorder.h>
 
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 3dc3a8c2c485..4baadd18f4ad 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -10,6 +10,7 @@
 #include <linux/mmzone.h>
 #include <linux/rbtree.h>
 #include <linux/prio_tree.h>
+#include <linux/atomic.h>
 #include <linux/debug_locks.h>
 #include <linux/mm_types.h>
 #include <linux/range.h>
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index a83833a1f7a2..07ceb97d53fa 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -35,7 +35,7 @@ struct shrinker {
 
 	/* These are for internal use */
 	struct list_head list;
-	long nr;	/* objs pending delete */
+	atomic_long_t nr_in_batch; /* objs pending delete */
 };
 #define DEFAULT_SEEKS 2 /* A good number if you don't know better. */
 extern void register_shrinker(struct shrinker *);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index f5255442ae2b..f54a05b7a61d 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -183,7 +183,7 @@ static unsigned long zone_nr_lru_pages(struct zone *zone,
  */
 void register_shrinker(struct shrinker *shrinker)
 {
-	shrinker->nr = 0;
+	atomic_long_set(&shrinker->nr_in_batch, 0);
 	down_write(&shrinker_rwsem);
 	list_add_tail(&shrinker->list, &shrinker_list);
 	up_write(&shrinker_rwsem);
@@ -264,9 +264,7 @@ unsigned long shrink_slab(struct shrink_control *shrink,
 		 * and zero it so that other concurrent shrinker invocations
 		 * don't also do this scanning work.
 		 */
-		do {
-			nr = shrinker->nr;
-		} while (cmpxchg(&shrinker->nr, nr, 0) != nr);
+		nr = atomic_long_xchg(&shrinker->nr_in_batch, 0);
 
 		total_scan = nr;
 		delta = (4 * nr_pages_scanned) / shrinker->seeks;
@@ -328,12 +326,11 @@ unsigned long shrink_slab(struct shrink_control *shrink,
 		 * manner that handles concurrent updates. If we exhausted the
 		 * scan, there is no need to do an update.
 		 */
-		do {
-			nr = shrinker->nr;
-			new_nr = total_scan + nr;
-			if (total_scan <= 0)
-				break;
-		} while (cmpxchg(&shrinker->nr, nr, new_nr) != nr);
+		if (total_scan > 0)
+			new_nr = atomic_long_add_return(total_scan,
+					&shrinker->nr_in_batch);
+		else
+			new_nr = atomic_long_read(&shrinker->nr_in_batch);
 
 		trace_mm_shrink_slab_end(shrinker, shrink_ret, nr, new_nr);
 	}
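
The patch above replaces open-coded cmpxchg() retry loops with single atomic primitives: atomic_long_xchg() drains the pending-object count in one step, and atomic_long_add_return() deposits the unscanned remainder back while folding in any concurrent additions. The following is a minimal userspace sketch of that batch-counter idiom, using C11 <stdatomic.h> as a stand-in for the kernel's atomic_long_* helpers; struct batch_counter, drain_batch() and put_back_unscanned() are illustrative names, not part of the patch.

#include <stdatomic.h>
#include <stdio.h>

/* Stand-in for struct shrinker's nr_in_batch field; the type and helper
 * names here are illustrative only, not taken from the kernel patch. */
struct batch_counter {
	atomic_long nr_in_batch;	/* objs pending delete */
};

/* Claim every pending object in one step, the way the patched shrink_slab()
 * uses atomic_long_xchg(): no retry loop, and concurrent callers cannot
 * claim the same work twice. */
static long drain_batch(struct batch_counter *bc)
{
	return atomic_exchange(&bc->nr_in_batch, 0);
}

/* Return the unscanned remainder, the way the patched code uses
 * atomic_long_add_return(): the add folds our leftover together with
 * anything concurrent callers deposited in the meantime. */
static long put_back_unscanned(struct batch_counter *bc, long remaining)
{
	if (remaining > 0)
		return atomic_fetch_add(&bc->nr_in_batch, remaining) + remaining;
	return atomic_load(&bc->nr_in_batch);
}

int main(void)
{
	struct batch_counter bc;

	atomic_init(&bc.nr_in_batch, 128);	/* pretend 128 objects are pending */
	long claimed = drain_batch(&bc);	/* claimed == 128, counter is now 0 */
	long new_nr = put_back_unscanned(&bc, claimed - 100);	/* scanned 100, 28 go back */
	printf("claimed=%ld new_nr=%ld\n", claimed, new_nr);
	return 0;
}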