author     Mauro Carvalho Chehab <mchehab@redhat.com>   2011-12-30 10:59:37 -0500
committer  Mauro Carvalho Chehab <mchehab@redhat.com>   2011-12-30 10:59:37 -0500
commit     b4d48c942c17ce3d3a330ad91e109e522bc97378 (patch)
tree       3365292f3a5a502edb51492d011fd326c930ca40 /mm/vmscan.c
parent     1a5cd29631a6b75e49e6ad8a770ab9d69cda0fa2 (diff)
parent     5f0a6e2d503896062f641639dacfe5055c2f593b (diff)
Merge tag 'v3.2-rc7' into staging/for_v3.3
Linux 3.2-rc7
* tag 'v3.2-rc7': (1304 commits)
Linux 3.2-rc7
netfilter: xt_connbytes: handle negation correctly
Btrfs: call d_instantiate after all ops are setup
Btrfs: fix worker lock misuse in find_worker
net: relax rcvbuf limits
rps: fix insufficient bounds checking in store_rps_dev_flow_table_cnt()
net: introduce DST_NOPEER dst flag
mqprio: Avoid panic if no options are provided
bridge: provide a mtu() method for fake_dst_ops
md/bitmap: It is OK to clear bits during recovery.
md: don't give up looking for spares on first failure-to-add
md/raid5: ensure correct assessment of drives during degraded reshape.
md/linear: fix hot-add of devices to linear arrays.
sparc64: Fix MSIQ HV call ordering in pci_sun4v_msiq_build_irq().
pata_of_platform: Add missing CONFIG_OF_IRQ dependency.
ipv4: using prefetch requires including prefetch.h
VFS: Fix race between CPU hotplug and lglocks
vfs: __read_cache_page should use gfp argument rather than GFP_KERNEL
USB: Fix usb/isp1760 build on sparc
net: Add a flow_cache_flush_deferred function
...
Conflicts:
drivers/media/common/tuners/tda18218.c
drivers/media/video/omap3isp/ispccdc.c
drivers/staging/media/as102/as102_drv.h
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--	mm/vmscan.c	26
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index a1893c050795..f54a05b7a61d 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -183,7 +183,7 @@ static unsigned long zone_nr_lru_pages(struct zone *zone,
  */
 void register_shrinker(struct shrinker *shrinker)
 {
-	shrinker->nr = 0;
+	atomic_long_set(&shrinker->nr_in_batch, 0);
 	down_write(&shrinker_rwsem);
 	list_add_tail(&shrinker->list, &shrinker_list);
 	up_write(&shrinker_rwsem);
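
The hunk above converts the per-shrinker deferred-work counter from a plain long to an atomic_long_t, so registration initializes it with atomic_long_set() rather than a bare store. The struct itself lives outside this file; as a rough sketch of the shape it takes after this change (recalled from include/linux/shrinker.h of this kernel generation, not shown in the diff, so treat the exact layout as an assumption):

struct shrinker {
	int (*shrink)(struct shrinker *, struct shrink_control *sc);
	int seeks;	/* seeks to recreate an obj */
	long batch;	/* reclaim batch size, 0 = default */

	/* These are for internal use */
	struct list_head list;
	atomic_long_t nr_in_batch;	/* objs pending delete */
};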
@@ -247,25 +247,26 @@ unsigned long shrink_slab(struct shrink_control *shrink,
 
 	list_for_each_entry(shrinker, &shrinker_list, list) {
 		unsigned long long delta;
-		unsigned long total_scan;
-		unsigned long max_pass;
+		long total_scan;
+		long max_pass;
 		int shrink_ret = 0;
 		long nr;
 		long new_nr;
 		long batch_size = shrinker->batch ? shrinker->batch
 						  : SHRINK_BATCH;
 
+		max_pass = do_shrinker_shrink(shrinker, shrink, 0);
+		if (max_pass <= 0)
+			continue;
+
 		/*
 		 * copy the current shrinker scan count into a local variable
 		 * and zero it so that other concurrent shrinker invocations
 		 * don't also do this scanning work.
 		 */
-		do {
-			nr = shrinker->nr;
-		} while (cmpxchg(&shrinker->nr, nr, 0) != nr);
+		nr = atomic_long_xchg(&shrinker->nr_in_batch, 0);
 
 		total_scan = nr;
-		max_pass = do_shrinker_shrink(shrinker, shrink, 0);
 		delta = (4 * nr_pages_scanned) / shrinker->seeks;
 		delta *= max_pass;
 		do_div(delta, lru_pages + 1);
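
Two things change in this hunk. First, the do_shrinker_shrink() probe is hoisted above the counter handoff, so a shrinker that reports nothing to reclaim (or an error) is skipped before its pending count is claimed. Second, the open-coded cmpxchg() loop that claimed the pending count collapses into a single atomic_long_xchg(): an unconditional exchange reads the old value and stores zero in one atomic step, which is exactly what the loop was converging toward. A minimal userspace sketch of the equivalence using C11 atomics (names hypothetical, not kernel code):

#include <stdatomic.h>

/* Old style: loop until we swap the value we observed for zero. */
static long take_deferred_cmpxchg(atomic_long *ctr)
{
	long nr = atomic_load(ctr);

	/* On failure, nr is refreshed with the current value. */
	while (!atomic_compare_exchange_weak(ctr, &nr, 0))
		;
	return nr;
}

/* New style: one unconditional exchange claims the whole count. */
static long take_deferred_xchg(atomic_long *ctr)
{
	return atomic_exchange(ctr, 0);
}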
@@ -325,12 +326,11 @@ unsigned long shrink_slab(struct shrink_control *shrink,
 		 * manner that handles concurrent updates. If we exhausted the
 		 * scan, there is no need to do an update.
 		 */
-		do {
-			nr = shrinker->nr;
-			new_nr = total_scan + nr;
-			if (total_scan <= 0)
-				break;
-		} while (cmpxchg(&shrinker->nr, nr, new_nr) != nr);
+		if (total_scan > 0)
+			new_nr = atomic_long_add_return(total_scan,
+					&shrinker->nr_in_batch);
+		else
+			new_nr = atomic_long_read(&shrinker->nr_in_batch);
 
 		trace_mm_shrink_slab_end(shrinker, shrink_ret, nr, new_nr);
 	}
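
The final hunk applies the same simplification on the way out: leftover scan work is returned to the shared counter with a single atomic_long_add_return() instead of another cmpxchg() loop, and atomic_long_read() covers the exhausted-scan case so the tracepoint still sees a current value. A hedged sketch of the same pattern in C11 atomics (helper name hypothetical):

/* Return unconsumed work; mirrors atomic_long_add_return(), which
 * yields the post-add value, by adding the delta to fetch_add's
 * pre-add return. */
static long put_back_deferred(atomic_long *ctr, long unused)
{
	if (unused > 0)
		return atomic_fetch_add(ctr, unused) + unused;
	return atomic_load(ctr);	/* nothing to give back */
}

Compared with the retry loop, a lost update is impossible by construction, and the cost under contention is one atomic add rather than an unbounded number of retries.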