diff options
author | Tejun Heo <tj@kernel.org> | 2010-12-14 10:21:17 -0500 |
---|---|---|
committer | Tejun Heo <tj@kernel.org> | 2010-12-15 04:56:11 -0500 |
commit | afe2c511fb2d75f1515081ff1be15bd79cfe722d (patch) | |
tree | 28aa74e9e0c654a95bf3306101e10ac1d16919d1 /mm | |
parent | 2d64672ed38721b7a3815009d79bfb90a1f34a17 (diff) |
workqueue: convert cancel_rearming_delayed_work[queue]() users to cancel_delayed_work_sync()
cancel_rearming_delayed_work[queue]() has been superseded by
cancel_delayed_work_sync() quite some time ago. Convert all the
in-kernel users. The conversions are completely equivalent and
trivial.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: "David S. Miller" <davem@davemloft.net>
Acked-by: Greg Kroah-Hartman <gregkh@suse.de>
Acked-by: Evgeniy Polyakov <zbr@ioremap.net>
Cc: Jeff Garzik <jgarzik@pobox.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Mauro Carvalho Chehab <mchehab@infradead.org>
Cc: netdev@vger.kernel.org
Cc: Anton Vorontsov <cbou@mail.ru>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: "J. Bruce Fields" <bfields@fieldses.org>
Cc: Neil Brown <neilb@suse.de>
Cc: Alex Elder <aelder@sgi.com>
Cc: xfs-masters@oss.sgi.com
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: netfilter-devel@vger.kernel.org
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Cc: linux-nfs@vger.kernel.org
Diffstat (limited to 'mm')
-rw-r--r-- | mm/slab.c | 2 | ||||
-rw-r--r-- | mm/vmstat.c | 2 |
2 files changed, 2 insertions, 2 deletions
@@ -1293,7 +1293,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb, | |||
1293 | * anything expensive but will only modify reap_work | 1293 | * anything expensive but will only modify reap_work |
1294 | * and reschedule the timer. | 1294 | * and reschedule the timer. |
1295 | */ | 1295 | */ |
1296 | cancel_rearming_delayed_work(&per_cpu(slab_reap_work, cpu)); | 1296 | cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu)); |
1297 | /* Now the cache_reaper is guaranteed to be not running. */ | 1297 | /* Now the cache_reaper is guaranteed to be not running. */ |
1298 | per_cpu(slab_reap_work, cpu).work.func = NULL; | 1298 | per_cpu(slab_reap_work, cpu).work.func = NULL; |
1299 | break; | 1299 | break; |
diff --git a/mm/vmstat.c b/mm/vmstat.c index 42eac4d33216..d1f3cb63a8a9 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c | |||
@@ -1033,7 +1033,7 @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb, | |||
1033 | break; | 1033 | break; |
1034 | case CPU_DOWN_PREPARE: | 1034 | case CPU_DOWN_PREPARE: |
1035 | case CPU_DOWN_PREPARE_FROZEN: | 1035 | case CPU_DOWN_PREPARE_FROZEN: |
1036 | cancel_rearming_delayed_work(&per_cpu(vmstat_work, cpu)); | 1036 | cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu)); |
1037 | per_cpu(vmstat_work, cpu).work.func = NULL; | 1037 | per_cpu(vmstat_work, cpu).work.func = NULL; |
1038 | break; | 1038 | break; |
1039 | case CPU_DOWN_FAILED: | 1039 | case CPU_DOWN_FAILED: |