 block/ll_rw_blk.c               |  2 +-
 drivers/ata/libata-core.c       |  8 ++++----
 drivers/net/e1000/e1000_main.c  |  2 +-
 drivers/net/phy/phy.c           |  4 ++--
 drivers/net/tg3.c               |  2 +-
 fs/aio.c                        |  4 ++--
 include/linux/workqueue.h       | 21 ++++++++++++---------
 kernel/workqueue.c              | 36 +++++++++++++++++-------------------
 net/ipv4/ipvs/ip_vs_ctl.c       |  2 +-
 9 files changed, 41 insertions(+), 40 deletions(-)
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index c059767c552c..df506571ed60 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -3633,7 +3633,7 @@ EXPORT_SYMBOL(kblockd_schedule_work);
 
 void kblockd_flush_work(struct work_struct *work)
 {
-	flush_work(kblockd_workqueue, work);
+	cancel_work_sync(work);
 }
 EXPORT_SYMBOL(kblockd_flush_work);
 
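
This first hunk is the template for every call-site conversion in the patch: flush_work() took both the workqueue and the work item, while cancel_work_sync() takes only the work item, because a queued work_struct now records the workqueue it was placed on (see the kernel/workqueue.c hunks below). A minimal before/after sketch, using a hypothetical my_dev structure:

    #include <linux/workqueue.h>

    /* Hypothetical driver state; only the work member matters here. */
    struct my_dev {
    	struct work_struct reset_work;
    };

    static void my_dev_teardown(struct my_dev *dev)
    {
    	/* Old API:  flush_work(my_wq, &dev->reset_work);
    	 * New API:  the workqueue argument is gone, since the work
    	 * item itself remembers where it was queued.
    	 */
    	cancel_work_sync(&dev->reset_work);
    }
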
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index b74e56caba6f..fef87dd70d17 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -1316,7 +1316,7 @@ void ata_port_flush_task(struct ata_port *ap)
 	spin_unlock_irqrestore(ap->lock, flags);
 
 	DPRINTK("flush #1\n");
-	flush_work(ata_wq, &ap->port_task.work); /* akpm: seems unneeded */
+	cancel_work_sync(&ap->port_task.work); /* akpm: seems unneeded */
 
 	/*
 	 * At this point, if a task is running, it's guaranteed to see
@@ -1327,7 +1327,7 @@ void ata_port_flush_task(struct ata_port *ap)
 		if (ata_msg_ctl(ap))
 			ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n",
 					__FUNCTION__);
-		flush_work(ata_wq, &ap->port_task.work);
+		cancel_work_sync(&ap->port_task.work);
 	}
 
 	spin_lock_irqsave(ap->lock, flags);
@@ -6475,9 +6475,9 @@ void ata_port_detach(struct ata_port *ap)
 	/* Flush hotplug task.  The sequence is similar to
 	 * ata_port_flush_task().
 	 */
-	flush_work(ata_aux_wq, &ap->hotplug_task.work); /* akpm: why? */
+	cancel_work_sync(&ap->hotplug_task.work); /* akpm: why? */
 	cancel_delayed_work(&ap->hotplug_task);
-	flush_work(ata_aux_wq, &ap->hotplug_task.work);
+	cancel_work_sync(&ap->hotplug_task.work);
 
  skip_eh:
 	/* remove the associated SCSI host */
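
The detach path above tears down a delayed work whose handler may re-arm it, and the comment notes the sequence mirrors ata_port_flush_task(). A rough sketch of the same ordering on a hypothetical structure; the point is that the timer is killed between two synchronous cancels, so nothing queued before, during, or after the timer fires can survive:

    #include <linux/workqueue.h>

    /* Hypothetical mirror of ap->hotplug_task above. */
    struct my_port {
    	struct delayed_work hotplug_task;
    };

    static void my_port_detach(struct my_port *p)
    {
    	/* Wait out a handler that is already queued or running
    	 * (it may re-arm the timer while we wait). */
    	cancel_work_sync(&p->hotplug_task.work);

    	/* Kill the pending timer so no further queueing happens. */
    	cancel_delayed_work(&p->hotplug_task);

    	/* Catch a work item the timer queued before it was killed. */
    	cancel_work_sync(&p->hotplug_task.work);
    }
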
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 397e25bdbfec..637ae8f68791 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -1214,7 +1214,7 @@ e1000_remove(struct pci_dev *pdev)
 	int i;
 #endif
 
-	flush_work_keventd(&adapter->reset_task);
+	cancel_work_sync(&adapter->reset_task);
 
 	e1000_release_manageability(adapter);
 
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index f445c465b14e..f71dab347667 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -663,9 +663,9 @@ int phy_stop_interrupts(struct phy_device *phydev)
 
 	/*
 	 * Finish any pending work; we might have been scheduled to be called
-	 * from keventd ourselves, but flush_work_keventd() handles that.
+	 * from keventd ourselves, but cancel_work_sync() handles that.
 	 */
-	flush_work_keventd(&phydev->phy_queue);
+	cancel_work_sync(&phydev->phy_queue);
 
 	free_irq(phydev->irq, phydev);
 
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 0c0f9c817321..923b9c725cc3 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -7386,7 +7386,7 @@ static int tg3_close(struct net_device *dev)
 {
 	struct tg3 *tp = netdev_priv(dev);
 
-	flush_work_keventd(&tp->reset_task);
+	cancel_work_sync(&tp->reset_task);
 
 	netif_stop_queue(dev);
 
diff --git a/fs/aio.c b/fs/aio.c
index d18690bb03e9..ac1c1587aa02 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -348,7 +348,7 @@ void fastcall exit_aio(struct mm_struct *mm)
 		/*
 		 * Ensure we don't leave the ctx on the aio_wq
 		 */
-		flush_work(aio_wq, &ctx->wq.work);
+		cancel_work_sync(&ctx->wq.work);
 
 		if (1 != atomic_read(&ctx->users))
 			printk(KERN_DEBUG
@@ -371,7 +371,7 @@ void fastcall __put_ioctx(struct kioctx *ctx)
 	BUG_ON(ctx->reqs_active);
 
 	cancel_delayed_work(&ctx->wq);
-	flush_work(aio_wq, &ctx->wq.work);
+	cancel_work_sync(&ctx->wq.work);
 	aio_free_ring(ctx);
 	mmdrop(ctx->mm);
 	ctx->mm = NULL;
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index e1581dce5890..d555f31c0746 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -128,30 +128,33 @@ extern struct workqueue_struct *__create_workqueue(const char *name,
 extern void destroy_workqueue(struct workqueue_struct *wq);
 
 extern int FASTCALL(queue_work(struct workqueue_struct *wq, struct work_struct *work));
-extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *work, unsigned long delay));
+extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq,
+			struct delayed_work *work, unsigned long delay));
 extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 			struct delayed_work *work, unsigned long delay);
+
 extern void FASTCALL(flush_workqueue(struct workqueue_struct *wq));
-extern void flush_work(struct workqueue_struct *wq, struct work_struct *work);
-extern void flush_work_keventd(struct work_struct *work);
+extern void flush_scheduled_work(void);
 
 extern int FASTCALL(schedule_work(struct work_struct *work));
-extern int FASTCALL(schedule_delayed_work(struct delayed_work *work, unsigned long delay));
-
-extern int schedule_delayed_work_on(int cpu, struct delayed_work *work, unsigned long delay);
+extern int FASTCALL(schedule_delayed_work(struct delayed_work *work,
+			unsigned long delay));
+extern int schedule_delayed_work_on(int cpu, struct delayed_work *work,
+			unsigned long delay);
 extern int schedule_on_each_cpu(work_func_t func);
-extern void flush_scheduled_work(void);
 extern int current_is_keventd(void);
 extern int keventd_up(void);
 
 extern void init_workqueues(void);
 int execute_in_process_context(work_func_t fn, struct execute_work *);
 
+extern void cancel_work_sync(struct work_struct *work);
+
 /*
  * Kill off a pending schedule_delayed_work(). Note that the work callback
  * function may still be running on return from cancel_delayed_work(), unless
  * it returns 1 and the work doesn't re-arm itself. Run flush_workqueue() or
- * flush_work() or cancel_work_sync() to wait on it.
+ * cancel_work_sync() to wait on it.
  */
 static inline int cancel_delayed_work(struct delayed_work *work)
 {
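
The comment above fixes the contract for the new API: cancel_delayed_work() returns 1 only when it deleted the pending timer, in which case the callback will never run (unless it re-arms itself); on 0 the callback may already be queued or executing, and the caller must wait with cancel_work_sync(). A minimal sketch of the resulting idiom, as a hypothetical stop_dwork() helper:

    #include <linux/workqueue.h>

    /* Hypothetical helper for a non-rearming delayed work. */
    static void stop_dwork(struct delayed_work *dwork)
    {
    	if (!cancel_delayed_work(dwork))
    		/* Timer already fired: the handler may be queued or
    		 * running, so wait for it to finish. */
    		cancel_work_sync(&dwork->work);
    }
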
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 63885abf1ba0..c9ab4293904f 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -413,23 +413,23 @@ static void wait_on_work(struct cpu_workqueue_struct *cwq,
 }
 
 /**
- * flush_work - block until a work_struct's callback has terminated
- * @wq: the workqueue on which the work is queued
+ * cancel_work_sync - block until a work_struct's callback has terminated
  * @work: the work which is to be flushed
  *
- * flush_work() will attempt to cancel the work if it is queued. If the work's
- * callback appears to be running, flush_work() will block until it has
- * completed.
+ * cancel_work_sync() will attempt to cancel the work if it is queued. If the
+ * work's callback appears to be running, cancel_work_sync() will block until
+ * it has completed.
  *
- * flush_work() is designed to be used when the caller is tearing down data
- * structures which the callback function operates upon. It is expected that,
- * prior to calling flush_work(), the caller has arranged for the work to not
- * be requeued.
+ * cancel_work_sync() is designed to be used when the caller is tearing down
+ * data structures which the callback function operates upon. It is expected
+ * that, prior to calling cancel_work_sync(), the caller has arranged for the
+ * work to not be requeued.
  */
-void flush_work(struct workqueue_struct *wq, struct work_struct *work)
+void cancel_work_sync(struct work_struct *work)
 {
-	const cpumask_t *cpu_map = wq_cpu_map(wq);
 	struct cpu_workqueue_struct *cwq;
+	struct workqueue_struct *wq;
+	const cpumask_t *cpu_map;
 	int cpu;
 
 	might_sleep();
@@ -448,10 +448,13 @@ void flush_work(struct workqueue_struct *wq, struct work_struct *work)
 	work_clear_pending(work);
 	spin_unlock_irq(&cwq->lock);
 
+	wq = cwq->wq;
+	cpu_map = wq_cpu_map(wq);
+
 	for_each_cpu_mask(cpu, *cpu_map)
 		wait_on_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
 }
-EXPORT_SYMBOL_GPL(flush_work);
+EXPORT_SYMBOL_GPL(cancel_work_sync);
 
 
 static struct workqueue_struct *keventd_wq;
@@ -540,18 +543,13 @@ void flush_scheduled_work(void)
 }
 EXPORT_SYMBOL(flush_scheduled_work);
 
-void flush_work_keventd(struct work_struct *work)
-{
-	flush_work(keventd_wq, work);
-}
-EXPORT_SYMBOL(flush_work_keventd);
-
 /**
  * cancel_rearming_delayed_work - kill off a delayed work whose handler rearms the delayed work.
  * @dwork: the delayed work struct
  *
  * Note that the work callback function may still be running on return from
- * cancel_delayed_work(). Run flush_workqueue() or flush_work() to wait on it.
+ * cancel_delayed_work(). Run flush_workqueue() or cancel_work_sync() to wait
+ * on it.
  */
 void cancel_rearming_delayed_work(struct delayed_work *dwork)
 {
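
The second hunk above is what pays for the shorter signature: a work item leads back to the per-CPU queue it was last placed on, and that queue carries a back-pointer to its owning workqueue, so cancel_work_sync() recovers the workqueue as wq = cwq->wq instead of taking it as a parameter. A simplified, hypothetical mirror of that pointer chain (the real types in kernel/workqueue.c differ in detail):

    /* Hypothetical, stripped-down view of the relationship. */
    struct workqueue;

    struct cpu_workqueue {
    	struct workqueue *wq;		/* back-pointer, cf. cwq->wq */
    };

    struct work {
    	struct cpu_workqueue *cwq;	/* recorded at queue time */
    };

    static struct workqueue *work_to_wq(struct work *w)
    {
    	return w->cwq->wq;		/* the step the hunk adds */
    }
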
diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c
index 342e836677a1..68fe1d4d0210 100644
--- a/net/ipv4/ipvs/ip_vs_ctl.c
+++ b/net/ipv4/ipvs/ip_vs_ctl.c
@@ -2387,7 +2387,7 @@ void ip_vs_control_cleanup(void)
 	EnterFunction(2);
 	ip_vs_trash_cleanup();
 	cancel_rearming_delayed_work(&defense_work);
-	flush_work_keventd(&defense_work.work);
+	cancel_work_sync(&defense_work.work);
 	ip_vs_kill_estimator(&ip_vs_stats);
 	unregister_sysctl_table(sysctl_header);
 	proc_net_remove("ip_vs_stats");