author      Tejun Heo <tj@kernel.org>    2012-08-20 17:51:24 -0400
committer   Tejun Heo <tj@kernel.org>    2012-08-20 17:51:24 -0400
commit      3b07e9ca26866697616097044f25fbe53dbab693 (patch)
tree        7621d076c91c05bc0fc3c63bdc54c279a487dfea
parent      43829731dd372d04d6706c51052b9dabab9ca356 (diff)
workqueue: deprecate system_nrt[_freezable]_wq
system_nrt[_freezable]_wq are now spurious. Mark them deprecated and
convert all users to system[_freezable]_wq.

If you're cc'd and wondering what's going on: Now all workqueues are
non-reentrant, so there's no reason to use system_nrt[_freezable]_wq.
Please use system[_freezable]_wq instead.

This patch doesn't make any functional difference.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-By: Lai Jiangshan <laijs@cn.fujitsu.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: David Airlie <airlied@linux.ie>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: David Howells <dhowells@redhat.com>
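The conversion below is mechanical; as a reading aid, here is a minimal sketch of the pattern. The example_fn/example_work/example_dwork names are hypothetical illustrations and do not appear in the patch itself.

/*
 * Minimal sketch of the conversion pattern (hypothetical work items,
 * not code from this patch).
 */
#include <linux/workqueue.h>

static void example_fn(struct work_struct *work)
{
        /* ... do the deferred work ... */
}

static DECLARE_WORK(example_work, example_fn);
static DECLARE_DELAYED_WORK(example_dwork, example_fn);

static void example_kick(void)
{
        /* was: queue_work(system_nrt_wq, &example_work); */
        schedule_work(&example_work);

        /* was: queue_delayed_work(system_nrt_wq, &example_dwork, HZ); */
        schedule_delayed_work(&example_dwork, HZ);

        /*
         * was: queue_delayed_work(system_nrt_freezable_wq, &example_dwork, HZ);
         * The freezable variant keeps an explicit queue, just without _nrt.
         */
        queue_delayed_work(system_freezable_wq, &example_dwork, HZ);
}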
-rw-r--r--  block/blk-throttle.c                 7
-rw-r--r--  block/genhd.c                       10
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c    6
-rw-r--r--  drivers/hid/hid-wiimote-ext.c        2
-rw-r--r--  drivers/mmc/core/host.c              4
-rw-r--r--  drivers/net/virtio_net.c            12
-rw-r--r--  include/linux/workqueue.h            4
-rw-r--r--  kernel/srcu.c                        4
-rw-r--r--  security/keys/gc.c                   8
-rw-r--r--  security/keys/key.c                  2
10 files changed, 29 insertions, 30 deletions
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index e287c19908c8..5a58e779912b 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -180,7 +180,7 @@ static inline unsigned int total_nr_queued(struct throtl_data *td)
 
 /*
  * Worker for allocating per cpu stat for tgs. This is scheduled on the
- * system_nrt_wq once there are some groups on the alloc_list waiting for
+ * system_wq once there are some groups on the alloc_list waiting for
  * allocation.
  */
 static void tg_stats_alloc_fn(struct work_struct *work)
@@ -194,8 +194,7 @@ alloc_stats:
                 stats_cpu = alloc_percpu(struct tg_stats_cpu);
                 if (!stats_cpu) {
                         /* allocation failed, try again after some time */
-                        queue_delayed_work(system_nrt_wq, dwork,
-                                           msecs_to_jiffies(10));
+                        schedule_delayed_work(dwork, msecs_to_jiffies(10));
                         return;
                 }
         }
@@ -238,7 +237,7 @@ static void throtl_pd_init(struct blkcg_gq *blkg)
          */
         spin_lock_irqsave(&tg_stats_alloc_lock, flags);
         list_add(&tg->stats_alloc_node, &tg_stats_alloc_list);
-        queue_delayed_work(system_nrt_wq, &tg_stats_alloc_work, 0);
+        schedule_delayed_work(&tg_stats_alloc_work, 0);
         spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
 }
 
diff --git a/block/genhd.c b/block/genhd.c
index 5d8b44a6442b..a2f3d6a5f55c 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1490,9 +1490,9 @@ static void __disk_unblock_events(struct gendisk *disk, bool check_now)
         intv = disk_events_poll_jiffies(disk);
         set_timer_slack(&ev->dwork.timer, intv / 4);
         if (check_now)
-                queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
+                queue_delayed_work(system_freezable_wq, &ev->dwork, 0);
         else if (intv)
-                queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, intv);
+                queue_delayed_work(system_freezable_wq, &ev->dwork, intv);
 out_unlock:
         spin_unlock_irqrestore(&ev->lock, flags);
 }
@@ -1535,7 +1535,7 @@ void disk_flush_events(struct gendisk *disk, unsigned int mask)
         spin_lock_irq(&ev->lock);
         ev->clearing |= mask;
         if (!ev->block)
-                mod_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
+                mod_delayed_work(system_freezable_wq, &ev->dwork, 0);
         spin_unlock_irq(&ev->lock);
 }
 
@@ -1571,7 +1571,7 @@ unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
 
         /* uncondtionally schedule event check and wait for it to finish */
         disk_block_events(disk);
-        queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
+        queue_delayed_work(system_freezable_wq, &ev->dwork, 0);
         flush_delayed_work(&ev->dwork);
         __disk_unblock_events(disk, false);
 
@@ -1608,7 +1608,7 @@ static void disk_events_workfn(struct work_struct *work)
 
         intv = disk_events_poll_jiffies(disk);
         if (!ev->block && intv)
-                queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, intv);
+                queue_delayed_work(system_freezable_wq, &ev->dwork, intv);
 
         spin_unlock_irq(&ev->lock);
 
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 3252e7067d8b..8fa9d52820d9 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -968,7 +968,7 @@ static void output_poll_execute(struct work_struct *work)
         }
 
         if (repoll)
-                queue_delayed_work(system_nrt_wq, delayed_work, DRM_OUTPUT_POLL_PERIOD);
+                schedule_delayed_work(delayed_work, DRM_OUTPUT_POLL_PERIOD);
 }
 
 void drm_kms_helper_poll_disable(struct drm_device *dev)
@@ -993,7 +993,7 @@ void drm_kms_helper_poll_enable(struct drm_device *dev)
         }
 
         if (poll)
-                queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
+                schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
 }
 EXPORT_SYMBOL(drm_kms_helper_poll_enable);
 
@@ -1020,6 +1020,6 @@ void drm_helper_hpd_irq_event(struct drm_device *dev)
         /* kill timer and schedule immediate execution, this doesn't block */
         cancel_delayed_work(&dev->mode_config.output_poll_work);
         if (drm_kms_helper_poll)
-                queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0);
+                schedule_delayed_work(&dev->mode_config.output_poll_work, 0);
 }
 EXPORT_SYMBOL(drm_helper_hpd_irq_event);
diff --git a/drivers/hid/hid-wiimote-ext.c b/drivers/hid/hid-wiimote-ext.c
index 0a1805c9b0e5..d37cd092ffc7 100644
--- a/drivers/hid/hid-wiimote-ext.c
+++ b/drivers/hid/hid-wiimote-ext.c
@@ -204,7 +204,7 @@ static void wiiext_worker(struct work_struct *work)
 /* schedule work only once, otherwise mark for reschedule */
 static void wiiext_schedule(struct wiimote_ext *ext)
 {
-        queue_work(system_nrt_wq, &ext->worker);
+        schedule_work(&ext->worker);
 }
 
 /*
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 597f189b4427..ee2e16b17017 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -204,8 +204,8 @@ void mmc_host_clk_release(struct mmc_host *host)
         host->clk_requests--;
         if (mmc_host_may_gate_card(host->card) &&
             !host->clk_requests)
-                queue_delayed_work(system_nrt_wq, &host->clk_gate_work,
-                                   msecs_to_jiffies(host->clkgate_delay));
+                schedule_delayed_work(&host->clk_gate_work,
+                                      msecs_to_jiffies(host->clkgate_delay));
         spin_unlock_irqrestore(&host->clk_lock, flags);
 }
 
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 83d2b0c34c5e..9650c413e11f 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -521,7 +521,7 @@ static void refill_work(struct work_struct *work)
         /* In theory, this can happen: if we don't get any buffers in
          * we will *never* try to fill again. */
         if (still_empty)
-                queue_delayed_work(system_nrt_wq, &vi->refill, HZ/2);
+                schedule_delayed_work(&vi->refill, HZ/2);
 }
 
 static int virtnet_poll(struct napi_struct *napi, int budget)
@@ -540,7 +540,7 @@ again:
 
         if (vi->num < vi->max / 2) {
                 if (!try_fill_recv(vi, GFP_ATOMIC))
-                        queue_delayed_work(system_nrt_wq, &vi->refill, 0);
+                        schedule_delayed_work(&vi->refill, 0);
         }
 
         /* Out of packets? */
@@ -745,7 +745,7 @@ static int virtnet_open(struct net_device *dev)
 
         /* Make sure we have some buffers: if oom use wq. */
         if (!try_fill_recv(vi, GFP_KERNEL))
-                queue_delayed_work(system_nrt_wq, &vi->refill, 0);
+                schedule_delayed_work(&vi->refill, 0);
 
         virtnet_napi_enable(vi);
         return 0;
@@ -1020,7 +1020,7 @@ static void virtnet_config_changed(struct virtio_device *vdev)
 {
         struct virtnet_info *vi = vdev->priv;
 
-        queue_work(system_nrt_wq, &vi->config_work);
+        schedule_work(&vi->config_work);
 }
 
 static int init_vqs(struct virtnet_info *vi)
@@ -1152,7 +1152,7 @@ static int virtnet_probe(struct virtio_device *vdev)
            otherwise get link status from config. */
         if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
                 netif_carrier_off(dev);
-                queue_work(system_nrt_wq, &vi->config_work);
+                schedule_work(&vi->config_work);
         } else {
                 vi->status = VIRTIO_NET_S_LINK_UP;
                 netif_carrier_on(dev);
@@ -1264,7 +1264,7 @@ static int virtnet_restore(struct virtio_device *vdev)
         netif_device_attach(vi->dev);
 
         if (!try_fill_recv(vi, GFP_KERNEL))
-                queue_delayed_work(system_nrt_wq, &vi->refill, 0);
+                schedule_delayed_work(&vi->refill, 0);
 
         mutex_lock(&vi->config_lock);
         vi->config_enable = true;
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index a351be7c3e91..1ce3fb08308d 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -310,12 +310,12 @@ extern struct workqueue_struct *system_long_wq;
 extern struct workqueue_struct *system_unbound_wq;
 extern struct workqueue_struct *system_freezable_wq;
 
-static inline struct workqueue_struct *__system_nrt_wq(void)
+static inline struct workqueue_struct * __deprecated __system_nrt_wq(void)
 {
         return system_wq;
 }
 
-static inline struct workqueue_struct *__system_nrt_freezable_wq(void)
+static inline struct workqueue_struct * __deprecated __system_nrt_freezable_wq(void)
 {
         return system_freezable_wq;
 }
diff --git a/kernel/srcu.c b/kernel/srcu.c
index 2095be3318d5..97c465ebd844 100644
--- a/kernel/srcu.c
+++ b/kernel/srcu.c
@@ -379,7 +379,7 @@ void call_srcu(struct srcu_struct *sp, struct rcu_head *head,
         rcu_batch_queue(&sp->batch_queue, head);
         if (!sp->running) {
                 sp->running = true;
-                queue_delayed_work(system_nrt_wq, &sp->work, 0);
+                schedule_delayed_work(&sp->work, 0);
         }
         spin_unlock_irqrestore(&sp->queue_lock, flags);
 }
@@ -631,7 +631,7 @@ static void srcu_reschedule(struct srcu_struct *sp)
         }
 
         if (pending)
-                queue_delayed_work(system_nrt_wq, &sp->work, SRCU_INTERVAL);
+                schedule_delayed_work(&sp->work, SRCU_INTERVAL);
 }
 
 /*
diff --git a/security/keys/gc.c b/security/keys/gc.c
index 61ab7c82ebb1..d67c97bb1025 100644
--- a/security/keys/gc.c
+++ b/security/keys/gc.c
@@ -62,7 +62,7 @@ void key_schedule_gc(time_t gc_at)
 
         if (gc_at <= now || test_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags)) {
                 kdebug("IMMEDIATE");
-                queue_work(system_nrt_wq, &key_gc_work);
+                schedule_work(&key_gc_work);
         } else if (gc_at < key_gc_next_run) {
                 kdebug("DEFERRED");
                 key_gc_next_run = gc_at;
@@ -77,7 +77,7 @@ void key_schedule_gc(time_t gc_at)
 void key_schedule_gc_links(void)
 {
         set_bit(KEY_GC_KEY_EXPIRED, &key_gc_flags);
-        queue_work(system_nrt_wq, &key_gc_work);
+        schedule_work(&key_gc_work);
 }
 
 /*
@@ -120,7 +120,7 @@ void key_gc_keytype(struct key_type *ktype)
         set_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags);
 
         kdebug("schedule");
-        queue_work(system_nrt_wq, &key_gc_work);
+        schedule_work(&key_gc_work);
 
         kdebug("sleep");
         wait_on_bit(&key_gc_flags, KEY_GC_REAPING_KEYTYPE, key_gc_wait_bit,
@@ -369,7 +369,7 @@ maybe_resched:
         }
 
         if (gc_state & KEY_GC_REAP_AGAIN)
-                queue_work(system_nrt_wq, &key_gc_work);
+                schedule_work(&key_gc_work);
         kleave(" [end %x]", gc_state);
         return;
 
diff --git a/security/keys/key.c b/security/keys/key.c
index 50d96d4e06f2..3cbe3529c418 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -598,7 +598,7 @@ void key_put(struct key *key)
                 key_check(key);
 
                 if (atomic_dec_and_test(&key->usage))
-                        queue_work(system_nrt_wq, &key_gc_work);
+                        schedule_work(&key_gc_work);
         }
 }
 EXPORT_SYMBOL(key_put);