author     Tejun Heo <tj@kernel.org>    2012-08-20 17:51:24 -0400
committer  Tejun Heo <tj@kernel.org>    2012-08-20 17:51:24 -0400
commit     3b07e9ca26866697616097044f25fbe53dbab693 (patch)
tree       7621d076c91c05bc0fc3c63bdc54c279a487dfea /block
parent     43829731dd372d04d6706c51052b9dabab9ca356 (diff)
workqueue: deprecate system_nrt[_freezable]_wq
system_nrt[_freezable]_wq are now spurious. Mark them deprecated and
convert all users to system[_freezable]_wq.
If you're cc'd and wondering what's going on: Now all workqueues are
non-reentrant, so there's no reason to use system_nrt[_freezable]_wq.
Please use system[_freezable]_wq instead.
This patch doesn't make any functional difference.
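
For anyone converting their own callers, here is a minimal sketch of the substitution described above (the work item and function names are illustrative, not taken from this patch): work queued on system_nrt_wq can go through schedule_delayed_work()/system_wq, and system_nrt_freezable_wq users switch to system_freezable_wq.

#include <linux/workqueue.h>

/* hypothetical work item, used only to illustrate the conversion */
static void example_event_fn(struct work_struct *work)
{
	/* ... do the deferred work ... */
}
static DECLARE_DELAYED_WORK(example_dwork, example_event_fn);

static void example_requeue(void)
{
	/* before: queue_delayed_work(system_nrt_wq, &example_dwork, HZ); */
	/* after: every workqueue is non-reentrant, so system_wq is enough */
	schedule_delayed_work(&example_dwork, HZ);

	/* before: queue_delayed_work(system_nrt_freezable_wq, &example_dwork, HZ); */
	queue_delayed_work(system_freezable_wq, &example_dwork, HZ);
}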
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-By: Lai Jiangshan <laijs@cn.fujitsu.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: David Airlie <airlied@linux.ie>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: David Howells <dhowells@redhat.com>
Diffstat (limited to 'block')
-rw-r--r--  block/blk-throttle.c  |  7
-rw-r--r--  block/genhd.c         | 10
2 files changed, 8 insertions, 9 deletions
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index e287c19908c8..5a58e779912b 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -180,7 +180,7 @@ static inline unsigned int total_nr_queued(struct throtl_data *td)
 
 /*
  * Worker for allocating per cpu stat for tgs. This is scheduled on the
- * system_nrt_wq once there are some groups on the alloc_list waiting for
+ * system_wq once there are some groups on the alloc_list waiting for
  * allocation.
  */
 static void tg_stats_alloc_fn(struct work_struct *work)
@@ -194,8 +194,7 @@ alloc_stats:
 		stats_cpu = alloc_percpu(struct tg_stats_cpu);
 		if (!stats_cpu) {
 			/* allocation failed, try again after some time */
-			queue_delayed_work(system_nrt_wq, dwork,
-					   msecs_to_jiffies(10));
+			schedule_delayed_work(dwork, msecs_to_jiffies(10));
 			return;
 		}
 	}
@@ -238,7 +237,7 @@ static void throtl_pd_init(struct blkcg_gq *blkg)
 	 */
 	spin_lock_irqsave(&tg_stats_alloc_lock, flags);
 	list_add(&tg->stats_alloc_node, &tg_stats_alloc_list);
-	queue_delayed_work(system_nrt_wq, &tg_stats_alloc_work, 0);
+	schedule_delayed_work(&tg_stats_alloc_work, 0);
 	spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
 }
 
diff --git a/block/genhd.c b/block/genhd.c
index 5d8b44a6442b..a2f3d6a5f55c 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1490,9 +1490,9 @@ static void __disk_unblock_events(struct gendisk *disk, bool check_now)
 	intv = disk_events_poll_jiffies(disk);
 	set_timer_slack(&ev->dwork.timer, intv / 4);
 	if (check_now)
-		queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
+		queue_delayed_work(system_freezable_wq, &ev->dwork, 0);
 	else if (intv)
-		queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, intv);
+		queue_delayed_work(system_freezable_wq, &ev->dwork, intv);
 out_unlock:
 	spin_unlock_irqrestore(&ev->lock, flags);
 }
@@ -1535,7 +1535,7 @@ void disk_flush_events(struct gendisk *disk, unsigned int mask)
 	spin_lock_irq(&ev->lock);
 	ev->clearing |= mask;
 	if (!ev->block)
-		mod_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
+		mod_delayed_work(system_freezable_wq, &ev->dwork, 0);
 	spin_unlock_irq(&ev->lock);
 }
 
@@ -1571,7 +1571,7 @@ unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
 
 	/* uncondtionally schedule event check and wait for it to finish */
 	disk_block_events(disk);
-	queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
+	queue_delayed_work(system_freezable_wq, &ev->dwork, 0);
 	flush_delayed_work(&ev->dwork);
 	__disk_unblock_events(disk, false);
 
@@ -1608,7 +1608,7 @@ static void disk_events_workfn(struct work_struct *work)
 
 	intv = disk_events_poll_jiffies(disk);
 	if (!ev->block && intv)
-		queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, intv);
+		queue_delayed_work(system_freezable_wq, &ev->dwork, intv);
 
 	spin_unlock_irq(&ev->lock);
 