Diffstat (limited to 'block')
-rw-r--r--	block/blk-core.c	8
-rw-r--r--	block/blk-throttle.c	14
-rw-r--r--	block/genhd.c	14
3 files changed, 13 insertions, 23 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index ee3cb3a5e278..d2da64170513 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -262,7 +262,7 @@ EXPORT_SYMBOL(blk_start_queue);
  **/
 void blk_stop_queue(struct request_queue *q)
 {
-	__cancel_delayed_work(&q->delay_work);
+	cancel_delayed_work(&q->delay_work);
 	queue_flag_set(QUEUE_FLAG_STOPPED, q);
 }
 EXPORT_SYMBOL(blk_stop_queue);
@@ -319,10 +319,8 @@ EXPORT_SYMBOL(__blk_run_queue);
  */
 void blk_run_queue_async(struct request_queue *q)
 {
-	if (likely(!blk_queue_stopped(q))) {
-		__cancel_delayed_work(&q->delay_work);
-		queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
-	}
+	if (likely(!blk_queue_stopped(q)))
+		mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
 }
 EXPORT_SYMBOL(blk_run_queue_async);
 
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index e287c19908c8..a9664fa0b609 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -180,7 +180,7 @@ static inline unsigned int total_nr_queued(struct throtl_data *td)
 
 /*
  * Worker for allocating per cpu stat for tgs. This is scheduled on the
- * system_nrt_wq once there are some groups on the alloc_list waiting for
+ * system_wq once there are some groups on the alloc_list waiting for
  * allocation.
  */
 static void tg_stats_alloc_fn(struct work_struct *work)
@@ -194,8 +194,7 @@ alloc_stats:
 		stats_cpu = alloc_percpu(struct tg_stats_cpu);
 		if (!stats_cpu) {
 			/* allocation failed, try again after some time */
-			queue_delayed_work(system_nrt_wq, dwork,
-					   msecs_to_jiffies(10));
+			schedule_delayed_work(dwork, msecs_to_jiffies(10));
 			return;
 		}
 	}
@@ -238,7 +237,7 @@ static void throtl_pd_init(struct blkcg_gq *blkg)
 	 */
 	spin_lock_irqsave(&tg_stats_alloc_lock, flags);
 	list_add(&tg->stats_alloc_node, &tg_stats_alloc_list);
-	queue_delayed_work(system_nrt_wq, &tg_stats_alloc_work, 0);
+	schedule_delayed_work(&tg_stats_alloc_work, 0);
 	spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
 }
 
@@ -930,12 +929,7 @@ throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
 
 	/* schedule work if limits changed even if no bio is queued */
 	if (total_nr_queued(td) || td->limits_changed) {
-		/*
-		 * We might have a work scheduled to be executed in future.
-		 * Cancel that and schedule a new one.
-		 */
-		__cancel_delayed_work(dwork);
-		queue_delayed_work(kthrotld_workqueue, dwork, delay);
+		mod_delayed_work(kthrotld_workqueue, dwork, delay);
 		throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
 			   delay, jiffies);
 	}
diff --git a/block/genhd.c b/block/genhd.c
index d839723303c8..6cace663a80e 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1490,9 +1490,9 @@ static void __disk_unblock_events(struct gendisk *disk, bool check_now)
 	intv = disk_events_poll_jiffies(disk);
 	set_timer_slack(&ev->dwork.timer, intv / 4);
 	if (check_now)
-		queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
+		queue_delayed_work(system_freezable_wq, &ev->dwork, 0);
 	else if (intv)
-		queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, intv);
+		queue_delayed_work(system_freezable_wq, &ev->dwork, intv);
 out_unlock:
 	spin_unlock_irqrestore(&ev->lock, flags);
 }
@@ -1534,10 +1534,8 @@ void disk_flush_events(struct gendisk *disk, unsigned int mask)
 
 	spin_lock_irq(&ev->lock);
 	ev->clearing |= mask;
-	if (!ev->block) {
-		cancel_delayed_work(&ev->dwork);
-		queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
-	}
+	if (!ev->block)
+		mod_delayed_work(system_freezable_wq, &ev->dwork, 0);
 	spin_unlock_irq(&ev->lock);
 }
 
@@ -1573,7 +1571,7 @@ unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
 
 	/* uncondtionally schedule event check and wait for it to finish */
 	disk_block_events(disk);
-	queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
+	queue_delayed_work(system_freezable_wq, &ev->dwork, 0);
 	flush_delayed_work(&ev->dwork);
 	__disk_unblock_events(disk, false);
 
@@ -1610,7 +1608,7 @@ static void disk_events_workfn(struct work_struct *work)
 
 	intv = disk_events_poll_jiffies(disk);
 	if (!ev->block && intv)
-		queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, intv);
+		queue_delayed_work(system_freezable_wq, &ev->dwork, intv);
 
 	spin_unlock_irq(&ev->lock);
 
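
Note on the pattern adopted above (illustrative, not part of the patch): mod_delayed_work() replaces the open-coded __cancel_delayed_work() + queue_delayed_work() sequence with a single call that either queues the work or, if it is already pending, moves its timer to the new expiry. A minimal sketch under assumed names (poll_work, poll_fn and the reschedule_* helpers are hypothetical):

/* Hypothetical driver snippet contrasting the two idioms; not from this patch. */
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void poll_fn(struct work_struct *work)
{
	/* periodic work would go here */
}
static DECLARE_DELAYED_WORK(poll_work, poll_fn);

/* Old idiom (pre-series): drop a pending timer, then queue afresh. */
static void reschedule_poll_old(unsigned long delay)
{
	__cancel_delayed_work(&poll_work);
	queue_delayed_work(system_wq, &poll_work, delay);
}

/*
 * New idiom: one call that queues the work or, if it is already pending,
 * resets its timer to the new expiry.
 */
static void reschedule_poll_new(unsigned long delay)
{
	mod_delayed_work(system_wq, &poll_work, delay);
}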