Diffstat (limited to 'block')
 block/as-iosched.c  |  3 +--
 block/cfq-iosched.c | 22 ++++------------------
 block/ll_rw_blk.c   | 25 ++++++++++++++++++-------
 3 files changed, 25 insertions(+), 25 deletions(-)
diff --git a/block/as-iosched.c b/block/as-iosched.c
index f6dc95489316..bc13dc0b29be 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -1280,8 +1280,7 @@ static void as_work_handler(void *data)
 	unsigned long flags;
 
 	spin_lock_irqsave(q->queue_lock, flags);
-	if (!as_queue_empty(q))
-		q->request_fn(q);
+	blk_start_queueing(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 6fb1613d44d7..9f684cc66bd1 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1553,19 +1553,6 @@ static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 }
 
 /*
- * should really be a ll_rw_blk.c helper
- */
-static void cfq_start_queueing(struct cfq_data *cfqd, struct cfq_queue *cfqq)
-{
-	request_queue_t *q = cfqd->queue;
-
-	if (!blk_queue_plugged(q))
-		q->request_fn(q);
-	else
-		__generic_unplug_device(q);
-}
-
-/*
  * Called when a new fs request (rq) is added (to cfqq). Check if there's
  * something we should do about it
  */
@@ -1593,7 +1580,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		if (cic == cfqd->active_cic &&
 		    del_timer(&cfqd->idle_slice_timer)) {
 			cfq_slice_expired(cfqd, 0);
-			cfq_start_queueing(cfqd, cfqq);
+			blk_start_queueing(cfqd->queue);
 		}
 		return;
 	}
@@ -1614,7 +1601,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		if (cfq_cfqq_wait_request(cfqq)) {
 			cfq_mark_cfqq_must_dispatch(cfqq);
 			del_timer(&cfqd->idle_slice_timer);
-			cfq_start_queueing(cfqd, cfqq);
+			blk_start_queueing(cfqd->queue);
 		}
 	} else if (cfq_should_preempt(cfqd, cfqq, rq)) {
 		/*
@@ -1624,7 +1611,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		 */
 		cfq_preempt_queue(cfqd, cfqq);
 		cfq_mark_cfqq_must_dispatch(cfqq);
-		cfq_start_queueing(cfqd, cfqq);
+		blk_start_queueing(cfqd->queue);
 	}
 }
 
@@ -1832,8 +1819,7 @@ static void cfq_kick_queue(void *data)
 	unsigned long flags;
 
 	spin_lock_irqsave(q->queue_lock, flags);
-	blk_remove_plug(q);
-	q->request_fn(q);
+	blk_start_queueing(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index c6dfa889206c..346be9ae31f6 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -2267,6 +2267,25 @@ struct request *blk_get_request(request_queue_t *q, int rw, gfp_t gfp_mask)
 EXPORT_SYMBOL(blk_get_request);
 
 /**
+ * blk_start_queueing - initiate dispatch of requests to device
+ * @q: request queue to kick into gear
+ *
+ * This is basically a helper to remove the need to know whether a queue
+ * is plugged or not if someone just wants to initiate dispatch of requests
+ * for this queue.
+ *
+ * The queue lock must be held with interrupts disabled.
+ */
+void blk_start_queueing(request_queue_t *q)
+{
+	if (!blk_queue_plugged(q))
+		q->request_fn(q);
+	else
+		__generic_unplug_device(q);
+}
+EXPORT_SYMBOL(blk_start_queueing);
+
+/**
  * blk_requeue_request - put a request back on queue
  * @q: request queue where request should be inserted
  * @rq: request to be inserted
@@ -2333,11 +2352,7 @@ void blk_insert_request(request_queue_t *q, struct request *rq,
 
 	drive_stat_acct(rq, rq->nr_sectors, 1);
 	__elv_add_request(q, rq, where, 0);
-
-	if (blk_queue_plugged(q))
-		__generic_unplug_device(q);
-	else
-		q->request_fn(q);
+	blk_start_queueing(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
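Usage note (illustrative, not part of the patch): a call site that used to open-code the "plugged?" check now just takes the queue lock with interrupts disabled and calls blk_start_queueing(), in the same pattern as the converted as_work_handler() and cfq_kick_queue() above. A minimal sketch against this era's block API, assuming the matching blk_start_queueing() prototype is visible via <linux/blkdev.h>; the handler name and its void *data argument are hypothetical:

	#include <linux/blkdev.h>

	/* Kick a queue from work/handler context; the queue lock must not already be held here. */
	static void example_kick_queue(void *data)
	{
		request_queue_t *q = data;
		unsigned long flags;

		/* blk_start_queueing() requires q->queue_lock held with interrupts disabled */
		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queueing(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}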