Diffstat (limited to 'block/ll_rw_blk.c')
-rw-r--r--	block/ll_rw_blk.c	41
1 file changed, 17 insertions(+), 24 deletions(-)
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 75c98d58f4dd..3b927be03850 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -1143,22 +1143,9 @@ EXPORT_SYMBOL(blk_queue_start_tag);
 void blk_queue_invalidate_tags(struct request_queue *q)
 {
 	struct list_head *tmp, *n;
-	struct request *rq;
-
-	list_for_each_safe(tmp, n, &q->tag_busy_list) {
-		rq = list_entry_rq(tmp);
 
-		if (rq->tag == -1) {
-			printk(KERN_ERR
-			       "%s: bad tag found on list\n", __FUNCTION__);
-			list_del_init(&rq->queuelist);
-			rq->cmd_flags &= ~REQ_QUEUED;
-		} else
-			blk_queue_end_tag(q, rq);
-
-		rq->cmd_flags &= ~REQ_STARTED;
-		__elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0);
-	}
+	list_for_each_safe(tmp, n, &q->tag_busy_list)
+		blk_requeue_request(q, list_entry_rq(tmp));
 }
 
 EXPORT_SYMBOL(blk_queue_invalidate_tags);
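Note: the hunk above drops the open-coded tag teardown and re-insertion and instead requeues each busy request through blk_requeue_request(). The fragment below is only a rough sketch of what such a requeue helper is expected to cover for a tagged request (release the tag, hand the request back to the elevator); it is not the ll_rw_blk.c implementation, and the helper name example_requeue() is hypothetical.

static void example_requeue(struct request_queue *q, struct request *rq)
{
	/* release the tag if the request still owns one */
	if (blk_rq_tagged(rq))
		blk_queue_end_tag(q, rq);

	/* hand the request back to the I/O scheduler for re-dispatch */
	elv_requeue_request(q, rq);
}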
@@ -1634,15 +1621,7 @@ static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
 {
 	struct request_queue *q = bdi->unplug_io_data;
 
-	/*
-	 * devices don't necessarily have an ->unplug_fn defined
-	 */
-	if (q->unplug_fn) {
-		blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
-					q->rq.count[READ] + q->rq.count[WRITE]);
-
-		q->unplug_fn(q);
-	}
+	blk_unplug(q);
 }
 
 static void blk_unplug_work(struct work_struct *work)
@@ -1666,6 +1645,20 @@ static void blk_unplug_timeout(unsigned long data)
 	kblockd_schedule_work(&q->unplug_work);
 }
 
+void blk_unplug(struct request_queue *q)
+{
+	/*
+	 * devices don't necessarily have an ->unplug_fn defined
+	 */
+	if (q->unplug_fn) {
+		blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
+					q->rq.count[READ] + q->rq.count[WRITE]);
+
+		q->unplug_fn(q);
+	}
+}
+EXPORT_SYMBOL(blk_unplug);
+
 /**
  * blk_start_queue - restart a previously stopped queue
  * @q: The &struct request_queue in question
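Note: the hunk above moves the ->unplug_fn check into a new blk_unplug() helper and exports it. The fragment below is a hedged usage sketch, not part of this commit: a stacking driver could kick a member device's queue through the exported helper instead of testing ->unplug_fn itself. The helper name my_unplug_member() is hypothetical.

#include <linux/blkdev.h>

static void my_unplug_member(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	/* blk_unplug() itself copes with a queue that has no ->unplug_fn */
	if (q)
		blk_unplug(q);
}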