Diffstat (limited to 'block')
 -rw-r--r--  block/cfq-iosched.c | 34
 -rw-r--r--  block/ll_rw_blk.c   | 41
 2 files changed, 38 insertions(+), 37 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index e47a9309eb48..0b4a47905575 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -789,6 +789,20 @@ static inline void cfq_slice_expired(struct cfq_data *cfqd, int timed_out)
 	__cfq_slice_expired(cfqd, cfqq, timed_out);
 }
 
+static int start_idle_class_timer(struct cfq_data *cfqd)
+{
+	unsigned long end = cfqd->last_end_request + CFQ_IDLE_GRACE;
+	unsigned long now = jiffies;
+
+	if (time_before(now, end) &&
+	    time_after_eq(now, cfqd->last_end_request)) {
+		mod_timer(&cfqd->idle_class_timer, end);
+		return 1;
+	}
+
+	return 0;
+}
+
 /*
  * Get next queue for service. Unless we have a queue preemption,
  * we'll simply select the first cfqq in the service tree.
@@ -805,19 +819,14 @@ static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
 	cfqq = rb_entry(n, struct cfq_queue, rb_node);
 
 	if (cfq_class_idle(cfqq)) {
-		unsigned long end;
-
 		/*
 		 * if we have idle queues and no rt or be queues had
 		 * pending requests, either allow immediate service if
 		 * the grace period has passed or arm the idle grace
 		 * timer
 		 */
-		end = cfqd->last_end_request + CFQ_IDLE_GRACE;
-		if (time_before(jiffies, end)) {
-			mod_timer(&cfqd->idle_class_timer, end);
+		if (start_idle_class_timer(cfqd))
 			cfqq = NULL;
-		}
 	}
 
 	return cfqq;
@@ -2036,17 +2045,14 @@ out_cont:
 static void cfq_idle_class_timer(unsigned long data)
 {
 	struct cfq_data *cfqd = (struct cfq_data *) data;
-	unsigned long flags, end;
+	unsigned long flags;
 
 	spin_lock_irqsave(cfqd->queue->queue_lock, flags);
 
 	/*
 	 * race with a non-idle queue, reset timer
 	 */
-	end = cfqd->last_end_request + CFQ_IDLE_GRACE;
-	if (!time_after_eq(jiffies, end))
-		mod_timer(&cfqd->idle_class_timer, end);
-	else
+	if (!start_idle_class_timer(cfqd))
 		cfq_schedule_dispatch(cfqd);
 
 	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
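The two hunks above replace open-coded copies of the grace-period test with the new start_idle_class_timer() helper. The helper also adds a time_after_eq(now, cfqd->last_end_request) guard, so a last_end_request value sitting ahead of the current jiffies (before the first completion, or across a counter wrap) no longer arms the timer. A minimal userspace sketch of the wrap-safe comparisons involved; the macro bodies mirror the classic <linux/jiffies.h> definitions, and all values below are made up for illustration:

#include <stdio.h>

#define time_after(a, b)	((long)((b) - (a)) < 0)
#define time_before(a, b)	time_after(b, a)
#define time_after_eq(a, b)	((long)((a) - (b)) >= 0)

int main(void)
{
	unsigned long last_end_request = (unsigned long)-16; /* 16 ticks before wrap */
	unsigned long grace = 32;                     /* stand-in for CFQ_IDLE_GRACE */
	unsigned long end = last_end_request + grace; /* wraps around to 16 */
	unsigned long now = 8;                        /* "jiffies" after the wrap */

	/*
	 * A naive 'now >= last_end_request' fails here because the counter
	 * wrapped; the signed-difference trick gets both answers right.
	 */
	printf("time_before(now, end)                = %d\n",
	       time_before(now, end));
	printf("time_after_eq(now, last_end_request) = %d\n",
	       time_after_eq(now, last_end_request));
	return 0;
}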
@@ -2068,9 +2074,10 @@ static void cfq_put_async_queues(struct cfq_data *cfqd)
 		cfq_put_queue(cfqd->async_cfqq[0][i]);
 		if (cfqd->async_cfqq[1][i])
 			cfq_put_queue(cfqd->async_cfqq[1][i]);
-		if (cfqd->async_idle_cfqq)
-			cfq_put_queue(cfqd->async_idle_cfqq);
 	}
+
+	if (cfqd->async_idle_cfqq)
+		cfq_put_queue(cfqd->async_idle_cfqq);
 }
 
 static void cfq_exit_queue(elevator_t *e)
@@ -2125,6 +2132,7 @@ static void *cfq_init_queue(struct request_queue *q)
 
 	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
 
+	cfqd->last_end_request = jiffies;
 	cfqd->cfq_quantum = cfq_quantum;
 	cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
 	cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
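The last two hunks fix independent problems: cfq_put_async_queues() was dropping the single async_idle_cfqq reference once per iteration of the surrounding priority loop instead of once in total, and cfqd->last_end_request previously started out as zero, which made the grace-period arithmetic meaningless until the first request completed. A toy sketch (not kernel code; the names are invented) of why the put belongs outside the loop:

#include <assert.h>

struct obj { int refcount; };

static void put(struct obj *o)
{
	assert(o->refcount > 0);	/* a double put would trip this */
	o->refcount--;
}

int main(void)
{
	struct obj idle = { .refcount = 1 };	/* one reference, like async_idle_cfqq */
	int i;

	for (i = 0; i < 8; i++) {	/* 8 stands in for the per-priority count */
		/* per-priority queues are put here, one reference each */
	}
	put(&idle);	/* the idle queue's single reference: exactly one put */
	return 0;
}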
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 75c98d58f4dd..3b927be03850 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -1143,22 +1143,9 @@ EXPORT_SYMBOL(blk_queue_start_tag);
 void blk_queue_invalidate_tags(struct request_queue *q)
 {
 	struct list_head *tmp, *n;
-	struct request *rq;
-
-	list_for_each_safe(tmp, n, &q->tag_busy_list) {
-		rq = list_entry_rq(tmp);
 
-		if (rq->tag == -1) {
-			printk(KERN_ERR
-			       "%s: bad tag found on list\n", __FUNCTION__);
-			list_del_init(&rq->queuelist);
-			rq->cmd_flags &= ~REQ_QUEUED;
-		} else
-			blk_queue_end_tag(q, rq);
-
-		rq->cmd_flags &= ~REQ_STARTED;
-		__elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0);
-	}
+	list_for_each_safe(tmp, n, &q->tag_busy_list)
+		blk_requeue_request(q, list_entry_rq(tmp));
 }
 
 EXPORT_SYMBOL(blk_queue_invalidate_tags);
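With this hunk, blk_queue_invalidate_tags() stops open-coding the end-tag-and-requeue sequence and delegates to blk_requeue_request(), the common requeue path; the printk for bad tags disappears, and every request on tag_busy_list now takes the same route. For context, that helper in kernels of this vintage looked roughly like the following (paraphrased for illustration, not part of this patch):

void blk_requeue_request(struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);

	/* release the tag before handing the request back to the elevator */
	if (blk_rq_tagged(rq))
		blk_queue_end_tag(q, rq);

	elv_requeue_request(q, rq);
}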
@@ -1634,15 +1621,7 @@ static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
 {
 	struct request_queue *q = bdi->unplug_io_data;
 
-	/*
-	 * devices don't necessarily have an ->unplug_fn defined
-	 */
-	if (q->unplug_fn) {
-		blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
-				q->rq.count[READ] + q->rq.count[WRITE]);
-
-		q->unplug_fn(q);
-	}
+	blk_unplug(q);
 }
 
 static void blk_unplug_work(struct work_struct *work)
@@ -1666,6 +1645,20 @@ static void blk_unplug_timeout(unsigned long data)
 	kblockd_schedule_work(&q->unplug_work);
 }
 
+void blk_unplug(struct request_queue *q)
+{
+	/*
+	 * devices don't necessarily have an ->unplug_fn defined
+	 */
+	if (q->unplug_fn) {
+		blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
+				q->rq.count[READ] + q->rq.count[WRITE]);
+
+		q->unplug_fn(q);
+	}
+}
+EXPORT_SYMBOL(blk_unplug);
+
 /**
  * blk_start_queue - restart a previously stopped queue
  * @q: The &struct request_queue in question
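The final hunk moves the "->unplug_fn may be undefined" logic into a new exported blk_unplug(), so blk_backing_dev_unplug() above and callers outside this file (stacking drivers such as md/dm are the usual candidates) share one implementation. A hedged sketch of such a caller; my_flush_lower and lower_bdev are invented names, while blk_unplug() and bdev_get_queue() are the kernel API of this era:

#include <linux/blkdev.h>

/* Hypothetical stacking-driver helper: kick pending I/O on a lower device. */
static void my_flush_lower(struct block_device *lower_bdev)
{
	struct request_queue *q = bdev_get_queue(lower_bdev);

	/* Safe even when the device defines no ->unplug_fn; blk_unplug checks. */
	blk_unplug(q);
}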