author		Oleg Nesterov <oleg@tv-sign.ru>		2007-11-07 07:51:35 -0500
committer	Jens Axboe <jens.axboe@oracle.com>	2007-11-07 07:51:35 -0500
commit		0e7be9edb9134f833278c381b6feabb54b875208
tree		a4eca4110f1b672f78191c8a8f92086b1b6ade7a	/block/cfq-iosched.c
parent		b70c864d3ce706571d2f3cac1d35d4fba01d6072
cfq_idle_class_timer: add paranoid checks for jiffies overflow
In theory, if the queue was idle long enough, cfq_idle_class_timer may have a
false (and very long) timeout because jiffies can wrap into the past wrt
->last_end_request.

Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
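As background, here is a minimal user-space sketch of the wrap hazard and of the
two-sided check the patch introduces. The time_before()/time_after_eq() bodies
mirror the simplified definitions in include/linux/jiffies.h (without the
typecheck() wrappers); the CFQ_IDLE_GRACE value and the main() harness are
purely illustrative and not part of the patch.

#include <stdio.h>

/* Simplified versions of the kernel's jiffies comparison helpers
 * (include/linux/jiffies.h), without the typecheck() wrappers. */
#define time_after(a, b)	((long)((b) - (a)) < 0)
#define time_before(a, b)	time_after(b, a)
#define time_after_eq(a, b)	((long)((a) - (b)) >= 0)

#define CFQ_IDLE_GRACE	100UL	/* illustrative grace period, in "jiffies" */

int main(void)
{
	unsigned long last_end_request = 1000;
	unsigned long end = last_end_request + CFQ_IDLE_GRACE;
	unsigned long now;

	/* Case 1: normal operation, now lies inside [last_end_request, end). */
	now = last_end_request + 10;
	printf("fresh:   time_before=%d window=%d\n",
	       time_before(now, end),
	       time_before(now, end) && time_after_eq(now, last_end_request));

	/* Case 2: the queue has been idle so long that the counter wrapped
	 * "into the past" relative to last_end_request.  time_before() alone
	 * is still true, which is the false (and very long) timeout the
	 * changelog describes; the added time_after_eq() check rejects it. */
	now = last_end_request - 10;	/* models a wrapped counter */
	printf("wrapped: time_before=%d window=%d\n",
	       time_before(now, end),
	       time_before(now, end) && time_after_eq(now, last_end_request));

	return 0;
}

In the diff below, start_idle_class_timer() arms the timer only when now falls
inside that window and returns 0 otherwise, so its callers fall back to
immediate service or a scheduled dispatch instead of waiting on a bogus timer.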
Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r--	block/cfq-iosched.c	28
1 file changed, 17 insertions(+), 11 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 6831a758d541..0b4a47905575 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -789,6 +789,20 @@ static inline void cfq_slice_expired(struct cfq_data *cfqd, int timed_out)
 		__cfq_slice_expired(cfqd, cfqq, timed_out);
 }
 
+static int start_idle_class_timer(struct cfq_data *cfqd)
+{
+	unsigned long end = cfqd->last_end_request + CFQ_IDLE_GRACE;
+	unsigned long now = jiffies;
+
+	if (time_before(now, end) &&
+	    time_after_eq(now, cfqd->last_end_request)) {
+		mod_timer(&cfqd->idle_class_timer, end);
+		return 1;
+	}
+
+	return 0;
+}
+
 /*
  * Get next queue for service. Unless we have a queue preemption,
  * we'll simply select the first cfqq in the service tree.
@@ -805,19 +819,14 @@ static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
 	cfqq = rb_entry(n, struct cfq_queue, rb_node);
 
 	if (cfq_class_idle(cfqq)) {
-		unsigned long end;
-
 		/*
 		 * if we have idle queues and no rt or be queues had
 		 * pending requests, either allow immediate service if
 		 * the grace period has passed or arm the idle grace
 		 * timer
 		 */
-		end = cfqd->last_end_request + CFQ_IDLE_GRACE;
-		if (time_before(jiffies, end)) {
-			mod_timer(&cfqd->idle_class_timer, end);
+		if (start_idle_class_timer(cfqd))
 			cfqq = NULL;
-		}
 	}
 
 	return cfqq;
@@ -2036,17 +2045,14 @@ out_cont:
 static void cfq_idle_class_timer(unsigned long data)
 {
 	struct cfq_data *cfqd = (struct cfq_data *) data;
-	unsigned long flags, end;
+	unsigned long flags;
 
 	spin_lock_irqsave(cfqd->queue->queue_lock, flags);
 
 	/*
 	 * race with a non-idle queue, reset timer
 	 */
-	end = cfqd->last_end_request + CFQ_IDLE_GRACE;
-	if (!time_after_eq(jiffies, end))
-		mod_timer(&cfqd->idle_class_timer, end);
-	else
+	if (!start_idle_class_timer(cfqd))
 		cfq_schedule_dispatch(cfqd);
 
 	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);