-rw-r--r--	block/blk-cgroup.c	15
-rw-r--r--	block/blk-cgroup.h	5
-rw-r--r--	block/cfq-iosched.c	33
3 files changed, 27 insertions, 26 deletions
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 83930f65016a..af42efbb0c1d 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -213,7 +213,7 @@ void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
 }
 EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);
 
-void blkiocg_set_start_empty_time(struct blkio_group *blkg, bool ignore)
+void blkiocg_set_start_empty_time(struct blkio_group *blkg)
 {
 	unsigned long flags;
 	struct blkio_group_stats *stats;
@@ -228,12 +228,15 @@ void blkiocg_set_start_empty_time(struct blkio_group *blkg, bool ignore)
 	}
 
 	/*
-	 * If ignore is set, we do not panic on the empty flag being set
-	 * already. This is to avoid cases where there are superfluous timeslice
-	 * complete events (for eg., forced_dispatch in CFQ) when no IOs are
-	 * served which could result in triggering the empty check incorrectly.
+	 * group is already marked empty. This can happen if cfqq got new
+	 * request in parent group and moved to this group while being added
+	 * to service tree. Just ignore the event and move on.
 	 */
-	BUG_ON(!ignore && blkio_blkg_empty(stats));
+	if (blkio_blkg_empty(stats)) {
+		spin_unlock_irqrestore(&blkg->stats_lock, flags);
+		return;
+	}
+
 	stats->start_empty_time = sched_clock();
 	blkio_mark_blkg_empty(stats);
 	spin_unlock_irqrestore(&blkg->stats_lock, flags);
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 2c956a06339a..a491a6d56ecf 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -174,7 +174,7 @@ void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
 				unsigned long dequeue);
 void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg);
 void blkiocg_update_idle_time_stats(struct blkio_group *blkg);
-void blkiocg_set_start_empty_time(struct blkio_group *blkg, bool ignore);
+void blkiocg_set_start_empty_time(struct blkio_group *blkg);
 
 #define BLKG_FLAG_FNS(name) \
 static inline void blkio_mark_blkg_##name( \
@@ -205,8 +205,7 @@ static inline void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
 static inline void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
 {}
 static inline void blkiocg_update_idle_time_stats(struct blkio_group *blkg) {}
-static inline void blkiocg_set_start_empty_time(struct blkio_group *blkg,
-						bool ignore) {}
+static inline void blkiocg_set_start_empty_time(struct blkio_group *blkg) {}
 #endif
 
 #if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index d5927b53020e..002a5b621653 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -888,7 +888,7 @@ static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq)
 }
 
 static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
-				struct cfq_queue *cfqq, bool forced)
+				struct cfq_queue *cfqq)
 {
 	struct cfq_rb_root *st = &cfqd->grp_service_tree;
 	unsigned int used_sl, charge_sl;
@@ -918,7 +918,7 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
 	cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
 			st->min_vdisktime);
 	blkiocg_update_timeslice_used(&cfqg->blkg, used_sl);
-	blkiocg_set_start_empty_time(&cfqg->blkg, forced);
+	blkiocg_set_start_empty_time(&cfqg->blkg);
 }
 
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
@@ -1582,7 +1582,7 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
  */
 static void
 __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-		    bool timed_out, bool forced)
+		    bool timed_out)
 {
 	cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
 
@@ -1609,7 +1609,7 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
 	}
 
-	cfq_group_served(cfqd, cfqq->cfqg, cfqq, forced);
+	cfq_group_served(cfqd, cfqq->cfqg, cfqq);
 
 	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
 		cfq_del_cfqq_rr(cfqd, cfqq);
@@ -1628,13 +1628,12 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	}
 }
 
-static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out,
-					bool forced)
+static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
 {
 	struct cfq_queue *cfqq = cfqd->active_queue;
 
 	if (cfqq)
-		__cfq_slice_expired(cfqd, cfqq, timed_out, forced);
+		__cfq_slice_expired(cfqd, cfqq, timed_out);
 }
 
 /*
@@ -2202,7 +2201,7 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
 	}
 
 expire:
-	cfq_slice_expired(cfqd, 0, false);
+	cfq_slice_expired(cfqd, 0);
 new_queue:
 	/*
 	 * Current queue expired. Check if we have to switch to a new
@@ -2228,7 +2227,7 @@ static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
 	BUG_ON(!list_empty(&cfqq->fifo));
 
 	/* By default cfqq is not expired if it is empty. Do it explicitly */
-	__cfq_slice_expired(cfqq->cfqd, cfqq, 0, true);
+	__cfq_slice_expired(cfqq->cfqd, cfqq, 0);
 	return dispatched;
 }
 
@@ -2242,7 +2241,7 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd)
 	int dispatched = 0;
 
 	/* Expire the timeslice of the current active queue first */
-	cfq_slice_expired(cfqd, 0, true);
+	cfq_slice_expired(cfqd, 0);
 	while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
 		__cfq_set_active_queue(cfqd, cfqq);
 		dispatched += __cfq_forced_dispatch_cfqq(cfqq);
@@ -2411,7 +2410,7 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
 	    cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
 	    cfq_class_idle(cfqq))) {
 		cfqq->slice_end = jiffies + 1;
-		cfq_slice_expired(cfqd, 0, false);
+		cfq_slice_expired(cfqd, 0);
 	}
 
 	cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
@@ -2442,7 +2441,7 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
 	orig_cfqg = cfqq->orig_cfqg;
 
 	if (unlikely(cfqd->active_queue == cfqq)) {
-		__cfq_slice_expired(cfqd, cfqq, 0, false);
+		__cfq_slice_expired(cfqd, cfqq, 0);
 		cfq_schedule_dispatch(cfqd);
 	}
 
@@ -2543,7 +2542,7 @@ static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	struct cfq_queue *__cfqq, *next;
 
 	if (unlikely(cfqq == cfqd->active_queue)) {
-		__cfq_slice_expired(cfqd, cfqq, 0, false);
+		__cfq_slice_expired(cfqd, cfqq, 0);
 		cfq_schedule_dispatch(cfqd);
 	}
 
@@ -3172,7 +3171,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
 static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
 	cfq_log_cfqq(cfqd, cfqq, "preempt");
-	cfq_slice_expired(cfqd, 1, false);
+	cfq_slice_expired(cfqd, 1);
 
 	/*
 	 * Put the new queue at the front of the of the current list,
@@ -3383,7 +3382,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 	 * - when there is a close cooperator
 	 */
 	if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
-		cfq_slice_expired(cfqd, 1, false);
+		cfq_slice_expired(cfqd, 1);
 	else if (sync && cfqq_empty &&
 		 !cfq_close_cooperator(cfqd, cfqq)) {
 		cfqd->noidle_tree_requires_idle |= !rq_noidle(rq);
@@ -3648,7 +3647,7 @@ static void cfq_idle_slice_timer(unsigned long data)
 		cfq_clear_cfqq_deep(cfqq);
 	}
 expire:
-	cfq_slice_expired(cfqd, timed_out, false);
+	cfq_slice_expired(cfqd, timed_out);
 out_kick:
 	cfq_schedule_dispatch(cfqd);
 out_cont:
@@ -3691,7 +3690,7 @@ static void cfq_exit_queue(struct elevator_queue *e)
 	spin_lock_irq(q->queue_lock);
 
 	if (cfqd->active_queue)
-		__cfq_slice_expired(cfqd, cfqd->active_queue, 0, false);
+		__cfq_slice_expired(cfqd, cfqd->active_queue, 0);
 
 	while (!list_empty(&cfqd->cic_list)) {
 		struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,