author     Tejun Heo <tj@kernel.org>       2012-03-19 18:10:58 -0400
committer  Jens Axboe <axboe@kernel.dk>    2012-03-20 07:47:47 -0400
commit     598971bfbdfdc8701337dc1636c7919c44699914
tree       3de33d5059ce3a0cf77a9735804335f9264b7c08 /block/cfq-iosched.c
parent     abede6da27d9bd62ea9512830c83e32b3ee1104c
cfq: don't use icq_get_changed()
cfq caches the associated cfqq's for a given cic. The cache needs to
be flushed if the cic's ioprio or blkcg has changed. It is currently
done by requiring the changing action to set the respective
ICQ_*_CHANGED bit in the icq and testing it from cfq_set_request(),
which involves iterating through all the affected icqs.
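(For illustration only, here is a tiny userspace model of that flag scheme. The toy_* names and the printf "flush" stand-ins are invented for the sketch; only the ICQ_*_CHANGED flag names come from the code being removed.)

    #include <stdio.h>

    /* toy stand-ins for io_context/io_cq; names are illustrative only */
    enum { ICQ_IOPRIO_CHANGED = 1 << 0, ICQ_CGROUP_CHANGED = 1 << 1 };

    struct toy_icq { unsigned int changed; };

    #define NR_ICQS 4
    static struct toy_icq icqs[NR_ICQS];

    /* the "changing action": must walk every affected icq and set a bit */
    static void toy_ioprio_changed(void)
    {
            int i;

            for (i = 0; i < NR_ICQS; i++)
                    icqs[i].changed |= ICQ_IOPRIO_CHANGED;
    }

    /* the request path: test-and-clear the bits, flush caches if set */
    static void toy_set_request(struct toy_icq *icq)
    {
            unsigned int changed = icq->changed;

            icq->changed = 0;
            if (changed & ICQ_IOPRIO_CHANGED)
                    printf("flush cached cfqqs for new ioprio\n");
            if (changed & ICQ_CGROUP_CHANGED)
                    printf("drop cached sync cfqq for new blkcg\n");
    }

    int main(void)
    {
            toy_ioprio_changed();        /* e.g. ionice changed the task's ioprio */
            toy_set_request(&icqs[0]);   /* the next request notices and flushes */
            return 0;
    }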
All cfq wants to know is whether the ioprio and/or blkcg have changed
since the last flush, which can be achieved simply by remembering the
current ioprio and blkcg ID in the cic.
This patch adds cic->{ioprio|blkcg_id}, updates all ioprio users to
use the remembered value instead, and updates the cfq_set_request()
path so that, instead of using icq_get_changed(), the current values
are compared against the remembered ones and the appropriate flush
action is triggered when they differ. The condition tests are moved
inside both _changed functions, which are now named
check_ioprio_changed() and check_blkcg_changed().
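A minimal userspace sketch of the new approach on the ioprio side, loosely modelled on the check_ioprio_changed() added below (the toy_* types, current_ioprio and the printf flush stand-in are invented; this is not the kernel code):

    #include <stdio.h>

    /* toy cic: remembers the ioprio it last built its cfqqs for */
    struct toy_cic {
            int cached_ioprio;      /* mirrors the new cic->ioprio field */
    };

    /* current ioprio as seen by the io_context (stand-in value) */
    static int current_ioprio;

    static void toy_check_ioprio_changed(struct toy_cic *cic)
    {
            int ioprio = current_ioprio;

            /* may trigger spuriously on a fresh cic, but that is harmless */
            if (cic->cached_ioprio == ioprio)
                    return;

            printf("ioprio changed: flush cached cfqqs\n");
            cic->cached_ioprio = ioprio;
    }

    int main(void)
    {
            struct toy_cic cic = { .cached_ioprio = 0 };

            current_ioprio = 0x4004;        /* some new value set by ioprio_set() */
            toy_check_ioprio_changed(&cic); /* flushes once */
            toy_check_ioprio_changed(&cic); /* no-op: value already remembered */
            return 0;
    }

With this shape the changer no longer has to walk and mark every icq; the next request on each cic simply notices that the remembered value is stale.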
ioprio.h::task_ioprio*() can't be used anymore and is replaced with an
open-coded IOPRIO_CLASS_NONE case in cfq_async_queue_prio().
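For reference, an ioprio value packs the scheduling class above the per-class data, so the single remembered cic->ioprio is enough to recover both halves, and a task that never set an ioprio carries class IOPRIO_CLASS_NONE. A self-contained sketch of why that case maps onto the BE array at IOPRIO_NORM follows; the macro values are copied here on the assumption that they match include/linux/ioprio.h of this era, so treat them as illustrative rather than as a userspace API:

    #include <stdio.h>

    /* assumed to mirror include/linux/ioprio.h for this sketch */
    #define IOPRIO_CLASS_SHIFT      13
    #define IOPRIO_PRIO_MASK        ((1UL << IOPRIO_CLASS_SHIFT) - 1)
    #define IOPRIO_PRIO_CLASS(mask) ((mask) >> IOPRIO_CLASS_SHIFT)
    #define IOPRIO_PRIO_DATA(mask)  ((mask) & IOPRIO_PRIO_MASK)
    #define IOPRIO_PRIO_VALUE(class, data)  (((class) << IOPRIO_CLASS_SHIFT) | (data))

    enum { IOPRIO_CLASS_NONE, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE };
    #define IOPRIO_NORM     4

    int main(void)
    {
            int ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 6);

            /* both halves are recoverable from the one cached value */
            printf("class=%d data=%d\n",
                   (int)IOPRIO_PRIO_CLASS(ioprio), (int)IOPRIO_PRIO_DATA(ioprio));

            /* a task that never called ioprio_set() has class NONE and data 0;
             * the old task_ioprio*() helpers fell back to BE/IOPRIO_NORM, which
             * cfq_async_queue_prio() now open-codes as shown in the hunk below */
            ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
            if (IOPRIO_PRIO_CLASS(ioprio) == IOPRIO_CLASS_NONE)
                    printf("use async_cfqq[1][%d] (BE array, normal prio)\n", IOPRIO_NORM);
            return 0;
    }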
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r--  block/cfq-iosched.c  63
1 files changed, 40 insertions, 23 deletions
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 9e8624e9e246..7c3893d4447a 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -218,6 +218,10 @@ struct cfq_io_cq {
         struct io_cq            icq;            /* must be the first member */
         struct cfq_queue        *cfqq[2];
         struct cfq_ttime        ttime;
+        int                     ioprio;         /* the current ioprio */
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+        uint64_t                blkcg_id;       /* the current blkcg ID */
+#endif
 };
 
 /*
@@ -2568,7 +2572,7 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq, struct cfq_io_cq *cic)
         if (!cfq_cfqq_prio_changed(cfqq))
                 return;
 
-        ioprio_class = IOPRIO_PRIO_CLASS(cic->icq.ioc->ioprio);
+        ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
         switch (ioprio_class) {
         default:
                 printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
@@ -2580,11 +2584,11 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq, struct cfq_io_cq *cic)
                 cfqq->ioprio_class = task_nice_ioclass(tsk);
                 break;
         case IOPRIO_CLASS_RT:
-                cfqq->ioprio = task_ioprio(cic->icq.ioc);
+                cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
                 cfqq->ioprio_class = IOPRIO_CLASS_RT;
                 break;
         case IOPRIO_CLASS_BE:
-                cfqq->ioprio = task_ioprio(cic->icq.ioc);
+                cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
                 cfqq->ioprio_class = IOPRIO_CLASS_BE;
                 break;
         case IOPRIO_CLASS_IDLE:
@@ -2602,12 +2606,17 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq, struct cfq_io_cq *cic)
         cfq_clear_cfqq_prio_changed(cfqq);
 }
 
-static void changed_ioprio(struct cfq_io_cq *cic, struct bio *bio)
+static void check_ioprio_changed(struct cfq_io_cq *cic, struct bio *bio)
 {
+        int ioprio = cic->icq.ioc->ioprio;
         struct cfq_data *cfqd = cic_to_cfqd(cic);
         struct cfq_queue *cfqq;
 
-        if (unlikely(!cfqd))
+        /*
+         * Check whether ioprio has changed. The condition may trigger
+         * spuriously on a newly created cic but there's no harm.
+         */
+        if (unlikely(!cfqd) || likely(cic->ioprio == ioprio))
                 return;
 
         cfqq = cic->cfqq[BLK_RW_ASYNC];
@@ -2624,6 +2633,8 @@ static void changed_ioprio(struct cfq_io_cq *cic, struct bio *bio)
         cfqq = cic->cfqq[BLK_RW_SYNC];
         if (cfqq)
                 cfq_mark_cfqq_prio_changed(cfqq);
+
+        cic->ioprio = ioprio;
 }
 
 static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
@@ -2647,17 +2658,24 @@ static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 }
 
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
-static void changed_cgroup(struct cfq_io_cq *cic)
+static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
 {
-        struct cfq_queue *sync_cfqq = cic_to_cfqq(cic, 1);
         struct cfq_data *cfqd = cic_to_cfqd(cic);
-        struct request_queue *q;
+        struct cfq_queue *sync_cfqq;
+        uint64_t id;
 
-        if (unlikely(!cfqd))
-                return;
+        rcu_read_lock();
+        id = bio_blkio_cgroup(bio)->id;
+        rcu_read_unlock();
 
-        q = cfqd->queue;
+        /*
+         * Check whether blkcg has changed. The condition may trigger
+         * spuriously on a newly created cic but there's no harm.
+         */
+        if (unlikely(!cfqd) || likely(cic->blkcg_id == id))
+                return;
 
+        sync_cfqq = cic_to_cfqq(cic, 1);
         if (sync_cfqq) {
                 /*
                  * Drop reference to sync queue. A new sync queue will be
@@ -2667,7 +2685,11 @@ static void changed_cgroup(struct cfq_io_cq *cic)
                 cic_set_cfqq(cic, NULL, 1);
                 cfq_put_queue(sync_cfqq);
         }
+
+        cic->blkcg_id = id;
 }
+#else
+static inline void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) { }
 #endif  /* CONFIG_CFQ_GROUP_IOSCHED */
 
 static struct cfq_queue *
@@ -2731,6 +2753,9 @@ cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
         switch (ioprio_class) {
         case IOPRIO_CLASS_RT:
                 return &cfqd->async_cfqq[0][ioprio];
+        case IOPRIO_CLASS_NONE:
+                ioprio = IOPRIO_NORM;
+                /* fall through */
         case IOPRIO_CLASS_BE:
                 return &cfqd->async_cfqq[1][ioprio];
         case IOPRIO_CLASS_IDLE:
@@ -2744,8 +2769,8 @@ static struct cfq_queue *
 cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
               struct bio *bio, gfp_t gfp_mask)
 {
-        const int ioprio = task_ioprio(cic->icq.ioc);
-        const int ioprio_class = task_ioprio_class(cic->icq.ioc);
+        const int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
+        const int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
         struct cfq_queue **async_cfqq = NULL;
         struct cfq_queue *cfqq = NULL;
 
@@ -3303,21 +3328,13 @@ cfq_set_request(struct request_queue *q, struct request *rq, struct bio *bio,
         const int rw = rq_data_dir(rq);
         const bool is_sync = rq_is_sync(rq);
         struct cfq_queue *cfqq;
-        unsigned int changed;
 
         might_sleep_if(gfp_mask & __GFP_WAIT);
 
         spin_lock_irq(q->queue_lock);
 
-        /* handle changed notifications */
-        changed = icq_get_changed(&cic->icq);
-        if (unlikely(changed & ICQ_IOPRIO_CHANGED))
-                changed_ioprio(cic, bio);
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
-        if (unlikely(changed & ICQ_CGROUP_CHANGED))
-                changed_cgroup(cic);
-#endif
-
+        check_ioprio_changed(cic, bio);
+        check_blkcg_changed(cic, bio);
 new_queue:
         cfqq = cic_to_cfqq(cic, is_sync);
         if (!cfqq || cfqq == &cfqd->oom_cfqq) {