author    Shaohua Li <shaohua.li@intel.com>    2011-01-07 02:46:59 -0500
committer Jens Axboe <jaxboe@fusionio.com>    2011-01-07 02:46:59 -0500
commit    30d7b9448f03f2c82d0fd44738674cc156a8ce0a (patch)
tree      f925a59dfe6a7b2da5240e9d43d873de7b1de273 /block
parent    b7908c1035af7652cd613991b54dbff9c8b6bd3a (diff)
block cfq: don't use atomic_t for cfq_queue
cfq_queue->ref is only used with queue_lock held, so ref doesn't need to
be an atomic_t, and atomic operations are slower.

Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Reviewed-by: Jeff Moyer <jmoyer@redhat.com>
Acked-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
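The whole patch follows one pattern: every read and write of cfqq->ref
already happens under queue_lock, so the serialization the lock provides
makes atomic_t redundant. Below is a minimal userspace sketch of the idea,
using a pthread mutex in place of queue_lock; all names are hypothetical
and this illustrates the locking pattern only, not the kernel code itself.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct queue {
	pthread_mutex_t lock;   /* stands in for queue_lock */
	int ref;                /* plain int: only touched under lock */
};

static struct queue *queue_create(void)
{
	struct queue *q = calloc(1, sizeof(*q));

	pthread_mutex_init(&q->lock, NULL);
	q->ref = 1;             /* caller's initial reference */
	return q;
}

static void queue_get(struct queue *q)
{
	pthread_mutex_lock(&q->lock);
	q->ref++;               /* no atomic_inc() needed under the lock */
	pthread_mutex_unlock(&q->lock);
}

static void queue_put(struct queue *q)
{
	int free_it;

	pthread_mutex_lock(&q->lock);
	q->ref--;               /* replaces atomic_dec_and_test() */
	free_it = (q->ref == 0);
	pthread_mutex_unlock(&q->lock);

	if (free_it) {          /* last reference dropped: release */
		pthread_mutex_destroy(&q->lock);
		free(q);
	}
}

int main(void)
{
	struct queue *q = queue_create();

	queue_get(q);           /* ref: 1 -> 2 */
	queue_put(q);           /* ref: 2 -> 1 */
	queue_put(q);           /* ref: 1 -> 0, queue freed */
	puts("done");
	return 0;
}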
Diffstat (limited to 'block')
-rw-r--r--    block/cfq-iosched.c    27
1 file changed, 16 insertions(+), 11 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index c19d015ac5a5..4cb4cf73ac00 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -96,7 +96,7 @@ struct cfq_rb_root {
 	 */
 struct cfq_queue {
 	/* reference count */
-	atomic_t ref;
+	int ref;
 	/* various state flags, see below */
 	unsigned int flags;
 	/* parent cfq_data */
@@ -2025,7 +2025,7 @@ static int cfqq_process_refs(struct cfq_queue *cfqq)
 	int process_refs, io_refs;
 
 	io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
-	process_refs = atomic_read(&cfqq->ref) - io_refs;
+	process_refs = cfqq->ref - io_refs;
 	BUG_ON(process_refs < 0);
 	return process_refs;
 }
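For reference, cfqq_process_refs() splits the total reference count into
references held by in-flight requests (allocated[READ] + allocated[WRITE])
and references held by processes; the patch only changes how the total is
read. A tiny standalone example of the arithmetic, with made-up numbers:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	int ref = 5;                 /* total refs on the queue (made up) */
	int allocated_read = 2;      /* refs from queued read requests */
	int allocated_write = 1;     /* refs from queued write requests */
	int io_refs = allocated_read + allocated_write;
	int process_refs = ref - io_refs;   /* what cfqq_process_refs() returns */

	assert(process_refs >= 0);   /* mirrors the BUG_ON() */
	printf("process_refs = %d\n", process_refs);   /* prints 2 */
	return 0;
}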
@@ -2065,10 +2065,10 @@ static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
 	 */
 	if (new_process_refs >= process_refs) {
 		cfqq->new_cfqq = new_cfqq;
-		atomic_add(process_refs, &new_cfqq->ref);
+		new_cfqq->ref += process_refs;
 	} else {
 		new_cfqq->new_cfqq = cfqq;
-		atomic_add(new_process_refs, &cfqq->ref);
+		cfqq->ref += new_process_refs;
 	}
 }
 
@@ -2532,9 +2532,10 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
 	struct cfq_data *cfqd = cfqq->cfqd;
 	struct cfq_group *cfqg, *orig_cfqg;
 
-	BUG_ON(atomic_read(&cfqq->ref) <= 0);
+	BUG_ON(cfqq->ref <= 0);
 
-	if (!atomic_dec_and_test(&cfqq->ref))
+	cfqq->ref--;
+	if (cfqq->ref)
 		return;
 
 	cfq_log_cfqq(cfqd, cfqq, "put_queue");
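The cfq_put_queue() hunk is the only place where the conversion is more
than a one-for-one substitution: atomic_dec_and_test() becomes a plain
decrement followed by a zero test. With queue_lock held across both
statements no other CPU can observe or modify the count in between, so the
two forms decide free/no-free identically. A standalone sketch of that
equivalence, with hypothetical names and C11 atomics standing in for the
kernel's atomic_t:

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int aref;   /* old style: atomic reference count */
static int ref;           /* new style: plain int, lock-protected */

/* old form: last dropper detects zero in one atomic step */
static bool put_atomic(void)
{
	return atomic_fetch_sub(&aref, 1) == 1;   /* like atomic_dec_and_test() */
}

/* new form: safe only because the caller holds the serializing lock */
static bool put_locked(void)
{
	ref--;
	return ref == 0;
}

int main(void)
{
	atomic_store(&aref, 2);
	ref = 2;
	assert(put_atomic() == put_locked());   /* first put: count 2 -> 1 */
	assert(put_atomic() == put_locked());   /* second put: 1 -> 0, free */
	return 0;
}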
@@ -2837,7 +2838,7 @@ static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	RB_CLEAR_NODE(&cfqq->p_node);
 	INIT_LIST_HEAD(&cfqq->fifo);
 
-	atomic_set(&cfqq->ref, 0);
+	cfqq->ref = 0;
 	cfqq->cfqd = cfqd;
 
 	cfq_mark_cfqq_prio_changed(cfqq);
@@ -2973,11 +2974,11 @@ cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
 	 * pin the queue now that it's allocated, scheduler exit will prune it
 	 */
 	if (!is_sync && !(*async_cfqq)) {
-		atomic_inc(&cfqq->ref);
+		cfqq->ref++;
 		*async_cfqq = cfqq;
 	}
 
-	atomic_inc(&cfqq->ref);
+	cfqq->ref++;
 	return cfqq;
 }
 
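In cfq_get_queue() an async queue picks up two references: one pins it
through the *async_cfqq pointer until scheduler exit prunes it, the other
belongs to the caller. A small sketch of that double-reference pattern,
with hypothetical names (the real function runs under queue_lock, not
modeled here):

#include <stddef.h>

struct cfq_queue_sketch {
	int ref;
};

static struct cfq_queue_sketch *
get_queue(struct cfq_queue_sketch *cfqq, int is_sync,
	  struct cfq_queue_sketch **async_cfqq)
{
	if (!is_sync && !*async_cfqq) {
		cfqq->ref++;          /* ref owned by the cached pointer */
		*async_cfqq = cfqq;
	}
	cfqq->ref++;                  /* ref owned by the caller */
	return cfqq;
}

int main(void)
{
	static struct cfq_queue_sketch q = { .ref = 0 };
	struct cfq_queue_sketch *async = NULL;

	get_queue(&q, 0, &async);     /* first async lookup: ref becomes 2 */
	get_queue(&q, 0, &async);     /* later lookup: ref becomes 3 */
	return q.ref == 3 ? 0 : 1;
}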
@@ -3679,7 +3680,7 @@ new_queue:
 	}
 
 	cfqq->allocated[rw]++;
-	atomic_inc(&cfqq->ref);
+	cfqq->ref++;
 
 	spin_unlock_irqrestore(q->queue_lock, flags);
 
@@ -3860,6 +3861,10 @@ static void *cfq_init_queue(struct request_queue *q)
 	if (!cfqd)
 		return NULL;
 
+	/*
+	 * Don't need take queue_lock in the routine, since we are
+	 * initializing the ioscheduler, and nobody is using cfqd
+	 */
 	cfqd->cic_index = i;
 
 	/* Init root service tree */
@@ -3899,7 +3904,7 @@ static void *cfq_init_queue(struct request_queue *q)
 	 * will not attempt to free it.
 	 */
 	cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
-	atomic_inc(&cfqd->oom_cfqq.ref);
+	cfqd->oom_cfqq.ref++;
 	cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, &cfqd->root_group);
 
 	INIT_LIST_HEAD(&cfqd->cic_list);
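The oom_cfqq is embedded in cfqd rather than heap-allocated, so it must
never reach cfq_put_queue()'s freeing path; taking one reference at init
time keeps its count permanently above zero, which is what the surrounding
comment means by scheduler exit "will not attempt to free it". A minimal
sketch of that embedded-object pinning idea, with made-up names:

#include <stdio.h>

struct queue {
	int ref;
};

struct sched_data {
	struct queue oom_queue;        /* embedded, never kfree()d */
};

static void put_queue(struct queue *q)
{
	q->ref--;
	if (q->ref)
		return;
	/* a heap-allocated queue would be freed here */
	printf("queue %p reached zero refs\n", (void *)q);
}

int main(void)
{
	struct sched_data sd = { .oom_queue = { .ref = 0 } };

	sd.oom_queue.ref++;            /* init-time pin, as in the hunk above */
	sd.oom_queue.ref++;            /* a user takes a reference... */
	put_queue(&sd.oom_queue);      /* ...and drops it: pin keeps ref at 1 */
	return sd.oom_queue.ref == 1 ? 0 : 1;
}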