author:    Konstantin Khlebnikov <khlebnikov@openvz.org>  2010-05-20 15:21:41 -0400
committer: Jens Axboe <jens.axboe@oracle.com>  2010-05-24 03:06:59 -0400
commit:    80b15c7389caa81a3860f9fc2ee47ec0ea572a63 (patch)
tree:      469486c0371134b18f32741be35bce59f783d864 /block
parent:    bca4b914b5da3d8e7b9b647f620b71dc85c0c394 (diff)
cfq-iosched: compact io_context radix_tree
Use small consecutive indexes as radix tree keys instead of the sparse cfqd address.
This change reduces the radix tree depth from 11 (6 on 32-bit hosts)
to 1 if the host has <= 64 disks under cfq control, or to 0 if there is only one disk.
So this patch saves 10*560 bytes per process (5*296 on 32-bit hosts).
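
The depth figures follow from the radix tree's per-level fanout. The sketch below is illustration only (plain user-space C, not taken from the patch): radix_depth() is a hypothetical helper that assumes the usual 6-bit RADIX_TREE_MAP_SHIFT, and it reproduces the 11/6/1/0 depths quoted above.

#include <stdio.h>

static unsigned int radix_depth(unsigned long long max_key)
{
	const unsigned int shift = 6;	/* RADIX_TREE_MAP_SHIFT: bits per level */
	unsigned int bits = 0;

	if (max_key == 0)
		return 0;	/* a lone entry at index 0 sits in the root */
	while (max_key) {	/* count significant bits of the largest key */
		max_key >>= 1;
		bits++;
	}
	return (bits + shift - 1) / shift;	/* round up to whole levels */
}

int main(void)
{
	printf("64-bit pointer key: depth %u\n", radix_depth(~0ULL));		/* 11 */
	printf("32-bit pointer key: depth %u\n", radix_depth(0xffffffffULL));	/* 6  */
	printf("index <= 63:        depth %u\n", radix_depth(63));		/* 1  */
	printf("single index 0:     depth %u\n", radix_depth(0));		/* 0  */
	return 0;
}

With a radix_tree_node weighing roughly 560 bytes on 64-bit (about 296 on 32-bit), shaving ten levels per io_context is where the 10*560 estimate above comes from.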
For each cfqd, allocate a cic index from an ida.
To unlink a dead cic from the tree without access to the cfqd, store the index in ->key
(bit 0 -- dead mark, bits 1..30 -- index; the ida produces ids in the range 0..2^31-1).
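
The ->key encoding can be exercised on its own with the small sketch below; it reuses the CIC_DEAD_KEY and CIC_DEAD_INDEX_SHIFT values from the diff, while make_dead_key() and the index value 42 are hypothetical stand-ins for cfqd_dead_key() and a real cic_index.

#include <assert.h>
#include <stdio.h>

#define CIC_DEAD_KEY		1ul
#define CIC_DEAD_INDEX_SHIFT	1

/* Hypothetical stand-in for cfqd_dead_key(): encode a cic index into a
 * dead ->key value.  Bit 0 is the dead mark, the index sits above it. */
static unsigned long make_dead_key(unsigned int cic_index)
{
	return ((unsigned long)cic_index << CIC_DEAD_INDEX_SHIFT) | CIC_DEAD_KEY;
}

int main(void)
{
	unsigned int index = 42;	/* arbitrary example index */
	unsigned long key = make_dead_key(index);

	assert(key & CIC_DEAD_KEY);			/* dead mark is set */
	assert((key >> CIC_DEAD_INDEX_SHIFT) == index);	/* index recoverable */
	printf("cic_index %u -> dead key %#lx\n", index, key);
	return 0;
}

A live cic->key still holds the cfqd pointer itself, whose low bit is clear because of allocation alignment, so the dead mark and the pointer can share the same field.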
Signed-off-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'block')
-rw-r--r--	block/cfq-iosched.c	44
1 file changed, 39 insertions(+), 5 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 407602350350..c72e5acc573d 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -64,6 +64,9 @@ static DEFINE_PER_CPU(unsigned long, cfq_ioc_count);
 static struct completion *ioc_gone;
 static DEFINE_SPINLOCK(ioc_gone_lock);
 
+static DEFINE_SPINLOCK(cic_index_lock);
+static DEFINE_IDA(cic_index_ida);
+
 #define CFQ_PRIO_LISTS		IOPRIO_BE_NR
 #define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
 #define cfq_class_rt(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
@@ -271,6 +274,7 @@ struct cfq_data {
 	unsigned int cfq_latency;
 	unsigned int cfq_group_isolation;
 
+	unsigned int cic_index;
 	struct list_head cic_list;
 
 	/*
@@ -431,10 +435,11 @@ static inline void cic_set_cfqq(struct cfq_io_context *cic,
 }
 
 #define CIC_DEAD_KEY	1ul
+#define CIC_DEAD_INDEX_SHIFT	1
 
 static inline void *cfqd_dead_key(struct cfq_data *cfqd)
 {
-	return (void *)((unsigned long) cfqd | CIC_DEAD_KEY);
+	return (void *)(cfqd->cic_index << CIC_DEAD_INDEX_SHIFT | CIC_DEAD_KEY);
 }
 
 static inline struct cfq_data *cic_to_cfqd(struct cfq_io_context *cic)
@@ -2532,7 +2537,7 @@ static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic)
 	BUG_ON(!(dead_key & CIC_DEAD_KEY));
 
 	spin_lock_irqsave(&ioc->lock, flags);
-	radix_tree_delete(&ioc->radix_root, dead_key & ~CIC_DEAD_KEY);
+	radix_tree_delete(&ioc->radix_root, dead_key >> CIC_DEAD_INDEX_SHIFT);
 	hlist_del_rcu(&cic->cic_list);
 	spin_unlock_irqrestore(&ioc->lock, flags);
 
@@ -2906,7 +2911,7 @@ cfq_drop_dead_cic(struct cfq_data *cfqd, struct io_context *ioc,
 
 	BUG_ON(ioc->ioc_data == cic);
 
-	radix_tree_delete(&ioc->radix_root, (unsigned long) cfqd);
+	radix_tree_delete(&ioc->radix_root, cfqd->cic_index);
 	hlist_del_rcu(&cic->cic_list);
 	spin_unlock_irqrestore(&ioc->lock, flags);
 
@@ -2934,7 +2939,7 @@ cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc)
 	}
 
 	do {
-		cic = radix_tree_lookup(&ioc->radix_root, (unsigned long) cfqd);
+		cic = radix_tree_lookup(&ioc->radix_root, cfqd->cic_index);
 		rcu_read_unlock();
 		if (!cic)
 			break;
@@ -2971,7 +2976,7 @@ static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
 
 	spin_lock_irqsave(&ioc->lock, flags);
 	ret = radix_tree_insert(&ioc->radix_root,
-					(unsigned long) cfqd, cic);
+					cfqd->cic_index, cic);
 	if (!ret)
 		hlist_add_head_rcu(&cic->cic_list, &ioc->cic_list);
 	spin_unlock_irqrestore(&ioc->lock, flags);
@@ -3723,10 +3728,32 @@ static void cfq_exit_queue(struct elevator_queue *e)
 
 	cfq_shutdown_timer_wq(cfqd);
 
+	spin_lock(&cic_index_lock);
+	ida_remove(&cic_index_ida, cfqd->cic_index);
+	spin_unlock(&cic_index_lock);
+
 	/* Wait for cfqg->blkg->key accessors to exit their grace periods. */
 	call_rcu(&cfqd->rcu, cfq_cfqd_free);
 }
 
+static int cfq_alloc_cic_index(void)
+{
+	int index, error;
+
+	do {
+		if (!ida_pre_get(&cic_index_ida, GFP_KERNEL))
+			return -ENOMEM;
+
+		spin_lock(&cic_index_lock);
+		error = ida_get_new(&cic_index_ida, &index);
+		spin_unlock(&cic_index_lock);
+		if (error && error != -EAGAIN)
+			return error;
+	} while (error);
+
+	return index;
+}
+
 static void *cfq_init_queue(struct request_queue *q)
 {
 	struct cfq_data *cfqd;
@@ -3734,10 +3761,16 @@ static void *cfq_init_queue(struct request_queue *q)
 	struct cfq_group *cfqg;
 	struct cfq_rb_root *st;
 
+	i = cfq_alloc_cic_index();
+	if (i < 0)
+		return NULL;
+
 	cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
 	if (!cfqd)
 		return NULL;
 
+	cfqd->cic_index = i;
+
 	/* Init root service tree */
 	cfqd->grp_service_tree = CFQ_RB_ROOT;
 
@@ -3999,6 +4032,7 @@ static void __exit cfq_exit(void)
 	 */
 	if (elv_ioc_count_read(cfq_ioc_count))
 		wait_for_completion(&all_gone);
+	ida_destroy(&cic_index_ida);
 	cfq_slab_kill();
 }
 