diff options
author | Tejun Heo <tj@kernel.org> | 2011-12-13 18:33:41 -0500 |
---|---|---|
committer | Jens Axboe <axboe@kernel.dk> | 2011-12-13 18:33:41 -0500 |
commit | 22f746e235a5cbee2a6ca9887b1be2aa7d31fe71 (patch) | |
tree | a9786fe9fdb994b5ff69794023e6e3e48e39e0b0 /block | |
parent | f8fc877d3c1f10457d0d73d8540a0c51a1fa718a (diff) |
block: remove elevator_queue->ops
elevator_queue->ops points to the same ops struct ->elevator_type.ops
is pointing to. The only effect of caching it in elevator_queue is
shorter notation - it doesn't save any indirect dereference.
Relocate elevator_type->list, which is used only during module init/exit,
to the end of the structure, rename elevator_queue->elevator_type to
->type, and replace elevator_queue->ops with elevator_queue->type.ops.
This doesn't introduce any functional difference.
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block')
-rw-r--r-- | block/blk.h | 10 | ||||
-rw-r--r-- | block/elevator.c | 74 |
2 files changed, 41 insertions, 43 deletions
diff --git a/block/blk.h b/block/blk.h index 5bca2668e1bf..4943770e0792 100644 --- a/block/blk.h +++ b/block/blk.h | |||
@@ -94,7 +94,7 @@ static inline struct request *__elv_next_request(struct request_queue *q) | |||
94 | return NULL; | 94 | return NULL; |
95 | } | 95 | } |
96 | if (unlikely(blk_queue_dead(q)) || | 96 | if (unlikely(blk_queue_dead(q)) || |
97 | !q->elevator->ops->elevator_dispatch_fn(q, 0)) | 97 | !q->elevator->type->ops.elevator_dispatch_fn(q, 0)) |
98 | return NULL; | 98 | return NULL; |
99 | } | 99 | } |
100 | } | 100 | } |
@@ -103,16 +103,16 @@ static inline void elv_activate_rq(struct request_queue *q, struct request *rq) | |||
103 | { | 103 | { |
104 | struct elevator_queue *e = q->elevator; | 104 | struct elevator_queue *e = q->elevator; |
105 | 105 | ||
106 | if (e->ops->elevator_activate_req_fn) | 106 | if (e->type->ops.elevator_activate_req_fn) |
107 | e->ops->elevator_activate_req_fn(q, rq); | 107 | e->type->ops.elevator_activate_req_fn(q, rq); |
108 | } | 108 | } |
109 | 109 | ||
110 | static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq) | 110 | static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq) |
111 | { | 111 | { |
112 | struct elevator_queue *e = q->elevator; | 112 | struct elevator_queue *e = q->elevator; |
113 | 113 | ||
114 | if (e->ops->elevator_deactivate_req_fn) | 114 | if (e->type->ops.elevator_deactivate_req_fn) |
115 | e->ops->elevator_deactivate_req_fn(q, rq); | 115 | e->type->ops.elevator_deactivate_req_fn(q, rq); |
116 | } | 116 | } |
117 | 117 | ||
118 | #ifdef CONFIG_FAIL_IO_TIMEOUT | 118 | #ifdef CONFIG_FAIL_IO_TIMEOUT |
diff --git a/block/elevator.c b/block/elevator.c index a16c2d1713e5..31ffe76aed3d 100644 --- a/block/elevator.c +++ b/block/elevator.c | |||
@@ -61,8 +61,8 @@ static int elv_iosched_allow_merge(struct request *rq, struct bio *bio) | |||
61 | struct request_queue *q = rq->q; | 61 | struct request_queue *q = rq->q; |
62 | struct elevator_queue *e = q->elevator; | 62 | struct elevator_queue *e = q->elevator; |
63 | 63 | ||
64 | if (e->ops->elevator_allow_merge_fn) | 64 | if (e->type->ops.elevator_allow_merge_fn) |
65 | return e->ops->elevator_allow_merge_fn(q, rq, bio); | 65 | return e->type->ops.elevator_allow_merge_fn(q, rq, bio); |
66 | 66 | ||
67 | return 1; | 67 | return 1; |
68 | } | 68 | } |
@@ -171,7 +171,7 @@ static struct elevator_type *elevator_get(const char *name) | |||
171 | static int elevator_init_queue(struct request_queue *q, | 171 | static int elevator_init_queue(struct request_queue *q, |
172 | struct elevator_queue *eq) | 172 | struct elevator_queue *eq) |
173 | { | 173 | { |
174 | eq->elevator_data = eq->ops->elevator_init_fn(q); | 174 | eq->elevator_data = eq->type->ops.elevator_init_fn(q); |
175 | if (eq->elevator_data) | 175 | if (eq->elevator_data) |
176 | return 0; | 176 | return 0; |
177 | return -ENOMEM; | 177 | return -ENOMEM; |
@@ -203,8 +203,7 @@ static struct elevator_queue *elevator_alloc(struct request_queue *q, | |||
203 | if (unlikely(!eq)) | 203 | if (unlikely(!eq)) |
204 | goto err; | 204 | goto err; |
205 | 205 | ||
206 | eq->ops = &e->ops; | 206 | eq->type = e; |
207 | eq->elevator_type = e; | ||
208 | kobject_init(&eq->kobj, &elv_ktype); | 207 | kobject_init(&eq->kobj, &elv_ktype); |
209 | mutex_init(&eq->sysfs_lock); | 208 | mutex_init(&eq->sysfs_lock); |
210 | 209 | ||
@@ -228,7 +227,7 @@ static void elevator_release(struct kobject *kobj) | |||
228 | struct elevator_queue *e; | 227 | struct elevator_queue *e; |
229 | 228 | ||
230 | e = container_of(kobj, struct elevator_queue, kobj); | 229 | e = container_of(kobj, struct elevator_queue, kobj); |
231 | elevator_put(e->elevator_type); | 230 | elevator_put(e->type); |
232 | kfree(e->hash); | 231 | kfree(e->hash); |
233 | kfree(e); | 232 | kfree(e); |
234 | } | 233 | } |
@@ -288,9 +287,8 @@ EXPORT_SYMBOL(elevator_init); | |||
288 | void elevator_exit(struct elevator_queue *e) | 287 | void elevator_exit(struct elevator_queue *e) |
289 | { | 288 | { |
290 | mutex_lock(&e->sysfs_lock); | 289 | mutex_lock(&e->sysfs_lock); |
291 | if (e->ops->elevator_exit_fn) | 290 | if (e->type->ops.elevator_exit_fn) |
292 | e->ops->elevator_exit_fn(e); | 291 | e->type->ops.elevator_exit_fn(e); |
293 | e->ops = NULL; | ||
294 | mutex_unlock(&e->sysfs_lock); | 292 | mutex_unlock(&e->sysfs_lock); |
295 | 293 | ||
296 | kobject_put(&e->kobj); | 294 | kobject_put(&e->kobj); |
@@ -500,8 +498,8 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio) | |||
500 | return ELEVATOR_BACK_MERGE; | 498 | return ELEVATOR_BACK_MERGE; |
501 | } | 499 | } |
502 | 500 | ||
503 | if (e->ops->elevator_merge_fn) | 501 | if (e->type->ops.elevator_merge_fn) |
504 | return e->ops->elevator_merge_fn(q, req, bio); | 502 | return e->type->ops.elevator_merge_fn(q, req, bio); |
505 | 503 | ||
506 | return ELEVATOR_NO_MERGE; | 504 | return ELEVATOR_NO_MERGE; |
507 | } | 505 | } |
@@ -544,8 +542,8 @@ void elv_merged_request(struct request_queue *q, struct request *rq, int type) | |||
544 | { | 542 | { |
545 | struct elevator_queue *e = q->elevator; | 543 | struct elevator_queue *e = q->elevator; |
546 | 544 | ||
547 | if (e->ops->elevator_merged_fn) | 545 | if (e->type->ops.elevator_merged_fn) |
548 | e->ops->elevator_merged_fn(q, rq, type); | 546 | e->type->ops.elevator_merged_fn(q, rq, type); |
549 | 547 | ||
550 | if (type == ELEVATOR_BACK_MERGE) | 548 | if (type == ELEVATOR_BACK_MERGE) |
551 | elv_rqhash_reposition(q, rq); | 549 | elv_rqhash_reposition(q, rq); |
@@ -559,8 +557,8 @@ void elv_merge_requests(struct request_queue *q, struct request *rq, | |||
559 | struct elevator_queue *e = q->elevator; | 557 | struct elevator_queue *e = q->elevator; |
560 | const int next_sorted = next->cmd_flags & REQ_SORTED; | 558 | const int next_sorted = next->cmd_flags & REQ_SORTED; |
561 | 559 | ||
562 | if (next_sorted && e->ops->elevator_merge_req_fn) | 560 | if (next_sorted && e->type->ops.elevator_merge_req_fn) |
563 | e->ops->elevator_merge_req_fn(q, rq, next); | 561 | e->type->ops.elevator_merge_req_fn(q, rq, next); |
564 | 562 | ||
565 | elv_rqhash_reposition(q, rq); | 563 | elv_rqhash_reposition(q, rq); |
566 | 564 | ||
@@ -577,8 +575,8 @@ void elv_bio_merged(struct request_queue *q, struct request *rq, | |||
577 | { | 575 | { |
578 | struct elevator_queue *e = q->elevator; | 576 | struct elevator_queue *e = q->elevator; |
579 | 577 | ||
580 | if (e->ops->elevator_bio_merged_fn) | 578 | if (e->type->ops.elevator_bio_merged_fn) |
581 | e->ops->elevator_bio_merged_fn(q, rq, bio); | 579 | e->type->ops.elevator_bio_merged_fn(q, rq, bio); |
582 | } | 580 | } |
583 | 581 | ||
584 | void elv_requeue_request(struct request_queue *q, struct request *rq) | 582 | void elv_requeue_request(struct request_queue *q, struct request *rq) |
@@ -604,12 +602,12 @@ void elv_drain_elevator(struct request_queue *q) | |||
604 | 602 | ||
605 | lockdep_assert_held(q->queue_lock); | 603 | lockdep_assert_held(q->queue_lock); |
606 | 604 | ||
607 | while (q->elevator->ops->elevator_dispatch_fn(q, 1)) | 605 | while (q->elevator->type->ops.elevator_dispatch_fn(q, 1)) |
608 | ; | 606 | ; |
609 | if (q->nr_sorted && printed++ < 10) { | 607 | if (q->nr_sorted && printed++ < 10) { |
610 | printk(KERN_ERR "%s: forced dispatching is broken " | 608 | printk(KERN_ERR "%s: forced dispatching is broken " |
611 | "(nr_sorted=%u), please report this\n", | 609 | "(nr_sorted=%u), please report this\n", |
612 | q->elevator->elevator_type->elevator_name, q->nr_sorted); | 610 | q->elevator->type->elevator_name, q->nr_sorted); |
613 | } | 611 | } |
614 | } | 612 | } |
615 | 613 | ||
@@ -698,7 +696,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where) | |||
698 | * rq cannot be accessed after calling | 696 | * rq cannot be accessed after calling |
699 | * elevator_add_req_fn. | 697 | * elevator_add_req_fn. |
700 | */ | 698 | */ |
701 | q->elevator->ops->elevator_add_req_fn(q, rq); | 699 | q->elevator->type->ops.elevator_add_req_fn(q, rq); |
702 | break; | 700 | break; |
703 | 701 | ||
704 | case ELEVATOR_INSERT_FLUSH: | 702 | case ELEVATOR_INSERT_FLUSH: |
@@ -727,8 +725,8 @@ struct request *elv_latter_request(struct request_queue *q, struct request *rq) | |||
727 | { | 725 | { |
728 | struct elevator_queue *e = q->elevator; | 726 | struct elevator_queue *e = q->elevator; |
729 | 727 | ||
730 | if (e->ops->elevator_latter_req_fn) | 728 | if (e->type->ops.elevator_latter_req_fn) |
731 | return e->ops->elevator_latter_req_fn(q, rq); | 729 | return e->type->ops.elevator_latter_req_fn(q, rq); |
732 | return NULL; | 730 | return NULL; |
733 | } | 731 | } |
734 | 732 | ||
@@ -736,8 +734,8 @@ struct request *elv_former_request(struct request_queue *q, struct request *rq) | |||
736 | { | 734 | { |
737 | struct elevator_queue *e = q->elevator; | 735 | struct elevator_queue *e = q->elevator; |
738 | 736 | ||
739 | if (e->ops->elevator_former_req_fn) | 737 | if (e->type->ops.elevator_former_req_fn) |
740 | return e->ops->elevator_former_req_fn(q, rq); | 738 | return e->type->ops.elevator_former_req_fn(q, rq); |
741 | return NULL; | 739 | return NULL; |
742 | } | 740 | } |
743 | 741 | ||
@@ -745,8 +743,8 @@ int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask) | |||
745 | { | 743 | { |
746 | struct elevator_queue *e = q->elevator; | 744 | struct elevator_queue *e = q->elevator; |
747 | 745 | ||
748 | if (e->ops->elevator_set_req_fn) | 746 | if (e->type->ops.elevator_set_req_fn) |
749 | return e->ops->elevator_set_req_fn(q, rq, gfp_mask); | 747 | return e->type->ops.elevator_set_req_fn(q, rq, gfp_mask); |
750 | 748 | ||
751 | rq->elevator_private[0] = NULL; | 749 | rq->elevator_private[0] = NULL; |
752 | return 0; | 750 | return 0; |
@@ -756,16 +754,16 @@ void elv_put_request(struct request_queue *q, struct request *rq) | |||
756 | { | 754 | { |
757 | struct elevator_queue *e = q->elevator; | 755 | struct elevator_queue *e = q->elevator; |
758 | 756 | ||
759 | if (e->ops->elevator_put_req_fn) | 757 | if (e->type->ops.elevator_put_req_fn) |
760 | e->ops->elevator_put_req_fn(rq); | 758 | e->type->ops.elevator_put_req_fn(rq); |
761 | } | 759 | } |
762 | 760 | ||
763 | int elv_may_queue(struct request_queue *q, int rw) | 761 | int elv_may_queue(struct request_queue *q, int rw) |
764 | { | 762 | { |
765 | struct elevator_queue *e = q->elevator; | 763 | struct elevator_queue *e = q->elevator; |
766 | 764 | ||
767 | if (e->ops->elevator_may_queue_fn) | 765 | if (e->type->ops.elevator_may_queue_fn) |
768 | return e->ops->elevator_may_queue_fn(q, rw); | 766 | return e->type->ops.elevator_may_queue_fn(q, rw); |
769 | 767 | ||
770 | return ELV_MQUEUE_MAY; | 768 | return ELV_MQUEUE_MAY; |
771 | } | 769 | } |
@@ -800,8 +798,8 @@ void elv_completed_request(struct request_queue *q, struct request *rq) | |||
800 | if (blk_account_rq(rq)) { | 798 | if (blk_account_rq(rq)) { |
801 | q->in_flight[rq_is_sync(rq)]--; | 799 | q->in_flight[rq_is_sync(rq)]--; |
802 | if ((rq->cmd_flags & REQ_SORTED) && | 800 | if ((rq->cmd_flags & REQ_SORTED) && |
803 | e->ops->elevator_completed_req_fn) | 801 | e->type->ops.elevator_completed_req_fn) |
804 | e->ops->elevator_completed_req_fn(q, rq); | 802 | e->type->ops.elevator_completed_req_fn(q, rq); |
805 | } | 803 | } |
806 | } | 804 | } |
807 | 805 | ||
@@ -819,7 +817,7 @@ elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page) | |||
819 | 817 | ||
820 | e = container_of(kobj, struct elevator_queue, kobj); | 818 | e = container_of(kobj, struct elevator_queue, kobj); |
821 | mutex_lock(&e->sysfs_lock); | 819 | mutex_lock(&e->sysfs_lock); |
822 | error = e->ops ? entry->show(e, page) : -ENOENT; | 820 | error = e->type ? entry->show(e, page) : -ENOENT; |
823 | mutex_unlock(&e->sysfs_lock); | 821 | mutex_unlock(&e->sysfs_lock); |
824 | return error; | 822 | return error; |
825 | } | 823 | } |
@@ -837,7 +835,7 @@ elv_attr_store(struct kobject *kobj, struct attribute *attr, | |||
837 | 835 | ||
838 | e = container_of(kobj, struct elevator_queue, kobj); | 836 | e = container_of(kobj, struct elevator_queue, kobj); |
839 | mutex_lock(&e->sysfs_lock); | 837 | mutex_lock(&e->sysfs_lock); |
840 | error = e->ops ? entry->store(e, page, length) : -ENOENT; | 838 | error = e->type ? entry->store(e, page, length) : -ENOENT; |
841 | mutex_unlock(&e->sysfs_lock); | 839 | mutex_unlock(&e->sysfs_lock); |
842 | return error; | 840 | return error; |
843 | } | 841 | } |
@@ -858,7 +856,7 @@ int __elv_register_queue(struct request_queue *q, struct elevator_queue *e) | |||
858 | 856 | ||
859 | error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched"); | 857 | error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched"); |
860 | if (!error) { | 858 | if (!error) { |
861 | struct elv_fs_entry *attr = e->elevator_type->elevator_attrs; | 859 | struct elv_fs_entry *attr = e->type->elevator_attrs; |
862 | if (attr) { | 860 | if (attr) { |
863 | while (attr->attr.name) { | 861 | while (attr->attr.name) { |
864 | if (sysfs_create_file(&e->kobj, &attr->attr)) | 862 | if (sysfs_create_file(&e->kobj, &attr->attr)) |
@@ -959,7 +957,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e) | |||
959 | elevator_exit(old_elevator); | 957 | elevator_exit(old_elevator); |
960 | elv_quiesce_end(q); | 958 | elv_quiesce_end(q); |
961 | 959 | ||
962 | blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name); | 960 | blk_add_trace_msg(q, "elv switch: %s", e->type->elevator_name); |
963 | 961 | ||
964 | return 0; | 962 | return 0; |
965 | 963 | ||
@@ -993,7 +991,7 @@ int elevator_change(struct request_queue *q, const char *name) | |||
993 | return -EINVAL; | 991 | return -EINVAL; |
994 | } | 992 | } |
995 | 993 | ||
996 | if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) { | 994 | if (!strcmp(elevator_name, q->elevator->type->elevator_name)) { |
997 | elevator_put(e); | 995 | elevator_put(e); |
998 | return 0; | 996 | return 0; |
999 | } | 997 | } |
@@ -1028,7 +1026,7 @@ ssize_t elv_iosched_show(struct request_queue *q, char *name) | |||
1028 | if (!q->elevator || !blk_queue_stackable(q)) | 1026 | if (!q->elevator || !blk_queue_stackable(q)) |
1029 | return sprintf(name, "none\n"); | 1027 | return sprintf(name, "none\n"); |
1030 | 1028 | ||
1031 | elv = e->elevator_type; | 1029 | elv = e->type; |
1032 | 1030 | ||
1033 | spin_lock(&elv_list_lock); | 1031 | spin_lock(&elv_list_lock); |
1034 | list_for_each_entry(__e, &elv_list, list) { | 1032 | list_for_each_entry(__e, &elv_list, list) { |