| author | Akinobu Mita <mita@miraclelinux.com> | 2006-04-24 15:12:59 -0400 |
|---|---|---|
| committer | Jens Axboe <axboe@nelson.home.kernel.dk> | 2006-06-23 11:10:38 -0400 |
| commit | bae386f7884aa3720cc7880b36a41a1d2b9c327b (patch) | |
| tree | e6ea0627160dc009cd825e44489583028c83791f /block/deadline-iosched.c | |
| parent | 199f4c9f76fd8b030405abddf294e771f888de03 (diff) | |
[PATCH] iosched: use hlist for request hashtable
Use hlist instead of list_head for the request hashtable in deadline-iosched
and as-iosched. This also allows removing the flag that tracked whether a
request was hashed, since hlist_unhashed() provides that information.
Signed-off-by: Akinobu Mita <mita@miraclelinux.com>
Signed-off-by: Jens Axboe <axboe@suse.de>
block/as-iosched.c | 45 +++++++++++++++++++--------------------------
block/deadline-iosched.c | 39 ++++++++++++++++-----------------------
2 files changed, 35 insertions(+), 49 deletions(-)
Diffstat (limited to 'block/deadline-iosched.c')
-rw-r--r-- | block/deadline-iosched.c | 39 |
1 file changed, 16 insertions(+), 23 deletions(-)
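The key point of the conversion is that an hlist_node knows whether it is currently on a list: hlist_unhashed() is true when the node's pprev back-pointer is NULL, which is exactly the state left behind by INIT_HLIST_NODE() and hlist_del_init(). That is why the separate on_hash flag in struct deadline_rq can be dropped. The snippet below is a minimal userspace sketch of those primitives (a simplified re-implementation for illustration, not the kernel's own list.h) showing the flag-free hashed/unhashed check:

```c
#include <assert.h>
#include <stddef.h>

/* Doubly linked list with a single-pointer head, as used for hash buckets. */
struct hlist_head { struct hlist_node *first; };
struct hlist_node { struct hlist_node *next, **pprev; };

static void INIT_HLIST_HEAD(struct hlist_head *h) { h->first = NULL; }
static void INIT_HLIST_NODE(struct hlist_node *n) { n->next = NULL; n->pprev = NULL; }

/* A node is "unhashed" when it is not linked into any bucket. */
static int hlist_unhashed(const struct hlist_node *n)
{
	return !n->pprev;
}

static void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
	struct hlist_node *first = h->first;

	n->next = first;
	if (first)
		first->pprev = &n->next;
	h->first = n;
	n->pprev = &h->first;
}

static void hlist_del_init(struct hlist_node *n)
{
	if (hlist_unhashed(n))
		return;
	*n->pprev = n->next;
	if (n->next)
		n->next->pprev = n->pprev;
	INIT_HLIST_NODE(n);		/* back to the "unhashed" state */
}

int main(void)
{
	struct hlist_head bucket;
	struct hlist_node req;

	INIT_HLIST_HEAD(&bucket);
	INIT_HLIST_NODE(&req);

	assert(hlist_unhashed(&req));	/* no separate on_hash flag needed */
	hlist_add_head(&req, &bucket);
	assert(!hlist_unhashed(&req));
	hlist_del_init(&req);
	assert(hlist_unhashed(&req));
	return 0;
}
```

An hlist_head also holds a single pointer instead of the two in a list_head, which is what makes it the natural choice for hash table buckets.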
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index c94de8e12fbf..e5bccaaed563 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -30,8 +30,7 @@ static const int deadline_hash_shift = 5;
 #define DL_HASH_FN(sec)		(hash_long(DL_HASH_BLOCK((sec)), deadline_hash_shift))
 #define DL_HASH_ENTRIES		(1 << deadline_hash_shift)
 #define rq_hash_key(rq)		((rq)->sector + (rq)->nr_sectors)
-#define list_entry_hash(ptr)	list_entry((ptr), struct deadline_rq, hash)
-#define ON_HASH(drq)		(drq)->on_hash
+#define ON_HASH(drq)		(!hlist_unhashed(&(drq)->hash))
 
 struct deadline_data {
 	/*
@@ -48,7 +47,7 @@ struct deadline_data {
 	 * next in sort order. read, write or both are NULL
 	 */
 	struct deadline_rq *next_drq[2];
-	struct list_head *hash;		/* request hash */
+	struct hlist_head *hash;	/* request hash */
 	unsigned int batching;		/* number of sequential requests made */
 	sector_t last_sector;		/* head position */
 	unsigned int starved;		/* times reads have starved writes */
@@ -79,8 +78,7 @@ struct deadline_rq {
 	/*
 	 * request hash, key is the ending offset (for back merge lookup)
 	 */
-	struct list_head hash;
-	char on_hash;
+	struct hlist_node hash;
 
 	/*
 	 * expire fifo
@@ -100,8 +98,7 @@ static kmem_cache_t *drq_pool;
  */
 static inline void __deadline_del_drq_hash(struct deadline_rq *drq)
 {
-	drq->on_hash = 0;
-	list_del_init(&drq->hash);
+	hlist_del_init(&drq->hash);
 }
 
 static inline void deadline_del_drq_hash(struct deadline_rq *drq)
@@ -117,8 +114,7 @@ deadline_add_drq_hash(struct deadline_data *dd, struct deadline_rq *drq)
 
 	BUG_ON(ON_HASH(drq));
 
-	drq->on_hash = 1;
-	list_add(&drq->hash, &dd->hash[DL_HASH_FN(rq_hash_key(rq))]);
+	hlist_add_head(&drq->hash, &dd->hash[DL_HASH_FN(rq_hash_key(rq))]);
 }
 
 /*
@@ -128,26 +124,24 @@ static inline void
 deadline_hot_drq_hash(struct deadline_data *dd, struct deadline_rq *drq)
 {
 	struct request *rq = drq->request;
-	struct list_head *head = &dd->hash[DL_HASH_FN(rq_hash_key(rq))];
+	struct hlist_head *head = &dd->hash[DL_HASH_FN(rq_hash_key(rq))];
 
-	if (ON_HASH(drq) && drq->hash.prev != head) {
-		list_del(&drq->hash);
-		list_add(&drq->hash, head);
+	if (ON_HASH(drq) && &drq->hash != head->first) {
+		hlist_del(&drq->hash);
+		hlist_add_head(&drq->hash, head);
 	}
 }
 
 static struct request *
 deadline_find_drq_hash(struct deadline_data *dd, sector_t offset)
 {
-	struct list_head *hash_list = &dd->hash[DL_HASH_FN(offset)];
-	struct list_head *entry, *next = hash_list->next;
+	struct hlist_head *hash_list = &dd->hash[DL_HASH_FN(offset)];
+	struct hlist_node *entry, *next;
+	struct deadline_rq *drq;
 
-	while ((entry = next) != hash_list) {
-		struct deadline_rq *drq = list_entry_hash(entry);
+	hlist_for_each_entry_safe(drq, entry, next, hash_list, hash) {
 		struct request *__rq = drq->request;
 
-		next = entry->next;
-
 		BUG_ON(!ON_HASH(drq));
 
 		if (!rq_mergeable(__rq)) {
@@ -625,7 +619,7 @@ static void *deadline_init_queue(request_queue_t *q, elevator_t *e)
 		return NULL;
 	memset(dd, 0, sizeof(*dd));
 
-	dd->hash = kmalloc_node(sizeof(struct list_head)*DL_HASH_ENTRIES,
+	dd->hash = kmalloc_node(sizeof(struct hlist_head)*DL_HASH_ENTRIES,
 				GFP_KERNEL, q->node);
 	if (!dd->hash) {
 		kfree(dd);
@@ -641,7 +635,7 @@ static void *deadline_init_queue(request_queue_t *q, elevator_t *e)
 	}
 
 	for (i = 0; i < DL_HASH_ENTRIES; i++)
-		INIT_LIST_HEAD(&dd->hash[i]);
+		INIT_HLIST_HEAD(&dd->hash[i]);
 
 	INIT_LIST_HEAD(&dd->fifo_list[READ]);
 	INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
@@ -677,8 +671,7 @@ deadline_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
 	RB_CLEAR(&drq->rb_node);
 	drq->request = rq;
 
-	INIT_LIST_HEAD(&drq->hash);
-	drq->on_hash = 0;
+	INIT_HLIST_NODE(&drq->hash);
 
 	INIT_LIST_HEAD(&drq->fifo);
 
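The rewritten deadline_find_drq_hash() walks a bucket with hlist_for_each_entry_safe(), which caches the next node before each iteration so the loop body can unhash the current entry (as it does for requests that are no longer mergeable) without losing its place; this replaces the hand-rolled "next = entry->next" step of the old code. As a rough idea of how the five-argument form used here behaves, a userspace approximation (building on the sketch above, not the kernel's actual macro text) could look like:

```c
#include <stddef.h>	/* offsetof */

/* Recover the enclosing structure from a pointer to one of its members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define hlist_entry(ptr, type, member)	container_of(ptr, type, member)

/*
 * Approximation of the five-argument hlist_for_each_entry_safe():
 *   tpos   - typed cursor, e.g. "struct deadline_rq *drq"
 *   pos    - current struct hlist_node
 *   n      - next node, cached before the body runs
 *   head   - the hash bucket being walked
 *   member - name of the hlist_node field inside the entry type
 * Caching "n" first is what makes it safe for the body to unhash "pos".
 * (typeof is the GCC extension that kernel code relies on.)
 */
#define hlist_for_each_entry_safe(tpos, pos, n, head, member)		\
	for (pos = (head)->first;					\
	     pos && ((n = pos->next), 1) &&				\
		((tpos = hlist_entry(pos, typeof(*tpos), member)), 1);	\
	     pos = n)
```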