aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorAkinobu Mita <mita@miraclelinux.com>2006-04-24 15:12:59 -0400
committerJens Axboe <axboe@nelson.home.kernel.dk>2006-06-23 11:10:38 -0400
commitbae386f7884aa3720cc7880b36a41a1d2b9c327b (patch)
treee6ea0627160dc009cd825e44489583028c83791f
parent199f4c9f76fd8b030405abddf294e771f888de03 (diff)
[PATCH] iosched: use hlist for request hashtable
Use hlist instead of list_head for the request hashtable in deadline-iosched and as-iosched. This also allows removing the flag that tracked whether a request was on the hash, since hlist_unhashed() provides that information directly. Signed-off-by: Akinobu Mita <mita@miraclelinux.com> Signed-off-by: Jens Axboe <axboe@suse.de> block/as-iosched.c | 45 +++++++++++++++++++-------------------- block/deadline-iosched.c | 39 ++++++++++++++++----------------------- 2 files changed, 35 insertions(+), 49 deletions(-)
-rw-r--r--block/as-iosched.c45
-rw-r--r--block/deadline-iosched.c39
2 files changed, 35 insertions, 49 deletions
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 0c750393be4..9b13d72ffef 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -96,7 +96,7 @@ struct as_data {
96 96
97 struct as_rq *next_arq[2]; /* next in sort order */ 97 struct as_rq *next_arq[2]; /* next in sort order */
98 sector_t last_sector[2]; /* last REQ_SYNC & REQ_ASYNC sectors */ 98 sector_t last_sector[2]; /* last REQ_SYNC & REQ_ASYNC sectors */
99 struct list_head *hash; /* request hash */ 99 struct hlist_head *hash; /* request hash */
100 100
101 unsigned long exit_prob; /* probability a task will exit while 101 unsigned long exit_prob; /* probability a task will exit while
102 being waited on */ 102 being waited on */
@@ -165,8 +165,7 @@ struct as_rq {
165 /* 165 /*
166 * request hash, key is the ending offset (for back merge lookup) 166 * request hash, key is the ending offset (for back merge lookup)
167 */ 167 */
168 struct list_head hash; 168 struct hlist_node hash;
169 unsigned int on_hash;
170 169
171 /* 170 /*
172 * expire fifo 171 * expire fifo
@@ -282,17 +281,15 @@ static const int as_hash_shift = 6;
282#define AS_HASH_FN(sec) (hash_long(AS_HASH_BLOCK((sec)), as_hash_shift)) 281#define AS_HASH_FN(sec) (hash_long(AS_HASH_BLOCK((sec)), as_hash_shift))
283#define AS_HASH_ENTRIES (1 << as_hash_shift) 282#define AS_HASH_ENTRIES (1 << as_hash_shift)
284#define rq_hash_key(rq) ((rq)->sector + (rq)->nr_sectors) 283#define rq_hash_key(rq) ((rq)->sector + (rq)->nr_sectors)
285#define list_entry_hash(ptr) list_entry((ptr), struct as_rq, hash)
286 284
287static inline void __as_del_arq_hash(struct as_rq *arq) 285static inline void __as_del_arq_hash(struct as_rq *arq)
288{ 286{
289 arq->on_hash = 0; 287 hlist_del_init(&arq->hash);
290 list_del_init(&arq->hash);
291} 288}
292 289
293static inline void as_del_arq_hash(struct as_rq *arq) 290static inline void as_del_arq_hash(struct as_rq *arq)
294{ 291{
295 if (arq->on_hash) 292 if (!hlist_unhashed(&arq->hash))
296 __as_del_arq_hash(arq); 293 __as_del_arq_hash(arq);
297} 294}
298 295
@@ -300,10 +297,9 @@ static void as_add_arq_hash(struct as_data *ad, struct as_rq *arq)
300{ 297{
301 struct request *rq = arq->request; 298 struct request *rq = arq->request;
302 299
303 BUG_ON(arq->on_hash); 300 BUG_ON(!hlist_unhashed(&arq->hash));
304 301
305 arq->on_hash = 1; 302 hlist_add_head(&arq->hash, &ad->hash[AS_HASH_FN(rq_hash_key(rq))]);
306 list_add(&arq->hash, &ad->hash[AS_HASH_FN(rq_hash_key(rq))]);
307} 303}
308 304
309/* 305/*
@@ -312,31 +308,29 @@ static void as_add_arq_hash(struct as_data *ad, struct as_rq *arq)
312static inline void as_hot_arq_hash(struct as_data *ad, struct as_rq *arq) 308static inline void as_hot_arq_hash(struct as_data *ad, struct as_rq *arq)
313{ 309{
314 struct request *rq = arq->request; 310 struct request *rq = arq->request;
315 struct list_head *head = &ad->hash[AS_HASH_FN(rq_hash_key(rq))]; 311 struct hlist_head *head = &ad->hash[AS_HASH_FN(rq_hash_key(rq))];
316 312
317 if (!arq->on_hash) { 313 if (hlist_unhashed(&arq->hash)) {
318 WARN_ON(1); 314 WARN_ON(1);
319 return; 315 return;
320 } 316 }
321 317
322 if (arq->hash.prev != head) { 318 if (&arq->hash != head->first) {
323 list_del(&arq->hash); 319 hlist_del(&arq->hash);
324 list_add(&arq->hash, head); 320 hlist_add_head(&arq->hash, head);
325 } 321 }
326} 322}
327 323
328static struct request *as_find_arq_hash(struct as_data *ad, sector_t offset) 324static struct request *as_find_arq_hash(struct as_data *ad, sector_t offset)
329{ 325{
330 struct list_head *hash_list = &ad->hash[AS_HASH_FN(offset)]; 326 struct hlist_head *hash_list = &ad->hash[AS_HASH_FN(offset)];
331 struct list_head *entry, *next = hash_list->next; 327 struct hlist_node *entry, *next;
328 struct as_rq *arq;
332 329
333 while ((entry = next) != hash_list) { 330 hlist_for_each_entry_safe(arq, entry, next, hash_list, hash) {
334 struct as_rq *arq = list_entry_hash(entry);
335 struct request *__rq = arq->request; 331 struct request *__rq = arq->request;
336 332
337 next = entry->next; 333 BUG_ON(hlist_unhashed(&arq->hash));
338
339 BUG_ON(!arq->on_hash);
340 334
341 if (!rq_mergeable(__rq)) { 335 if (!rq_mergeable(__rq)) {
342 as_del_arq_hash(arq); 336 as_del_arq_hash(arq);
@@ -1601,8 +1595,7 @@ static int as_set_request(request_queue_t *q, struct request *rq,
1601 arq->request = rq; 1595 arq->request = rq;
1602 arq->state = AS_RQ_PRESCHED; 1596 arq->state = AS_RQ_PRESCHED;
1603 arq->io_context = NULL; 1597 arq->io_context = NULL;
1604 INIT_LIST_HEAD(&arq->hash); 1598 INIT_HLIST_NODE(&arq->hash);
1605 arq->on_hash = 0;
1606 INIT_LIST_HEAD(&arq->fifo); 1599 INIT_LIST_HEAD(&arq->fifo);
1607 rq->elevator_private = arq; 1600 rq->elevator_private = arq;
1608 return 0; 1601 return 0;
@@ -1662,7 +1655,7 @@ static void *as_init_queue(request_queue_t *q, elevator_t *e)
1662 1655
1663 ad->q = q; /* Identify what queue the data belongs to */ 1656 ad->q = q; /* Identify what queue the data belongs to */
1664 1657
1665 ad->hash = kmalloc_node(sizeof(struct list_head)*AS_HASH_ENTRIES, 1658 ad->hash = kmalloc_node(sizeof(struct hlist_head)*AS_HASH_ENTRIES,
1666 GFP_KERNEL, q->node); 1659 GFP_KERNEL, q->node);
1667 if (!ad->hash) { 1660 if (!ad->hash) {
1668 kfree(ad); 1661 kfree(ad);
@@ -1684,7 +1677,7 @@ static void *as_init_queue(request_queue_t *q, elevator_t *e)
1684 INIT_WORK(&ad->antic_work, as_work_handler, q); 1677 INIT_WORK(&ad->antic_work, as_work_handler, q);
1685 1678
1686 for (i = 0; i < AS_HASH_ENTRIES; i++) 1679 for (i = 0; i < AS_HASH_ENTRIES; i++)
1687 INIT_LIST_HEAD(&ad->hash[i]); 1680 INIT_HLIST_HEAD(&ad->hash[i]);
1688 1681
1689 INIT_LIST_HEAD(&ad->fifo_list[REQ_SYNC]); 1682 INIT_LIST_HEAD(&ad->fifo_list[REQ_SYNC]);
1690 INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]); 1683 INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]);
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index c94de8e12fb..e5bccaaed56 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -30,8 +30,7 @@ static const int deadline_hash_shift = 5;
30#define DL_HASH_FN(sec) (hash_long(DL_HASH_BLOCK((sec)), deadline_hash_shift)) 30#define DL_HASH_FN(sec) (hash_long(DL_HASH_BLOCK((sec)), deadline_hash_shift))
31#define DL_HASH_ENTRIES (1 << deadline_hash_shift) 31#define DL_HASH_ENTRIES (1 << deadline_hash_shift)
32#define rq_hash_key(rq) ((rq)->sector + (rq)->nr_sectors) 32#define rq_hash_key(rq) ((rq)->sector + (rq)->nr_sectors)
33#define list_entry_hash(ptr) list_entry((ptr), struct deadline_rq, hash) 33#define ON_HASH(drq) (!hlist_unhashed(&(drq)->hash))
34#define ON_HASH(drq) (drq)->on_hash
35 34
36struct deadline_data { 35struct deadline_data {
37 /* 36 /*
@@ -48,7 +47,7 @@ struct deadline_data {
48 * next in sort order. read, write or both are NULL 47 * next in sort order. read, write or both are NULL
49 */ 48 */
50 struct deadline_rq *next_drq[2]; 49 struct deadline_rq *next_drq[2];
51 struct list_head *hash; /* request hash */ 50 struct hlist_head *hash; /* request hash */
52 unsigned int batching; /* number of sequential requests made */ 51 unsigned int batching; /* number of sequential requests made */
53 sector_t last_sector; /* head position */ 52 sector_t last_sector; /* head position */
54 unsigned int starved; /* times reads have starved writes */ 53 unsigned int starved; /* times reads have starved writes */
@@ -79,8 +78,7 @@ struct deadline_rq {
79 /* 78 /*
80 * request hash, key is the ending offset (for back merge lookup) 79 * request hash, key is the ending offset (for back merge lookup)
81 */ 80 */
82 struct list_head hash; 81 struct hlist_node hash;
83 char on_hash;
84 82
85 /* 83 /*
86 * expire fifo 84 * expire fifo
@@ -100,8 +98,7 @@ static kmem_cache_t *drq_pool;
100 */ 98 */
101static inline void __deadline_del_drq_hash(struct deadline_rq *drq) 99static inline void __deadline_del_drq_hash(struct deadline_rq *drq)
102{ 100{
103 drq->on_hash = 0; 101 hlist_del_init(&drq->hash);
104 list_del_init(&drq->hash);
105} 102}
106 103
107static inline void deadline_del_drq_hash(struct deadline_rq *drq) 104static inline void deadline_del_drq_hash(struct deadline_rq *drq)
@@ -117,8 +114,7 @@ deadline_add_drq_hash(struct deadline_data *dd, struct deadline_rq *drq)
117 114
118 BUG_ON(ON_HASH(drq)); 115 BUG_ON(ON_HASH(drq));
119 116
120 drq->on_hash = 1; 117 hlist_add_head(&drq->hash, &dd->hash[DL_HASH_FN(rq_hash_key(rq))]);
121 list_add(&drq->hash, &dd->hash[DL_HASH_FN(rq_hash_key(rq))]);
122} 118}
123 119
124/* 120/*
@@ -128,26 +124,24 @@ static inline void
128deadline_hot_drq_hash(struct deadline_data *dd, struct deadline_rq *drq) 124deadline_hot_drq_hash(struct deadline_data *dd, struct deadline_rq *drq)
129{ 125{
130 struct request *rq = drq->request; 126 struct request *rq = drq->request;
131 struct list_head *head = &dd->hash[DL_HASH_FN(rq_hash_key(rq))]; 127 struct hlist_head *head = &dd->hash[DL_HASH_FN(rq_hash_key(rq))];
132 128
133 if (ON_HASH(drq) && drq->hash.prev != head) { 129 if (ON_HASH(drq) && &drq->hash != head->first) {
134 list_del(&drq->hash); 130 hlist_del(&drq->hash);
135 list_add(&drq->hash, head); 131 hlist_add_head(&drq->hash, head);
136 } 132 }
137} 133}
138 134
139static struct request * 135static struct request *
140deadline_find_drq_hash(struct deadline_data *dd, sector_t offset) 136deadline_find_drq_hash(struct deadline_data *dd, sector_t offset)
141{ 137{
142 struct list_head *hash_list = &dd->hash[DL_HASH_FN(offset)]; 138 struct hlist_head *hash_list = &dd->hash[DL_HASH_FN(offset)];
143 struct list_head *entry, *next = hash_list->next; 139 struct hlist_node *entry, *next;
140 struct deadline_rq *drq;
144 141
145 while ((entry = next) != hash_list) { 142 hlist_for_each_entry_safe(drq, entry, next, hash_list, hash) {
146 struct deadline_rq *drq = list_entry_hash(entry);
147 struct request *__rq = drq->request; 143 struct request *__rq = drq->request;
148 144
149 next = entry->next;
150
151 BUG_ON(!ON_HASH(drq)); 145 BUG_ON(!ON_HASH(drq));
152 146
153 if (!rq_mergeable(__rq)) { 147 if (!rq_mergeable(__rq)) {
@@ -625,7 +619,7 @@ static void *deadline_init_queue(request_queue_t *q, elevator_t *e)
625 return NULL; 619 return NULL;
626 memset(dd, 0, sizeof(*dd)); 620 memset(dd, 0, sizeof(*dd));
627 621
628 dd->hash = kmalloc_node(sizeof(struct list_head)*DL_HASH_ENTRIES, 622 dd->hash = kmalloc_node(sizeof(struct hlist_head)*DL_HASH_ENTRIES,
629 GFP_KERNEL, q->node); 623 GFP_KERNEL, q->node);
630 if (!dd->hash) { 624 if (!dd->hash) {
631 kfree(dd); 625 kfree(dd);
@@ -641,7 +635,7 @@ static void *deadline_init_queue(request_queue_t *q, elevator_t *e)
641 } 635 }
642 636
643 for (i = 0; i < DL_HASH_ENTRIES; i++) 637 for (i = 0; i < DL_HASH_ENTRIES; i++)
644 INIT_LIST_HEAD(&dd->hash[i]); 638 INIT_HLIST_HEAD(&dd->hash[i]);
645 639
646 INIT_LIST_HEAD(&dd->fifo_list[READ]); 640 INIT_LIST_HEAD(&dd->fifo_list[READ]);
647 INIT_LIST_HEAD(&dd->fifo_list[WRITE]); 641 INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
@@ -677,8 +671,7 @@ deadline_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
677 RB_CLEAR(&drq->rb_node); 671 RB_CLEAR(&drq->rb_node);
678 drq->request = rq; 672 drq->request = rq;
679 673
680 INIT_LIST_HEAD(&drq->hash); 674 INIT_HLIST_NODE(&drq->hash);
681 drq->on_hash = 0;
682 675
683 INIT_LIST_HEAD(&drq->fifo); 676 INIT_LIST_HEAD(&drq->fifo);
684 677