diff options
author | Akinobu Mita <mita@miraclelinux.com> | 2006-04-24 15:12:59 -0400 |
---|---|---|
committer | Jens Axboe <axboe@nelson.home.kernel.dk> | 2006-06-23 11:10:38 -0400 |
commit | bae386f7884aa3720cc7880b36a41a1d2b9c327b (patch) | |
tree | e6ea0627160dc009cd825e44489583028c83791f /block/as-iosched.c | |
parent | 199f4c9f76fd8b030405abddf294e771f888de03 (diff) |
[PATCH] iosched: use hlist for request hashtable
Use hlist instead of list_head for request hashtable in deadline-iosched
and as-iosched. This also makes it possible to remove the flag that tracked whether a request was hashed.
Signed-off-by: Akinobu Mita <mita@miraclelinux.com>
Signed-off-by: Jens Axboe <axboe@suse.de>
block/as-iosched.c | 45 +++++++++++++++++++--------------------------
block/deadline-iosched.c | 39 ++++++++++++++++-----------------------
2 files changed, 35 insertions(+), 49 deletions(-)
Diffstat (limited to 'block/as-iosched.c')
-rw-r--r-- | block/as-iosched.c | 45 |
1 file changed, 19 insertions, 26 deletions
diff --git a/block/as-iosched.c b/block/as-iosched.c index 0c750393be4a..9b13d72ffefa 100644 --- a/block/as-iosched.c +++ b/block/as-iosched.c | |||
@@ -96,7 +96,7 @@ struct as_data { | |||
96 | 96 | ||
97 | struct as_rq *next_arq[2]; /* next in sort order */ | 97 | struct as_rq *next_arq[2]; /* next in sort order */ |
98 | sector_t last_sector[2]; /* last REQ_SYNC & REQ_ASYNC sectors */ | 98 | sector_t last_sector[2]; /* last REQ_SYNC & REQ_ASYNC sectors */ |
99 | struct list_head *hash; /* request hash */ | 99 | struct hlist_head *hash; /* request hash */ |
100 | 100 | ||
101 | unsigned long exit_prob; /* probability a task will exit while | 101 | unsigned long exit_prob; /* probability a task will exit while |
102 | being waited on */ | 102 | being waited on */ |
@@ -165,8 +165,7 @@ struct as_rq { | |||
165 | /* | 165 | /* |
166 | * request hash, key is the ending offset (for back merge lookup) | 166 | * request hash, key is the ending offset (for back merge lookup) |
167 | */ | 167 | */ |
168 | struct list_head hash; | 168 | struct hlist_node hash; |
169 | unsigned int on_hash; | ||
170 | 169 | ||
171 | /* | 170 | /* |
172 | * expire fifo | 171 | * expire fifo |
@@ -282,17 +281,15 @@ static const int as_hash_shift = 6; | |||
282 | #define AS_HASH_FN(sec) (hash_long(AS_HASH_BLOCK((sec)), as_hash_shift)) | 281 | #define AS_HASH_FN(sec) (hash_long(AS_HASH_BLOCK((sec)), as_hash_shift)) |
283 | #define AS_HASH_ENTRIES (1 << as_hash_shift) | 282 | #define AS_HASH_ENTRIES (1 << as_hash_shift) |
284 | #define rq_hash_key(rq) ((rq)->sector + (rq)->nr_sectors) | 283 | #define rq_hash_key(rq) ((rq)->sector + (rq)->nr_sectors) |
285 | #define list_entry_hash(ptr) list_entry((ptr), struct as_rq, hash) | ||
286 | 284 | ||
287 | static inline void __as_del_arq_hash(struct as_rq *arq) | 285 | static inline void __as_del_arq_hash(struct as_rq *arq) |
288 | { | 286 | { |
289 | arq->on_hash = 0; | 287 | hlist_del_init(&arq->hash); |
290 | list_del_init(&arq->hash); | ||
291 | } | 288 | } |
292 | 289 | ||
293 | static inline void as_del_arq_hash(struct as_rq *arq) | 290 | static inline void as_del_arq_hash(struct as_rq *arq) |
294 | { | 291 | { |
295 | if (arq->on_hash) | 292 | if (!hlist_unhashed(&arq->hash)) |
296 | __as_del_arq_hash(arq); | 293 | __as_del_arq_hash(arq); |
297 | } | 294 | } |
298 | 295 | ||
@@ -300,10 +297,9 @@ static void as_add_arq_hash(struct as_data *ad, struct as_rq *arq) | |||
300 | { | 297 | { |
301 | struct request *rq = arq->request; | 298 | struct request *rq = arq->request; |
302 | 299 | ||
303 | BUG_ON(arq->on_hash); | 300 | BUG_ON(!hlist_unhashed(&arq->hash)); |
304 | 301 | ||
305 | arq->on_hash = 1; | 302 | hlist_add_head(&arq->hash, &ad->hash[AS_HASH_FN(rq_hash_key(rq))]); |
306 | list_add(&arq->hash, &ad->hash[AS_HASH_FN(rq_hash_key(rq))]); | ||
307 | } | 303 | } |
308 | 304 | ||
309 | /* | 305 | /* |
@@ -312,31 +308,29 @@ static void as_add_arq_hash(struct as_data *ad, struct as_rq *arq) | |||
312 | static inline void as_hot_arq_hash(struct as_data *ad, struct as_rq *arq) | 308 | static inline void as_hot_arq_hash(struct as_data *ad, struct as_rq *arq) |
313 | { | 309 | { |
314 | struct request *rq = arq->request; | 310 | struct request *rq = arq->request; |
315 | struct list_head *head = &ad->hash[AS_HASH_FN(rq_hash_key(rq))]; | 311 | struct hlist_head *head = &ad->hash[AS_HASH_FN(rq_hash_key(rq))]; |
316 | 312 | ||
317 | if (!arq->on_hash) { | 313 | if (hlist_unhashed(&arq->hash)) { |
318 | WARN_ON(1); | 314 | WARN_ON(1); |
319 | return; | 315 | return; |
320 | } | 316 | } |
321 | 317 | ||
322 | if (arq->hash.prev != head) { | 318 | if (&arq->hash != head->first) { |
323 | list_del(&arq->hash); | 319 | hlist_del(&arq->hash); |
324 | list_add(&arq->hash, head); | 320 | hlist_add_head(&arq->hash, head); |
325 | } | 321 | } |
326 | } | 322 | } |
327 | 323 | ||
328 | static struct request *as_find_arq_hash(struct as_data *ad, sector_t offset) | 324 | static struct request *as_find_arq_hash(struct as_data *ad, sector_t offset) |
329 | { | 325 | { |
330 | struct list_head *hash_list = &ad->hash[AS_HASH_FN(offset)]; | 326 | struct hlist_head *hash_list = &ad->hash[AS_HASH_FN(offset)]; |
331 | struct list_head *entry, *next = hash_list->next; | 327 | struct hlist_node *entry, *next; |
328 | struct as_rq *arq; | ||
332 | 329 | ||
333 | while ((entry = next) != hash_list) { | 330 | hlist_for_each_entry_safe(arq, entry, next, hash_list, hash) { |
334 | struct as_rq *arq = list_entry_hash(entry); | ||
335 | struct request *__rq = arq->request; | 331 | struct request *__rq = arq->request; |
336 | 332 | ||
337 | next = entry->next; | 333 | BUG_ON(hlist_unhashed(&arq->hash)); |
338 | |||
339 | BUG_ON(!arq->on_hash); | ||
340 | 334 | ||
341 | if (!rq_mergeable(__rq)) { | 335 | if (!rq_mergeable(__rq)) { |
342 | as_del_arq_hash(arq); | 336 | as_del_arq_hash(arq); |
@@ -1601,8 +1595,7 @@ static int as_set_request(request_queue_t *q, struct request *rq, | |||
1601 | arq->request = rq; | 1595 | arq->request = rq; |
1602 | arq->state = AS_RQ_PRESCHED; | 1596 | arq->state = AS_RQ_PRESCHED; |
1603 | arq->io_context = NULL; | 1597 | arq->io_context = NULL; |
1604 | INIT_LIST_HEAD(&arq->hash); | 1598 | INIT_HLIST_NODE(&arq->hash); |
1605 | arq->on_hash = 0; | ||
1606 | INIT_LIST_HEAD(&arq->fifo); | 1599 | INIT_LIST_HEAD(&arq->fifo); |
1607 | rq->elevator_private = arq; | 1600 | rq->elevator_private = arq; |
1608 | return 0; | 1601 | return 0; |
@@ -1662,7 +1655,7 @@ static void *as_init_queue(request_queue_t *q, elevator_t *e) | |||
1662 | 1655 | ||
1663 | ad->q = q; /* Identify what queue the data belongs to */ | 1656 | ad->q = q; /* Identify what queue the data belongs to */ |
1664 | 1657 | ||
1665 | ad->hash = kmalloc_node(sizeof(struct list_head)*AS_HASH_ENTRIES, | 1658 | ad->hash = kmalloc_node(sizeof(struct hlist_head)*AS_HASH_ENTRIES, |
1666 | GFP_KERNEL, q->node); | 1659 | GFP_KERNEL, q->node); |
1667 | if (!ad->hash) { | 1660 | if (!ad->hash) { |
1668 | kfree(ad); | 1661 | kfree(ad); |
@@ -1684,7 +1677,7 @@ static void *as_init_queue(request_queue_t *q, elevator_t *e) | |||
1684 | INIT_WORK(&ad->antic_work, as_work_handler, q); | 1677 | INIT_WORK(&ad->antic_work, as_work_handler, q); |
1685 | 1678 | ||
1686 | for (i = 0; i < AS_HASH_ENTRIES; i++) | 1679 | for (i = 0; i < AS_HASH_ENTRIES; i++) |
1687 | INIT_LIST_HEAD(&ad->hash[i]); | 1680 | INIT_HLIST_HEAD(&ad->hash[i]); |
1688 | 1681 | ||
1689 | INIT_LIST_HEAD(&ad->fifo_list[REQ_SYNC]); | 1682 | INIT_LIST_HEAD(&ad->fifo_list[REQ_SYNC]); |
1690 | INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]); | 1683 | INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]); |