aboutsummaryrefslogtreecommitdiffstats
path: root/block
diff options
context:
space:
mode:
authorSasha Levin <sasha.levin@oracle.com>2012-12-17 10:01:27 -0500
committerJens Axboe <axboe@kernel.dk>2013-01-11 08:43:13 -0500
commit242d98f077ac0ab80920219769eb095503b93f61 (patch)
treedf4bd59a6db181b796ff8687339e4335e2c59104 /block
parent9931faca02c604c22335f5a935a501bb2ace6e20 (diff)
block,elevator: use new hashtable implementation
Switch elevator to use the new hashtable implementation. This reduces the amount of generic unrelated code in the elevator. This also removes the dynamic allocation of the hash table. The size of the table is constant so there's no point in paying the price of an extra dereference when accessing it. This patch depends on d9b482c ("hashtable: introduce a small and naive hashtable") which was merged in v3.6. Signed-off-by: Sasha Levin <sasha.levin@oracle.com> Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block')
-rw-r--r--block/blk.h2
-rw-r--r--block/elevator.c23
2 files changed, 5 insertions, 20 deletions
diff --git a/block/blk.h b/block/blk.h
index 47fdfdd41520..e837b8f619b7 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -61,7 +61,7 @@ static inline void blk_clear_rq_complete(struct request *rq)
61/* 61/*
62 * Internal elevator interface 62 * Internal elevator interface
63 */ 63 */
64#define ELV_ON_HASH(rq) (!hlist_unhashed(&(rq)->hash)) 64#define ELV_ON_HASH(rq) hash_hashed(&(rq)->hash)
65 65
66void blk_insert_flush(struct request *rq); 66void blk_insert_flush(struct request *rq);
67void blk_abort_flushes(struct request_queue *q); 67void blk_abort_flushes(struct request_queue *q);
diff --git a/block/elevator.c b/block/elevator.c
index 9edba1b8323e..11683bb10b7b 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -46,11 +46,6 @@ static LIST_HEAD(elv_list);
46/* 46/*
47 * Merge hash stuff. 47 * Merge hash stuff.
48 */ 48 */
49static const int elv_hash_shift = 6;
50#define ELV_HASH_BLOCK(sec) ((sec) >> 3)
51#define ELV_HASH_FN(sec) \
52 (hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
53#define ELV_HASH_ENTRIES (1 << elv_hash_shift)
54#define rq_hash_key(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq)) 49#define rq_hash_key(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq))
55 50
56/* 51/*
@@ -142,7 +137,6 @@ static struct elevator_queue *elevator_alloc(struct request_queue *q,
142 struct elevator_type *e) 137 struct elevator_type *e)
143{ 138{
144 struct elevator_queue *eq; 139 struct elevator_queue *eq;
145 int i;
146 140
147 eq = kmalloc_node(sizeof(*eq), GFP_KERNEL | __GFP_ZERO, q->node); 141 eq = kmalloc_node(sizeof(*eq), GFP_KERNEL | __GFP_ZERO, q->node);
148 if (unlikely(!eq)) 142 if (unlikely(!eq))
@@ -151,14 +145,7 @@ static struct elevator_queue *elevator_alloc(struct request_queue *q,
151 eq->type = e; 145 eq->type = e;
152 kobject_init(&eq->kobj, &elv_ktype); 146 kobject_init(&eq->kobj, &elv_ktype);
153 mutex_init(&eq->sysfs_lock); 147 mutex_init(&eq->sysfs_lock);
154 148 hash_init(eq->hash);
155 eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES,
156 GFP_KERNEL, q->node);
157 if (!eq->hash)
158 goto err;
159
160 for (i = 0; i < ELV_HASH_ENTRIES; i++)
161 INIT_HLIST_HEAD(&eq->hash[i]);
162 149
163 return eq; 150 return eq;
164err: 151err:
@@ -173,7 +160,6 @@ static void elevator_release(struct kobject *kobj)
173 160
174 e = container_of(kobj, struct elevator_queue, kobj); 161 e = container_of(kobj, struct elevator_queue, kobj);
175 elevator_put(e->type); 162 elevator_put(e->type);
176 kfree(e->hash);
177 kfree(e); 163 kfree(e);
178} 164}
179 165
@@ -240,7 +226,7 @@ EXPORT_SYMBOL(elevator_exit);
240 226
241static inline void __elv_rqhash_del(struct request *rq) 227static inline void __elv_rqhash_del(struct request *rq)
242{ 228{
243 hlist_del_init(&rq->hash); 229 hash_del(&rq->hash);
244} 230}
245 231
246static void elv_rqhash_del(struct request_queue *q, struct request *rq) 232static void elv_rqhash_del(struct request_queue *q, struct request *rq)
@@ -254,7 +240,7 @@ static void elv_rqhash_add(struct request_queue *q, struct request *rq)
254 struct elevator_queue *e = q->elevator; 240 struct elevator_queue *e = q->elevator;
255 241
256 BUG_ON(ELV_ON_HASH(rq)); 242 BUG_ON(ELV_ON_HASH(rq));
257 hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]); 243 hash_add(e->hash, &rq->hash, rq_hash_key(rq));
258} 244}
259 245
260static void elv_rqhash_reposition(struct request_queue *q, struct request *rq) 246static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
@@ -266,11 +252,10 @@ static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
266static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset) 252static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
267{ 253{
268 struct elevator_queue *e = q->elevator; 254 struct elevator_queue *e = q->elevator;
269 struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
270 struct hlist_node *entry, *next; 255 struct hlist_node *entry, *next;
271 struct request *rq; 256 struct request *rq;
272 257
273 hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) { 258 hash_for_each_possible_safe(e->hash, rq, entry, next, hash, offset) {
274 BUG_ON(!ELV_ON_HASH(rq)); 259 BUG_ON(!ELV_ON_HASH(rq));
275 260
276 if (unlikely(!rq_mergeable(rq))) { 261 if (unlikely(!rq_mergeable(rq))) {