diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2013-02-28 15:52:24 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-02-28 15:52:24 -0500 |
commit | ee89f81252179dcbf6cd65bd48299f5e52292d88 (patch) | |
tree | 805846cd12821f84cfe619d44c9e3e36e0b0f9e6 /block/elevator.c | |
parent | 21f3b24da9328415792efc780f50b9f434c12465 (diff) | |
parent | de33127d8d3f1d570aad8c2223cd81b206636bc1 (diff) |
Merge branch 'for-3.9/core' of git://git.kernel.dk/linux-block
Pull block IO core bits from Jens Axboe:
"Below are the core block IO bits for 3.9. It was delayed a few days
since my workstation kept crashing every 2-8h after pulling it into
current -git, but turns out it is a bug in the new pstate code (divide
by zero, will report separately). In any case, it contains:
- The big cfq/blkcg update from Tejun and Vivek.
- Additional block and writeback tracepoints from Tejun.
- Improvement of the should sort (based on queues) logic in the plug
flushing.
- _io() variants of the wait_for_completion() interface, using
io_schedule() instead of schedule() to contribute to io wait
properly.
- Various little fixes.
You'll get two trivial merge conflicts, which should be easy enough to
fix up"
Fix up the trivial conflicts due to hlist traversal cleanups (commit
b67bfe0d42ca: "hlist: drop the node parameter from iterators").
* 'for-3.9/core' of git://git.kernel.dk/linux-block: (39 commits)
block: remove redundant check to bd_openers()
block: use i_size_write() in bd_set_size()
cfq: fix lock imbalance with failed allocations
drivers/block/swim3.c: fix null pointer dereference
block: don't select PERCPU_RWSEM
block: account iowait time when waiting for completion of IO request
sched: add wait_for_completion_io[_timeout]
writeback: add more tracepoints
block: add block_{touch|dirty}_buffer tracepoint
buffer: make touch_buffer() an exported function
block: add @req to bio_{front|back}_merge tracepoints
block: add missing block_bio_complete() tracepoint
block: Remove should_sort judgement when flush blk_plug
block,elevator: use new hashtable implementation
cfq-iosched: add hierarchical cfq_group statistics
cfq-iosched: collect stats from dead cfqgs
cfq-iosched: separate out cfqg_stats_reset() from cfq_pd_reset_stats()
blkcg: make blkcg_print_blkgs() grab q locks instead of blkcg lock
block: RCU free request_queue
blkcg: implement blkg_[rw]stat_recursive_sum() and blkg_[rw]stat_merge()
...
Diffstat (limited to 'block/elevator.c')
-rw-r--r-- | block/elevator.c | 23 |
1 files changed, 4 insertions, 19 deletions
diff --git a/block/elevator.c b/block/elevator.c index d0acb31cc083..a0ffdd943c98 100644 --- a/block/elevator.c +++ b/block/elevator.c | |||
@@ -46,11 +46,6 @@ static LIST_HEAD(elv_list); | |||
46 | /* | 46 | /* |
47 | * Merge hash stuff. | 47 | * Merge hash stuff. |
48 | */ | 48 | */ |
49 | static const int elv_hash_shift = 6; | ||
50 | #define ELV_HASH_BLOCK(sec) ((sec) >> 3) | ||
51 | #define ELV_HASH_FN(sec) \ | ||
52 | (hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift)) | ||
53 | #define ELV_HASH_ENTRIES (1 << elv_hash_shift) | ||
54 | #define rq_hash_key(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq)) | 49 | #define rq_hash_key(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq)) |
55 | 50 | ||
56 | /* | 51 | /* |
@@ -158,7 +153,6 @@ static struct elevator_queue *elevator_alloc(struct request_queue *q, | |||
158 | struct elevator_type *e) | 153 | struct elevator_type *e) |
159 | { | 154 | { |
160 | struct elevator_queue *eq; | 155 | struct elevator_queue *eq; |
161 | int i; | ||
162 | 156 | ||
163 | eq = kmalloc_node(sizeof(*eq), GFP_KERNEL | __GFP_ZERO, q->node); | 157 | eq = kmalloc_node(sizeof(*eq), GFP_KERNEL | __GFP_ZERO, q->node); |
164 | if (unlikely(!eq)) | 158 | if (unlikely(!eq)) |
@@ -167,14 +161,7 @@ static struct elevator_queue *elevator_alloc(struct request_queue *q, | |||
167 | eq->type = e; | 161 | eq->type = e; |
168 | kobject_init(&eq->kobj, &elv_ktype); | 162 | kobject_init(&eq->kobj, &elv_ktype); |
169 | mutex_init(&eq->sysfs_lock); | 163 | mutex_init(&eq->sysfs_lock); |
170 | 164 | hash_init(eq->hash); | |
171 | eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES, | ||
172 | GFP_KERNEL, q->node); | ||
173 | if (!eq->hash) | ||
174 | goto err; | ||
175 | |||
176 | for (i = 0; i < ELV_HASH_ENTRIES; i++) | ||
177 | INIT_HLIST_HEAD(&eq->hash[i]); | ||
178 | 165 | ||
179 | return eq; | 166 | return eq; |
180 | err: | 167 | err: |
@@ -189,7 +176,6 @@ static void elevator_release(struct kobject *kobj) | |||
189 | 176 | ||
190 | e = container_of(kobj, struct elevator_queue, kobj); | 177 | e = container_of(kobj, struct elevator_queue, kobj); |
191 | elevator_put(e->type); | 178 | elevator_put(e->type); |
192 | kfree(e->hash); | ||
193 | kfree(e); | 179 | kfree(e); |
194 | } | 180 | } |
195 | 181 | ||
@@ -261,7 +247,7 @@ EXPORT_SYMBOL(elevator_exit); | |||
261 | 247 | ||
262 | static inline void __elv_rqhash_del(struct request *rq) | 248 | static inline void __elv_rqhash_del(struct request *rq) |
263 | { | 249 | { |
264 | hlist_del_init(&rq->hash); | 250 | hash_del(&rq->hash); |
265 | } | 251 | } |
266 | 252 | ||
267 | static void elv_rqhash_del(struct request_queue *q, struct request *rq) | 253 | static void elv_rqhash_del(struct request_queue *q, struct request *rq) |
@@ -275,7 +261,7 @@ static void elv_rqhash_add(struct request_queue *q, struct request *rq) | |||
275 | struct elevator_queue *e = q->elevator; | 261 | struct elevator_queue *e = q->elevator; |
276 | 262 | ||
277 | BUG_ON(ELV_ON_HASH(rq)); | 263 | BUG_ON(ELV_ON_HASH(rq)); |
278 | hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]); | 264 | hash_add(e->hash, &rq->hash, rq_hash_key(rq)); |
279 | } | 265 | } |
280 | 266 | ||
281 | static void elv_rqhash_reposition(struct request_queue *q, struct request *rq) | 267 | static void elv_rqhash_reposition(struct request_queue *q, struct request *rq) |
@@ -287,11 +273,10 @@ static void elv_rqhash_reposition(struct request_queue *q, struct request *rq) | |||
287 | static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset) | 273 | static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset) |
288 | { | 274 | { |
289 | struct elevator_queue *e = q->elevator; | 275 | struct elevator_queue *e = q->elevator; |
290 | struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)]; | ||
291 | struct hlist_node *next; | 276 | struct hlist_node *next; |
292 | struct request *rq; | 277 | struct request *rq; |
293 | 278 | ||
294 | hlist_for_each_entry_safe(rq, next, hash_list, hash) { | 279 | hash_for_each_possible_safe(e->hash, rq, next, hash, offset) { |
295 | BUG_ON(!ELV_ON_HASH(rq)); | 280 | BUG_ON(!ELV_ON_HASH(rq)); |
296 | 281 | ||
297 | if (unlikely(!rq_mergeable(rq))) { | 282 | if (unlikely(!rq_mergeable(rq))) { |