Diffstat (limited to 'block')
-rw-r--r--  block/bio.c          8
-rw-r--r--  block/blk-cgroup.c   9
-rw-r--r--  block/blk-cgroup.h  21
-rw-r--r--  block/blk-core.c     3
-rw-r--r--  block/blk-flush.c   38
-rw-r--r--  block/blk-merge.c   10
-rw-r--r--  block/blk-mq-tag.c  59
-rw-r--r--  block/blk-mq-tag.h   2
-rw-r--r--  block/blk-mq.c      13
-rw-r--r--  block/blk.h          1
-rw-r--r--  block/elevator.c    22
11 files changed, 75 insertions, 111 deletions
diff --git a/block/bio.c b/block/bio.c
index 8c2e55e39a1b..0ec61c9e536c 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -746,6 +746,14 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
 
                         goto done;
                 }
+
+                /*
+                 * If the queue doesn't support SG gaps and adding this
+                 * offset would create a gap, disallow it.
+                 */
+                if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS) &&
+                    bvec_gap_to_prev(prev, offset))
+                        return 0;
         }
 
         if (bio->bi_vcnt >= bio->bi_max_vecs)
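Note on the SG_GAPS check added above: bvec_gap_to_prev() decides whether appending a segment at a given offset would leave a hole that a gap-intolerant controller cannot cover with a single scatter/gather entry. The sketch below is a minimal userspace illustration of that predicate, assuming its common form (a gap exists when the previous vector does not end on a page boundary or the new segment does not start at offset zero); the struct, PAGE_SIZE value, and function name here are stand-ins, not the kernel's exact definitions.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096u            /* assumed page size for this sketch */

struct bio_vec_sketch {            /* stand-in for the kernel's struct bio_vec */
        unsigned int bv_offset;    /* offset of the segment within its page */
        unsigned int bv_len;       /* length of the segment in bytes */
};

/*
 * A gap-intolerant HBA needs consecutive segments to be virtually contiguous:
 * the previous segment must run to the end of its page and the next one must
 * start at the beginning of a page.
 */
static bool gap_to_prev(const struct bio_vec_sketch *prev, unsigned int offset)
{
        return offset || ((prev->bv_offset + prev->bv_len) & (PAGE_SIZE - 1));
}

int main(void)
{
        struct bio_vec_sketch full_tail  = { .bv_offset = 0, .bv_len = PAGE_SIZE };
        struct bio_vec_sketch short_tail = { .bv_offset = 0, .bv_len = 512 };

        printf("page-aligned tail, offset 0   -> gap: %d\n", gap_to_prev(&full_tail, 0));
        printf("short tail, offset 0          -> gap: %d\n", gap_to_prev(&short_tail, 0));
        printf("page-aligned tail, offset 256 -> gap: %d\n", gap_to_prev(&full_tail, 256));
        return 0;
}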
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 069bc202ffe3..b9f4cc494ece 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -80,7 +80,7 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
         blkg->q = q;
         INIT_LIST_HEAD(&blkg->q_node);
         blkg->blkcg = blkcg;
-        blkg->refcnt = 1;
+        atomic_set(&blkg->refcnt, 1);
 
         /* root blkg uses @q->root_rl, init rl only for !root blkgs */
         if (blkcg != &blkcg_root) {
@@ -399,11 +399,8 @@ void __blkg_release_rcu(struct rcu_head *rcu_head)
 
         /* release the blkcg and parent blkg refs this blkg has been holding */
         css_put(&blkg->blkcg->css);
-        if (blkg->parent) {
-                spin_lock_irq(blkg->q->queue_lock);
+        if (blkg->parent)
                 blkg_put(blkg->parent);
-                spin_unlock_irq(blkg->q->queue_lock);
-        }
 
         blkg_free(blkg);
 }
@@ -1093,7 +1090,7 @@ EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
  * Register @pol with blkcg core. Might sleep and @pol may be modified on
  * successful registration. Returns 0 on success and -errno on failure.
  */
-int __init blkcg_policy_register(struct blkcg_policy *pol)
+int blkcg_policy_register(struct blkcg_policy *pol)
 {
         int i, ret;
 
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index cbb7f943f78a..d3fd7aa3d2a3 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -18,6 +18,7 @@
 #include <linux/seq_file.h>
 #include <linux/radix-tree.h>
 #include <linux/blkdev.h>
+#include <linux/atomic.h>
 
 /* Max limits for throttle policy */
 #define THROTL_IOPS_MAX         UINT_MAX
@@ -104,7 +105,7 @@ struct blkcg_gq {
         struct request_list             rl;
 
         /* reference count */
-        int                             refcnt;
+        atomic_t                        refcnt;
 
         /* is this blkg online? protected by both blkcg and q locks */
         bool                            online;
@@ -145,7 +146,7 @@ void blkcg_drain_queue(struct request_queue *q);
 void blkcg_exit_queue(struct request_queue *q);
 
 /* Blkio controller policy registration */
-int __init blkcg_policy_register(struct blkcg_policy *pol);
+int blkcg_policy_register(struct blkcg_policy *pol);
 void blkcg_policy_unregister(struct blkcg_policy *pol);
 int blkcg_activate_policy(struct request_queue *q,
                           const struct blkcg_policy *pol);
@@ -257,13 +258,12 @@ static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
  * blkg_get - get a blkg reference
  * @blkg: blkg to get
  *
- * The caller should be holding queue_lock and an existing reference.
+ * The caller should be holding an existing reference.
  */
 static inline void blkg_get(struct blkcg_gq *blkg)
 {
-        lockdep_assert_held(blkg->q->queue_lock);
-        WARN_ON_ONCE(!blkg->refcnt);
-        blkg->refcnt++;
+        WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
+        atomic_inc(&blkg->refcnt);
 }
 
 void __blkg_release_rcu(struct rcu_head *rcu);
@@ -271,14 +271,11 @@ void __blkg_release_rcu(struct rcu_head *rcu);
 /**
  * blkg_put - put a blkg reference
  * @blkg: blkg to put
- *
- * The caller should be holding queue_lock.
  */
 static inline void blkg_put(struct blkcg_gq *blkg)
 {
-        lockdep_assert_held(blkg->q->queue_lock);
-        WARN_ON_ONCE(blkg->refcnt <= 0);
-        if (!--blkg->refcnt)
+        WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
+        if (atomic_dec_and_test(&blkg->refcnt))
                 call_rcu(&blkg->rcu_head, __blkg_release_rcu);
 }
 
@@ -580,7 +577,7 @@ static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { ret
 static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
 static inline void blkcg_drain_queue(struct request_queue *q) { }
 static inline void blkcg_exit_queue(struct request_queue *q) { }
-static inline int __init blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
+static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
 static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
 static inline int blkcg_activate_policy(struct request_queue *q,
                                         const struct blkcg_policy *pol) { return 0; }
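Note on the blk-cgroup hunks above: they convert blkg->refcnt from a queue_lock-protected int into an atomic_t, which is what lets blkg_put() run without the queue lock and lets __blkg_release_rcu() drop the parent reference directly. The following is a small userspace sketch of the same get/put pattern using C11 atomics; the struct name and release step are illustrative stand-ins, not the kernel's API.

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative object with an atomic reference count (cf. struct blkcg_gq). */
struct obj {
        atomic_int refcnt;
};

static struct obj *obj_new(void)
{
        struct obj *o = malloc(sizeof(*o));
        atomic_init(&o->refcnt, 1);            /* creator holds the first reference */
        return o;
}

static void obj_get(struct obj *o)
{
        assert(atomic_load(&o->refcnt) > 0);   /* mirrors WARN_ON_ONCE(refcnt <= 0) */
        atomic_fetch_add(&o->refcnt, 1);       /* no external lock needed */
}

static void obj_put(struct obj *o)
{
        assert(atomic_load(&o->refcnt) > 0);
        if (atomic_fetch_sub(&o->refcnt, 1) == 1) {
                /* last reference dropped; the kernel defers this via call_rcu() */
                printf("releasing object\n");
                free(o);
        }
}

int main(void)
{
        struct obj *o = obj_new();
        obj_get(o);     /* take a second reference */
        obj_put(o);     /* drop it */
        obj_put(o);     /* drop the last one -> release */
        return 0;
}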
diff --git a/block/blk-core.c b/block/blk-core.c
index f6f6b9af3e3f..6f8dba161bfe 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -3312,8 +3312,7 @@ int __init blk_dev_init(void)
 
         /* used for unplugging and affects IO latency/throughput - HIGHPRI */
         kblockd_workqueue = alloc_workqueue("kblockd",
-                                            WQ_MEM_RECLAIM | WQ_HIGHPRI |
-                                            WQ_POWER_EFFICIENT, 0);
+                                            WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
         if (!kblockd_workqueue)
                 panic("Failed to create kblockd\n");
 
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 8ffee4b5f93d..3cb5e9e7108a 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -422,44 +422,6 @@ void blk_insert_flush(struct request *rq)
 }
 
 /**
- * blk_abort_flushes - @q is being aborted, abort flush requests
- * @q: request_queue being aborted
- *
- * To be called from elv_abort_queue(). @q is being aborted. Prepare all
- * FLUSH/FUA requests for abortion.
- *
- * CONTEXT:
- * spin_lock_irq(q->queue_lock)
- */
-void blk_abort_flushes(struct request_queue *q)
-{
-        struct request *rq, *n;
-        int i;
-
-        /*
-         * Requests in flight for data are already owned by the dispatch
-         * queue or the device driver. Just restore for normal completion.
-         */
-        list_for_each_entry_safe(rq, n, &q->flush_data_in_flight, flush.list) {
-                list_del_init(&rq->flush.list);
-                blk_flush_restore_request(rq);
-        }
-
-        /*
-         * We need to give away requests on flush queues. Restore for
-         * normal completion and put them on the dispatch queue.
-         */
-        for (i = 0; i < ARRAY_SIZE(q->flush_queue); i++) {
-                list_for_each_entry_safe(rq, n, &q->flush_queue[i],
-                                         flush.list) {
-                        list_del_init(&rq->flush.list);
-                        blk_flush_restore_request(rq);
-                        list_add_tail(&rq->queuelist, &q->queue_head);
-                }
-        }
-}
-
-/**
  * blkdev_issue_flush - queue a flush
  * @bdev: blockdev to issue flush for
  * @gfp_mask: memory allocation flags (for bio_alloc)
diff --git a/block/blk-merge.c b/block/blk-merge.c
index b3bf0df0f4c2..54535831f1e1 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -568,6 +568,8 @@ int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
 
 bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
 {
+        struct request_queue *q = rq->q;
+
         if (!rq_mergeable(rq) || !bio_mergeable(bio))
                 return false;
 
@@ -591,6 +593,14 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
             !blk_write_same_mergeable(rq->bio, bio))
                 return false;
 
+        if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS)) {
+                struct bio_vec *bprev;
+
+                bprev = &rq->biotail->bi_io_vec[bio->bi_vcnt - 1];
+                if (bvec_gap_to_prev(bprev, bio->bi_io_vec[0].bv_offset))
+                        return false;
+        }
+
         return true;
 }
 
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 1aab39f71d95..c1b92426c95e 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -43,9 +43,16 @@ bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
         return bt_has_free_tags(&tags->bitmap_tags);
 }
 
-static inline void bt_index_inc(unsigned int *index)
+static inline int bt_index_inc(int index)
 {
-        *index = (*index + 1) & (BT_WAIT_QUEUES - 1);
+        return (index + 1) & (BT_WAIT_QUEUES - 1);
+}
+
+static inline void bt_index_atomic_inc(atomic_t *index)
+{
+        int old = atomic_read(index);
+        int new = bt_index_inc(old);
+        atomic_cmpxchg(index, old, new);
 }
 
 /*
@@ -69,14 +76,14 @@ static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags)
         int i, wake_index;
 
         bt = &tags->bitmap_tags;
-        wake_index = bt->wake_index;
+        wake_index = atomic_read(&bt->wake_index);
         for (i = 0; i < BT_WAIT_QUEUES; i++) {
                 struct bt_wait_state *bs = &bt->bs[wake_index];
 
                 if (waitqueue_active(&bs->wait))
                         wake_up(&bs->wait);
 
-                bt_index_inc(&wake_index);
+                wake_index = bt_index_inc(wake_index);
         }
 }
 
@@ -212,12 +219,14 @@ static struct bt_wait_state *bt_wait_ptr(struct blk_mq_bitmap_tags *bt,
                                          struct blk_mq_hw_ctx *hctx)
 {
         struct bt_wait_state *bs;
+        int wait_index;
 
         if (!hctx)
                 return &bt->bs[0];
 
-        bs = &bt->bs[hctx->wait_index];
-        bt_index_inc(&hctx->wait_index);
+        wait_index = atomic_read(&hctx->wait_index);
+        bs = &bt->bs[wait_index];
+        bt_index_atomic_inc(&hctx->wait_index);
         return bs;
 }
 
@@ -239,18 +248,12 @@ static int bt_get(struct blk_mq_alloc_data *data,
 
         bs = bt_wait_ptr(bt, hctx);
         do {
-                bool was_empty;
-
-                was_empty = list_empty(&wait.task_list);
                 prepare_to_wait(&bs->wait, &wait, TASK_UNINTERRUPTIBLE);
 
                 tag = __bt_get(hctx, bt, last_tag);
                 if (tag != -1)
                         break;
 
-                if (was_empty)
-                        atomic_set(&bs->wait_cnt, bt->wake_cnt);
-
                 blk_mq_put_ctx(data->ctx);
 
                 io_schedule();
@@ -313,18 +316,19 @@ static struct bt_wait_state *bt_wake_ptr(struct blk_mq_bitmap_tags *bt)
 {
         int i, wake_index;
 
-        wake_index = bt->wake_index;
+        wake_index = atomic_read(&bt->wake_index);
         for (i = 0; i < BT_WAIT_QUEUES; i++) {
                 struct bt_wait_state *bs = &bt->bs[wake_index];
 
                 if (waitqueue_active(&bs->wait)) {
-                        if (wake_index != bt->wake_index)
-                                bt->wake_index = wake_index;
+                        int o = atomic_read(&bt->wake_index);
+                        if (wake_index != o)
+                                atomic_cmpxchg(&bt->wake_index, o, wake_index);
 
                         return bs;
                 }
 
-                bt_index_inc(&wake_index);
+                wake_index = bt_index_inc(wake_index);
         }
 
         return NULL;
@@ -334,6 +338,7 @@ static void bt_clear_tag(struct blk_mq_bitmap_tags *bt, unsigned int tag)
 {
         const int index = TAG_TO_INDEX(bt, tag);
         struct bt_wait_state *bs;
+        int wait_cnt;
 
         /*
          * The unlock memory barrier need to order access to req in free
@@ -342,10 +347,19 @@ static void bt_clear_tag(struct blk_mq_bitmap_tags *bt, unsigned int tag)
         clear_bit_unlock(TAG_TO_BIT(bt, tag), &bt->map[index].word);
 
         bs = bt_wake_ptr(bt);
-        if (bs && atomic_dec_and_test(&bs->wait_cnt)) {
-                atomic_set(&bs->wait_cnt, bt->wake_cnt);
-                bt_index_inc(&bt->wake_index);
+        if (!bs)
+                return;
+
+        wait_cnt = atomic_dec_return(&bs->wait_cnt);
+        if (wait_cnt == 0) {
+wake:
+                atomic_add(bt->wake_cnt, &bs->wait_cnt);
+                bt_index_atomic_inc(&bt->wake_index);
                 wake_up(&bs->wait);
+        } else if (wait_cnt < 0) {
+                wait_cnt = atomic_inc_return(&bs->wait_cnt);
+                if (!wait_cnt)
+                        goto wake;
         }
 }
 
@@ -499,10 +513,13 @@ static int bt_alloc(struct blk_mq_bitmap_tags *bt, unsigned int depth,
                 return -ENOMEM;
         }
 
-        for (i = 0; i < BT_WAIT_QUEUES; i++)
+        bt_update_count(bt, depth);
+
+        for (i = 0; i < BT_WAIT_QUEUES; i++) {
                 init_waitqueue_head(&bt->bs[i].wait);
+                atomic_set(&bt->bs[i].wait_cnt, bt->wake_cnt);
+        }
 
-        bt_update_count(bt, depth);
         return 0;
 }
 
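Note on the tag-wakeup rework above: plain integer updates of wake_index/wait_index are replaced with an atomic read plus a compare-and-swap, so concurrent updaters either advance the index once or lose the race harmlessly. Below is a userspace sketch of that bt_index_atomic_inc() pattern with C11 atomics; BT_WAIT_QUEUES is assumed to be a power of two, as in the kernel, and the function names are illustrative.

#include <stdatomic.h>
#include <stdio.h>

#define BT_WAIT_QUEUES 8    /* must be a power of two for the mask to work */

/* Pure helper: next index with wrap-around (cf. bt_index_inc()). */
static int index_inc(int index)
{
        return (index + 1) & (BT_WAIT_QUEUES - 1);
}

/*
 * Lock-free advance (cf. bt_index_atomic_inc()): read the current value,
 * compute its successor, and try to install it.  If another thread advanced
 * the index in the meantime the cmpxchg fails, which is fine -- the index
 * only has to move forward roughly once per wake-up, not exactly once.
 */
static void index_atomic_inc(atomic_int *index)
{
        int old = atomic_load(index);
        int new = index_inc(old);
        atomic_compare_exchange_strong(index, &old, new);
}

int main(void)
{
        atomic_int wake_index = 0;

        for (int i = 0; i < 10; i++)
                index_atomic_inc(&wake_index);

        /* 10 increments modulo 8 -> 2 */
        printf("wake_index = %d\n", atomic_load(&wake_index));
        return 0;
}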
diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h
index 98696a65d4d4..6206ed17ef76 100644
--- a/block/blk-mq-tag.h
+++ b/block/blk-mq-tag.h
@@ -24,7 +24,7 @@ struct blk_mq_bitmap_tags {
         unsigned int map_nr;
         struct blk_align_bitmap *map;
 
-        unsigned int wake_index;
+        atomic_t wake_index;
         struct bt_wait_state *bs;
 };
 
diff --git a/block/blk-mq.c b/block/blk-mq.c
index e11f5f8e0313..ad69ef657e85 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -109,7 +109,7 @@ static void blk_mq_queue_exit(struct request_queue *q)
         __percpu_counter_add(&q->mq_usage_counter, -1, 1000000);
 }
 
-static void __blk_mq_drain_queue(struct request_queue *q)
+void blk_mq_drain_queue(struct request_queue *q)
 {
         while (true) {
                 s64 count;
@@ -120,7 +120,7 @@ static void __blk_mq_drain_queue(struct request_queue *q)
 
                 if (count == 0)
                         break;
-                blk_mq_run_queues(q, false);
+                blk_mq_start_hw_queues(q);
                 msleep(10);
         }
 }
@@ -139,12 +139,7 @@ static void blk_mq_freeze_queue(struct request_queue *q)
         spin_unlock_irq(q->queue_lock);
 
         if (drain)
-                __blk_mq_drain_queue(q);
-}
-
-void blk_mq_drain_queue(struct request_queue *q)
-{
-        __blk_mq_drain_queue(q);
+                blk_mq_drain_queue(q);
 }
 
 static void blk_mq_unfreeze_queue(struct request_queue *q)
@@ -883,7 +878,7 @@ void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
         clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
 
         preempt_disable();
-        __blk_mq_run_hw_queue(hctx);
+        blk_mq_run_hw_queue(hctx, false);
         preempt_enable();
 }
 EXPORT_SYMBOL(blk_mq_start_hw_queue);
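Note on the blk-mq.c hunks above: __blk_mq_drain_queue() is folded into blk_mq_drain_queue(), and the drain loop now kicks the hardware queues via blk_mq_start_hw_queues() until the per-queue usage counter reaches zero. The drain idiom itself is poll, kick, sleep, repeat; here is a minimal userspace sketch of it, where the counter and the kick function are stand-ins for the kernel's percpu counter and queue-run machinery.

#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

/* Stand-in for q->mq_usage_counter: number of requests still in flight. */
static atomic_int usage_counter = 3;

/* Stand-in for blk_mq_start_hw_queues(): nudge the queues to make progress. */
static void kick_hw_queues(void)
{
        /* pretend one in-flight request completes per kick */
        if (atomic_load(&usage_counter) > 0)
                atomic_fetch_sub(&usage_counter, 1);
}

/* Drain idiom from blk_mq_drain_queue(): poll, kick, sleep, repeat. */
static void drain_queue(void)
{
        for (;;) {
                int count = atomic_load(&usage_counter);

                if (count == 0)
                        break;
                kick_hw_queues();
                usleep(10 * 1000);      /* cf. msleep(10) */
        }
}

int main(void)
{
        drain_queue();
        printf("queue drained, in flight = %d\n", atomic_load(&usage_counter));
        return 0;
}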
diff --git a/block/blk.h b/block/blk.h
index 45385e9abf6f..6748c4f8d7a1 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -84,7 +84,6 @@ static inline void blk_clear_rq_complete(struct request *rq)
 #define ELV_ON_HASH(rq) ((rq)->cmd_flags & REQ_HASHED)
 
 void blk_insert_flush(struct request *rq);
-void blk_abort_flushes(struct request_queue *q);
 
 static inline struct request *__elv_next_request(struct request_queue *q)
 {
diff --git a/block/elevator.c b/block/elevator.c
index f35edddfe9b5..24c28b659bb3 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -729,26 +729,6 @@ int elv_may_queue(struct request_queue *q, int rw)
         return ELV_MQUEUE_MAY;
 }
 
-void elv_abort_queue(struct request_queue *q)
-{
-        struct request *rq;
-
-        blk_abort_flushes(q);
-
-        while (!list_empty(&q->queue_head)) {
-                rq = list_entry_rq(q->queue_head.next);
-                rq->cmd_flags |= REQ_QUIET;
-                trace_block_rq_abort(q, rq);
-                /*
-                 * Mark this request as started so we don't trigger
-                 * any debug logic in the end I/O path.
-                 */
-                blk_start_request(rq);
-                __blk_end_request_all(rq, -EIO);
-        }
-}
-EXPORT_SYMBOL(elv_abort_queue);
-
 void elv_completed_request(struct request_queue *q, struct request *rq)
 {
         struct elevator_queue *e = q->elevator;
@@ -845,7 +825,7 @@ void elv_unregister_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL(elv_unregister_queue);
 
-int __init elv_register(struct elevator_type *e)
+int elv_register(struct elevator_type *e)
 {
         char *def = "";
 