Diffstat (limited to 'drivers')
42 files changed, 101 insertions, 591 deletions
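The patch below (limited to drivers/) strips out the old per-queue plugging machinery: blk_plug_device()/blk_unplug() calls, per-queue unplug_fn callbacks, and the REQ_UNPLUG bio flag. Drivers that used plugging to pace their queues now call blk_delay_queue() for a timed retry (ide-cd, dm), and code that batched submissions moves to the on-stack plug API (dm-kcopyd). A minimal sketch of that on-stack pattern, assuming a caller holding an array of prepared bios; issue_batch() is an illustrative helper, not part of this patch, while blk_start_plug(), blk_finish_plug() and generic_make_request() are the real block-layer calls visible in the hunks below:

	#include <linux/blkdev.h>

	/* Batch a burst of bios behind an on-stack plug, then flush them. */
	static void issue_batch(struct bio **bios, int n)
	{
		struct blk_plug plug;
		int i;

		blk_start_plug(&plug);	/* I/O from this task is now held back */
		for (i = 0; i < n; i++)
			generic_make_request(bios[i]);	/* queue each bio */
		blk_finish_plug(&plug);	/* dispatch the accumulated batch */
	}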
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 9279272b3732..35658f445fca 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -3170,12 +3170,6 @@ static void do_cciss_request(struct request_queue *q)
 	int sg_index = 0;
 	int chained = 0;
 
-	/* We call start_io here in case there is a command waiting on the
-	 * queue that has not been sent.
-	 */
-	if (blk_queue_plugged(q))
-		goto startio;
-
 queue:
 	creq = blk_peek_request(q);
 	if (!creq)
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index 946dad4caef3..b2fceb53e809 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -911,9 +911,6 @@ static void do_ida_request(struct request_queue *q)
 	struct scatterlist tmp_sg[SG_MAX];
 	int i, dir, seg;
 
-	if (blk_queue_plugged(q))
-		goto startio;
-
 queue_next:
 	creq = blk_peek_request(q);
 	if (!creq)
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index ba95cba192be..aca302492ff2 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -80,7 +80,7 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
 
 	if ((rw & WRITE) && !test_bit(MD_NO_FUA, &mdev->flags))
 		rw |= REQ_FUA;
-	rw |= REQ_UNPLUG | REQ_SYNC;
+	rw |= REQ_SYNC;
 
 	bio = bio_alloc(GFP_NOIO, 1);
 	bio->bi_bdev = bdev->md_bdev;
@@ -689,8 +689,6 @@ void drbd_al_to_on_disk_bm(struct drbd_conf *mdev)
 		}
 	}
 
-	drbd_blk_run_queue(bdev_get_queue(mdev->ldev->md_bdev));
-
 	/* always (try to) flush bitmap to stable storage */
 	drbd_md_flush(mdev);
 
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index fd42832f785b..0645ca829a94 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -840,7 +840,6 @@ static int bm_rw(struct drbd_conf *mdev, int rw) __must_hold(local)
 	for (i = 0; i < num_pages; i++)
 		bm_page_io_async(mdev, b, i, rw);
 
-	drbd_blk_run_queue(bdev_get_queue(mdev->ldev->md_bdev));
 	wait_event(b->bm_io_wait, atomic_read(&b->bm_async_io) == 0);
 
 	if (test_bit(BM_MD_IO_ERROR, &b->bm_flags)) {
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 3803a0348937..b0bd27dfc1e8 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -377,7 +377,7 @@ union p_header {
 #define DP_HARDBARRIER	      1 /* depricated */
 #define DP_RW_SYNC	      2 /* equals REQ_SYNC    */
 #define DP_MAY_SET_IN_SYNC    4
-#define DP_UNPLUG	      8 /* equals REQ_UNPLUG  */
+#define DP_UNPLUG	      8 /* not used anymore   */
 #define DP_FUA		     16 /* equals REQ_FUA     */
 #define DP_FLUSH	     32 /* equals REQ_FLUSH   */
 #define DP_DISCARD	     64 /* equals REQ_DISCARD */
@@ -2382,20 +2382,6 @@ static inline int drbd_queue_order_type(struct drbd_conf *mdev)
 	return QUEUE_ORDERED_NONE;
 }
 
-static inline void drbd_blk_run_queue(struct request_queue *q)
-{
-	if (q && q->unplug_fn)
-		q->unplug_fn(q);
-}
-
-static inline void drbd_kick_lo(struct drbd_conf *mdev)
-{
-	if (get_ldev(mdev)) {
-		drbd_blk_run_queue(bdev_get_queue(mdev->ldev->backing_bdev));
-		put_ldev(mdev);
-	}
-}
-
 static inline void drbd_md_flush(struct drbd_conf *mdev)
 {
 	int r;
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 29cd0dc9fe4f..8a43ce0edeed 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2477,12 +2477,11 @@ static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
 {
 	if (mdev->agreed_pro_version >= 95)
 		return  (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
-			(bi_rw & REQ_UNPLUG ? DP_UNPLUG : 0) |
 			(bi_rw & REQ_FUA ? DP_FUA : 0) |
 			(bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
 			(bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
 	else
-		return bi_rw & (REQ_SYNC | REQ_UNPLUG) ? DP_RW_SYNC : 0;
+		return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
 }
 
 /* Used to send write requests
@@ -2719,35 +2718,6 @@ static int drbd_release(struct gendisk *gd, fmode_t mode)
 	return 0;
 }
 
-static void drbd_unplug_fn(struct request_queue *q)
-{
-	struct drbd_conf *mdev = q->queuedata;
-
-	/* unplug FIRST */
-	spin_lock_irq(q->queue_lock);
-	blk_remove_plug(q);
-	spin_unlock_irq(q->queue_lock);
-
-	/* only if connected */
-	spin_lock_irq(&mdev->req_lock);
-	if (mdev->state.pdsk >= D_INCONSISTENT && mdev->state.conn >= C_CONNECTED) {
-		D_ASSERT(mdev->state.role == R_PRIMARY);
-		if (test_and_clear_bit(UNPLUG_REMOTE, &mdev->flags)) {
-			/* add to the data.work queue,
-			 * unless already queued.
-			 * XXX this might be a good addition to drbd_queue_work
-			 * anyways, to detect "double queuing" ... */
-			if (list_empty(&mdev->unplug_work.list))
-				drbd_queue_work(&mdev->data.work,
-						&mdev->unplug_work);
-		}
-	}
-	spin_unlock_irq(&mdev->req_lock);
-
-	if (mdev->state.disk >= D_INCONSISTENT)
-		drbd_kick_lo(mdev);
-}
-
 static void drbd_set_defaults(struct drbd_conf *mdev)
 {
 	/* This way we get a compile error when sync_conf grows,
@@ -3222,9 +3192,7 @@ struct drbd_conf *drbd_new_device(unsigned int minor)
 	blk_queue_max_segment_size(q, DRBD_MAX_SEGMENT_SIZE);
 	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
 	blk_queue_merge_bvec(q, drbd_merge_bvec);
-	q->queue_lock = &mdev->req_lock; /* needed since we use */
-	/* plugging on a queue, that actually has no requests! */
-	q->unplug_fn = drbd_unplug_fn;
+	q->queue_lock = &mdev->req_lock;
 
 	mdev->md_io_page = alloc_page(GFP_KERNEL);
 	if (!mdev->md_io_page)
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 24487d4fb202..8e68be939deb 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -187,15 +187,6 @@ static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int
 	return NULL;
 }
 
-/* kick lower level device, if we have more than (arbitrary number)
- * reference counts on it, which typically are locally submitted io
- * requests. don't use unacked_cnt, so we speed up proto A and B, too. */
-static void maybe_kick_lo(struct drbd_conf *mdev)
-{
-	if (atomic_read(&mdev->local_cnt) >= mdev->net_conf->unplug_watermark)
-		drbd_kick_lo(mdev);
-}
-
 static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed)
 {
 	struct drbd_epoch_entry *e;
@@ -219,7 +210,6 @@ static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
 	LIST_HEAD(reclaimed);
 	struct drbd_epoch_entry *e, *t;
 
-	maybe_kick_lo(mdev);
 	spin_lock_irq(&mdev->req_lock);
 	reclaim_net_ee(mdev, &reclaimed);
 	spin_unlock_irq(&mdev->req_lock);
@@ -436,8 +426,7 @@ void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
 	while (!list_empty(head)) {
 		prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
 		spin_unlock_irq(&mdev->req_lock);
-		drbd_kick_lo(mdev);
-		schedule();
+		io_schedule();
 		finish_wait(&mdev->ee_wait, &wait);
 		spin_lock_irq(&mdev->req_lock);
 	}
@@ -1111,8 +1100,6 @@ next_bio:
 	/* > e->sector, unless this is the first bio */
 	bio->bi_sector = sector;
 	bio->bi_bdev = mdev->ldev->backing_bdev;
-	/* we special case some flags in the multi-bio case, see below
-	 * (REQ_UNPLUG) */
 	bio->bi_rw = rw;
 	bio->bi_private = e;
 	bio->bi_end_io = drbd_endio_sec;
@@ -1141,13 +1128,8 @@ next_bio:
 		bios = bios->bi_next;
 		bio->bi_next = NULL;
 
-		/* strip off REQ_UNPLUG unless it is the last bio */
-		if (bios)
-			bio->bi_rw &= ~REQ_UNPLUG;
-
 		drbd_generic_make_request(mdev, fault_type, bio);
 	} while (bios);
-	maybe_kick_lo(mdev);
 	return 0;
 
 fail:
@@ -1167,9 +1149,6 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
 
 	inc_unacked(mdev);
 
-	if (mdev->net_conf->wire_protocol != DRBD_PROT_C)
-		drbd_kick_lo(mdev);
-
 	mdev->current_epoch->barrier_nr = p->barrier;
 	rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);
 
@@ -1636,12 +1615,11 @@ static unsigned long write_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
 {
 	if (mdev->agreed_pro_version >= 95)
 		return  (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
-			(dpf & DP_UNPLUG ? REQ_UNPLUG : 0) |
 			(dpf & DP_FUA ? REQ_FUA : 0) |
 			(dpf & DP_FLUSH ? REQ_FUA : 0) |
 			(dpf & DP_DISCARD ? REQ_DISCARD : 0);
 	else
-		return dpf & DP_RW_SYNC ? (REQ_SYNC | REQ_UNPLUG) : 0;
+		return dpf & DP_RW_SYNC ? REQ_SYNC : 0;
 }
 
 /* mirrored write */
@@ -3556,9 +3534,6 @@ static int receive_skip(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 
 static int receive_UnplugRemote(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
 {
-	if (mdev->state.disk >= D_INCONSISTENT)
-		drbd_kick_lo(mdev);
-
 	/* Make sure we've acked all the TCP data associated
 	 * with the data requests being unplugged */
 	drbd_tcp_quickack(mdev->data.socket);
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 11a75d32a2e2..ad3fc6228f27 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -960,10 +960,6 @@ allocate_barrier:
 		bio_endio(req->private_bio, -EIO);
 	}
 
-	/* we need to plug ALWAYS since we possibly need to kick lo_dev.
-	 * we plug after submit, so we won't miss an unplug event */
-	drbd_plug_device(mdev);
-
 	return 0;
 
 fail_conflicting:
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 34f224b018b3..e027446590d3 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -792,7 +792,6 @@ int drbd_resync_finished(struct drbd_conf *mdev)
 	 * queue (or even the read operations for those packets
 	 * is not finished by now). Retry in 100ms. */
 
-	drbd_kick_lo(mdev);
 	__set_current_state(TASK_INTERRUPTIBLE);
 	schedule_timeout(HZ / 10);
 	w = kmalloc(sizeof(struct drbd_work), GFP_ATOMIC);
diff --git a/drivers/block/drbd/drbd_wrappers.h b/drivers/block/drbd/drbd_wrappers.h
index defdb5013ea3..53586fa5ae1b 100644
--- a/drivers/block/drbd/drbd_wrappers.h
+++ b/drivers/block/drbd/drbd_wrappers.h
@@ -45,24 +45,6 @@ static inline void drbd_generic_make_request(struct drbd_conf *mdev,
 	generic_make_request(bio);
 }
 
-static inline void drbd_plug_device(struct drbd_conf *mdev)
-{
-	struct request_queue *q;
-	q = bdev_get_queue(mdev->this_bdev);
-
-	spin_lock_irq(q->queue_lock);
-
-	/* XXX the check on !blk_queue_plugged is redundant,
-	 * implicitly checked in blk_plug_device */
-
-	if (!blk_queue_plugged(q)) {
-		blk_plug_device(q);
-		del_timer(&q->unplug_timer);
-		/* unplugging should not happen automatically... */
-	}
-	spin_unlock_irq(q->queue_lock);
-}
-
 static inline int drbd_crypto_is_hash(struct crypto_tfm *tfm)
 {
 	return (crypto_tfm_alg_type(tfm) & CRYPTO_ALG_TYPE_HASH_MASK)
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 3851dbc03e05..301d7a9a41a6 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3838,7 +3838,6 @@ static int __floppy_read_block_0(struct block_device *bdev)
 	bio.bi_end_io = floppy_rb0_complete;
 
 	submit_bio(READ, &bio);
-	generic_unplug_device(bdev_get_queue(bdev));
 	process_fd_request();
 	wait_for_completion(&complete);
 
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 79c3079a6203..a076a14ca72d 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -540,17 +540,6 @@ out:
 	return 0;
 }
 
-/*
- * kick off io on the underlying address space
- */
-static void loop_unplug(struct request_queue *q)
-{
-	struct loop_device *lo = q->queuedata;
-
-	queue_flag_clear_unlocked(QUEUE_FLAG_PLUGGED, q);
-	blk_run_address_space(lo->lo_backing_file->f_mapping);
-}
-
 struct switch_request {
 	struct file *file;
 	struct completion wait;
@@ -917,7 +906,6 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
 	 */
 	blk_queue_make_request(lo->lo_queue, loop_make_request);
 	lo->lo_queue->queuedata = lo;
-	lo->lo_queue->unplug_fn = loop_unplug;
 
 	if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
 		blk_queue_flush(lo->lo_queue, REQ_FLUSH);
@@ -1019,7 +1007,6 @@ static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
 
 	kthread_stop(lo->lo_thread);
 
-	lo->lo_queue->unplug_fn = NULL;
 	lo->lo_backing_file = NULL;
 
 	loop_release_xfer(lo);
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index a077db27b7c9..07a382eaf0a8 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -1606,8 +1606,6 @@ static int kcdrwd(void *foobar)
 				min_sleep_time = pkt->sleep_time;
 		}
 
-		generic_unplug_device(bdev_get_queue(pd->bdev));
-
 		VPRINTK("kcdrwd: sleeping\n");
 		residue = schedule_timeout(min_sleep_time);
 		VPRINTK("kcdrwd: wake up\n");
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index 9b102abe209c..031ca720d926 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -241,8 +241,7 @@ static void dump_dmastat(struct cardinfo *card, unsigned int dmastat)
  *
  * Whenever IO on the active page completes, the Ready page is activated
  * and the ex-Active page is clean out and made Ready.
- * Otherwise the Ready page is only activated when it becomes full, or
- * when mm_unplug_device is called via the unplug_io_fn.
+ * Otherwise the Ready page is only activated when it becomes full.
  *
  * If a request arrives while both pages a full, it is queued, and b_rdev is
  * overloaded to record whether it was a read or a write.
@@ -333,17 +332,6 @@ static inline void reset_page(struct mm_page *page)
 	page->biotail = &page->bio;
 }
 
-static void mm_unplug_device(struct request_queue *q)
-{
-	struct cardinfo *card = q->queuedata;
-	unsigned long flags;
-
-	spin_lock_irqsave(&card->lock, flags);
-	if (blk_remove_plug(q))
-		activate(card);
-	spin_unlock_irqrestore(&card->lock, flags);
-}
-
 /*
  * If there is room on Ready page, take
  * one bh off list and add it.
@@ -535,7 +523,6 @@ static int mm_make_request(struct request_queue *q, struct bio *bio)
 	*card->biotail = bio;
 	bio->bi_next = NULL;
 	card->biotail = &bio->bi_next;
-	blk_plug_device(q);
 	spin_unlock_irq(&card->lock);
 
 	return 0;
@@ -897,7 +884,6 @@ static int __devinit mm_pci_probe(struct pci_dev *dev,
 	blk_queue_make_request(card->queue, mm_make_request);
 	card->queue->queue_lock = &card->lock;
 	card->queue->queuedata = card;
-	card->queue->unplug_fn = mm_unplug_device;
 
 	tasklet_init(&card->tasklet, process_page, (unsigned long)card);
 
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
index e88a2cf17711..6f218e014e99 100644
--- a/drivers/ide/ide-atapi.c
+++ b/drivers/ide/ide-atapi.c
@@ -233,8 +233,7 @@ int ide_queue_sense_rq(ide_drive_t *drive, void *special)
 
 	drive->hwif->rq = NULL;
 
-	elv_add_request(drive->queue, &drive->sense_rq,
-			ELEVATOR_INSERT_FRONT, 0);
+	elv_add_request(drive->queue, &drive->sense_rq, ELEVATOR_INSERT_FRONT);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(ide_queue_sense_rq);
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index a2e29099ee0c..fd1e11799137 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -258,17 +258,10 @@ static int ide_cd_breathe(ide_drive_t *drive, struct request *rq)
 	if (time_after(jiffies, info->write_timeout))
 		return 0;
 	else {
-		struct request_queue *q = drive->queue;
-		unsigned long flags;
-
 		/*
-		 * take a breather relying on the unplug timer to kick us again
+		 * take a breather
 		 */
-
-		spin_lock_irqsave(q->queue_lock, flags);
-		blk_plug_device(q);
-		spin_unlock_irqrestore(q->queue_lock, flags);
-
+		blk_delay_queue(drive->queue, 1);
 		return 1;
 	}
 }
@@ -1514,8 +1507,6 @@ static int ide_cdrom_setup(ide_drive_t *drive)
 	blk_queue_dma_alignment(q, 31);
 	blk_queue_update_dma_pad(q, 15);
 
-	q->unplug_delay = max((1 * HZ) / 1000, 1);
-
 	drive->dev_flags |= IDE_DFLAG_MEDIA_CHANGED;
 	drive->atapi_flags = IDE_AFLAG_NO_EJECT | ide_cd_flags(id);
 
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 999dac054bcc..f4077840d3ab 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -549,8 +549,6 @@ plug_device_2:
 
 	if (rq)
 		blk_requeue_request(q, rq);
-	if (!elv_queue_empty(q))
-		blk_plug_device(q);
 }
 
 void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
@@ -562,8 +560,6 @@ void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
 
 	if (rq)
 		blk_requeue_request(q, rq);
-	if (!elv_queue_empty(q))
-		blk_plug_device(q);
 
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
diff --git a/drivers/ide/ide-park.c b/drivers/ide/ide-park.c
index 88a380c5a470..6ab9ab2a5081 100644
--- a/drivers/ide/ide-park.c
+++ b/drivers/ide/ide-park.c
@@ -52,7 +52,7 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
 	rq->cmd[0] = REQ_UNPARK_HEADS;
 	rq->cmd_len = 1;
 	rq->cmd_type = REQ_TYPE_SPECIAL;
-	elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 1);
+	elv_add_request(q, rq, ELEVATOR_INSERT_FRONT);
 
 out:
 	return;
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 9a35320fb59f..ca203cb23f3c 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -347,7 +347,7 @@ static void write_page(struct bitmap *bitmap, struct page *page, int wait)
 			atomic_inc(&bitmap->pending_writes);
 			set_buffer_locked(bh);
 			set_buffer_mapped(bh);
-			submit_bh(WRITE | REQ_UNPLUG | REQ_SYNC, bh);
+			submit_bh(WRITE | REQ_SYNC, bh);
 			bh = bh->b_this_page;
 		}
 
@@ -1339,8 +1339,7 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
 			prepare_to_wait(&bitmap->overflow_wait, &__wait,
 					TASK_UNINTERRUPTIBLE);
 			spin_unlock_irq(&bitmap->lock);
-			md_unplug(bitmap->mddev);
-			schedule();
+			io_schedule();
 			finish_wait(&bitmap->overflow_wait, &__wait);
 			continue;
 		}
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 4e054bd91664..2c62c1169f78 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -991,11 +991,6 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone)
 	clone->bi_destructor = dm_crypt_bio_destructor;
 }
 
-static void kcryptd_unplug(struct crypt_config *cc)
-{
-	blk_unplug(bdev_get_queue(cc->dev->bdev));
-}
-
 static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
 {
 	struct crypt_config *cc = io->target->private;
@@ -1008,10 +1003,8 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
 	 * one in order to decrypt the whole bio data *afterwards*.
 	 */
 	clone = bio_alloc_bioset(gfp, bio_segments(base_bio), cc->bs);
-	if (!clone) {
-		kcryptd_unplug(cc);
+	if (!clone)
 		return 1;
-	}
 
 	crypt_inc_pending(io);
 
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 136d4f71a116..76a5af00a26b 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -352,7 +352,7 @@ static void dispatch_io(int rw, unsigned int num_regions,
 	BUG_ON(num_regions > DM_IO_MAX_REGIONS);
 
 	if (sync)
-		rw |= REQ_SYNC | REQ_UNPLUG;
+		rw |= REQ_SYNC;
 
 	/*
 	 * For multiple regions we need to be careful to rewind
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 924f5f0084c2..1bb73a13ca40 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -37,13 +37,6 @@ struct dm_kcopyd_client {
 	unsigned int nr_pages;
 	unsigned int nr_free_pages;
 
-	/*
-	 * Block devices to unplug.
-	 * Non-NULL pointer means that a block device has some pending requests
-	 * and needs to be unplugged.
-	 */
-	struct block_device *unplug[2];
-
 	struct dm_io_client *io_client;
 
 	wait_queue_head_t destroyq;
@@ -315,31 +308,6 @@ static int run_complete_job(struct kcopyd_job *job)
 	return 0;
 }
 
-/*
- * Unplug the block device at the specified index.
- */
-static void unplug(struct dm_kcopyd_client *kc, int rw)
-{
-	if (kc->unplug[rw] != NULL) {
-		blk_unplug(bdev_get_queue(kc->unplug[rw]));
-		kc->unplug[rw] = NULL;
-	}
-}
-
-/*
- * Prepare block device unplug. If there's another device
- * to be unplugged at the same array index, we unplug that
- * device first.
- */
-static void prepare_unplug(struct dm_kcopyd_client *kc, int rw,
-			   struct block_device *bdev)
-{
-	if (likely(kc->unplug[rw] == bdev))
-		return;
-	unplug(kc, rw);
-	kc->unplug[rw] = bdev;
-}
-
 static void complete_io(unsigned long error, void *context)
 {
 	struct kcopyd_job *job = (struct kcopyd_job *) context;
@@ -386,16 +354,10 @@ static int run_io_job(struct kcopyd_job *job)
 		.client = job->kc->io_client,
 	};
 
-	if (job->rw == READ) {
+	if (job->rw == READ)
 		r = dm_io(&io_req, 1, &job->source, NULL);
-		prepare_unplug(job->kc, READ, job->source.bdev);
-	} else {
-		if (job->num_dests > 1)
-			io_req.bi_rw |= REQ_UNPLUG;
+	else
 		r = dm_io(&io_req, job->num_dests, job->dests, NULL);
-		if (!(io_req.bi_rw & REQ_UNPLUG))
-			prepare_unplug(job->kc, WRITE, job->dests[0].bdev);
-	}
 
 	return r;
 }
@@ -466,6 +428,7 @@ static void do_work(struct work_struct *work)
 {
 	struct dm_kcopyd_client *kc = container_of(work,
 					struct dm_kcopyd_client, kcopyd_work);
+	struct blk_plug plug;
 
 	/*
 	 * The order that these are called is *very* important.
@@ -473,18 +436,12 @@ static void do_work(struct work_struct *work)
 	 * Pages jobs when successful will jump onto the io jobs
 	 * list. io jobs call wake when they complete and it all
 	 * starts again.
-	 *
-	 * Note that io_jobs add block devices to the unplug array,
-	 * this array is cleared with "unplug" calls. It is thus
-	 * forbidden to run complete_jobs after io_jobs and before
-	 * unplug because the block device could be destroyed in
-	 * job completion callback.
 	 */
+	blk_start_plug(&plug);
 	process_jobs(&kc->complete_jobs, kc, run_complete_job);
 	process_jobs(&kc->pages_jobs, kc, run_pages_job);
 	process_jobs(&kc->io_jobs, kc, run_io_job);
-	unplug(kc, READ);
-	unplug(kc, WRITE);
+	blk_finish_plug(&plug);
 }
 
 /*
@@ -665,8 +622,6 @@ int dm_kcopyd_client_create(unsigned int nr_pages,
 	INIT_LIST_HEAD(&kc->io_jobs);
 	INIT_LIST_HEAD(&kc->pages_jobs);
 
-	memset(kc->unplug, 0, sizeof(kc->unplug));
-
 	kc->job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache);
 	if (!kc->job_pool)
 		goto bad_slab;
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index b9e1e15ef11c..5ef136cdba91 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -394,7 +394,7 @@ static void raid_unplug(struct dm_target_callbacks *cb)
 {
 	struct raid_set *rs = container_of(cb, struct raid_set, callbacks);
 
-	md_raid5_unplug_device(rs->md.private);
+	md_raid5_kick_device(rs->md.private);
 }
 
 /*
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index dee326775c60..976ad4688afc 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -842,8 +842,6 @@ static void do_mirror(struct work_struct *work)
 	do_reads(ms, &reads);
 	do_writes(ms, &writes);
 	do_failures(ms, &failures);
-
-	dm_table_unplug_all(ms->ti->table);
 }
 
 /*-----------------------------------------------------------------
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 38e4eb1bb965..f50a7b952257 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1275,29 +1275,6 @@ int dm_table_any_busy_target(struct dm_table *t)
 	return 0;
 }
 
-void dm_table_unplug_all(struct dm_table *t)
-{
-	struct dm_dev_internal *dd;
-	struct list_head *devices = dm_table_get_devices(t);
-	struct dm_target_callbacks *cb;
-
-	list_for_each_entry(dd, devices, list) {
-		struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
-		char b[BDEVNAME_SIZE];
-
-		if (likely(q))
-			blk_unplug(q);
-		else
-			DMWARN_LIMIT("%s: Cannot unplug nonexistent device %s",
-				     dm_device_name(t->md),
-				     bdevname(dd->dm_dev.bdev, b));
-	}
-
-	list_for_each_entry(cb, &t->target_callbacks, list)
-		if (cb->unplug_fn)
-			cb->unplug_fn(cb);
-}
-
 struct mapped_device *dm_table_get_md(struct dm_table *t)
 {
 	return t->md;
@@ -1345,4 +1322,3 @@ EXPORT_SYMBOL(dm_table_get_mode);
 EXPORT_SYMBOL(dm_table_get_md);
 EXPORT_SYMBOL(dm_table_put);
 EXPORT_SYMBOL(dm_table_get);
-EXPORT_SYMBOL(dm_table_unplug_all);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index eaa3af0e0632..d22b9905c168 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -807,8 +807,6 @@ void dm_requeue_unmapped_request(struct request *clone)
 	dm_unprep_request(rq);
 
 	spin_lock_irqsave(q->queue_lock, flags);
-	if (elv_queue_empty(q))
-		blk_plug_device(q);
 	blk_requeue_request(q, rq);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 
@@ -1613,10 +1611,10 @@ static void dm_request_fn(struct request_queue *q)
 	 * number of in-flight I/Os after the queue is stopped in
 	 * dm_suspend().
 	 */
-	while (!blk_queue_plugged(q) && !blk_queue_stopped(q)) {
+	while (!blk_queue_stopped(q)) {
 		rq = blk_peek_request(q);
 		if (!rq)
-			goto plug_and_out;
+			goto delay_and_out;
 
 		/* always use block 0 to find the target for flushes for now */
 		pos = 0;
@@ -1627,7 +1625,7 @@ static void dm_request_fn(struct request_queue *q)
 		BUG_ON(!dm_target_is_valid(ti));
 
 		if (ti->type->busy && ti->type->busy(ti))
-			goto plug_and_out;
+			goto delay_and_out;
 
 		blk_start_request(rq);
 		clone = rq->special;
@@ -1647,11 +1645,8 @@ requeued:
 	BUG_ON(!irqs_disabled());
 	spin_lock(q->queue_lock);
 
-plug_and_out:
-	if (!elv_queue_empty(q))
-		/* Some requests still remain, retry later */
-		blk_plug_device(q);
-
+delay_and_out:
+	blk_delay_queue(q, HZ / 10);
 out:
 	dm_table_put(map);
 
@@ -1680,20 +1675,6 @@ static int dm_lld_busy(struct request_queue *q)
 	return r;
 }
 
-static void dm_unplug_all(struct request_queue *q)
-{
-	struct mapped_device *md = q->queuedata;
-	struct dm_table *map = dm_get_live_table(md);
-
-	if (map) {
-		if (dm_request_based(md))
-			generic_unplug_device(q);
-
-		dm_table_unplug_all(map);
-		dm_table_put(map);
-	}
-}
-
 static int dm_any_congested(void *congested_data, int bdi_bits)
 {
 	int r = bdi_bits;
@@ -1817,7 +1798,6 @@ static void dm_init_md_queue(struct mapped_device *md)
 	md->queue->backing_dev_info.congested_data = md;
 	blk_queue_make_request(md->queue, dm_request);
 	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
-	md->queue->unplug_fn = dm_unplug_all;
 	blk_queue_merge_bvec(md->queue, dm_merge_bvec);
 	blk_queue_flush(md->queue, REQ_FLUSH | REQ_FUA);
 }
@@ -2263,8 +2243,6 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
 	int r = 0;
 	DECLARE_WAITQUEUE(wait, current);
 
-	dm_unplug_all(md->queue);
-
 	add_wait_queue(&md->wait, &wait);
 
 	while (1) {
@@ -2539,7 +2517,6 @@ int dm_resume(struct mapped_device *md)
 
 	clear_bit(DMF_SUSPENDED, &md->flags);
 
-	dm_table_unplug_all(map);
 	r = 0;
 out:
 	dm_table_put(map);
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 0ed7f6bc2a7f..338804f8fb3b 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -87,22 +87,6 @@ static int linear_mergeable_bvec(struct request_queue *q,
 	return maxsectors << 9;
 }
 
-static void linear_unplug(struct request_queue *q)
-{
-	mddev_t *mddev = q->queuedata;
-	linear_conf_t *conf;
-	int i;
-
-	rcu_read_lock();
-	conf = rcu_dereference(mddev->private);
-
-	for (i=0; i < mddev->raid_disks; i++) {
-		struct request_queue *r_queue = bdev_get_queue(conf->disks[i].rdev->bdev);
-		blk_unplug(r_queue);
-	}
-	rcu_read_unlock();
-}
-
 static int linear_congested(void *data, int bits)
 {
 	mddev_t *mddev = data;
@@ -224,7 +208,6 @@ static int linear_run (mddev_t *mddev)
 	md_set_array_sectors(mddev, linear_size(mddev, 0, 0));
 
 	blk_queue_merge_bvec(mddev->queue, linear_mergeable_bvec);
-	mddev->queue->unplug_fn = linear_unplug;
 	mddev->queue->backing_dev_info.congested_fn = linear_congested;
 	mddev->queue->backing_dev_info.congested_data = mddev;
 	md_integrity_register(mddev);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 818313e277e7..86ba66c0b28a 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -780,8 +780,7 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
 	bio->bi_end_io = super_written;
 
 	atomic_inc(&mddev->pending_writes);
-	submit_bio(REQ_WRITE | REQ_SYNC | REQ_UNPLUG | REQ_FLUSH | REQ_FUA,
-		   bio);
+	submit_bio(REQ_WRITE | REQ_SYNC | REQ_FLUSH | REQ_FUA, bio);
 }
 
 void md_super_wait(mddev_t *mddev)
@@ -809,7 +808,7 @@ int sync_page_io(mdk_rdev_t *rdev, sector_t sector, int size,
 	struct completion event;
 	int ret;
 
-	rw |= REQ_SYNC | REQ_UNPLUG;
+	rw |= REQ_SYNC;
 
 	bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
 		rdev->meta_bdev : rdev->bdev;
@@ -4817,7 +4816,6 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
 		__md_stop_writes(mddev);
 		md_stop(mddev);
 		mddev->queue->merge_bvec_fn = NULL;
-		mddev->queue->unplug_fn = NULL;
 		mddev->queue->backing_dev_info.congested_fn = NULL;
 
 		/* tell userspace to handle 'inactive' */
@@ -6692,8 +6690,6 @@ EXPORT_SYMBOL_GPL(md_allow_write);
 
 void md_unplug(mddev_t *mddev)
 {
-	if (mddev->queue)
-		blk_unplug(mddev->queue);
 	if (mddev->plug)
 		mddev->plug->unplug_fn(mddev->plug);
 }
@@ -6876,7 +6872,6 @@ void md_do_sync(mddev_t *mddev)
 		    >= mddev->resync_max - mddev->curr_resync_completed
 			    )) {
 			/* time to update curr_resync_completed */
-			md_unplug(mddev);
 			wait_event(mddev->recovery_wait,
 				   atomic_read(&mddev->recovery_active) == 0);
 			mddev->curr_resync_completed = j;
@@ -6952,7 +6947,6 @@ void md_do_sync(mddev_t *mddev)
 		 * about not overloading the IO subsystem. (things like an
 		 * e2fsck being done on the RAID array should execute fast)
 		 */
-		md_unplug(mddev);
 		cond_resched();
 
 		currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
@@ -6971,8 +6965,6 @@ void md_do_sync(mddev_t *mddev)
 	 * this also signals 'finished resyncing' to md_stop
 	 */
  out:
-	md_unplug(mddev);
-
 	wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
 
 	/* tell personality that we are finished */
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 3a62d440e27b..5e694b151c30 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -106,36 +106,6 @@ static void multipath_end_request(struct bio *bio, int error)
 		rdev_dec_pending(rdev, conf->mddev);
 }
 
-static void unplug_slaves(mddev_t *mddev)
-{
-	multipath_conf_t *conf = mddev->private;
-	int i;
-
-	rcu_read_lock();
-	for (i=0; i<mddev->raid_disks; i++) {
-		mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
-		if (rdev && !test_bit(Faulty, &rdev->flags)
-		    && atomic_read(&rdev->nr_pending)) {
-			struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
-
-			atomic_inc(&rdev->nr_pending);
-			rcu_read_unlock();
-
-			blk_unplug(r_queue);
-
-			rdev_dec_pending(rdev, mddev);
-			rcu_read_lock();
-		}
-	}
-	rcu_read_unlock();
-}
-
-static void multipath_unplug(struct request_queue *q)
-{
-	unplug_slaves(q->queuedata);
-}
-
-
 static int multipath_make_request(mddev_t *mddev, struct bio * bio)
 {
 	multipath_conf_t *conf = mddev->private;
@@ -517,7 +487,6 @@ static int multipath_run (mddev_t *mddev)
 	 */
 	md_set_array_sectors(mddev, multipath_size(mddev, 0, 0));
 
-	mddev->queue->unplug_fn = multipath_unplug;
 	mddev->queue->backing_dev_info.congested_fn = multipath_congested;
 	mddev->queue->backing_dev_info.congested_data = mddev;
 	md_integrity_register(mddev);
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index c0ac457f1218..95916fd6394a 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -25,21 +25,6 @@
 #include "raid0.h"
 #include "raid5.h"
 
-static void raid0_unplug(struct request_queue *q)
-{
-	mddev_t *mddev = q->queuedata;
-	raid0_conf_t *conf = mddev->private;
-	mdk_rdev_t **devlist = conf->devlist;
-	int raid_disks = conf->strip_zone[0].nb_dev;
-	int i;
-
-	for (i=0; i < raid_disks; i++) {
-		struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev);
-
-		blk_unplug(r_queue);
-	}
-}
-
 static int raid0_congested(void *data, int bits)
 {
 	mddev_t *mddev = data;
@@ -272,7 +257,6 @@ static int create_strip_zones(mddev_t *mddev, raid0_conf_t **private_conf)
 			 mdname(mddev),
 			 (unsigned long long)smallest->sectors);
 	}
-	mddev->queue->unplug_fn = raid0_unplug;
 	mddev->queue->backing_dev_info.congested_fn = raid0_congested;
 	mddev->queue->backing_dev_info.congested_data = mddev;
 
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 06cd712807d0..8f34ad5c478b 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
@@ -52,23 +52,16 @@ | |||
52 | #define NR_RAID1_BIOS 256 | 52 | #define NR_RAID1_BIOS 256 |
53 | 53 | ||
54 | 54 | ||
55 | static void unplug_slaves(mddev_t *mddev); | ||
56 | |||
57 | static void allow_barrier(conf_t *conf); | 55 | static void allow_barrier(conf_t *conf); |
58 | static void lower_barrier(conf_t *conf); | 56 | static void lower_barrier(conf_t *conf); |
59 | 57 | ||
60 | static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data) | 58 | static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data) |
61 | { | 59 | { |
62 | struct pool_info *pi = data; | 60 | struct pool_info *pi = data; |
63 | r1bio_t *r1_bio; | ||
64 | int size = offsetof(r1bio_t, bios[pi->raid_disks]); | 61 | int size = offsetof(r1bio_t, bios[pi->raid_disks]); |
65 | 62 | ||
66 | /* allocate a r1bio with room for raid_disks entries in the bios array */ | 63 | /* allocate a r1bio with room for raid_disks entries in the bios array */ |
67 | r1_bio = kzalloc(size, gfp_flags); | 64 | return kzalloc(size, gfp_flags); |
68 | if (!r1_bio && pi->mddev) | ||
69 | unplug_slaves(pi->mddev); | ||
70 | |||
71 | return r1_bio; | ||
72 | } | 65 | } |
73 | 66 | ||
74 | static void r1bio_pool_free(void *r1_bio, void *data) | 67 | static void r1bio_pool_free(void *r1_bio, void *data) |
@@ -91,10 +84,8 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data) | |||
91 | int i, j; | 84 | int i, j; |
92 | 85 | ||
93 | r1_bio = r1bio_pool_alloc(gfp_flags, pi); | 86 | r1_bio = r1bio_pool_alloc(gfp_flags, pi); |
94 | if (!r1_bio) { | 87 | if (!r1_bio) |
95 | unplug_slaves(pi->mddev); | ||
96 | return NULL; | 88 | return NULL; |
97 | } | ||
98 | 89 | ||
99 | /* | 90 | /* |
100 | * Allocate bios : 1 for reading, n-1 for writing | 91 | * Allocate bios : 1 for reading, n-1 for writing |
@@ -520,37 +511,6 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio) | |||
520 | return new_disk; | 511 | return new_disk; |
521 | } | 512 | } |
522 | 513 | ||
523 | static void unplug_slaves(mddev_t *mddev) | ||
524 | { | ||
525 | conf_t *conf = mddev->private; | ||
526 | int i; | ||
527 | |||
528 | rcu_read_lock(); | ||
529 | for (i=0; i<mddev->raid_disks; i++) { | ||
530 | mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); | ||
531 | if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) { | ||
532 | struct request_queue *r_queue = bdev_get_queue(rdev->bdev); | ||
533 | |||
534 | atomic_inc(&rdev->nr_pending); | ||
535 | rcu_read_unlock(); | ||
536 | |||
537 | blk_unplug(r_queue); | ||
538 | |||
539 | rdev_dec_pending(rdev, mddev); | ||
540 | rcu_read_lock(); | ||
541 | } | ||
542 | } | ||
543 | rcu_read_unlock(); | ||
544 | } | ||
545 | |||
546 | static void raid1_unplug(struct request_queue *q) | ||
547 | { | ||
548 | mddev_t *mddev = q->queuedata; | ||
549 | |||
550 | unplug_slaves(mddev); | ||
551 | md_wakeup_thread(mddev->thread); | ||
552 | } | ||
553 | |||
554 | static int raid1_congested(void *data, int bits) | 514 | static int raid1_congested(void *data, int bits) |
555 | { | 515 | { |
556 | mddev_t *mddev = data; | 516 | mddev_t *mddev = data; |
@@ -580,23 +540,16 @@ static int raid1_congested(void *data, int bits) | |||
580 | } | 540 | } |
581 | 541 | ||
582 | 542 | ||
583 | static int flush_pending_writes(conf_t *conf) | 543 | static void flush_pending_writes(conf_t *conf) |
584 | { | 544 | { |
585 | /* Any writes that have been queued but are awaiting | 545 | /* Any writes that have been queued but are awaiting |
586 | * bitmap updates get flushed here. | 546 | * bitmap updates get flushed here. |
587 | * We return 1 if any requests were actually submitted. | ||
588 | */ | 547 | */ |
589 | int rv = 0; | ||
590 | |||
591 | spin_lock_irq(&conf->device_lock); | 548 | spin_lock_irq(&conf->device_lock); |
592 | 549 | ||
593 | if (conf->pending_bio_list.head) { | 550 | if (conf->pending_bio_list.head) { |
594 | struct bio *bio; | 551 | struct bio *bio; |
595 | bio = bio_list_get(&conf->pending_bio_list); | 552 | bio = bio_list_get(&conf->pending_bio_list); |
596 | /* Only take the spinlock to quiet a warning */ | ||
597 | spin_lock(conf->mddev->queue->queue_lock); | ||
598 | blk_remove_plug(conf->mddev->queue); | ||
599 | spin_unlock(conf->mddev->queue->queue_lock); | ||
600 | spin_unlock_irq(&conf->device_lock); | 553 | spin_unlock_irq(&conf->device_lock); |
601 | /* flush any pending bitmap writes to | 554 | /* flush any pending bitmap writes to |
602 | * disk before proceeding w/ I/O */ | 555 | * disk before proceeding w/ I/O */ |
@@ -608,10 +561,14 @@ static int flush_pending_writes(conf_t *conf) | |||
608 | generic_make_request(bio); | 561 | generic_make_request(bio); |
609 | bio = next; | 562 | bio = next; |
610 | } | 563 | } |
611 | rv = 1; | ||
612 | } else | 564 | } else |
613 | spin_unlock_irq(&conf->device_lock); | 565 | spin_unlock_irq(&conf->device_lock); |
614 | return rv; | 566 | } |
567 | |||
568 | static void md_kick_device(mddev_t *mddev) | ||
569 | { | ||
570 | blk_flush_plug(current); | ||
571 | md_wakeup_thread(mddev->thread); | ||
615 | } | 572 | } |
616 | 573 | ||
617 | /* Barriers.... | 574 | /* Barriers.... |
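md_kick_device() is the replacement idiom for the deleted unplug callbacks: instead of poking every member queue, it flushes whatever the current task has batched on its own on-stack plug and then wakes the md thread. That also suggests why the unplug-on-allocation-failure paths above could simply be dropped: a task that blocks gets its plug flushed when it sleeps, so the I/O that could satisfy the allocation is already on its way (that reading is an inference from this patch, not stated in it). A minimal sketch of the waiter-side contract, where only the blk_* and wait_event calls are real interfaces and the rest is assumed:

#include <linux/blkdev.h>
#include <linux/sched.h>
#include <linux/wait.h>

/* Sketch: before sleeping on I/O completion, push out any bios still
 * sitting on our own task's plug list, or we would be waiting on
 * requests we never issued. wq/pending are assumed names. */
static void wait_for_pending_io(wait_queue_head_t *wq, atomic_t *pending)
{
	blk_flush_plug(current);	/* no-op if no plug is active */
	wait_event(*wq, atomic_read(pending) == 0);
}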
@@ -643,8 +600,7 @@ static void raise_barrier(conf_t *conf) | |||
643 | 600 | ||
644 | /* Wait until no block IO is waiting */ | 601 | /* Wait until no block IO is waiting */ |
645 | wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting, | 602 | wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting, |
646 | conf->resync_lock, | 603 | conf->resync_lock, md_kick_device(conf->mddev)); |
647 | raid1_unplug(conf->mddev->queue)); | ||
648 | 604 | ||
649 | /* block any new IO from starting */ | 605 | /* block any new IO from starting */ |
650 | conf->barrier++; | 606 | conf->barrier++; |
@@ -652,8 +608,7 @@ static void raise_barrier(conf_t *conf) | |||
652 | /* Now wait for all pending IO to complete */ | 608 | /* Now wait for all pending IO to complete */ |
653 | wait_event_lock_irq(conf->wait_barrier, | 609 | wait_event_lock_irq(conf->wait_barrier, |
654 | !conf->nr_pending && conf->barrier < RESYNC_DEPTH, | 610 | !conf->nr_pending && conf->barrier < RESYNC_DEPTH, |
655 | conf->resync_lock, | 611 | conf->resync_lock, md_kick_device(conf->mddev)); |
656 | raid1_unplug(conf->mddev->queue)); | ||
657 | 612 | ||
658 | spin_unlock_irq(&conf->resync_lock); | 613 | spin_unlock_irq(&conf->resync_lock); |
659 | } | 614 | } |
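The fourth argument to wait_event_lock_irq() is a statement the macro executes each time around its sleep loop, after dropping the spinlock, which is how md_kick_device() runs on behalf of the waiter. From memory, md's helper is shaped roughly like this (a sketch, not a verbatim copy of md.h):

/* Shape of md's wait_event_lock_irq(wq, condition, lock, cmd):
 * entered and exited with 'lock' held and interrupts disabled. */
#define my_wait_event_lock_irq(wq, condition, lock, cmd)		\
do {									\
	DEFINE_WAIT(__wait);						\
	for (;;) {							\
		prepare_to_wait(&(wq), &__wait, TASK_UNINTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		spin_unlock_irq(&(lock));				\
		cmd;		/* e.g. md_kick_device(mddev) */	\
		schedule();						\
		spin_lock_irq(&(lock));					\
	}								\
	finish_wait(&(wq), &__wait);					\
} while (0)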
@@ -675,7 +630,7 @@ static void wait_barrier(conf_t *conf) | |||
675 | conf->nr_waiting++; | 630 | conf->nr_waiting++; |
676 | wait_event_lock_irq(conf->wait_barrier, !conf->barrier, | 631 | wait_event_lock_irq(conf->wait_barrier, !conf->barrier, |
677 | conf->resync_lock, | 632 | conf->resync_lock, |
678 | raid1_unplug(conf->mddev->queue)); | 633 | md_kick_device(conf->mddev)); |
679 | conf->nr_waiting--; | 634 | conf->nr_waiting--; |
680 | } | 635 | } |
681 | conf->nr_pending++; | 636 | conf->nr_pending++; |
@@ -712,7 +667,7 @@ static void freeze_array(conf_t *conf) | |||
712 | conf->nr_pending == conf->nr_queued+1, | 667 | conf->nr_pending == conf->nr_queued+1, |
713 | conf->resync_lock, | 668 | conf->resync_lock, |
714 | ({ flush_pending_writes(conf); | 669 | ({ flush_pending_writes(conf); |
715 | raid1_unplug(conf->mddev->queue); })); | 670 | md_kick_device(conf->mddev); })); |
716 | spin_unlock_irq(&conf->resync_lock); | 671 | spin_unlock_irq(&conf->resync_lock); |
717 | } | 672 | } |
718 | static void unfreeze_array(conf_t *conf) | 673 | static void unfreeze_array(conf_t *conf) |
@@ -962,7 +917,6 @@ static int make_request(mddev_t *mddev, struct bio * bio) | |||
962 | atomic_inc(&r1_bio->remaining); | 917 | atomic_inc(&r1_bio->remaining); |
963 | spin_lock_irqsave(&conf->device_lock, flags); | 918 | spin_lock_irqsave(&conf->device_lock, flags); |
964 | bio_list_add(&conf->pending_bio_list, mbio); | 919 | bio_list_add(&conf->pending_bio_list, mbio); |
965 | blk_plug_device_unlocked(mddev->queue); | ||
966 | spin_unlock_irqrestore(&conf->device_lock, flags); | 920 | spin_unlock_irqrestore(&conf->device_lock, flags); |
967 | } | 921 | } |
968 | r1_bio_write_done(r1_bio, bio->bi_vcnt, behind_pages, behind_pages != NULL); | 922 | r1_bio_write_done(r1_bio, bio->bi_vcnt, behind_pages, behind_pages != NULL); |
@@ -971,7 +925,7 @@ static int make_request(mddev_t *mddev, struct bio * bio) | |||
971 | /* In case raid1d snuck in to freeze_array */ | 925 | /* In case raid1d snuck in to freeze_array */ |
972 | wake_up(&conf->wait_barrier); | 926 | wake_up(&conf->wait_barrier); |
973 | 927 | ||
974 | if (do_sync) | 928 | if (do_sync || !bitmap) |
975 | md_wakeup_thread(mddev->thread); | 929 | md_wakeup_thread(mddev->thread); |
976 | 930 | ||
977 | return 0; | 931 | return 0; |
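With the plug removed, nothing fires later to drain conf->pending_bio_list, so make_request() has to decide itself when raid1d needs an explicit kick. An annotated restatement of the new condition (the two code lines match the hunk above; the reasoning is partly inferred):

	/* - do_sync: REQ_SYNC writes should be issued promptly;
	 * - !bitmap: with no bitmap there is no bitmap-flush path that
	 *   would wake raid1d for us later (inference, not stated). */
	if (do_sync || !bitmap)
		md_wakeup_thread(mddev->thread);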
@@ -1561,7 +1515,6 @@ static void raid1d(mddev_t *mddev) | |||
1561 | unsigned long flags; | 1515 | unsigned long flags; |
1562 | conf_t *conf = mddev->private; | 1516 | conf_t *conf = mddev->private; |
1563 | struct list_head *head = &conf->retry_list; | 1517 | struct list_head *head = &conf->retry_list; |
1564 | int unplug=0; | ||
1565 | mdk_rdev_t *rdev; | 1518 | mdk_rdev_t *rdev; |
1566 | 1519 | ||
1567 | md_check_recovery(mddev); | 1520 | md_check_recovery(mddev); |
@@ -1569,7 +1522,7 @@ static void raid1d(mddev_t *mddev) | |||
1569 | for (;;) { | 1522 | for (;;) { |
1570 | char b[BDEVNAME_SIZE]; | 1523 | char b[BDEVNAME_SIZE]; |
1571 | 1524 | ||
1572 | unplug += flush_pending_writes(conf); | 1525 | flush_pending_writes(conf); |
1573 | 1526 | ||
1574 | spin_lock_irqsave(&conf->device_lock, flags); | 1527 | spin_lock_irqsave(&conf->device_lock, flags); |
1575 | if (list_empty(head)) { | 1528 | if (list_empty(head)) { |
@@ -1583,10 +1536,9 @@ static void raid1d(mddev_t *mddev) | |||
1583 | 1536 | ||
1584 | mddev = r1_bio->mddev; | 1537 | mddev = r1_bio->mddev; |
1585 | conf = mddev->private; | 1538 | conf = mddev->private; |
1586 | if (test_bit(R1BIO_IsSync, &r1_bio->state)) { | 1539 | if (test_bit(R1BIO_IsSync, &r1_bio->state)) |
1587 | sync_request_write(mddev, r1_bio); | 1540 | sync_request_write(mddev, r1_bio); |
1588 | unplug = 1; | 1541 | else { |
1589 | } else { | ||
1590 | int disk; | 1542 | int disk; |
1591 | 1543 | ||
1592 | /* we got a read error. Maybe the drive is bad. Maybe just | 1544 | /* we got a read error. Maybe the drive is bad. Maybe just |
@@ -1636,14 +1588,11 @@ static void raid1d(mddev_t *mddev) | |||
1636 | bio->bi_end_io = raid1_end_read_request; | 1588 | bio->bi_end_io = raid1_end_read_request; |
1637 | bio->bi_rw = READ | do_sync; | 1589 | bio->bi_rw = READ | do_sync; |
1638 | bio->bi_private = r1_bio; | 1590 | bio->bi_private = r1_bio; |
1639 | unplug = 1; | ||
1640 | generic_make_request(bio); | 1591 | generic_make_request(bio); |
1641 | } | 1592 | } |
1642 | } | 1593 | } |
1643 | cond_resched(); | 1594 | cond_resched(); |
1644 | } | 1595 | } |
1645 | if (unplug) | ||
1646 | unplug_slaves(mddev); | ||
1647 | } | 1596 | } |
1648 | 1597 | ||
1649 | 1598 | ||
@@ -2066,7 +2015,6 @@ static int run(mddev_t *mddev) | |||
2066 | 2015 | ||
2067 | md_set_array_sectors(mddev, raid1_size(mddev, 0, 0)); | 2016 | md_set_array_sectors(mddev, raid1_size(mddev, 0, 0)); |
2068 | 2017 | ||
2069 | mddev->queue->unplug_fn = raid1_unplug; | ||
2070 | mddev->queue->backing_dev_info.congested_fn = raid1_congested; | 2018 | mddev->queue->backing_dev_info.congested_fn = raid1_congested; |
2071 | mddev->queue->backing_dev_info.congested_data = mddev; | 2019 | mddev->queue->backing_dev_info.congested_data = mddev; |
2072 | md_integrity_register(mddev); | 2020 | md_integrity_register(mddev); |
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 747d061d8e05..c0d0f5f7e407 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
@@ -57,23 +57,16 @@ | |||
57 | */ | 57 | */ |
58 | #define NR_RAID10_BIOS 256 | 58 | #define NR_RAID10_BIOS 256 |
59 | 59 | ||
60 | static void unplug_slaves(mddev_t *mddev); | ||
61 | |||
62 | static void allow_barrier(conf_t *conf); | 60 | static void allow_barrier(conf_t *conf); |
63 | static void lower_barrier(conf_t *conf); | 61 | static void lower_barrier(conf_t *conf); |
64 | 62 | ||
65 | static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data) | 63 | static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data) |
66 | { | 64 | { |
67 | conf_t *conf = data; | 65 | conf_t *conf = data; |
68 | r10bio_t *r10_bio; | ||
69 | int size = offsetof(struct r10bio_s, devs[conf->copies]); | 66 | int size = offsetof(struct r10bio_s, devs[conf->copies]); |
70 | 67 | ||
71 | /* allocate a r10bio with room for raid_disks entries in the bios array */ | 68 | /* allocate a r10bio with room for raid_disks entries in the bios array */ |
72 | r10_bio = kzalloc(size, gfp_flags); | 69 | return kzalloc(size, gfp_flags); |
73 | if (!r10_bio && conf->mddev) | ||
74 | unplug_slaves(conf->mddev); | ||
75 | |||
76 | return r10_bio; | ||
77 | } | 70 | } |
78 | 71 | ||
79 | static void r10bio_pool_free(void *r10_bio, void *data) | 72 | static void r10bio_pool_free(void *r10_bio, void *data) |
@@ -106,10 +99,8 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data) | |||
106 | int nalloc; | 99 | int nalloc; |
107 | 100 | ||
108 | r10_bio = r10bio_pool_alloc(gfp_flags, conf); | 101 | r10_bio = r10bio_pool_alloc(gfp_flags, conf); |
109 | if (!r10_bio) { | 102 | if (!r10_bio) |
110 | unplug_slaves(conf->mddev); | ||
111 | return NULL; | 103 | return NULL; |
112 | } | ||
113 | 104 | ||
114 | if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery)) | 105 | if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery)) |
115 | nalloc = conf->copies; /* resync */ | 106 | nalloc = conf->copies; /* resync */ |
@@ -597,37 +588,6 @@ rb_out: | |||
597 | return disk; | 588 | return disk; |
598 | } | 589 | } |
599 | 590 | ||
600 | static void unplug_slaves(mddev_t *mddev) | ||
601 | { | ||
602 | conf_t *conf = mddev->private; | ||
603 | int i; | ||
604 | |||
605 | rcu_read_lock(); | ||
606 | for (i=0; i < conf->raid_disks; i++) { | ||
607 | mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); | ||
608 | if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) { | ||
609 | struct request_queue *r_queue = bdev_get_queue(rdev->bdev); | ||
610 | |||
611 | atomic_inc(&rdev->nr_pending); | ||
612 | rcu_read_unlock(); | ||
613 | |||
614 | blk_unplug(r_queue); | ||
615 | |||
616 | rdev_dec_pending(rdev, mddev); | ||
617 | rcu_read_lock(); | ||
618 | } | ||
619 | } | ||
620 | rcu_read_unlock(); | ||
621 | } | ||
622 | |||
623 | static void raid10_unplug(struct request_queue *q) | ||
624 | { | ||
625 | mddev_t *mddev = q->queuedata; | ||
626 | |||
627 | unplug_slaves(q->queuedata); | ||
628 | md_wakeup_thread(mddev->thread); | ||
629 | } | ||
630 | |||
631 | static int raid10_congested(void *data, int bits) | 591 | static int raid10_congested(void *data, int bits) |
632 | { | 592 | { |
633 | mddev_t *mddev = data; | 593 | mddev_t *mddev = data; |
@@ -649,23 +609,16 @@ static int raid10_congested(void *data, int bits) | |||
649 | return ret; | 609 | return ret; |
650 | } | 610 | } |
651 | 611 | ||
652 | static int flush_pending_writes(conf_t *conf) | 612 | static void flush_pending_writes(conf_t *conf) |
653 | { | 613 | { |
654 | /* Any writes that have been queued but are awaiting | 614 | /* Any writes that have been queued but are awaiting |
655 | * bitmap updates get flushed here. | 615 | * bitmap updates get flushed here. |
656 | * We return 1 if any requests were actually submitted. | ||
657 | */ | 616 | */ |
658 | int rv = 0; | ||
659 | |||
660 | spin_lock_irq(&conf->device_lock); | 617 | spin_lock_irq(&conf->device_lock); |
661 | 618 | ||
662 | if (conf->pending_bio_list.head) { | 619 | if (conf->pending_bio_list.head) { |
663 | struct bio *bio; | 620 | struct bio *bio; |
664 | bio = bio_list_get(&conf->pending_bio_list); | 621 | bio = bio_list_get(&conf->pending_bio_list); |
665 | /* Spinlock only taken to quiet a warning */ | ||
666 | spin_lock(conf->mddev->queue->queue_lock); | ||
667 | blk_remove_plug(conf->mddev->queue); | ||
668 | spin_unlock(conf->mddev->queue->queue_lock); | ||
669 | spin_unlock_irq(&conf->device_lock); | 622 | spin_unlock_irq(&conf->device_lock); |
670 | /* flush any pending bitmap writes to disk | 623 | /* flush any pending bitmap writes to disk |
671 | * before proceeding w/ I/O */ | 624 | * before proceeding w/ I/O */ |
@@ -677,11 +630,16 @@ static int flush_pending_writes(conf_t *conf) | |||
677 | generic_make_request(bio); | 630 | generic_make_request(bio); |
678 | bio = next; | 631 | bio = next; |
679 | } | 632 | } |
680 | rv = 1; | ||
681 | } else | 633 | } else |
682 | spin_unlock_irq(&conf->device_lock); | 634 | spin_unlock_irq(&conf->device_lock); |
683 | return rv; | ||
684 | } | 635 | } |
636 | |||
637 | static void md_kick_device(mddev_t *mddev) | ||
638 | { | ||
639 | blk_flush_plug(current); | ||
640 | md_wakeup_thread(mddev->thread); | ||
641 | } | ||
642 | |||
685 | /* Barriers.... | 643 | /* Barriers.... |
686 | * Sometimes we need to suspend IO while we do something else, | 644 | * Sometimes we need to suspend IO while we do something else, |
687 | * either some resync/recovery, or reconfigure the array. | 645 | * either some resync/recovery, or reconfigure the array. |
@@ -711,8 +669,7 @@ static void raise_barrier(conf_t *conf, int force) | |||
711 | 669 | ||
712 | /* Wait until no block IO is waiting (unless 'force') */ | 670 | /* Wait until no block IO is waiting (unless 'force') */ |
713 | wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting, | 671 | wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting, |
714 | conf->resync_lock, | 672 | conf->resync_lock, md_kick_device(conf->mddev)); |
715 | raid10_unplug(conf->mddev->queue)); | ||
716 | 673 | ||
717 | /* block any new IO from starting */ | 674 | /* block any new IO from starting */ |
718 | conf->barrier++; | 675 | conf->barrier++; |
@@ -720,8 +677,7 @@ static void raise_barrier(conf_t *conf, int force) | |||
720 | /* Now wait for all pending IO to complete */ | 677 |
721 | wait_event_lock_irq(conf->wait_barrier, | 678 | wait_event_lock_irq(conf->wait_barrier, |
722 | !conf->nr_pending && conf->barrier < RESYNC_DEPTH, | 679 | !conf->nr_pending && conf->barrier < RESYNC_DEPTH, |
723 | conf->resync_lock, | 680 | conf->resync_lock, md_kick_device(conf->mddev)); |
724 | raid10_unplug(conf->mddev->queue)); | ||
725 | 681 | ||
726 | spin_unlock_irq(&conf->resync_lock); | 682 | spin_unlock_irq(&conf->resync_lock); |
727 | } | 683 | } |
@@ -742,7 +698,7 @@ static void wait_barrier(conf_t *conf) | |||
742 | conf->nr_waiting++; | 698 | conf->nr_waiting++; |
743 | wait_event_lock_irq(conf->wait_barrier, !conf->barrier, | 699 | wait_event_lock_irq(conf->wait_barrier, !conf->barrier, |
744 | conf->resync_lock, | 700 | conf->resync_lock, |
745 | raid10_unplug(conf->mddev->queue)); | 701 | md_kick_device(conf->mddev)); |
746 | conf->nr_waiting--; | 702 | conf->nr_waiting--; |
747 | } | 703 | } |
748 | conf->nr_pending++; | 704 | conf->nr_pending++; |
@@ -779,7 +735,7 @@ static void freeze_array(conf_t *conf) | |||
779 | conf->nr_pending == conf->nr_queued+1, | 735 | conf->nr_pending == conf->nr_queued+1, |
780 | conf->resync_lock, | 736 | conf->resync_lock, |
781 | ({ flush_pending_writes(conf); | 737 | ({ flush_pending_writes(conf); |
782 | raid10_unplug(conf->mddev->queue); })); | 738 | md_kick_device(conf->mddev); })); |
783 | spin_unlock_irq(&conf->resync_lock); | 739 | spin_unlock_irq(&conf->resync_lock); |
784 | } | 740 | } |
785 | 741 | ||
@@ -974,7 +930,6 @@ static int make_request(mddev_t *mddev, struct bio * bio) | |||
974 | atomic_inc(&r10_bio->remaining); | 930 | atomic_inc(&r10_bio->remaining); |
975 | spin_lock_irqsave(&conf->device_lock, flags); | 931 | spin_lock_irqsave(&conf->device_lock, flags); |
976 | bio_list_add(&conf->pending_bio_list, mbio); | 932 | bio_list_add(&conf->pending_bio_list, mbio); |
977 | blk_plug_device_unlocked(mddev->queue); | ||
978 | spin_unlock_irqrestore(&conf->device_lock, flags); | 933 | spin_unlock_irqrestore(&conf->device_lock, flags); |
979 | } | 934 | } |
980 | 935 | ||
@@ -991,7 +946,7 @@ static int make_request(mddev_t *mddev, struct bio * bio) | |||
991 | /* In case raid10d snuck in to freeze_array */ | 946 | /* In case raid10d snuck in to freeze_array */ |
992 | wake_up(&conf->wait_barrier); | 947 | wake_up(&conf->wait_barrier); |
993 | 948 | ||
994 | if (do_sync) | 949 | if (do_sync || !mddev->bitmap) |
995 | md_wakeup_thread(mddev->thread); | 950 | md_wakeup_thread(mddev->thread); |
996 | 951 | ||
997 | return 0; | 952 | return 0; |
@@ -1684,7 +1639,6 @@ static void raid10d(mddev_t *mddev) | |||
1684 | unsigned long flags; | 1639 | unsigned long flags; |
1685 | conf_t *conf = mddev->private; | 1640 | conf_t *conf = mddev->private; |
1686 | struct list_head *head = &conf->retry_list; | 1641 | struct list_head *head = &conf->retry_list; |
1687 | int unplug=0; | ||
1688 | mdk_rdev_t *rdev; | 1642 | mdk_rdev_t *rdev; |
1689 | 1643 | ||
1690 | md_check_recovery(mddev); | 1644 | md_check_recovery(mddev); |
@@ -1692,7 +1646,7 @@ static void raid10d(mddev_t *mddev) | |||
1692 | for (;;) { | 1646 | for (;;) { |
1693 | char b[BDEVNAME_SIZE]; | 1647 | char b[BDEVNAME_SIZE]; |
1694 | 1648 | ||
1695 | unplug += flush_pending_writes(conf); | 1649 | flush_pending_writes(conf); |
1696 | 1650 | ||
1697 | spin_lock_irqsave(&conf->device_lock, flags); | 1651 | spin_lock_irqsave(&conf->device_lock, flags); |
1698 | if (list_empty(head)) { | 1652 | if (list_empty(head)) { |
@@ -1706,13 +1660,11 @@ static void raid10d(mddev_t *mddev) | |||
1706 | 1660 | ||
1707 | mddev = r10_bio->mddev; | 1661 | mddev = r10_bio->mddev; |
1708 | conf = mddev->private; | 1662 | conf = mddev->private; |
1709 | if (test_bit(R10BIO_IsSync, &r10_bio->state)) { | 1663 | if (test_bit(R10BIO_IsSync, &r10_bio->state)) |
1710 | sync_request_write(mddev, r10_bio); | 1664 | sync_request_write(mddev, r10_bio); |
1711 | unplug = 1; | 1665 | else if (test_bit(R10BIO_IsRecover, &r10_bio->state)) |
1712 | } else if (test_bit(R10BIO_IsRecover, &r10_bio->state)) { | ||
1713 | recovery_request_write(mddev, r10_bio); | 1666 | recovery_request_write(mddev, r10_bio); |
1714 | unplug = 1; | 1667 | else { |
1715 | } else { | ||
1716 | int mirror; | 1668 | int mirror; |
1717 | /* we got a read error. Maybe the drive is bad. Maybe just | 1669 | /* we got a read error. Maybe the drive is bad. Maybe just |
1718 | * the block and we can fix it. | 1670 | * the block and we can fix it. |
@@ -1759,14 +1711,11 @@ static void raid10d(mddev_t *mddev) | |||
1759 | bio->bi_rw = READ | do_sync; | 1711 | bio->bi_rw = READ | do_sync; |
1760 | bio->bi_private = r10_bio; | 1712 | bio->bi_private = r10_bio; |
1761 | bio->bi_end_io = raid10_end_read_request; | 1713 | bio->bi_end_io = raid10_end_read_request; |
1762 | unplug = 1; | ||
1763 | generic_make_request(bio); | 1714 | generic_make_request(bio); |
1764 | } | 1715 | } |
1765 | } | 1716 | } |
1766 | cond_resched(); | 1717 | cond_resched(); |
1767 | } | 1718 | } |
1768 | if (unplug) | ||
1769 | unplug_slaves(mddev); | ||
1770 | } | 1719 | } |
1771 | 1720 | ||
1772 | 1721 | ||
@@ -2377,7 +2326,6 @@ static int run(mddev_t *mddev) | |||
2377 | md_set_array_sectors(mddev, size); | 2326 | md_set_array_sectors(mddev, size); |
2378 | mddev->resync_max_sectors = size; | 2327 | mddev->resync_max_sectors = size; |
2379 | 2328 | ||
2380 | mddev->queue->unplug_fn = raid10_unplug; | ||
2381 | mddev->queue->backing_dev_info.congested_fn = raid10_congested; | 2329 | mddev->queue->backing_dev_info.congested_fn = raid10_congested; |
2382 | mddev->queue->backing_dev_info.congested_data = mddev; | 2330 | mddev->queue->backing_dev_info.congested_data = mddev; |
2383 | 2331 | ||
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 78536fdbd87f..e867ee42b152 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -433,8 +433,6 @@ static int has_failed(raid5_conf_t *conf) | |||
433 | return 0; | 433 | return 0; |
434 | } | 434 | } |
435 | 435 | ||
436 | static void unplug_slaves(mddev_t *mddev); | ||
437 | |||
438 | static struct stripe_head * | 436 | static struct stripe_head * |
439 | get_active_stripe(raid5_conf_t *conf, sector_t sector, | 437 | get_active_stripe(raid5_conf_t *conf, sector_t sector, |
440 | int previous, int noblock, int noquiesce) | 438 | int previous, int noblock, int noquiesce) |
@@ -463,8 +461,7 @@ get_active_stripe(raid5_conf_t *conf, sector_t sector, | |||
463 | < (conf->max_nr_stripes *3/4) | 461 | < (conf->max_nr_stripes *3/4) |
464 | || !conf->inactive_blocked), | 462 | || !conf->inactive_blocked), |
465 | conf->device_lock, | 463 | conf->device_lock, |
466 | md_raid5_unplug_device(conf) | 464 | md_raid5_kick_device(conf)); |
467 | ); | ||
468 | conf->inactive_blocked = 0; | 465 | conf->inactive_blocked = 0; |
469 | } else | 466 | } else |
470 | init_stripe(sh, sector, previous); | 467 | init_stripe(sh, sector, previous); |
@@ -1473,8 +1470,7 @@ static int resize_stripes(raid5_conf_t *conf, int newsize) | |||
1473 | wait_event_lock_irq(conf->wait_for_stripe, | 1470 | wait_event_lock_irq(conf->wait_for_stripe, |
1474 | !list_empty(&conf->inactive_list), | 1471 | !list_empty(&conf->inactive_list), |
1475 | conf->device_lock, | 1472 | conf->device_lock, |
1476 | unplug_slaves(conf->mddev) | 1473 | blk_flush_plug(current)); |
1477 | ); | ||
1478 | osh = get_free_stripe(conf); | 1474 | osh = get_free_stripe(conf); |
1479 | spin_unlock_irq(&conf->device_lock); | 1475 | spin_unlock_irq(&conf->device_lock); |
1480 | atomic_set(&nsh->count, 1); | 1476 | atomic_set(&nsh->count, 1); |
@@ -3645,58 +3641,19 @@ static void activate_bit_delay(raid5_conf_t *conf) | |||
3645 | } | 3641 | } |
3646 | } | 3642 | } |
3647 | 3643 | ||
3648 | static void unplug_slaves(mddev_t *mddev) | 3644 | void md_raid5_kick_device(raid5_conf_t *conf) |
3649 | { | 3645 | { |
3650 | raid5_conf_t *conf = mddev->private; | 3646 | blk_flush_plug(current); |
3651 | int i; | 3647 | raid5_activate_delayed(conf); |
3652 | int devs = max(conf->raid_disks, conf->previous_raid_disks); | ||
3653 | |||
3654 | rcu_read_lock(); | ||
3655 | for (i = 0; i < devs; i++) { | ||
3656 | mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev); | ||
3657 | if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) { | ||
3658 | struct request_queue *r_queue = bdev_get_queue(rdev->bdev); | ||
3659 | |||
3660 | atomic_inc(&rdev->nr_pending); | ||
3661 | rcu_read_unlock(); | ||
3662 | |||
3663 | blk_unplug(r_queue); | ||
3664 | |||
3665 | rdev_dec_pending(rdev, mddev); | ||
3666 | rcu_read_lock(); | ||
3667 | } | ||
3668 | } | ||
3669 | rcu_read_unlock(); | ||
3670 | } | ||
3671 | |||
3672 | void md_raid5_unplug_device(raid5_conf_t *conf) | ||
3673 | { | ||
3674 | unsigned long flags; | ||
3675 | |||
3676 | spin_lock_irqsave(&conf->device_lock, flags); | ||
3677 | |||
3678 | if (plugger_remove_plug(&conf->plug)) { | ||
3679 | conf->seq_flush++; | ||
3680 | raid5_activate_delayed(conf); | ||
3681 | } | ||
3682 | md_wakeup_thread(conf->mddev->thread); | 3648 | md_wakeup_thread(conf->mddev->thread); |
3683 | |||
3684 | spin_unlock_irqrestore(&conf->device_lock, flags); | ||
3685 | |||
3686 | unplug_slaves(conf->mddev); | ||
3687 | } | 3649 | } |
3688 | EXPORT_SYMBOL_GPL(md_raid5_unplug_device); | 3650 | EXPORT_SYMBOL_GPL(md_raid5_kick_device); |
3689 | 3651 | ||
3690 | static void raid5_unplug(struct plug_handle *plug) | 3652 | static void raid5_unplug(struct plug_handle *plug) |
3691 | { | 3653 | { |
3692 | raid5_conf_t *conf = container_of(plug, raid5_conf_t, plug); | 3654 | raid5_conf_t *conf = container_of(plug, raid5_conf_t, plug); |
3693 | md_raid5_unplug_device(conf); | ||
3694 | } | ||
3695 | 3655 | ||
3696 | static void raid5_unplug_queue(struct request_queue *q) | 3656 | md_raid5_kick_device(conf); |
3697 | { | ||
3698 | mddev_t *mddev = q->queuedata; | ||
3699 | md_raid5_unplug_device(mddev->private); | ||
3700 | } | 3657 | } |
3701 | 3658 | ||
3702 | int md_raid5_congested(mddev_t *mddev, int bits) | 3659 | int md_raid5_congested(mddev_t *mddev, int bits) |
@@ -4100,7 +4057,7 @@ static int make_request(mddev_t *mddev, struct bio * bi) | |||
4100 | * add failed due to overlap. Flush everything | 4057 | * add failed due to overlap. Flush everything |
4101 | * and wait a while | 4058 | * and wait a while |
4102 | */ | 4059 | */ |
4103 | md_raid5_unplug_device(conf); | 4060 | md_raid5_kick_device(conf); |
4104 | release_stripe(sh); | 4061 | release_stripe(sh); |
4105 | schedule(); | 4062 | schedule(); |
4106 | goto retry; | 4063 | goto retry; |
@@ -4365,7 +4322,6 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski | |||
4365 | 4322 | ||
4366 | if (sector_nr >= max_sector) { | 4323 | if (sector_nr >= max_sector) { |
4367 | /* just being told to finish up .. nothing much to do */ | 4324 | /* just being told to finish up .. nothing much to do */ |
4368 | unplug_slaves(mddev); | ||
4369 | 4325 | ||
4370 | if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { | 4326 | if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { |
4371 | end_reshape(conf); | 4327 | end_reshape(conf); |
@@ -4569,7 +4525,6 @@ static void raid5d(mddev_t *mddev) | |||
4569 | spin_unlock_irq(&conf->device_lock); | 4525 | spin_unlock_irq(&conf->device_lock); |
4570 | 4526 | ||
4571 | async_tx_issue_pending_all(); | 4527 | async_tx_issue_pending_all(); |
4572 | unplug_slaves(mddev); | ||
4573 | 4528 | ||
4574 | pr_debug("--- raid5d inactive\n"); | 4529 | pr_debug("--- raid5d inactive\n"); |
4575 | } | 4530 | } |
@@ -5204,7 +5159,7 @@ static int run(mddev_t *mddev) | |||
5204 | 5159 | ||
5205 | mddev->queue->backing_dev_info.congested_data = mddev; | 5160 | mddev->queue->backing_dev_info.congested_data = mddev; |
5206 | mddev->queue->backing_dev_info.congested_fn = raid5_congested; | 5161 | mddev->queue->backing_dev_info.congested_fn = raid5_congested; |
5207 | mddev->queue->unplug_fn = raid5_unplug_queue; | 5162 | mddev->queue->queue_lock = &conf->device_lock; |
5208 | 5163 | ||
5209 | chunk_size = mddev->chunk_sectors << 9; | 5164 | chunk_size = mddev->chunk_sectors << 9; |
5210 | blk_queue_io_min(mddev->queue, chunk_size); | 5165 | blk_queue_io_min(mddev->queue, chunk_size); |
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index 2ace0582b409..8d563a4f022a 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h | |||
@@ -503,6 +503,6 @@ static inline int algorithm_is_DDF(int layout) | |||
503 | } | 503 | } |
504 | 504 | ||
505 | extern int md_raid5_congested(mddev_t *mddev, int bits); | 505 | extern int md_raid5_congested(mddev_t *mddev, int bits); |
506 | extern void md_raid5_unplug_device(raid5_conf_t *conf); | 506 | extern void md_raid5_kick_device(raid5_conf_t *conf); |
507 | extern int raid5_set_cache_size(mddev_t *mddev, int size); | 507 | extern int raid5_set_cache_size(mddev_t *mddev, int size); |
508 | #endif | 508 | #endif |
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c index f81c25d4a125..47ec5bc0ed21 100644 --- a/drivers/message/i2o/i2o_block.c +++ b/drivers/message/i2o/i2o_block.c | |||
@@ -897,11 +897,7 @@ static void i2o_block_request_fn(struct request_queue *q) | |||
897 | { | 897 | { |
898 | struct request *req; | 898 | struct request *req; |
899 | 899 | ||
900 | while (!blk_queue_plugged(q)) { | 900 | while ((req = blk_peek_request(q)) != NULL) { |
901 | req = blk_peek_request(q); | ||
902 | if (!req) | ||
903 | break; | ||
904 | |||
905 | if (req->cmd_type == REQ_TYPE_FS) { | 901 | if (req->cmd_type == REQ_TYPE_FS) { |
906 | struct i2o_block_delayed_request *dreq; | 902 | struct i2o_block_delayed_request *dreq; |
907 | struct i2o_block_request *ireq = req->special; | 903 | struct i2o_block_request *ireq = req->special; |
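The same mechanical conversion recurs in the dasd, tape_block and SCSI transport hunks below: the blk_queue_plugged() gate vanishes and the loop condition carries the whole test. For a driver that must be able to leave work on the queue, the peek-then-start shape looks roughly like this (everything prefixed my_ is an assumption):

#include <linux/blkdev.h>

/* Called with q->queue_lock held, as request_fn always is. */
static void my_request_fn(struct request_queue *q)
{
	struct request *req;

	while ((req = blk_peek_request(q)) != NULL) {
		if (!my_resources_available(q->queuedata))
			break;			/* req stays on the queue */
		blk_start_request(req);		/* dequeue it; now ours */
		my_issue(q->queuedata, req);
	}
}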
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c index 4e42d030e097..2ae727568df9 100644 --- a/drivers/mmc/card/queue.c +++ b/drivers/mmc/card/queue.c | |||
@@ -55,8 +55,7 @@ static int mmc_queue_thread(void *d) | |||
55 | 55 | ||
56 | spin_lock_irq(q->queue_lock); | 56 | spin_lock_irq(q->queue_lock); |
57 | set_current_state(TASK_INTERRUPTIBLE); | 57 | set_current_state(TASK_INTERRUPTIBLE); |
58 | if (!blk_queue_plugged(q)) | 58 | req = blk_fetch_request(q); |
59 | req = blk_fetch_request(q); | ||
60 | mq->req = req; | 59 | mq->req = req; |
61 | spin_unlock_irq(q->queue_lock); | 60 | spin_unlock_irq(q->queue_lock); |
62 | 61 | ||
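The mmc change shows the kthread flavour of the same conversion: fetch unconditionally, and let a NULL result, combined with the TASK_INTERRUPTIBLE state set beforehand, drive the sleep. A sketch of that skeleton, where blk_fetch_request() and the task-state calls are the real interfaces and my_process_request() is assumed:

	while (!kthread_should_stop()) {
		struct request *req;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		req = blk_fetch_request(q);	/* NULL when queue is empty */
		spin_unlock_irq(q->queue_lock);

		if (!req) {
			schedule();		/* woken when work arrives */
			continue;
		}
		__set_current_state(TASK_RUNNING);
		my_process_request(req);	/* assumed handler */
	}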
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 794bfd962266..4d2df2f76ea0 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c | |||
@@ -1917,7 +1917,7 @@ static void __dasd_process_request_queue(struct dasd_block *block) | |||
1917 | return; | 1917 | return; |
1918 | } | 1918 | } |
1919 | /* Now we try to fetch requests from the request queue */ | 1919 | /* Now we try to fetch requests from the request queue */ |
1920 | while (!blk_queue_plugged(queue) && (req = blk_peek_request(queue))) { | 1920 | while ((req = blk_peek_request(queue))) { |
1921 | if (basedev->features & DASD_FEATURE_READONLY && | 1921 | if (basedev->features & DASD_FEATURE_READONLY && |
1922 | rq_data_dir(req) == WRITE) { | 1922 | rq_data_dir(req) == WRITE) { |
1923 | DBF_DEV_EVENT(DBF_ERR, basedev, | 1923 | DBF_DEV_EVENT(DBF_ERR, basedev, |
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c index ad8431636012..83cea9a55e2f 100644 --- a/drivers/s390/char/tape_block.c +++ b/drivers/s390/char/tape_block.c | |||
@@ -161,7 +161,6 @@ tapeblock_requeue(struct work_struct *work) { | |||
161 | 161 | ||
162 | spin_lock_irq(&device->blk_data.request_queue_lock); | 162 | spin_lock_irq(&device->blk_data.request_queue_lock); |
163 | while ( | 163 | while ( |
164 | !blk_queue_plugged(queue) && | ||
165 | blk_peek_request(queue) && | 164 | blk_peek_request(queue) && |
166 | nr_queued < TAPEBLOCK_MIN_REQUEUE | 165 | nr_queued < TAPEBLOCK_MIN_REQUEUE |
167 | ) { | 166 | ) { |
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index fb2bb35c62cb..bf80a4c5a481 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
@@ -67,6 +67,13 @@ static struct scsi_host_sg_pool scsi_sg_pools[] = { | |||
67 | 67 | ||
68 | struct kmem_cache *scsi_sdb_cache; | 68 | struct kmem_cache *scsi_sdb_cache; |
69 | 69 | ||
70 | /* | ||
71 | * When to reinvoke queueing after a resource shortage. It's 3 msecs to | ||
72 | * not change behaviour from the previous unplug mechanism; experimentation | ||
73 | * may prove this needs changing. | ||
74 | */ | ||
75 | #define SCSI_QUEUE_DELAY 3 | ||
76 | |||
70 | static void scsi_run_queue(struct request_queue *q); | 77 | static void scsi_run_queue(struct request_queue *q); |
71 | 78 | ||
72 | /* | 79 | /* |
@@ -149,14 +156,7 @@ static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy) | |||
149 | /* | 156 | /* |
150 | * Requeue this command. It will go before all other commands | 157 | * Requeue this command. It will go before all other commands |
151 | * that are already in the queue. | 158 | * that are already in the queue. |
152 | * | 159 | */ |
153 | * NOTE: there is magic here about the way the queue is plugged if | ||
154 | * we have no outstanding commands. | ||
155 | * | ||
156 | * Although we *don't* plug the queue, we call the request | ||
157 | * function. The SCSI request function detects the blocked condition | ||
158 | * and plugs the queue appropriately. | ||
159 | */ | ||
160 | spin_lock_irqsave(q->queue_lock, flags); | 160 | spin_lock_irqsave(q->queue_lock, flags); |
161 | blk_requeue_request(q, cmd->request); | 161 | blk_requeue_request(q, cmd->request); |
162 | spin_unlock_irqrestore(q->queue_lock, flags); | 162 | spin_unlock_irqrestore(q->queue_lock, flags); |
@@ -1194,11 +1194,11 @@ int scsi_prep_return(struct request_queue *q, struct request *req, int ret) | |||
1194 | case BLKPREP_DEFER: | 1194 | case BLKPREP_DEFER: |
1195 | /* | 1195 | /* |
1196 | * If we defer, the blk_peek_request() returns NULL, but the | 1196 | * If we defer, the blk_peek_request() returns NULL, but the |
1197 | * queue must be restarted, so we plug here if no returning | 1197 | * queue must be restarted, so we schedule a callback to happen |
1198 | * command will automatically do that. | 1198 | * shortly. |
1199 | */ | 1199 | */ |
1200 | if (sdev->device_busy == 0) | 1200 | if (sdev->device_busy == 0) |
1201 | blk_plug_device(q); | 1201 | blk_delay_queue(q, SCSI_QUEUE_DELAY); |
1202 | break; | 1202 | break; |
1203 | default: | 1203 | default: |
1204 | req->cmd_flags |= REQ_DONTPREP; | 1204 | req->cmd_flags |= REQ_DONTPREP; |
@@ -1237,7 +1237,7 @@ static inline int scsi_dev_queue_ready(struct request_queue *q, | |||
1237 | sdev_printk(KERN_INFO, sdev, | 1237 | sdev_printk(KERN_INFO, sdev, |
1238 | "unblocking device at zero depth\n")); | 1238 | "unblocking device at zero depth\n")); |
1239 | } else { | 1239 | } else { |
1240 | blk_plug_device(q); | 1240 | blk_delay_queue(q, SCSI_QUEUE_DELAY); |
1241 | return 0; | 1241 | return 0; |
1242 | } | 1242 | } |
1243 | } | 1243 | } |
@@ -1467,7 +1467,7 @@ static void scsi_request_fn(struct request_queue *q) | |||
1467 | * the host is no longer able to accept any more requests. | 1467 | * the host is no longer able to accept any more requests. |
1468 | */ | 1468 | */ |
1469 | shost = sdev->host; | 1469 | shost = sdev->host; |
1470 | while (!blk_queue_plugged(q)) { | 1470 | for (;;) { |
1471 | int rtn; | 1471 | int rtn; |
1472 | /* | 1472 | /* |
1473 | * get next queueable request. We do this early to make sure | 1473 | * get next queueable request. We do this early to make sure |
@@ -1546,15 +1546,8 @@ static void scsi_request_fn(struct request_queue *q) | |||
1546 | */ | 1546 | */ |
1547 | rtn = scsi_dispatch_cmd(cmd); | 1547 | rtn = scsi_dispatch_cmd(cmd); |
1548 | spin_lock_irq(q->queue_lock); | 1548 | spin_lock_irq(q->queue_lock); |
1549 | if(rtn) { | 1549 | if (rtn) |
1550 | /* we're refusing the command; because of | 1550 | goto out_delay; |
1551 | * the way locks get dropped, we need to | ||
1552 | * check here if plugging is required */ | ||
1553 | if(sdev->device_busy == 0) | ||
1554 | blk_plug_device(q); | ||
1555 | |||
1556 | break; | ||
1557 | } | ||
1558 | } | 1551 | } |
1559 | 1552 | ||
1560 | goto out; | 1553 | goto out; |
@@ -1573,9 +1566,10 @@ static void scsi_request_fn(struct request_queue *q) | |||
1573 | spin_lock_irq(q->queue_lock); | 1566 | spin_lock_irq(q->queue_lock); |
1574 | blk_requeue_request(q, req); | 1567 | blk_requeue_request(q, req); |
1575 | sdev->device_busy--; | 1568 | sdev->device_busy--; |
1576 | if(sdev->device_busy == 0) | 1569 | out_delay: |
1577 | blk_plug_device(q); | 1570 | if (sdev->device_busy == 0) |
1578 | out: | 1571 | blk_delay_queue(q, SCSI_QUEUE_DELAY); |
1572 | out: | ||
1579 | /* must be careful here...if we trigger the ->remove() function | 1573 | /* must be careful here...if we trigger the ->remove() function |
1580 | * we cannot be holding the q lock */ | 1574 | * we cannot be holding the q lock */ |
1581 | spin_unlock_irq(q->queue_lock); | 1575 | spin_unlock_irq(q->queue_lock); |
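blk_delay_queue() is the primitive now doing the work blk_plug_device() used to do on these requeue paths: it arranges for the queue's request_fn to run again after a timer instead of waiting for an unplug event. The shortage pattern scsi follows, reduced to a sketch (struct my_dev and its fields are assumptions; the 3 ms value mirrors SCSI_QUEUE_DELAY above):

#include <linux/blkdev.h>

#define MY_QUEUE_DELAY	3	/* msecs, as SCSI_QUEUE_DELAY above */

struct my_dev {
	unsigned int in_flight;
	unsigned int queue_depth;
};

static int my_dev_ready(struct request_queue *q, struct my_dev *dev)
{
	if (dev->in_flight >= dev->queue_depth) {
		/* out of resources: ask the block layer to call our
		 * request_fn again in a few milliseconds */
		blk_delay_queue(q, MY_QUEUE_DELAY);
		return 0;
	}
	return 1;
}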
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 5c3ccfc6b622..2941d2d92c94 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c | |||
@@ -3913,7 +3913,7 @@ fc_bsg_request_handler(struct request_queue *q, struct Scsi_Host *shost, | |||
3913 | if (!get_device(dev)) | 3913 | if (!get_device(dev)) |
3914 | return; | 3914 | return; |
3915 | 3915 | ||
3916 | while (!blk_queue_plugged(q)) { | 3916 | while (1) { |
3917 | if (rport && (rport->port_state == FC_PORTSTATE_BLOCKED) && | 3917 | if (rport && (rport->port_state == FC_PORTSTATE_BLOCKED) && |
3918 | !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT)) | 3918 | !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT)) |
3919 | break; | 3919 | break; |
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c index 927e99cb7225..c6fcf76cade5 100644 --- a/drivers/scsi/scsi_transport_sas.c +++ b/drivers/scsi/scsi_transport_sas.c | |||
@@ -173,11 +173,7 @@ static void sas_smp_request(struct request_queue *q, struct Scsi_Host *shost, | |||
173 | int ret; | 173 | int ret; |
174 | int (*handler)(struct Scsi_Host *, struct sas_rphy *, struct request *); | 174 | int (*handler)(struct Scsi_Host *, struct sas_rphy *, struct request *); |
175 | 175 | ||
176 | while (!blk_queue_plugged(q)) { | 176 | while ((req = blk_fetch_request(q)) != NULL) { |
177 | req = blk_fetch_request(q); | ||
178 | if (!req) | ||
179 | break; | ||
180 | |||
181 | spin_unlock_irq(q->queue_lock); | 177 | spin_unlock_irq(q->queue_lock); |
182 | 178 | ||
183 | handler = to_sas_internal(shost->transportt)->f->smp_handler; | 179 | handler = to_sas_internal(shost->transportt)->f->smp_handler; |
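Where the handler always consumes what it takes, blk_fetch_request() (a peek plus a start in one call) gives the tighter loop used here; note the lock dance, since the handler may sleep. Sketch with an assumed handler:

	struct request *req;

	/* q->queue_lock is held on entry to request_fn */
	while ((req = blk_fetch_request(q)) != NULL) {
		spin_unlock_irq(q->queue_lock);
		my_handle_request(req);		/* assumed; may sleep */
		spin_lock_irq(q->queue_lock);
	}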
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 67f0c09983c8..c1b539d7b0d3 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c | |||
@@ -392,9 +392,8 @@ static int iblock_do_task(struct se_task *task) | |||
392 | { | 392 | { |
393 | struct se_device *dev = task->task_se_cmd->se_dev; | 393 | struct se_device *dev = task->task_se_cmd->se_dev; |
394 | struct iblock_req *req = IBLOCK_REQ(task); | 394 | struct iblock_req *req = IBLOCK_REQ(task); |
395 | struct iblock_dev *ibd = (struct iblock_dev *)req->ib_dev; | ||
396 | struct request_queue *q = bdev_get_queue(ibd->ibd_bd); | ||
397 | struct bio *bio = req->ib_bio, *nbio = NULL; | 395 | struct bio *bio = req->ib_bio, *nbio = NULL; |
396 | struct blk_plug plug; | ||
398 | int rw; | 397 | int rw; |
399 | 398 | ||
400 | if (task->task_data_direction == DMA_TO_DEVICE) { | 399 | if (task->task_data_direction == DMA_TO_DEVICE) { |
@@ -412,6 +411,7 @@ static int iblock_do_task(struct se_task *task) | |||
412 | rw = READ; | 411 | rw = READ; |
413 | } | 412 | } |
414 | 413 | ||
414 | blk_start_plug(&plug); | ||
415 | while (bio) { | 415 | while (bio) { |
416 | nbio = bio->bi_next; | 416 | nbio = bio->bi_next; |
417 | bio->bi_next = NULL; | 417 | bio->bi_next = NULL; |
@@ -421,9 +421,8 @@ static int iblock_do_task(struct se_task *task) | |||
421 | submit_bio(rw, bio); | 421 | submit_bio(rw, bio); |
422 | bio = nbio; | 422 | bio = nbio; |
423 | } | 423 | } |
424 | blk_finish_plug(&plug); | ||
424 | 425 | ||
425 | if (q->unplug_fn) | ||
426 | q->unplug_fn(q); | ||
427 | return PYX_TRANSPORT_SENT_TO_TRANSPORT; | 426 | return PYX_TRANSPORT_SENT_TO_TRANSPORT; |
428 | } | 427 | } |
429 | 428 | ||
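This last hunk is the template for converted bio submitters in general: wrap the submission loop in an on-stack plug and drop the explicit unplug_fn call, since the queue no longer carries one. The same shape as a self-contained sketch (blk_start_plug/blk_finish_plug/submit_bio are the real interfaces; the rest is assumed):

#include <linux/bio.h>
#include <linux/blkdev.h>

static void submit_bio_chain(int rw, struct bio *bio)
{
	struct blk_plug plug;

	blk_start_plug(&plug);
	while (bio) {
		struct bio *next = bio->bi_next;

		bio->bi_next = NULL;
		submit_bio(rw, bio);	/* batched on current->plug */
		bio = next;
	}
	blk_finish_plug(&plug);		/* flushes the whole batch */
}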