author		Jens Axboe <jaxboe@fusionio.com>	2011-03-10 02:58:35 -0500
committer	Jens Axboe <jaxboe@fusionio.com>	2011-03-10 02:58:35 -0500
commit		4c63f5646e405b5010cc9499419060bf2e838f5b
tree		df91ba315032c8ec4aafeb3ab96fdfa7c6c656e1
parent		cafb0bfca1a73efd6d8a4a6a6a716e6134b96c24
parent		69d60eb96ae8a73cf9b79cf28051caf973006011
Merge branch 'for-2.6.39/stack-plug' into for-2.6.39/core
Conflicts:
	block/blk-core.c
	block/blk-flush.c
	drivers/md/raid1.c
	drivers/md/raid10.c
	drivers/md/raid5.c
	fs/nilfs2/btnode.c
	fs/nilfs2/mdt.c

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
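For reference: the stack-plug series merged here replaces per-queue plugging (blk_plug_device()/blk_remove_plug()/q->unplug_fn) with a plug held on the submitting task's stack, which is why every driver-side plug hook below simply disappears. A minimal sketch of the new submission pattern, assuming the 2.6.39 block API as merged (submit_write_batch is an illustrative name, not part of this diff):

	#include <linux/bio.h>
	#include <linux/blkdev.h>

	/* Batch several bios behind one on-stack plug.  The batch is
	 * flushed to the queue(s) at blk_finish_plug(), or earlier if
	 * the task sleeps -- no per-queue unplug_fn is involved. */
	static void submit_write_batch(struct bio **bios, int n)
	{
		struct blk_plug plug;
		int i;

		blk_start_plug(&plug);
		for (i = 0; i < n; i++)
			submit_bio(WRITE, bios[i]);
		blk_finish_plug(&plug);
	}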
Diffstat (limited to 'drivers/block')
-rw-r--r--	drivers/block/cciss.c			6
-rw-r--r--	drivers/block/cpqarray.c		3
-rw-r--r--	drivers/block/drbd/drbd_actlog.c	4
-rw-r--r--	drivers/block/drbd/drbd_bitmap.c	1
-rw-r--r--	drivers/block/drbd/drbd_int.h		16
-rw-r--r--	drivers/block/drbd/drbd_main.c		36
-rw-r--r--	drivers/block/drbd/drbd_receiver.c	29
-rw-r--r--	drivers/block/drbd/drbd_req.c		4
-rw-r--r--	drivers/block/drbd/drbd_worker.c	1
-rw-r--r--	drivers/block/drbd/drbd_wrappers.h	18
-rw-r--r--	drivers/block/floppy.c			1
-rw-r--r--	drivers/block/loop.c			13
-rw-r--r--	drivers/block/pktcdvd.c			2
-rw-r--r--	drivers/block/umem.c			16
14 files changed, 7 insertions(+), 143 deletions(-)
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 9279272b3732..35658f445fca 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -3170,12 +3170,6 @@ static void do_cciss_request(struct request_queue *q)
 	int sg_index = 0;
 	int chained = 0;
 
-	/* We call start_io here in case there is a command waiting on the
-	 * queue that has not been sent.
-	 */
-	if (blk_queue_plugged(q))
-		goto startio;
-
 queue:
 	creq = blk_peek_request(q);
 	if (!creq)
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index 946dad4caef3..b2fceb53e809 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -911,9 +911,6 @@ static void do_ida_request(struct request_queue *q)
 	struct scatterlist tmp_sg[SG_MAX];
 	int i, dir, seg;
 
-	if (blk_queue_plugged(q))
-		goto startio;
-
 queue_next:
 	creq = blk_peek_request(q);
 	if (!creq)
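Both hunks above drop the same idiom: an early exit to startio when the queue was plugged. With on-stack plugging there is no per-queue plug state left to test (blk_queue_plugged() no longer exists), so a request_fn can process work unconditionally. A minimal sketch, again assuming the 2.6.39 block API (demo_request_fn and the hardware hand-off are illustrative):

	static void demo_request_fn(struct request_queue *q)
	{
		struct request *rq;

		/* No blk_queue_plugged() early-out: by the time the block
		 * core invokes the request_fn, any on-stack plug covering
		 * these requests has already been flushed. */
		while ((rq = blk_peek_request(q)) != NULL) {
			blk_start_request(rq);
			/* ... hand rq to the hardware ... */
		}
	}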
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index ba95cba192be..aca302492ff2 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -80,7 +80,7 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
 
 	if ((rw & WRITE) && !test_bit(MD_NO_FUA, &mdev->flags))
 		rw |= REQ_FUA;
-	rw |= REQ_UNPLUG | REQ_SYNC;
+	rw |= REQ_SYNC;
 
 	bio = bio_alloc(GFP_NOIO, 1);
 	bio->bi_bdev = bdev->md_bdev;
@@ -689,8 +689,6 @@ void drbd_al_to_on_disk_bm(struct drbd_conf *mdev)
 		}
 	}
 
-	drbd_blk_run_queue(bdev_get_queue(mdev->ldev->md_bdev));
-
 	/* always (try to) flush bitmap to stable storage */
 	drbd_md_flush(mdev);
 
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index fd42832f785b..0645ca829a94 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -840,7 +840,6 @@ static int bm_rw(struct drbd_conf *mdev, int rw) __must_hold(local)
 	for (i = 0; i < num_pages; i++)
 		bm_page_io_async(mdev, b, i, rw);
 
-	drbd_blk_run_queue(bdev_get_queue(mdev->ldev->md_bdev));
 	wait_event(b->bm_io_wait, atomic_read(&b->bm_async_io) == 0);
 
 	if (test_bit(BM_MD_IO_ERROR, &b->bm_flags)) {
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 3803a0348937..b0bd27dfc1e8 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -377,7 +377,7 @@ union p_header {
 #define DP_HARDBARRIER	      1 /* depricated */
 #define DP_RW_SYNC	      2 /* equals REQ_SYNC    */
 #define DP_MAY_SET_IN_SYNC    4
-#define DP_UNPLUG             8 /* equals REQ_UNPLUG  */
+#define DP_UNPLUG             8 /* not used anymore   */
 #define DP_FUA               16 /* equals REQ_FUA     */
 #define DP_FLUSH             32 /* equals REQ_FLUSH   */
 #define DP_DISCARD           64 /* equals REQ_DISCARD */
@@ -2382,20 +2382,6 @@ static inline int drbd_queue_order_type(struct drbd_conf *mdev)
 	return QUEUE_ORDERED_NONE;
 }
 
-static inline void drbd_blk_run_queue(struct request_queue *q)
-{
-	if (q && q->unplug_fn)
-		q->unplug_fn(q);
-}
-
-static inline void drbd_kick_lo(struct drbd_conf *mdev)
-{
-	if (get_ldev(mdev)) {
-		drbd_blk_run_queue(bdev_get_queue(mdev->ldev->backing_bdev));
-		put_ldev(mdev);
-	}
-}
-
 static inline void drbd_md_flush(struct drbd_conf *mdev)
 {
 	int r;
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 29cd0dc9fe4f..8a43ce0edeed 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2477,12 +2477,11 @@ static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
 {
 	if (mdev->agreed_pro_version >= 95)
 		return  (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
-			(bi_rw & REQ_UNPLUG ? DP_UNPLUG : 0) |
 			(bi_rw & REQ_FUA ? DP_FUA : 0) |
 			(bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
 			(bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
 	else
-		return bi_rw & (REQ_SYNC | REQ_UNPLUG) ? DP_RW_SYNC : 0;
+		return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
 }
 
 /* Used to send write requests
@@ -2719,35 +2718,6 @@ static int drbd_release(struct gendisk *gd, fmode_t mode)
 	return 0;
 }
 
-static void drbd_unplug_fn(struct request_queue *q)
-{
-	struct drbd_conf *mdev = q->queuedata;
-
-	/* unplug FIRST */
-	spin_lock_irq(q->queue_lock);
-	blk_remove_plug(q);
-	spin_unlock_irq(q->queue_lock);
-
-	/* only if connected */
-	spin_lock_irq(&mdev->req_lock);
-	if (mdev->state.pdsk >= D_INCONSISTENT && mdev->state.conn >= C_CONNECTED) {
-		D_ASSERT(mdev->state.role == R_PRIMARY);
-		if (test_and_clear_bit(UNPLUG_REMOTE, &mdev->flags)) {
-			/* add to the data.work queue,
-			 * unless already queued.
-			 * XXX this might be a good addition to drbd_queue_work
-			 * anyways, to detect "double queuing" ... */
-			if (list_empty(&mdev->unplug_work.list))
-				drbd_queue_work(&mdev->data.work,
-						&mdev->unplug_work);
-		}
-	}
-	spin_unlock_irq(&mdev->req_lock);
-
-	if (mdev->state.disk >= D_INCONSISTENT)
-		drbd_kick_lo(mdev);
-}
-
 static void drbd_set_defaults(struct drbd_conf *mdev)
 {
 	/* This way we get a compile error when sync_conf grows,
@@ -3222,9 +3192,7 @@ struct drbd_conf *drbd_new_device(unsigned int minor)
 	blk_queue_max_segment_size(q, DRBD_MAX_SEGMENT_SIZE);
 	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
 	blk_queue_merge_bvec(q, drbd_merge_bvec);
-	q->queue_lock = &mdev->req_lock; /* needed since we use */
-	/* plugging on a queue, that actually has no requests! */
-	q->unplug_fn = drbd_unplug_fn;
+	q->queue_lock = &mdev->req_lock;
 
 	mdev->md_io_page = alloc_page(GFP_KERNEL);
 	if (!mdev->md_io_page)
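In the bio_flags_to_wire() hunk above, the wire mapping narrows: REQ_UNPLUG has no on-the-wire meaning any more, so DP_UNPLUG (bit 8) is never set but stays reserved for compatibility with older peers (see the drbd_int.h hunk). A sketch of the resulting protocol->=95 sender mapping, assuming the DP_* values from drbd_int.h (demo_flags_to_wire is illustrative):

	static u32 demo_flags_to_wire(unsigned long bi_rw)
	{
		/* DP_UNPLUG (8) is intentionally absent. */
		return (bi_rw & REQ_SYNC    ? DP_RW_SYNC : 0) |
		       (bi_rw & REQ_FUA     ? DP_FUA     : 0) |
		       (bi_rw & REQ_FLUSH   ? DP_FLUSH   : 0) |
		       (bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
	}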
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 24487d4fb202..8e68be939deb 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -187,15 +187,6 @@ static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int
 	return NULL;
 }
 
-/* kick lower level device, if we have more than (arbitrary number)
- * reference counts on it, which typically are locally submitted io
- * requests. don't use unacked_cnt, so we speed up proto A and B, too. */
-static void maybe_kick_lo(struct drbd_conf *mdev)
-{
-	if (atomic_read(&mdev->local_cnt) >= mdev->net_conf->unplug_watermark)
-		drbd_kick_lo(mdev);
-}
-
 static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed)
 {
 	struct drbd_epoch_entry *e;
@@ -219,7 +210,6 @@ static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
 	LIST_HEAD(reclaimed);
 	struct drbd_epoch_entry *e, *t;
 
-	maybe_kick_lo(mdev);
 	spin_lock_irq(&mdev->req_lock);
 	reclaim_net_ee(mdev, &reclaimed);
 	spin_unlock_irq(&mdev->req_lock);
@@ -436,8 +426,7 @@ void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
 	while (!list_empty(head)) {
 		prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
 		spin_unlock_irq(&mdev->req_lock);
-		drbd_kick_lo(mdev);
-		schedule();
+		io_schedule();
 		finish_wait(&mdev->ee_wait, &wait);
 		spin_lock_irq(&mdev->req_lock);
 	}
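The drbd_kick_lo() + schedule() pair above collapses into a single io_schedule(): with on-stack plugging, the scheduler flushes the sleeping task's block plug on its way to sleep, so pending bios reach the backing device without an explicit queue kick, and the sleep is accounted as I/O wait. The wait pattern, sketched with placeholder names (wq, wait, condition):

	prepare_to_wait(&wq, &wait, TASK_UNINTERRUPTIBLE);
	if (!condition)
		io_schedule();	/* flushes current->plug, then sleeps */
	finish_wait(&wq, &wait);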
@@ -1111,8 +1100,6 @@ next_bio:
 	/* > e->sector, unless this is the first bio */
 	bio->bi_sector = sector;
 	bio->bi_bdev = mdev->ldev->backing_bdev;
-	/* we special case some flags in the multi-bio case, see below
-	 * (REQ_UNPLUG) */
 	bio->bi_rw = rw;
 	bio->bi_private = e;
 	bio->bi_end_io = drbd_endio_sec;
@@ -1141,13 +1128,8 @@ next_bio:
 		bios = bios->bi_next;
 		bio->bi_next = NULL;
 
-		/* strip off REQ_UNPLUG unless it is the last bio */
-		if (bios)
-			bio->bi_rw &= ~REQ_UNPLUG;
-
 		drbd_generic_make_request(mdev, fault_type, bio);
 	} while (bios);
-	maybe_kick_lo(mdev);
 	return 0;
 
 fail:
@@ -1167,9 +1149,6 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
 
 	inc_unacked(mdev);
 
-	if (mdev->net_conf->wire_protocol != DRBD_PROT_C)
-		drbd_kick_lo(mdev);
-
 	mdev->current_epoch->barrier_nr = p->barrier;
 	rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);
 
@@ -1636,12 +1615,11 @@ static unsigned long write_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
 {
 	if (mdev->agreed_pro_version >= 95)
 		return  (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
-			(dpf & DP_UNPLUG ? REQ_UNPLUG : 0) |
 			(dpf & DP_FUA ? REQ_FUA : 0) |
 			(dpf & DP_FLUSH ? REQ_FUA : 0) |
 			(dpf & DP_DISCARD ? REQ_DISCARD : 0);
 	else
-		return dpf & DP_RW_SYNC ? (REQ_SYNC | REQ_UNPLUG) : 0;
+		return dpf & DP_RW_SYNC ? REQ_SYNC : 0;
 }
 
 /* mirrored write */
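One oddity carried through unchanged in write_flags_to_bio(): DP_FLUSH is mapped to REQ_FUA, although drbd_int.h documents DP_FLUSH as "equals REQ_FLUSH". Assuming that comment reflects the intent, the line would presumably read as below; this merge does not touch it either way:

	(dpf & DP_FLUSH ? REQ_FLUSH : 0) |	/* presumed intent */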
@@ -3556,9 +3534,6 @@ static int receive_skip(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 
 static int receive_UnplugRemote(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
 {
-	if (mdev->state.disk >= D_INCONSISTENT)
-		drbd_kick_lo(mdev);
-
 	/* Make sure we've acked all the TCP data associated
 	 * with the data requests being unplugged */
 	drbd_tcp_quickack(mdev->data.socket);
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 11a75d32a2e2..ad3fc6228f27 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -960,10 +960,6 @@ allocate_barrier:
 		bio_endio(req->private_bio, -EIO);
 	}
 
-	/* we need to plug ALWAYS since we possibly need to kick lo_dev.
-	 * we plug after submit, so we won't miss an unplug event */
-	drbd_plug_device(mdev);
-
 	return 0;
 
 fail_conflicting:
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 34f224b018b3..e027446590d3 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -792,7 +792,6 @@ int drbd_resync_finished(struct drbd_conf *mdev)
 	 * queue (or even the read operations for those packets
 	 * is not finished by now). Retry in 100ms. */
 
-	drbd_kick_lo(mdev);
 	__set_current_state(TASK_INTERRUPTIBLE);
 	schedule_timeout(HZ / 10);
 	w = kmalloc(sizeof(struct drbd_work), GFP_ATOMIC);
diff --git a/drivers/block/drbd/drbd_wrappers.h b/drivers/block/drbd/drbd_wrappers.h
index defdb5013ea3..53586fa5ae1b 100644
--- a/drivers/block/drbd/drbd_wrappers.h
+++ b/drivers/block/drbd/drbd_wrappers.h
@@ -45,24 +45,6 @@ static inline void drbd_generic_make_request(struct drbd_conf *mdev,
 	generic_make_request(bio);
 }
 
-static inline void drbd_plug_device(struct drbd_conf *mdev)
-{
-	struct request_queue *q;
-	q = bdev_get_queue(mdev->this_bdev);
-
-	spin_lock_irq(q->queue_lock);
-
-/* XXX the check on !blk_queue_plugged is redundant,
- * implicitly checked in blk_plug_device */
-
-	if (!blk_queue_plugged(q)) {
-		blk_plug_device(q);
-		del_timer(&q->unplug_timer);
-		/* unplugging should not happen automatically... */
-	}
-	spin_unlock_irq(q->queue_lock);
-}
-
 static inline int drbd_crypto_is_hash(struct crypto_tfm *tfm)
 {
 	return (crypto_tfm_alg_type(tfm) & CRYPTO_ALG_TYPE_HASH_MASK)
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 3851dbc03e05..301d7a9a41a6 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3838,7 +3838,6 @@ static int __floppy_read_block_0(struct block_device *bdev)
 	bio.bi_end_io = floppy_rb0_complete;
 
 	submit_bio(READ, &bio);
-	generic_unplug_device(bdev_get_queue(bdev));
 	process_fd_request();
 	wait_for_completion(&complete);
 
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 79c3079a6203..a076a14ca72d 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -540,17 +540,6 @@ out:
 	return 0;
 }
 
-/*
- * kick off io on the underlying address space
- */
-static void loop_unplug(struct request_queue *q)
-{
-	struct loop_device *lo = q->queuedata;
-
-	queue_flag_clear_unlocked(QUEUE_FLAG_PLUGGED, q);
-	blk_run_address_space(lo->lo_backing_file->f_mapping);
-}
-
 struct switch_request {
 	struct file *file;
 	struct completion wait;
@@ -917,7 +906,6 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
 	 */
 	blk_queue_make_request(lo->lo_queue, loop_make_request);
 	lo->lo_queue->queuedata = lo;
-	lo->lo_queue->unplug_fn = loop_unplug;
 
 	if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
 		blk_queue_flush(lo->lo_queue, REQ_FLUSH);
@@ -1019,7 +1007,6 @@ static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
 
 	kthread_stop(lo->lo_thread);
 
-	lo->lo_queue->unplug_fn = NULL;
 	lo->lo_backing_file = NULL;
 
 	loop_release_xfer(lo);
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index a077db27b7c9..07a382eaf0a8 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -1606,8 +1606,6 @@ static int kcdrwd(void *foobar)
 				min_sleep_time = pkt->sleep_time;
 		}
 
-		generic_unplug_device(bdev_get_queue(pd->bdev));
-
 		VPRINTK("kcdrwd: sleeping\n");
 		residue = schedule_timeout(min_sleep_time);
 		VPRINTK("kcdrwd: wake up\n");
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index 9b102abe209c..031ca720d926 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -241,8 +241,7 @@ static void dump_dmastat(struct cardinfo *card, unsigned int dmastat)
  *
  * Whenever IO on the active page completes, the Ready page is activated
  * and the ex-Active page is clean out and made Ready.
- * Otherwise the Ready page is only activated when it becomes full, or
- * when mm_unplug_device is called via the unplug_io_fn.
+ * Otherwise the Ready page is only activated when it becomes full.
  *
  * If a request arrives while both pages a full, it is queued, and b_rdev is
  * overloaded to record whether it was a read or a write.
@@ -333,17 +332,6 @@ static inline void reset_page(struct mm_page *page)
 	page->biotail = &page->bio;
 }
 
-static void mm_unplug_device(struct request_queue *q)
-{
-	struct cardinfo *card = q->queuedata;
-	unsigned long flags;
-
-	spin_lock_irqsave(&card->lock, flags);
-	if (blk_remove_plug(q))
-		activate(card);
-	spin_unlock_irqrestore(&card->lock, flags);
-}
-
 /*
  * If there is room on Ready page, take
  * one bh off list and add it.
@@ -535,7 +523,6 @@ static int mm_make_request(struct request_queue *q, struct bio *bio)
 	*card->biotail = bio;
 	bio->bi_next = NULL;
 	card->biotail = &bio->bi_next;
-	blk_plug_device(q);
 	spin_unlock_irq(&card->lock);
 
 	return 0;
@@ -897,7 +884,6 @@ static int __devinit mm_pci_probe(struct pci_dev *dev,
 	blk_queue_make_request(card->queue, mm_make_request);
 	card->queue->queue_lock = &card->lock;
 	card->queue->queuedata = card;
-	card->queue->unplug_fn = mm_unplug_device;
 
 	tasklet_init(&card->tasklet, process_page, (unsigned long)card);
 