Diffstat (limited to 'drivers')
-rw-r--r--  drivers/block/DAC960.c | 8
-rw-r--r--  drivers/block/amiflop.c | 9
-rw-r--r--  drivers/block/ataflop.c | 14
-rw-r--r--  drivers/block/cciss.c | 6
-rw-r--r--  drivers/block/cpqarray.c | 3
-rw-r--r--  drivers/block/drbd/drbd_actlog.c | 4
-rw-r--r--  drivers/block/drbd/drbd_bitmap.c | 1
-rw-r--r--  drivers/block/drbd/drbd_int.h | 16
-rw-r--r--  drivers/block/drbd/drbd_main.c | 36
-rw-r--r--  drivers/block/drbd/drbd_receiver.c | 29
-rw-r--r--  drivers/block/drbd/drbd_req.c | 4
-rw-r--r--  drivers/block/drbd/drbd_worker.c | 1
-rw-r--r--  drivers/block/drbd/drbd_wrappers.h | 18
-rw-r--r--  drivers/block/floppy.c | 11
-rw-r--r--  drivers/block/loop.c | 16
-rw-r--r--  drivers/block/paride/pcd.c | 18
-rw-r--r--  drivers/block/paride/pd.c | 7
-rw-r--r--  drivers/block/paride/pf.c | 10
-rw-r--r--  drivers/block/pktcdvd.c | 15
-rw-r--r--  drivers/block/swim.c | 8
-rw-r--r--  drivers/block/swim3.c | 11
-rw-r--r--  drivers/block/ub.c | 10
-rw-r--r--  drivers/block/umem.c | 26
-rw-r--r--  drivers/block/xsysace.c | 9
-rw-r--r--  drivers/cdrom/gdrom.c | 16
-rw-r--r--  drivers/cdrom/viocd.c | 17
-rw-r--r--  drivers/ide/ide-atapi.c | 3
-rw-r--r--  drivers/ide/ide-cd.c | 23
-rw-r--r--  drivers/ide/ide-cd.h | 3
-rw-r--r--  drivers/ide/ide-cd_ioctl.c | 8
-rw-r--r--  drivers/ide/ide-gd.c | 14
-rw-r--r--  drivers/ide/ide-io.c | 4
-rw-r--r--  drivers/ide/ide-park.c | 2
-rw-r--r--  drivers/md/bitmap.c | 5
-rw-r--r--  drivers/md/dm-crypt.c | 9
-rw-r--r--  drivers/md/dm-io.c | 2
-rw-r--r--  drivers/md/dm-kcopyd.c | 55
-rw-r--r--  drivers/md/dm-raid.c | 2
-rw-r--r--  drivers/md/dm-raid1.c | 2
-rw-r--r--  drivers/md/dm-table.c | 31
-rw-r--r--  drivers/md/dm.c | 52
-rw-r--r--  drivers/md/dm.h | 2
-rw-r--r--  drivers/md/linear.c | 20
-rw-r--r--  drivers/md/md.c | 20
-rw-r--r--  drivers/md/multipath.c | 38
-rw-r--r--  drivers/md/raid0.c | 19
-rw-r--r--  drivers/md/raid1.c | 91
-rw-r--r--  drivers/md/raid10.c | 97
-rw-r--r--  drivers/md/raid5.c | 63
-rw-r--r--  drivers/md/raid5.h | 2
-rw-r--r--  drivers/message/i2o/i2o_block.c | 17
-rw-r--r--  drivers/mmc/card/queue.c | 3
-rw-r--r--  drivers/s390/block/dasd.c | 2
-rw-r--r--  drivers/s390/char/tape_block.c | 12
-rw-r--r--  drivers/scsi/scsi_lib.c | 44
-rw-r--r--  drivers/scsi/scsi_transport_fc.c | 2
-rw-r--r--  drivers/scsi/scsi_transport_sas.c | 6
-rw-r--r--  drivers/staging/hv/blkvsc_drv.c | 11
-rw-r--r--  drivers/staging/westbridge/astoria/block/cyasblkdev_block.c | 11
-rw-r--r--  drivers/target/target_core_iblock.c | 7
60 files changed, 284 insertions(+), 721 deletions(-)
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 1f286ab461d3..79882104e431 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -140,13 +140,14 @@ static int DAC960_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 	return 0;
 }
 
-static int DAC960_media_changed(struct gendisk *disk)
+static unsigned int DAC960_check_events(struct gendisk *disk,
+					unsigned int clearing)
 {
 	DAC960_Controller_T *p = disk->queue->queuedata;
 	int drive_nr = (long)disk->private_data;
 
 	if (!p->LogicalDriveInitiallyAccessible[drive_nr])
-		return 1;
+		return DISK_EVENT_MEDIA_CHANGE;
 	return 0;
 }
 
@@ -163,7 +164,7 @@ static const struct block_device_operations DAC960_BlockDeviceOperations = {
 	.owner = THIS_MODULE,
 	.open = DAC960_open,
 	.getgeo = DAC960_getgeo,
-	.media_changed = DAC960_media_changed,
+	.check_events = DAC960_check_events,
 	.revalidate_disk = DAC960_revalidate_disk,
 };
 
@@ -2546,6 +2547,7 @@ static bool DAC960_RegisterBlockDevice(DAC960_Controller_T *Controller)
 		disk->major = MajorNumber;
 		disk->first_minor = n << DAC960_MaxPartitionsBits;
 		disk->fops = &DAC960_BlockDeviceOperations;
+		disk->events = DISK_EVENT_MEDIA_CHANGE;
 	}
 	/*
 	  Indicate the Block Device Registration completed successfully,
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 363855ca376e..456c0cc90dcf 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1658,12 +1658,12 @@ static int floppy_release(struct gendisk *disk, fmode_t mode)
 }
 
 /*
- * floppy-change is never called from an interrupt, so we can relax a bit
+ * check_events is never called from an interrupt, so we can relax a bit
  * here, sleep etc. Note that floppy-on tries to set current_DOR to point
  * to the desired drive, but it will probably not survive the sleep if
  * several floppies are used at the same time: thus the loop.
  */
-static int amiga_floppy_change(struct gendisk *disk)
+static unsigned amiga_check_events(struct gendisk *disk, unsigned int clearing)
 {
 	struct amiga_floppy_struct *p = disk->private_data;
 	int drive = p - unit;
@@ -1686,7 +1686,7 @@ static int amiga_floppy_change(struct gendisk *disk)
 		p->dirty = 0;
 		writepending = 0; /* if this was true before, too bad! */
 		writefromint = 0;
-		return 1;
+		return DISK_EVENT_MEDIA_CHANGE;
 	}
 	return 0;
 }
@@ -1697,7 +1697,7 @@ static const struct block_device_operations floppy_fops = {
 	.release = floppy_release,
 	.ioctl = fd_ioctl,
 	.getgeo = fd_getgeo,
-	.media_changed = amiga_floppy_change,
+	.check_events = amiga_check_events,
 };
 
 static int __init fd_probe_drives(void)
@@ -1736,6 +1736,7 @@ static int __init fd_probe_drives(void)
 		disk->major = FLOPPY_MAJOR;
 		disk->first_minor = drive;
 		disk->fops = &floppy_fops;
+		disk->events = DISK_EVENT_MEDIA_CHANGE;
 		sprintf(disk->disk_name, "fd%d", drive);
 		disk->private_data = &unit[drive];
 		set_capacity(disk, 880*2);
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index 605a67e40bbf..c871eae14120 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -1324,23 +1324,24 @@ static void finish_fdc_done( int dummy )
  * due to unrecognised disk changes.
  */
 
-static int check_floppy_change(struct gendisk *disk)
+static unsigned int floppy_check_events(struct gendisk *disk,
+					unsigned int clearing)
 {
 	struct atari_floppy_struct *p = disk->private_data;
 	unsigned int drive = p - unit;
 	if (test_bit (drive, &fake_change)) {
 		/* simulated change (e.g. after formatting) */
-		return 1;
+		return DISK_EVENT_MEDIA_CHANGE;
 	}
 	if (test_bit (drive, &changed_floppies)) {
 		/* surely changed (the WP signal changed at least once) */
-		return 1;
+		return DISK_EVENT_MEDIA_CHANGE;
 	}
 	if (UD.wpstat) {
 		/* WP is on -> could be changed: to be sure, buffers should be
 		 * invalidated...
 		 */
-		return 1;
+		return DISK_EVENT_MEDIA_CHANGE;
 	}
 
 	return 0;
@@ -1570,7 +1571,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode,
 		 * or the next access will revalidate - and clear UDT :-(
 		 */
 
-		if (check_floppy_change(disk))
+		if (floppy_check_events(disk, 0))
 			floppy_revalidate(disk);
 
 		if (UD.flags & FTD_MSG)
@@ -1904,7 +1905,7 @@ static const struct block_device_operations floppy_fops = {
 	.open = floppy_unlocked_open,
 	.release = floppy_release,
 	.ioctl = fd_ioctl,
-	.media_changed = check_floppy_change,
+	.check_events = floppy_check_events,
 	.revalidate_disk= floppy_revalidate,
 };
 
@@ -1963,6 +1964,7 @@ static int __init atari_floppy_init (void)
 		unit[i].disk->first_minor = i;
 		sprintf(unit[i].disk->disk_name, "fd%d", i);
 		unit[i].disk->fops = &floppy_fops;
+		unit[i].disk->events = DISK_EVENT_MEDIA_CHANGE;
 		unit[i].disk->private_data = &unit[i];
 		unit[i].disk->queue = blk_init_queue(do_fd_request,
 					&ataflop_lock);
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 9279272b3732..35658f445fca 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -3170,12 +3170,6 @@ static void do_cciss_request(struct request_queue *q)
 	int sg_index = 0;
 	int chained = 0;
 
-	/* We call start_io here in case there is a command waiting on the
-	 * queue that has not been sent.
-	 */
-	if (blk_queue_plugged(q))
-		goto startio;
-
 queue:
 	creq = blk_peek_request(q);
 	if (!creq)
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index 946dad4caef3..b2fceb53e809 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -911,9 +911,6 @@ static void do_ida_request(struct request_queue *q)
 	struct scatterlist tmp_sg[SG_MAX];
 	int i, dir, seg;
 
-	if (blk_queue_plugged(q))
-		goto startio;
-
 queue_next:
 	creq = blk_peek_request(q);
 	if (!creq)
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index ba95cba192be..aca302492ff2 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -80,7 +80,7 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
 
 	if ((rw & WRITE) && !test_bit(MD_NO_FUA, &mdev->flags))
 		rw |= REQ_FUA;
-	rw |= REQ_UNPLUG | REQ_SYNC;
+	rw |= REQ_SYNC;
 
 	bio = bio_alloc(GFP_NOIO, 1);
 	bio->bi_bdev = bdev->md_bdev;
@@ -689,8 +689,6 @@ void drbd_al_to_on_disk_bm(struct drbd_conf *mdev)
 		}
 	}
 
-	drbd_blk_run_queue(bdev_get_queue(mdev->ldev->md_bdev));
-
 	/* always (try to) flush bitmap to stable storage */
 	drbd_md_flush(mdev);
 
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index fd42832f785b..0645ca829a94 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -840,7 +840,6 @@ static int bm_rw(struct drbd_conf *mdev, int rw) __must_hold(local)
 	for (i = 0; i < num_pages; i++)
 		bm_page_io_async(mdev, b, i, rw);
 
-	drbd_blk_run_queue(bdev_get_queue(mdev->ldev->md_bdev));
 	wait_event(b->bm_io_wait, atomic_read(&b->bm_async_io) == 0);
 
 	if (test_bit(BM_MD_IO_ERROR, &b->bm_flags)) {
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 3803a0348937..b0bd27dfc1e8 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -377,7 +377,7 @@ union p_header {
 #define DP_HARDBARRIER 1 /* depricated */
 #define DP_RW_SYNC 2 /* equals REQ_SYNC */
 #define DP_MAY_SET_IN_SYNC 4
-#define DP_UNPLUG 8 /* equals REQ_UNPLUG */
+#define DP_UNPLUG 8 /* not used anymore */
 #define DP_FUA 16 /* equals REQ_FUA */
 #define DP_FLUSH 32 /* equals REQ_FLUSH */
 #define DP_DISCARD 64 /* equals REQ_DISCARD */
@@ -2382,20 +2382,6 @@ static inline int drbd_queue_order_type(struct drbd_conf *mdev)
 	return QUEUE_ORDERED_NONE;
 }
 
-static inline void drbd_blk_run_queue(struct request_queue *q)
-{
-	if (q && q->unplug_fn)
-		q->unplug_fn(q);
-}
-
-static inline void drbd_kick_lo(struct drbd_conf *mdev)
-{
-	if (get_ldev(mdev)) {
-		drbd_blk_run_queue(bdev_get_queue(mdev->ldev->backing_bdev));
-		put_ldev(mdev);
-	}
-}
-
 static inline void drbd_md_flush(struct drbd_conf *mdev)
 {
 	int r;
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 29cd0dc9fe4f..8a43ce0edeed 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2477,12 +2477,11 @@ static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
 {
 	if (mdev->agreed_pro_version >= 95)
 		return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
-			(bi_rw & REQ_UNPLUG ? DP_UNPLUG : 0) |
 			(bi_rw & REQ_FUA ? DP_FUA : 0) |
 			(bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
 			(bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
 	else
-		return bi_rw & (REQ_SYNC | REQ_UNPLUG) ? DP_RW_SYNC : 0;
+		return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
 }
 
 /* Used to send write requests
@@ -2719,35 +2718,6 @@ static int drbd_release(struct gendisk *gd, fmode_t mode)
 	return 0;
 }
 
-static void drbd_unplug_fn(struct request_queue *q)
-{
-	struct drbd_conf *mdev = q->queuedata;
-
-	/* unplug FIRST */
-	spin_lock_irq(q->queue_lock);
-	blk_remove_plug(q);
-	spin_unlock_irq(q->queue_lock);
-
-	/* only if connected */
-	spin_lock_irq(&mdev->req_lock);
-	if (mdev->state.pdsk >= D_INCONSISTENT && mdev->state.conn >= C_CONNECTED) {
-		D_ASSERT(mdev->state.role == R_PRIMARY);
-		if (test_and_clear_bit(UNPLUG_REMOTE, &mdev->flags)) {
-			/* add to the data.work queue,
-			 * unless already queued.
-			 * XXX this might be a good addition to drbd_queue_work
-			 * anyways, to detect "double queuing" ... */
-			if (list_empty(&mdev->unplug_work.list))
-				drbd_queue_work(&mdev->data.work,
-						&mdev->unplug_work);
-		}
-	}
-	spin_unlock_irq(&mdev->req_lock);
-
-	if (mdev->state.disk >= D_INCONSISTENT)
-		drbd_kick_lo(mdev);
-}
-
 static void drbd_set_defaults(struct drbd_conf *mdev)
 {
 	/* This way we get a compile error when sync_conf grows,
@@ -3222,9 +3192,7 @@ struct drbd_conf *drbd_new_device(unsigned int minor)
 	blk_queue_max_segment_size(q, DRBD_MAX_SEGMENT_SIZE);
 	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
 	blk_queue_merge_bvec(q, drbd_merge_bvec);
-	q->queue_lock = &mdev->req_lock; /* needed since we use */
-		/* plugging on a queue, that actually has no requests! */
-	q->unplug_fn = drbd_unplug_fn;
+	q->queue_lock = &mdev->req_lock;
 
 	mdev->md_io_page = alloc_page(GFP_KERNEL);
 	if (!mdev->md_io_page)
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 24487d4fb202..8e68be939deb 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -187,15 +187,6 @@ static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int
 	return NULL;
 }
 
-/* kick lower level device, if we have more than (arbitrary number)
- * reference counts on it, which typically are locally submitted io
- * requests. don't use unacked_cnt, so we speed up proto A and B, too. */
-static void maybe_kick_lo(struct drbd_conf *mdev)
-{
-	if (atomic_read(&mdev->local_cnt) >= mdev->net_conf->unplug_watermark)
-		drbd_kick_lo(mdev);
-}
-
 static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed)
 {
 	struct drbd_epoch_entry *e;
@@ -219,7 +210,6 @@ static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
 	LIST_HEAD(reclaimed);
 	struct drbd_epoch_entry *e, *t;
 
-	maybe_kick_lo(mdev);
 	spin_lock_irq(&mdev->req_lock);
 	reclaim_net_ee(mdev, &reclaimed);
 	spin_unlock_irq(&mdev->req_lock);
@@ -436,8 +426,7 @@ void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
 	while (!list_empty(head)) {
 		prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
 		spin_unlock_irq(&mdev->req_lock);
-		drbd_kick_lo(mdev);
-		schedule();
+		io_schedule();
 		finish_wait(&mdev->ee_wait, &wait);
 		spin_lock_irq(&mdev->req_lock);
 	}
@@ -1111,8 +1100,6 @@ next_bio:
 	/* > e->sector, unless this is the first bio */
 	bio->bi_sector = sector;
 	bio->bi_bdev = mdev->ldev->backing_bdev;
-	/* we special case some flags in the multi-bio case, see below
-	 * (REQ_UNPLUG) */
 	bio->bi_rw = rw;
 	bio->bi_private = e;
 	bio->bi_end_io = drbd_endio_sec;
@@ -1141,13 +1128,8 @@ next_bio:
 		bios = bios->bi_next;
 		bio->bi_next = NULL;
 
-		/* strip off REQ_UNPLUG unless it is the last bio */
-		if (bios)
-			bio->bi_rw &= ~REQ_UNPLUG;
-
 		drbd_generic_make_request(mdev, fault_type, bio);
 	} while (bios);
-	maybe_kick_lo(mdev);
 	return 0;
 
 fail:
@@ -1167,9 +1149,6 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
 
 	inc_unacked(mdev);
 
-	if (mdev->net_conf->wire_protocol != DRBD_PROT_C)
-		drbd_kick_lo(mdev);
-
 	mdev->current_epoch->barrier_nr = p->barrier;
 	rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);
 
@@ -1636,12 +1615,11 @@ static unsigned long write_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
 {
 	if (mdev->agreed_pro_version >= 95)
 		return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
-			(dpf & DP_UNPLUG ? REQ_UNPLUG : 0) |
 			(dpf & DP_FUA ? REQ_FUA : 0) |
 			(dpf & DP_FLUSH ? REQ_FUA : 0) |
 			(dpf & DP_DISCARD ? REQ_DISCARD : 0);
 	else
-		return dpf & DP_RW_SYNC ? (REQ_SYNC | REQ_UNPLUG) : 0;
+		return dpf & DP_RW_SYNC ? REQ_SYNC : 0;
 }
 
 /* mirrored write */
@@ -3556,9 +3534,6 @@ static int receive_skip(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 
 static int receive_UnplugRemote(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
 {
-	if (mdev->state.disk >= D_INCONSISTENT)
-		drbd_kick_lo(mdev);
-
 	/* Make sure we've acked all the TCP data associated
 	 * with the data requests being unplugged */
 	drbd_tcp_quickack(mdev->data.socket);
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 11a75d32a2e2..ad3fc6228f27 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -960,10 +960,6 @@ allocate_barrier:
 		bio_endio(req->private_bio, -EIO);
 	}
 
-	/* we need to plug ALWAYS since we possibly need to kick lo_dev.
-	 * we plug after submit, so we won't miss an unplug event */
-	drbd_plug_device(mdev);
-
 	return 0;
 
 fail_conflicting:
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 34f224b018b3..e027446590d3 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -792,7 +792,6 @@ int drbd_resync_finished(struct drbd_conf *mdev)
 		 * queue (or even the read operations for those packets
 		 * is not finished by now). Retry in 100ms. */
 
-		drbd_kick_lo(mdev);
 		__set_current_state(TASK_INTERRUPTIBLE);
 		schedule_timeout(HZ / 10);
 		w = kmalloc(sizeof(struct drbd_work), GFP_ATOMIC);
diff --git a/drivers/block/drbd/drbd_wrappers.h b/drivers/block/drbd/drbd_wrappers.h
index defdb5013ea3..53586fa5ae1b 100644
--- a/drivers/block/drbd/drbd_wrappers.h
+++ b/drivers/block/drbd/drbd_wrappers.h
@@ -45,24 +45,6 @@ static inline void drbd_generic_make_request(struct drbd_conf *mdev,
 	generic_make_request(bio);
 }
 
-static inline void drbd_plug_device(struct drbd_conf *mdev)
-{
-	struct request_queue *q;
-	q = bdev_get_queue(mdev->this_bdev);
-
-	spin_lock_irq(q->queue_lock);
-
-/* XXX the check on !blk_queue_plugged is redundant,
- * implicitly checked in blk_plug_device */
-
-	if (!blk_queue_plugged(q)) {
-		blk_plug_device(q);
-		del_timer(&q->unplug_timer);
-		/* unplugging should not happen automatically... */
-	}
-	spin_unlock_irq(q->queue_lock);
-}
-
 static inline int drbd_crypto_is_hash(struct crypto_tfm *tfm)
 {
 	return (crypto_tfm_alg_type(tfm) & CRYPTO_ALG_TYPE_HASH_MASK)
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 77fc76f8aea9..301d7a9a41a6 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3770,13 +3770,14 @@ out2:
 /*
  * Check if the disk has been changed or if a change has been faked.
  */
-static int check_floppy_change(struct gendisk *disk)
+static unsigned int floppy_check_events(struct gendisk *disk,
+					unsigned int clearing)
 {
 	int drive = (long)disk->private_data;
 
 	if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) ||
 	    test_bit(FD_VERIFY_BIT, &UDRS->flags))
-		return 1;
+		return DISK_EVENT_MEDIA_CHANGE;
 
 	if (time_after(jiffies, UDRS->last_checked + UDP->checkfreq)) {
 		lock_fdc(drive, false);
@@ -3788,7 +3789,7 @@ static int check_floppy_change(struct gendisk *disk)
 	    test_bit(FD_VERIFY_BIT, &UDRS->flags) ||
 	    test_bit(drive, &fake_change) ||
 	    drive_no_geom(drive))
-		return 1;
+		return DISK_EVENT_MEDIA_CHANGE;
 	return 0;
 }
 
@@ -3837,7 +3838,6 @@ static int __floppy_read_block_0(struct block_device *bdev)
 	bio.bi_end_io = floppy_rb0_complete;
 
 	submit_bio(READ, &bio);
-	generic_unplug_device(bdev_get_queue(bdev));
 	process_fd_request();
 	wait_for_completion(&complete);
 
@@ -3898,7 +3898,7 @@ static const struct block_device_operations floppy_fops = {
 	.release = floppy_release,
 	.ioctl = fd_ioctl,
 	.getgeo = fd_getgeo,
-	.media_changed = check_floppy_change,
+	.check_events = floppy_check_events,
 	.revalidate_disk = floppy_revalidate,
 };
 
@@ -4205,6 +4205,7 @@ static int __init floppy_init(void)
 		disks[dr]->major = FLOPPY_MAJOR;
 		disks[dr]->first_minor = TOMINOR(dr);
 		disks[dr]->fops = &floppy_fops;
+		disks[dr]->events = DISK_EVENT_MEDIA_CHANGE;
 		sprintf(disks[dr]->disk_name, "fd%d", dr);
 
 		init_timer(&motor_off_timer[dr]);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index dbf31ec9114d..a076a14ca72d 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -540,17 +540,6 @@ out:
 	return 0;
 }
 
-/*
- * kick off io on the underlying address space
- */
-static void loop_unplug(struct request_queue *q)
-{
-	struct loop_device *lo = q->queuedata;
-
-	queue_flag_clear_unlocked(QUEUE_FLAG_PLUGGED, q);
-	blk_run_address_space(lo->lo_backing_file->f_mapping);
-}
-
 struct switch_request {
 	struct file *file;
 	struct completion wait;
@@ -917,7 +906,6 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
 	 */
 	blk_queue_make_request(lo->lo_queue, loop_make_request);
 	lo->lo_queue->queuedata = lo;
-	lo->lo_queue->unplug_fn = loop_unplug;
 
 	if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
 		blk_queue_flush(lo->lo_queue, REQ_FLUSH);
@@ -1019,7 +1007,6 @@ static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
 
 	kthread_stop(lo->lo_thread);
 
-	lo->lo_queue->unplug_fn = NULL;
 	lo->lo_backing_file = NULL;
 
 	loop_release_xfer(lo);
@@ -1636,9 +1623,6 @@ out:
 
 static void loop_free(struct loop_device *lo)
 {
-	if (!lo->lo_queue->queue_lock)
-		lo->lo_queue->queue_lock = &lo->lo_queue->__queue_lock;
-
 	blk_cleanup_queue(lo->lo_queue);
 	put_disk(lo->lo_disk);
 	list_del(&lo->lo_list);
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index 62cec6afd7ad..2f2ccf686251 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -172,7 +172,8 @@ module_param_array(drive3, int, NULL, 0);
 static int pcd_open(struct cdrom_device_info *cdi, int purpose);
 static void pcd_release(struct cdrom_device_info *cdi);
 static int pcd_drive_status(struct cdrom_device_info *cdi, int slot_nr);
-static int pcd_media_changed(struct cdrom_device_info *cdi, int slot_nr);
+static unsigned int pcd_check_events(struct cdrom_device_info *cdi,
+				     unsigned int clearing, int slot_nr);
 static int pcd_tray_move(struct cdrom_device_info *cdi, int position);
 static int pcd_lock_door(struct cdrom_device_info *cdi, int lock);
 static int pcd_drive_reset(struct cdrom_device_info *cdi);
@@ -257,10 +258,11 @@ static int pcd_block_ioctl(struct block_device *bdev, fmode_t mode,
 	return ret;
 }
 
-static int pcd_block_media_changed(struct gendisk *disk)
+static unsigned int pcd_block_check_events(struct gendisk *disk,
+					   unsigned int clearing)
 {
 	struct pcd_unit *cd = disk->private_data;
-	return cdrom_media_changed(&cd->info);
+	return cdrom_check_events(&cd->info, clearing);
 }
 
 static const struct block_device_operations pcd_bdops = {
@@ -268,14 +270,14 @@ static const struct block_device_operations pcd_bdops = {
 	.open = pcd_block_open,
 	.release = pcd_block_release,
 	.ioctl = pcd_block_ioctl,
-	.media_changed = pcd_block_media_changed,
+	.check_events = pcd_block_check_events,
 };
 
 static struct cdrom_device_ops pcd_dops = {
 	.open = pcd_open,
 	.release = pcd_release,
 	.drive_status = pcd_drive_status,
-	.media_changed = pcd_media_changed,
+	.check_events = pcd_check_events,
 	.tray_move = pcd_tray_move,
 	.lock_door = pcd_lock_door,
 	.get_mcn = pcd_get_mcn,
@@ -318,6 +320,7 @@ static void pcd_init_units(void)
 		disk->first_minor = unit;
 		strcpy(disk->disk_name, cd->name); /* umm... */
 		disk->fops = &pcd_bdops;
+		disk->events = DISK_EVENT_MEDIA_CHANGE;
 	}
 }
 
@@ -502,13 +505,14 @@ static int pcd_packet(struct cdrom_device_info *cdi, struct packet_command *cgc)
 
 #define DBMSG(msg) ((verbose>1)?(msg):NULL)
 
-static int pcd_media_changed(struct cdrom_device_info *cdi, int slot_nr)
+static unsigned int pcd_check_events(struct cdrom_device_info *cdi,
+				     unsigned int clearing, int slot_nr)
 {
 	struct pcd_unit *cd = cdi->handle;
 	int res = cd->changed;
 	if (res)
 		cd->changed = 0;
-	return res;
+	return res ? DISK_EVENT_MEDIA_CHANGE : 0;
 }
 
 static int pcd_lock_door(struct cdrom_device_info *cdi, int lock)
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index c0ee1558b9bb..21dfdb776869 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -794,7 +794,7 @@ static int pd_release(struct gendisk *p, fmode_t mode)
 	return 0;
 }
 
-static int pd_check_media(struct gendisk *p)
+static unsigned int pd_check_events(struct gendisk *p, unsigned int clearing)
 {
 	struct pd_unit *disk = p->private_data;
 	int r;
@@ -803,7 +803,7 @@ static int pd_check_media(struct gendisk *p)
 	pd_special_command(disk, pd_media_check);
 	r = disk->changed;
 	disk->changed = 0;
-	return r;
+	return r ? DISK_EVENT_MEDIA_CHANGE : 0;
 }
 
 static int pd_revalidate(struct gendisk *p)
@@ -822,7 +822,7 @@ static const struct block_device_operations pd_fops = {
 	.release = pd_release,
 	.ioctl = pd_ioctl,
 	.getgeo = pd_getgeo,
-	.media_changed = pd_check_media,
+	.check_events = pd_check_events,
 	.revalidate_disk= pd_revalidate
 };
 
@@ -837,6 +837,7 @@ static void pd_probe_drive(struct pd_unit *disk)
 	p->fops = &pd_fops;
 	p->major = major;
 	p->first_minor = (disk - pd) << PD_BITS;
+	p->events = DISK_EVENT_MEDIA_CHANGE;
 	disk->gd = p;
 	p->private_data = disk;
 	p->queue = pd_queue;
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index 635f25dd9e10..7adeb1edbf43 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -243,7 +243,8 @@ static struct pf_unit units[PF_UNITS];
 static int pf_identify(struct pf_unit *pf);
 static void pf_lock(struct pf_unit *pf, int func);
 static void pf_eject(struct pf_unit *pf);
-static int pf_check_media(struct gendisk *disk);
+static unsigned int pf_check_events(struct gendisk *disk,
+				    unsigned int clearing);
 
 static char pf_scratch[512]; /* scratch block buffer */
 
@@ -270,7 +271,7 @@ static const struct block_device_operations pf_fops = {
 	.release = pf_release,
 	.ioctl = pf_ioctl,
 	.getgeo = pf_getgeo,
-	.media_changed = pf_check_media,
+	.check_events = pf_check_events,
 };
 
 static void __init pf_init_units(void)
@@ -293,6 +294,7 @@ static void __init pf_init_units(void)
 		disk->first_minor = unit;
 		strcpy(disk->disk_name, pf->name);
 		disk->fops = &pf_fops;
+		disk->events = DISK_EVENT_MEDIA_CHANGE;
 		if (!(*drives[unit])[D_PRT])
 			pf_drive_count++;
 	}
@@ -377,9 +379,9 @@ static int pf_release(struct gendisk *disk, fmode_t mode)
 
 }
 
-static int pf_check_media(struct gendisk *disk)
+static unsigned int pf_check_events(struct gendisk *disk, unsigned int clearing)
 {
-	return 1;
+	return DISK_EVENT_MEDIA_CHANGE;
 }
 
 static inline int status_reg(struct pf_unit *pf)
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 77d70eebb6b2..07a382eaf0a8 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -1606,8 +1606,6 @@ static int kcdrwd(void *foobar)
 				min_sleep_time = pkt->sleep_time;
 		}
 
-		generic_unplug_device(bdev_get_queue(pd->bdev));
-
 		VPRINTK("kcdrwd: sleeping\n");
 		residue = schedule_timeout(min_sleep_time);
 		VPRINTK("kcdrwd: wake up\n");
@@ -2796,7 +2794,8 @@ static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
 	return ret;
 }
 
-static int pkt_media_changed(struct gendisk *disk)
+static unsigned int pkt_check_events(struct gendisk *disk,
+				     unsigned int clearing)
 {
 	struct pktcdvd_device *pd = disk->private_data;
 	struct gendisk *attached_disk;
@@ -2806,9 +2805,9 @@ static int pkt_media_changed(struct gendisk *disk)
 	if (!pd->bdev)
 		return 0;
 	attached_disk = pd->bdev->bd_disk;
-	if (!attached_disk)
+	if (!attached_disk || !attached_disk->fops->check_events)
 		return 0;
-	return attached_disk->fops->media_changed(attached_disk);
+	return attached_disk->fops->check_events(attached_disk, clearing);
 }
 
 static const struct block_device_operations pktcdvd_ops = {
@@ -2816,7 +2815,7 @@ static const struct block_device_operations pktcdvd_ops = {
 	.open = pkt_open,
 	.release = pkt_close,
 	.ioctl = pkt_ioctl,
-	.media_changed = pkt_media_changed,
+	.check_events = pkt_check_events,
 };
 
 static char *pktcdvd_devnode(struct gendisk *gd, mode_t *mode)
@@ -2889,6 +2888,10 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
 	if (ret)
 		goto out_new_dev;
 
+	/* inherit events of the host device */
+	disk->events = pd->bdev->bd_disk->events;
+	disk->async_events = pd->bdev->bd_disk->async_events;
+
 	add_disk(disk);
 
 	pkt_sysfs_dev_new(pd);
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index 75333d0a3327..24a482f2fbd6 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -741,11 +741,12 @@ static int floppy_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 	return 0;
 }
 
-static int floppy_check_change(struct gendisk *disk)
+static unsigned int floppy_check_events(struct gendisk *disk,
+					unsigned int clearing)
 {
 	struct floppy_state *fs = disk->private_data;
 
-	return fs->ejected;
+	return fs->ejected ? DISK_EVENT_MEDIA_CHANGE : 0;
 }
 
 static int floppy_revalidate(struct gendisk *disk)
@@ -772,7 +773,7 @@ static const struct block_device_operations floppy_fops = {
 	.release = floppy_release,
 	.ioctl = floppy_ioctl,
 	.getgeo = floppy_getgeo,
-	.media_changed = floppy_check_change,
+	.check_events = floppy_check_events,
 	.revalidate_disk = floppy_revalidate,
 };
 
@@ -857,6 +858,7 @@ static int __devinit swim_floppy_init(struct swim_priv *swd)
 		swd->unit[drive].disk->first_minor = drive;
 		sprintf(swd->unit[drive].disk->disk_name, "fd%d", drive);
 		swd->unit[drive].disk->fops = &floppy_fops;
+		swd->unit[drive].disk->events = DISK_EVENT_MEDIA_CHANGE;
 		swd->unit[drive].disk->private_data = &swd->unit[drive];
 		swd->unit[drive].disk->queue = swd->queue;
 		set_capacity(swd->unit[drive].disk, 2880);
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index bf3a5b859299..4c10f56facbf 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -250,7 +250,8 @@ static int floppy_ioctl(struct block_device *bdev, fmode_t mode,
 			unsigned int cmd, unsigned long param);
 static int floppy_open(struct block_device *bdev, fmode_t mode);
 static int floppy_release(struct gendisk *disk, fmode_t mode);
-static int floppy_check_change(struct gendisk *disk);
+static unsigned int floppy_check_events(struct gendisk *disk,
+					unsigned int clearing);
 static int floppy_revalidate(struct gendisk *disk);
 
 static bool swim3_end_request(int err, unsigned int nr_bytes)
@@ -975,10 +976,11 @@ static int floppy_release(struct gendisk *disk, fmode_t mode)
 	return 0;
 }
 
-static int floppy_check_change(struct gendisk *disk)
+static unsigned int floppy_check_events(struct gendisk *disk,
+					unsigned int clearing)
 {
 	struct floppy_state *fs = disk->private_data;
-	return fs->ejected;
+	return fs->ejected ? DISK_EVENT_MEDIA_CHANGE : 0;
 }
 
 static int floppy_revalidate(struct gendisk *disk)
@@ -1025,7 +1027,7 @@ static const struct block_device_operations floppy_fops = {
 	.open = floppy_unlocked_open,
 	.release = floppy_release,
 	.ioctl = floppy_ioctl,
-	.media_changed = floppy_check_change,
+	.check_events = floppy_check_events,
 	.revalidate_disk= floppy_revalidate,
 };
 
@@ -1161,6 +1163,7 @@ static int __devinit swim3_attach(struct macio_dev *mdev, const struct of_device
 	disk->major = FLOPPY_MAJOR;
 	disk->first_minor = i;
 	disk->fops = &floppy_fops;
+	disk->events = DISK_EVENT_MEDIA_CHANGE;
 	disk->private_data = &floppy_states[i];
 	disk->queue = swim3_queue;
 	disk->flags |= GENHD_FL_REMOVABLE;
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index 9ae3bb713286..68b9430c7cfe 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -1788,7 +1788,8 @@ static int ub_bd_revalidate(struct gendisk *disk)
  *
  * The return code is bool!
  */
-static int ub_bd_media_changed(struct gendisk *disk)
+static unsigned int ub_bd_check_events(struct gendisk *disk,
+				       unsigned int clearing)
 {
 	struct ub_lun *lun = disk->private_data;
 
@@ -1806,10 +1807,10 @@ static int ub_bd_media_changed(struct gendisk *disk)
 	 */
 	if (ub_sync_tur(lun->udev, lun) != 0) {
 		lun->changed = 1;
-		return 1;
+		return DISK_EVENT_MEDIA_CHANGE;
 	}
 
-	return lun->changed;
+	return lun->changed ? DISK_EVENT_MEDIA_CHANGE : 0;
 }
 
 static const struct block_device_operations ub_bd_fops = {
@@ -1817,7 +1818,7 @@ static const struct block_device_operations ub_bd_fops = {
 	.open = ub_bd_unlocked_open,
 	.release = ub_bd_release,
 	.ioctl = ub_bd_ioctl,
-	.media_changed = ub_bd_media_changed,
+	.check_events = ub_bd_check_events,
 	.revalidate_disk = ub_bd_revalidate,
 };
 
@@ -2333,6 +2334,7 @@ static int ub_probe_lun(struct ub_dev *sc, int lnum)
 	disk->major = UB_MAJOR;
 	disk->first_minor = lun->id * UB_PARTS_PER_LUN;
 	disk->fops = &ub_bd_fops;
+	disk->events = DISK_EVENT_MEDIA_CHANGE;
 	disk->private_data = lun;
 	disk->driverfs_dev = &sc->intf->dev;
 
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index 8be57151f5d6..031ca720d926 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -241,8 +241,7 @@ static void dump_dmastat(struct cardinfo *card, unsigned int dmastat)
  *
  * Whenever IO on the active page completes, the Ready page is activated
  * and the ex-Active page is clean out and made Ready.
- * Otherwise the Ready page is only activated when it becomes full, or
- * when mm_unplug_device is called via the unplug_io_fn.
+ * Otherwise the Ready page is only activated when it becomes full.
  *
  * If a request arrives while both pages a full, it is queued, and b_rdev is
  * overloaded to record whether it was a read or a write.
@@ -333,17 +332,6 @@ static inline void reset_page(struct mm_page *page)
 	page->biotail = &page->bio;
 }
 
-static void mm_unplug_device(struct request_queue *q)
-{
-	struct cardinfo *card = q->queuedata;
-	unsigned long flags;
-
-	spin_lock_irqsave(&card->lock, flags);
-	if (blk_remove_plug(q))
-		activate(card);
-	spin_unlock_irqrestore(&card->lock, flags);
-}
-
 /*
  * If there is room on Ready page, take
  * one bh off list and add it.
@@ -535,7 +523,6 @@ static int mm_make_request(struct request_queue *q, struct bio *bio)
 	*card->biotail = bio;
 	bio->bi_next = NULL;
 	card->biotail = &bio->bi_next;
-	blk_plug_device(q);
 	spin_unlock_irq(&card->lock);
 
 	return 0;
@@ -779,20 +766,10 @@ static int mm_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 	return 0;
 }
 
-/*
- * Future support for removable devices
- */
-static int mm_check_change(struct gendisk *disk)
-{
-/*	struct cardinfo *dev = disk->private_data; */
-	return 0;
-}
-
 static const struct block_device_operations mm_fops = {
 	.owner = THIS_MODULE,
 	.getgeo = mm_getgeo,
 	.revalidate_disk = mm_revalidate,
-	.media_changed = mm_check_change,
 };
 
 static int __devinit mm_pci_probe(struct pci_dev *dev,
@@ -907,7 +884,6 @@ static int __devinit mm_pci_probe(struct pci_dev *dev,
 	blk_queue_make_request(card->queue, mm_make_request);
 	card->queue->queue_lock = &card->lock;
 	card->queue->queuedata = card;
-	card->queue->unplug_fn = mm_unplug_device;
 
 	tasklet_init(&card->tasklet, process_page, (unsigned long)card);
 
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 2c590a796aa1..73354b081ed3 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -867,12 +867,12 @@ static void ace_request(struct request_queue * q)
 	}
 }
 
-static int ace_media_changed(struct gendisk *gd)
+static unsigned int ace_check_events(struct gendisk *gd, unsigned int clearing)
 {
 	struct ace_device *ace = gd->private_data;
-	dev_dbg(ace->dev, "ace_media_changed(): %i\n", ace->media_change);
+	dev_dbg(ace->dev, "ace_check_events(): %i\n", ace->media_change);
 
-	return ace->media_change;
+	return ace->media_change ? DISK_EVENT_MEDIA_CHANGE : 0;
 }
 
 static int ace_revalidate_disk(struct gendisk *gd)
@@ -953,7 +953,7 @@ static const struct block_device_operations ace_fops = {
 	.owner = THIS_MODULE,
 	.open = ace_open,
 	.release = ace_release,
-	.media_changed = ace_media_changed,
+	.check_events = ace_check_events,
 	.revalidate_disk = ace_revalidate_disk,
 	.getgeo = ace_getgeo,
 };
@@ -1005,6 +1005,7 @@ static int __devinit ace_setup(struct ace_device *ace)
 	ace->gd->major = ace_major;
 	ace->gd->first_minor = ace->id * ACE_NUM_MINORS;
 	ace->gd->fops = &ace_fops;
+	ace->gd->events = DISK_EVENT_MEDIA_CHANGE;
 	ace->gd->queue = ace->queue;
 	ace->gd->private_data = ace;
 	snprintf(ace->gd->disk_name, 32, "xs%c", ace->id + 'a');
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 64a21461c408..b2b034fea34e 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -395,10 +395,12 @@ static int gdrom_drivestatus(struct cdrom_device_info *cd_info, int ignore)
 	return CDS_NO_INFO;
 }
 
-static int gdrom_mediachanged(struct cdrom_device_info *cd_info, int ignore)
+static unsigned int gdrom_check_events(struct cdrom_device_info *cd_info,
+				       unsigned int clearing, int ignore)
 {
 	/* check the sense key */
-	return (__raw_readb(GDROM_ERROR_REG) & 0xF0) == 0x60;
+	return (__raw_readb(GDROM_ERROR_REG) & 0xF0) == 0x60 ?
+		DISK_EVENT_MEDIA_CHANGE : 0;
 }
 
 /* reset the G1 bus */
@@ -483,7 +485,7 @@ static struct cdrom_device_ops gdrom_ops = {
 	.open = gdrom_open,
 	.release = gdrom_release,
 	.drive_status = gdrom_drivestatus,
-	.media_changed = gdrom_mediachanged,
+	.check_events = gdrom_check_events,
 	.get_last_session = gdrom_get_last_session,
 	.reset = gdrom_hardreset,
 	.audio_ioctl = gdrom_audio_ioctl,
@@ -509,9 +511,10 @@ static int gdrom_bdops_release(struct gendisk *disk, fmode_t mode)
 	return 0;
 }
 
-static int gdrom_bdops_mediachanged(struct gendisk *disk)
+static unsigned int gdrom_bdops_check_events(struct gendisk *disk,
+					     unsigned int clearing)
 {
-	return cdrom_media_changed(gd.cd_info);
+	return cdrom_check_events(gd.cd_info, clearing);
 }
 
 static int gdrom_bdops_ioctl(struct block_device *bdev, fmode_t mode,
@@ -530,7 +533,7 @@ static const struct block_device_operations gdrom_bdops = {
 	.owner = THIS_MODULE,
 	.open = gdrom_bdops_open,
 	.release = gdrom_bdops_release,
-	.media_changed = gdrom_bdops_mediachanged,
+	.check_events = gdrom_bdops_check_events,
 	.ioctl = gdrom_bdops_ioctl,
 };
 
@@ -800,6 +803,7 @@ static int __devinit probe_gdrom(struct platform_device *devptr)
 		goto probe_fail_cdrom_register;
 	}
 	gd.disk->fops = &gdrom_bdops;
+	gd.disk->events = DISK_EVENT_MEDIA_CHANGE;
 	/* latch on to the interrupt */
 	err = gdrom_set_interrupt_handlers();
 	if (err)
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
index be73a9b493a6..4e874c5fa605 100644
--- a/drivers/cdrom/viocd.c
+++ b/drivers/cdrom/viocd.c
@@ -186,10 +186,11 @@ static int viocd_blk_ioctl(struct block_device *bdev, fmode_t mode,
186 return ret; 186 return ret;
187} 187}
188 188
189static int viocd_blk_media_changed(struct gendisk *disk) 189static unsigned int viocd_blk_check_events(struct gendisk *disk,
190 unsigned int clearing)
190{ 191{
191 struct disk_info *di = disk->private_data; 192 struct disk_info *di = disk->private_data;
192 return cdrom_media_changed(&di->viocd_info); 193 return cdrom_check_events(&di->viocd_info, clearing);
193} 194}
194 195
195static const struct block_device_operations viocd_fops = { 196static const struct block_device_operations viocd_fops = {
@@ -197,7 +198,7 @@ static const struct block_device_operations viocd_fops = {
197 .open = viocd_blk_open, 198 .open = viocd_blk_open,
198 .release = viocd_blk_release, 199 .release = viocd_blk_release,
199 .ioctl = viocd_blk_ioctl, 200 .ioctl = viocd_blk_ioctl,
200 .media_changed = viocd_blk_media_changed, 201 .check_events = viocd_blk_check_events,
201}; 202};
202 203
203static int viocd_open(struct cdrom_device_info *cdi, int purpose) 204static int viocd_open(struct cdrom_device_info *cdi, int purpose)
@@ -320,7 +321,8 @@ static void do_viocd_request(struct request_queue *q)
320 } 321 }
321} 322}
322 323
323static int viocd_media_changed(struct cdrom_device_info *cdi, int disc_nr) 324static unsigned int viocd_check_events(struct cdrom_device_info *cdi,
325 unsigned int clearing, int disc_nr)
324{ 326{
325 struct viocd_waitevent we; 327 struct viocd_waitevent we;
326 HvLpEvent_Rc hvrc; 328 HvLpEvent_Rc hvrc;
@@ -340,7 +342,7 @@ static int viocd_media_changed(struct cdrom_device_info *cdi, int disc_nr)
340 if (hvrc != 0) { 342 if (hvrc != 0) {
341 pr_warning("bad rc on HvCallEvent_signalLpEventFast %d\n", 343 pr_warning("bad rc on HvCallEvent_signalLpEventFast %d\n",
342 (int)hvrc); 344 (int)hvrc);
343 return -EIO; 345 return 0;
344 } 346 }
345 347
346 wait_for_completion(&we.com); 348 wait_for_completion(&we.com);
@@ -354,7 +356,7 @@ static int viocd_media_changed(struct cdrom_device_info *cdi, int disc_nr)
354 return 0; 356 return 0;
355 } 357 }
356 358
357 return we.changed; 359 return we.changed ? DISK_EVENT_MEDIA_CHANGE : 0;
358} 360}
359 361
360static int viocd_lock_door(struct cdrom_device_info *cdi, int locking) 362static int viocd_lock_door(struct cdrom_device_info *cdi, int locking)
@@ -550,7 +552,7 @@ static int viocd_audio_ioctl(struct cdrom_device_info *cdi, unsigned int cmd,
550static struct cdrom_device_ops viocd_dops = { 552static struct cdrom_device_ops viocd_dops = {
551 .open = viocd_open, 553 .open = viocd_open,
552 .release = viocd_release, 554 .release = viocd_release,
553 .media_changed = viocd_media_changed, 555 .check_events = viocd_check_events,
554 .lock_door = viocd_lock_door, 556 .lock_door = viocd_lock_door,
555 .generic_packet = viocd_packet, 557 .generic_packet = viocd_packet,
556 .audio_ioctl = viocd_audio_ioctl, 558 .audio_ioctl = viocd_audio_ioctl,
@@ -624,6 +626,7 @@ static int viocd_probe(struct vio_dev *vdev, const struct vio_device_id *id)
624 gendisk->queue = q; 626 gendisk->queue = q;
625 gendisk->fops = &viocd_fops; 627 gendisk->fops = &viocd_fops;
626 gendisk->flags = GENHD_FL_CD|GENHD_FL_REMOVABLE; 628 gendisk->flags = GENHD_FL_CD|GENHD_FL_REMOVABLE;
629 gendisk->events = DISK_EVENT_MEDIA_CHANGE;
627 set_capacity(gendisk, 0); 630 set_capacity(gendisk, 0);
628 gendisk->private_data = d; 631 gendisk->private_data = d;
629 d->viocd_disk = gendisk; 632 d->viocd_disk = gendisk;
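
On the cdrom_device_ops side the same conversion threads the clearing mask through cdrom_check_events(), and the event method stops returning errno values: an event mask has no room for -EIO, so a failed poll is reported as "no events". A sketch of the cdrom layer, assuming it sits in a cdrom-backed block driver; demo_poll_drive() is hypothetical:

#include <linux/cdrom.h>
#include <linux/genhd.h>

/* hypothetical hardware query: returns non-zero on failure */
static int demo_poll_drive(struct cdrom_device_info *cdi, int *changed);

static unsigned int demo_cdrom_check_events(struct cdrom_device_info *cdi,
                                            unsigned int clearing, int disc_nr)
{
        int changed;

        if (demo_poll_drive(cdi, &changed))
                return 0;               /* failure: no events, not -EIO */
        return changed ? DISK_EVENT_MEDIA_CHANGE : 0;
}

static struct cdrom_device_ops demo_dops = {
        .check_events   = demo_cdrom_check_events,
        /* ...other ops unchanged... */
};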
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
index e88a2cf17711..6f218e014e99 100644
--- a/drivers/ide/ide-atapi.c
+++ b/drivers/ide/ide-atapi.c
@@ -233,8 +233,7 @@ int ide_queue_sense_rq(ide_drive_t *drive, void *special)
233 233
234 drive->hwif->rq = NULL; 234 drive->hwif->rq = NULL;
235 235
236 elv_add_request(drive->queue, &drive->sense_rq, 236 elv_add_request(drive->queue, &drive->sense_rq, ELEVATOR_INSERT_FRONT);
237 ELEVATOR_INSERT_FRONT, 0);
238 return 0; 237 return 0;
239} 238}
240EXPORT_SYMBOL_GPL(ide_queue_sense_rq); 239EXPORT_SYMBOL_GPL(ide_queue_sense_rq);
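
elv_add_request() drops its trailing plug argument in this series (ide-park.c below gets the same treatment), so callers pass only the queue, the request and the insertion position. A sketch of the new call shape, wrapped in a demo helper:

#include <linux/ide.h>
#include <linux/elevator.h>

static void demo_queue_front(ide_drive_t *drive, struct request *rq)
{
        /* previously: elv_add_request(drive->queue, rq, ELEVATOR_INSERT_FRONT, 0); */
        elv_add_request(drive->queue, rq, ELEVATOR_INSERT_FRONT);
}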
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 0c73fe39a236..fd1e11799137 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -258,17 +258,10 @@ static int ide_cd_breathe(ide_drive_t *drive, struct request *rq)
258 if (time_after(jiffies, info->write_timeout)) 258 if (time_after(jiffies, info->write_timeout))
259 return 0; 259 return 0;
260 else { 260 else {
261 struct request_queue *q = drive->queue;
262 unsigned long flags;
263
264 /* 261 /*
265 * take a breather relying on the unplug timer to kick us again 262 * take a breather
266 */ 263 */
267 264 blk_delay_queue(drive->queue, 1);
268 spin_lock_irqsave(q->queue_lock, flags);
269 blk_plug_device(q);
270 spin_unlock_irqrestore(q->queue_lock, flags);
271
272 return 1; 265 return 1;
273 } 266 }
274} 267}
@@ -1177,7 +1170,7 @@ static struct cdrom_device_ops ide_cdrom_dops = {
1177 .open = ide_cdrom_open_real, 1170 .open = ide_cdrom_open_real,
1178 .release = ide_cdrom_release_real, 1171 .release = ide_cdrom_release_real,
1179 .drive_status = ide_cdrom_drive_status, 1172 .drive_status = ide_cdrom_drive_status,
1180 .media_changed = ide_cdrom_check_media_change_real, 1173 .check_events = ide_cdrom_check_events_real,
1181 .tray_move = ide_cdrom_tray_move, 1174 .tray_move = ide_cdrom_tray_move,
1182 .lock_door = ide_cdrom_lock_door, 1175 .lock_door = ide_cdrom_lock_door,
1183 .select_speed = ide_cdrom_select_speed, 1176 .select_speed = ide_cdrom_select_speed,
@@ -1514,8 +1507,6 @@ static int ide_cdrom_setup(ide_drive_t *drive)
1514 blk_queue_dma_alignment(q, 31); 1507 blk_queue_dma_alignment(q, 31);
1515 blk_queue_update_dma_pad(q, 15); 1508 blk_queue_update_dma_pad(q, 15);
1516 1509
1517 q->unplug_delay = max((1 * HZ) / 1000, 1);
1518
1519 drive->dev_flags |= IDE_DFLAG_MEDIA_CHANGED; 1510 drive->dev_flags |= IDE_DFLAG_MEDIA_CHANGED;
1520 drive->atapi_flags = IDE_AFLAG_NO_EJECT | ide_cd_flags(id); 1511 drive->atapi_flags = IDE_AFLAG_NO_EJECT | ide_cd_flags(id);
1521 1512
@@ -1702,10 +1693,11 @@ static int idecd_ioctl(struct block_device *bdev, fmode_t mode,
1702} 1693}
1703 1694
1704 1695
1705static int idecd_media_changed(struct gendisk *disk) 1696static unsigned int idecd_check_events(struct gendisk *disk,
1697 unsigned int clearing)
1706{ 1698{
1707 struct cdrom_info *info = ide_drv_g(disk, cdrom_info); 1699 struct cdrom_info *info = ide_drv_g(disk, cdrom_info);
1708 return cdrom_media_changed(&info->devinfo); 1700 return cdrom_check_events(&info->devinfo, clearing);
1709} 1701}
1710 1702
1711static int idecd_revalidate_disk(struct gendisk *disk) 1703static int idecd_revalidate_disk(struct gendisk *disk)
@@ -1723,7 +1715,7 @@ static const struct block_device_operations idecd_ops = {
1723 .open = idecd_open, 1715 .open = idecd_open,
1724 .release = idecd_release, 1716 .release = idecd_release,
1725 .ioctl = idecd_ioctl, 1717 .ioctl = idecd_ioctl,
1726 .media_changed = idecd_media_changed, 1718 .check_events = idecd_check_events,
1727 .revalidate_disk = idecd_revalidate_disk 1719 .revalidate_disk = idecd_revalidate_disk
1728}; 1720};
1729 1721
@@ -1790,6 +1782,7 @@ static int ide_cd_probe(ide_drive_t *drive)
1790 ide_cd_read_toc(drive, &sense); 1782 ide_cd_read_toc(drive, &sense);
1791 g->fops = &idecd_ops; 1783 g->fops = &idecd_ops;
1792 g->flags |= GENHD_FL_REMOVABLE; 1784 g->flags |= GENHD_FL_REMOVABLE;
1785 g->events = DISK_EVENT_MEDIA_CHANGE;
1793 add_disk(g); 1786 add_disk(g);
1794 return 0; 1787 return 0;
1795 1788
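
ide_cd_breathe() shows the replacement for "plug and let the unplug timer fire": blk_delay_queue() asks the block layer to run the queue again after a short delay, so the driver no longer grabs queue_lock or touches plug state itself. A minimal sketch of that back-off, assuming only a request_queue pointer:

#include <linux/blkdev.h>

static int demo_breathe(struct request_queue *q)
{
        /* back off; the block layer restarts the queue for us shortly */
        blk_delay_queue(q, 1);
        return 1;       /* caller treats the request as deferred, not failed */
}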
diff --git a/drivers/ide/ide-cd.h b/drivers/ide/ide-cd.h
index 93a3cf1b0f3f..1efc936f5b66 100644
--- a/drivers/ide/ide-cd.h
+++ b/drivers/ide/ide-cd.h
@@ -111,7 +111,8 @@ int cdrom_check_status(ide_drive_t *, struct request_sense *);
111int ide_cdrom_open_real(struct cdrom_device_info *, int); 111int ide_cdrom_open_real(struct cdrom_device_info *, int);
112void ide_cdrom_release_real(struct cdrom_device_info *); 112void ide_cdrom_release_real(struct cdrom_device_info *);
113int ide_cdrom_drive_status(struct cdrom_device_info *, int); 113int ide_cdrom_drive_status(struct cdrom_device_info *, int);
114int ide_cdrom_check_media_change_real(struct cdrom_device_info *, int); 114unsigned int ide_cdrom_check_events_real(struct cdrom_device_info *,
115 unsigned int clearing, int slot_nr);
115int ide_cdrom_tray_move(struct cdrom_device_info *, int); 116int ide_cdrom_tray_move(struct cdrom_device_info *, int);
116int ide_cdrom_lock_door(struct cdrom_device_info *, int); 117int ide_cdrom_lock_door(struct cdrom_device_info *, int);
117int ide_cdrom_select_speed(struct cdrom_device_info *, int); 118int ide_cdrom_select_speed(struct cdrom_device_info *, int);
diff --git a/drivers/ide/ide-cd_ioctl.c b/drivers/ide/ide-cd_ioctl.c
index 766b3deeb23c..2a6bc50e8a41 100644
--- a/drivers/ide/ide-cd_ioctl.c
+++ b/drivers/ide/ide-cd_ioctl.c
@@ -79,8 +79,8 @@ int ide_cdrom_drive_status(struct cdrom_device_info *cdi, int slot_nr)
79 return CDS_DRIVE_NOT_READY; 79 return CDS_DRIVE_NOT_READY;
80} 80}
81 81
82int ide_cdrom_check_media_change_real(struct cdrom_device_info *cdi, 82unsigned int ide_cdrom_check_events_real(struct cdrom_device_info *cdi,
83 int slot_nr) 83 unsigned int clearing, int slot_nr)
84{ 84{
85 ide_drive_t *drive = cdi->handle; 85 ide_drive_t *drive = cdi->handle;
86 int retval; 86 int retval;
@@ -89,9 +89,9 @@ int ide_cdrom_check_media_change_real(struct cdrom_device_info *cdi,
89 (void) cdrom_check_status(drive, NULL); 89 (void) cdrom_check_status(drive, NULL);
90 retval = (drive->dev_flags & IDE_DFLAG_MEDIA_CHANGED) ? 1 : 0; 90 retval = (drive->dev_flags & IDE_DFLAG_MEDIA_CHANGED) ? 1 : 0;
91 drive->dev_flags &= ~IDE_DFLAG_MEDIA_CHANGED; 91 drive->dev_flags &= ~IDE_DFLAG_MEDIA_CHANGED;
92 return retval; 92 return retval ? DISK_EVENT_MEDIA_CHANGE : 0;
93 } else { 93 } else {
94 return -EINVAL; 94 return 0;
95 } 95 }
96} 96}
97 97
diff --git a/drivers/ide/ide-gd.c b/drivers/ide/ide-gd.c
index 35c4b43585e3..c4ffd4888939 100644
--- a/drivers/ide/ide-gd.c
+++ b/drivers/ide/ide-gd.c
@@ -285,11 +285,12 @@ static int ide_gd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
285 return 0; 285 return 0;
286} 286}
287 287
288static int ide_gd_media_changed(struct gendisk *disk) 288static unsigned int ide_gd_check_events(struct gendisk *disk,
289 unsigned int clearing)
289{ 290{
290 struct ide_disk_obj *idkp = ide_drv_g(disk, ide_disk_obj); 291 struct ide_disk_obj *idkp = ide_drv_g(disk, ide_disk_obj);
291 ide_drive_t *drive = idkp->drive; 292 ide_drive_t *drive = idkp->drive;
292 int ret; 293 bool ret;
293 294
294 /* do not scan partitions twice if this is a removable device */ 295 /* do not scan partitions twice if this is a removable device */
295 if (drive->dev_flags & IDE_DFLAG_ATTACH) { 296 if (drive->dev_flags & IDE_DFLAG_ATTACH) {
@@ -297,10 +298,10 @@ static int ide_gd_media_changed(struct gendisk *disk)
297 return 0; 298 return 0;
298 } 299 }
299 300
300 ret = !!(drive->dev_flags & IDE_DFLAG_MEDIA_CHANGED); 301 ret = drive->dev_flags & IDE_DFLAG_MEDIA_CHANGED;
301 drive->dev_flags &= ~IDE_DFLAG_MEDIA_CHANGED; 302 drive->dev_flags &= ~IDE_DFLAG_MEDIA_CHANGED;
302 303
303 return ret; 304 return ret ? DISK_EVENT_MEDIA_CHANGE : 0;
304} 305}
305 306
306static void ide_gd_unlock_native_capacity(struct gendisk *disk) 307static void ide_gd_unlock_native_capacity(struct gendisk *disk)
@@ -318,7 +319,7 @@ static int ide_gd_revalidate_disk(struct gendisk *disk)
318 struct ide_disk_obj *idkp = ide_drv_g(disk, ide_disk_obj); 319 struct ide_disk_obj *idkp = ide_drv_g(disk, ide_disk_obj);
319 ide_drive_t *drive = idkp->drive; 320 ide_drive_t *drive = idkp->drive;
320 321
321 if (ide_gd_media_changed(disk)) 322 if (ide_gd_check_events(disk, 0))
322 drive->disk_ops->get_capacity(drive); 323 drive->disk_ops->get_capacity(drive);
323 324
324 set_capacity(disk, ide_gd_capacity(drive)); 325 set_capacity(disk, ide_gd_capacity(drive));
@@ -340,7 +341,7 @@ static const struct block_device_operations ide_gd_ops = {
340 .release = ide_gd_release, 341 .release = ide_gd_release,
341 .ioctl = ide_gd_ioctl, 342 .ioctl = ide_gd_ioctl,
342 .getgeo = ide_gd_getgeo, 343 .getgeo = ide_gd_getgeo,
343 .media_changed = ide_gd_media_changed, 344 .check_events = ide_gd_check_events,
344 .unlock_native_capacity = ide_gd_unlock_native_capacity, 345 .unlock_native_capacity = ide_gd_unlock_native_capacity,
345 .revalidate_disk = ide_gd_revalidate_disk 346 .revalidate_disk = ide_gd_revalidate_disk
346}; 347};
@@ -412,6 +413,7 @@ static int ide_gd_probe(ide_drive_t *drive)
412 if (drive->dev_flags & IDE_DFLAG_REMOVABLE) 413 if (drive->dev_flags & IDE_DFLAG_REMOVABLE)
413 g->flags = GENHD_FL_REMOVABLE; 414 g->flags = GENHD_FL_REMOVABLE;
414 g->fops = &ide_gd_ops; 415 g->fops = &ide_gd_ops;
416 g->events = DISK_EVENT_MEDIA_CHANGE;
415 add_disk(g); 417 add_disk(g);
416 return 0; 418 return 0;
417 419
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 999dac054bcc..f4077840d3ab 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -549,8 +549,6 @@ plug_device_2:
549 549
550 if (rq) 550 if (rq)
551 blk_requeue_request(q, rq); 551 blk_requeue_request(q, rq);
552 if (!elv_queue_empty(q))
553 blk_plug_device(q);
554} 552}
555 553
556void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq) 554void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
@@ -562,8 +560,6 @@ void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
562 560
563 if (rq) 561 if (rq)
564 blk_requeue_request(q, rq); 562 blk_requeue_request(q, rq);
565 if (!elv_queue_empty(q))
566 blk_plug_device(q);
567 563
568 spin_unlock_irqrestore(q->queue_lock, flags); 564 spin_unlock_irqrestore(q->queue_lock, flags);
569} 565}
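
Requeueing shrinks in the same way: with per-queue plugging gone there is no plug to arm after blk_requeue_request(), so only the lock/requeue/unlock core remains. A sketch of that remaining pattern (dm's own requeue path further down looks the same):

#include <linux/blkdev.h>

static void demo_requeue(struct request_queue *q, struct request *rq)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        blk_requeue_request(q, rq);
        spin_unlock_irqrestore(q->queue_lock, flags);
}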
diff --git a/drivers/ide/ide-park.c b/drivers/ide/ide-park.c
index 88a380c5a470..6ab9ab2a5081 100644
--- a/drivers/ide/ide-park.c
+++ b/drivers/ide/ide-park.c
@@ -52,7 +52,7 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
52 rq->cmd[0] = REQ_UNPARK_HEADS; 52 rq->cmd[0] = REQ_UNPARK_HEADS;
53 rq->cmd_len = 1; 53 rq->cmd_len = 1;
54 rq->cmd_type = REQ_TYPE_SPECIAL; 54 rq->cmd_type = REQ_TYPE_SPECIAL;
55 elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 1); 55 elv_add_request(q, rq, ELEVATOR_INSERT_FRONT);
56 56
57out: 57out:
58 return; 58 return;
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index a2ce0b2da281..5c9362792f1d 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -347,7 +347,7 @@ static void write_page(struct bitmap *bitmap, struct page *page, int wait)
347 atomic_inc(&bitmap->pending_writes); 347 atomic_inc(&bitmap->pending_writes);
348 set_buffer_locked(bh); 348 set_buffer_locked(bh);
349 set_buffer_mapped(bh); 349 set_buffer_mapped(bh);
350 submit_bh(WRITE | REQ_UNPLUG | REQ_SYNC, bh); 350 submit_bh(WRITE | REQ_SYNC, bh);
351 bh = bh->b_this_page; 351 bh = bh->b_this_page;
352 } 352 }
353 353
@@ -1339,8 +1339,7 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
1339 prepare_to_wait(&bitmap->overflow_wait, &__wait, 1339 prepare_to_wait(&bitmap->overflow_wait, &__wait,
1340 TASK_UNINTERRUPTIBLE); 1340 TASK_UNINTERRUPTIBLE);
1341 spin_unlock_irq(&bitmap->lock); 1341 spin_unlock_irq(&bitmap->lock);
1342 md_unplug(bitmap->mddev); 1342 io_schedule();
1343 schedule();
1344 finish_wait(&bitmap->overflow_wait, &__wait); 1343 finish_wait(&bitmap->overflow_wait, &__wait);
1345 continue; 1344 continue;
1346 } 1345 }
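
Two idioms change in the bitmap code: REQ_UNPLUG disappears from the submission flags, and the waiter that used to call md_unplug() before schedule() now uses io_schedule(), which under the on-stack plugging scheme lets the scheduler push out the task's pending I/O before it sleeps. A sketch of the write side, assuming a buffer_head already set up as in write_page():

#include <linux/buffer_head.h>

static void demo_write_page(struct buffer_head *bh)
{
        submit_bh(WRITE | REQ_SYNC, bh);        /* no REQ_UNPLUG flag any more */
}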
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 4e054bd91664..2c62c1169f78 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -991,11 +991,6 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone)
991 clone->bi_destructor = dm_crypt_bio_destructor; 991 clone->bi_destructor = dm_crypt_bio_destructor;
992} 992}
993 993
994static void kcryptd_unplug(struct crypt_config *cc)
995{
996 blk_unplug(bdev_get_queue(cc->dev->bdev));
997}
998
999static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp) 994static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
1000{ 995{
1001 struct crypt_config *cc = io->target->private; 996 struct crypt_config *cc = io->target->private;
@@ -1008,10 +1003,8 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
1008 * one in order to decrypt the whole bio data *afterwards*. 1003 * one in order to decrypt the whole bio data *afterwards*.
1009 */ 1004 */
1010 clone = bio_alloc_bioset(gfp, bio_segments(base_bio), cc->bs); 1005 clone = bio_alloc_bioset(gfp, bio_segments(base_bio), cc->bs);
1011 if (!clone) { 1006 if (!clone)
1012 kcryptd_unplug(cc);
1013 return 1; 1007 return 1;
1014 }
1015 1008
1016 crypt_inc_pending(io); 1009 crypt_inc_pending(io);
1017 1010
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 136d4f71a116..76a5af00a26b 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -352,7 +352,7 @@ static void dispatch_io(int rw, unsigned int num_regions,
352 BUG_ON(num_regions > DM_IO_MAX_REGIONS); 352 BUG_ON(num_regions > DM_IO_MAX_REGIONS);
353 353
354 if (sync) 354 if (sync)
355 rw |= REQ_SYNC | REQ_UNPLUG; 355 rw |= REQ_SYNC;
356 356
357 /* 357 /*
358 * For multiple regions we need to be careful to rewind 358 * For multiple regions we need to be careful to rewind
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 924f5f0084c2..1bb73a13ca40 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -37,13 +37,6 @@ struct dm_kcopyd_client {
37 unsigned int nr_pages; 37 unsigned int nr_pages;
38 unsigned int nr_free_pages; 38 unsigned int nr_free_pages;
39 39
40 /*
41 * Block devices to unplug.
42 * Non-NULL pointer means that a block device has some pending requests
43 * and needs to be unplugged.
44 */
45 struct block_device *unplug[2];
46
47 struct dm_io_client *io_client; 40 struct dm_io_client *io_client;
48 41
49 wait_queue_head_t destroyq; 42 wait_queue_head_t destroyq;
@@ -315,31 +308,6 @@ static int run_complete_job(struct kcopyd_job *job)
315 return 0; 308 return 0;
316} 309}
317 310
318/*
319 * Unplug the block device at the specified index.
320 */
321static void unplug(struct dm_kcopyd_client *kc, int rw)
322{
323 if (kc->unplug[rw] != NULL) {
324 blk_unplug(bdev_get_queue(kc->unplug[rw]));
325 kc->unplug[rw] = NULL;
326 }
327}
328
329/*
330 * Prepare block device unplug. If there's another device
331 * to be unplugged at the same array index, we unplug that
332 * device first.
333 */
334static void prepare_unplug(struct dm_kcopyd_client *kc, int rw,
335 struct block_device *bdev)
336{
337 if (likely(kc->unplug[rw] == bdev))
338 return;
339 unplug(kc, rw);
340 kc->unplug[rw] = bdev;
341}
342
343static void complete_io(unsigned long error, void *context) 311static void complete_io(unsigned long error, void *context)
344{ 312{
345 struct kcopyd_job *job = (struct kcopyd_job *) context; 313 struct kcopyd_job *job = (struct kcopyd_job *) context;
@@ -386,16 +354,10 @@ static int run_io_job(struct kcopyd_job *job)
386 .client = job->kc->io_client, 354 .client = job->kc->io_client,
387 }; 355 };
388 356
389 if (job->rw == READ) { 357 if (job->rw == READ)
390 r = dm_io(&io_req, 1, &job->source, NULL); 358 r = dm_io(&io_req, 1, &job->source, NULL);
391 prepare_unplug(job->kc, READ, job->source.bdev); 359 else
392 } else {
393 if (job->num_dests > 1)
394 io_req.bi_rw |= REQ_UNPLUG;
395 r = dm_io(&io_req, job->num_dests, job->dests, NULL); 360 r = dm_io(&io_req, job->num_dests, job->dests, NULL);
396 if (!(io_req.bi_rw & REQ_UNPLUG))
397 prepare_unplug(job->kc, WRITE, job->dests[0].bdev);
398 }
399 361
400 return r; 362 return r;
401} 363}
@@ -466,6 +428,7 @@ static void do_work(struct work_struct *work)
466{ 428{
467 struct dm_kcopyd_client *kc = container_of(work, 429 struct dm_kcopyd_client *kc = container_of(work,
468 struct dm_kcopyd_client, kcopyd_work); 430 struct dm_kcopyd_client, kcopyd_work);
431 struct blk_plug plug;
469 432
470 /* 433 /*
471 * The order that these are called is *very* important. 434 * The order that these are called is *very* important.
@@ -473,18 +436,12 @@ static void do_work(struct work_struct *work)
473 * Pages jobs when successful will jump onto the io jobs 436 * Pages jobs when successful will jump onto the io jobs
474 * list. io jobs call wake when they complete and it all 437 * list. io jobs call wake when they complete and it all
475 * starts again. 438 * starts again.
476 *
477 * Note that io_jobs add block devices to the unplug array,
478 * this array is cleared with "unplug" calls. It is thus
479 * forbidden to run complete_jobs after io_jobs and before
480 * unplug because the block device could be destroyed in
481 * job completion callback.
482 */ 439 */
440 blk_start_plug(&plug);
483 process_jobs(&kc->complete_jobs, kc, run_complete_job); 441 process_jobs(&kc->complete_jobs, kc, run_complete_job);
484 process_jobs(&kc->pages_jobs, kc, run_pages_job); 442 process_jobs(&kc->pages_jobs, kc, run_pages_job);
485 process_jobs(&kc->io_jobs, kc, run_io_job); 443 process_jobs(&kc->io_jobs, kc, run_io_job);
486 unplug(kc, READ); 444 blk_finish_plug(&plug);
487 unplug(kc, WRITE);
488} 445}
489 446
490/* 447/*
@@ -665,8 +622,6 @@ int dm_kcopyd_client_create(unsigned int nr_pages,
665 INIT_LIST_HEAD(&kc->io_jobs); 622 INIT_LIST_HEAD(&kc->io_jobs);
666 INIT_LIST_HEAD(&kc->pages_jobs); 623 INIT_LIST_HEAD(&kc->pages_jobs);
667 624
668 memset(kc->unplug, 0, sizeof(kc->unplug));
669
670 kc->job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache); 625 kc->job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache);
671 if (!kc->job_pool) 626 if (!kc->job_pool)
672 goto bad_slab; 627 goto bad_slab;
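
The kcopyd hunk is the canonical on-stack plugging pattern: instead of remembering which block devices still need an unplug, the worker brackets its whole batch with a blk_plug, and blk_finish_plug() (or any sleep) pushes out whatever was queued. A minimal sketch, with process_all_jobs() as a hypothetical stand-in for the three process_jobs() passes:

#include <linux/blkdev.h>

struct dm_kcopyd_client;
static void process_all_jobs(struct dm_kcopyd_client *kc);      /* hypothetical */

static void demo_do_work(struct dm_kcopyd_client *kc)
{
        struct blk_plug plug;

        blk_start_plug(&plug);
        process_all_jobs(kc);           /* submits the queued I/O */
        blk_finish_plug(&plug);
}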
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index b9e1e15ef11c..5ef136cdba91 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -394,7 +394,7 @@ static void raid_unplug(struct dm_target_callbacks *cb)
394{ 394{
395 struct raid_set *rs = container_of(cb, struct raid_set, callbacks); 395 struct raid_set *rs = container_of(cb, struct raid_set, callbacks);
396 396
397 md_raid5_unplug_device(rs->md.private); 397 md_raid5_kick_device(rs->md.private);
398} 398}
399 399
400/* 400/*
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index dee326775c60..976ad4688afc 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -842,8 +842,6 @@ static void do_mirror(struct work_struct *work)
842 do_reads(ms, &reads); 842 do_reads(ms, &reads);
843 do_writes(ms, &writes); 843 do_writes(ms, &writes);
844 do_failures(ms, &failures); 844 do_failures(ms, &failures);
845
846 dm_table_unplug_all(ms->ti->table);
847} 845}
848 846
849/*----------------------------------------------------------------- 847/*-----------------------------------------------------------------
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 38e4eb1bb965..416d4e258df6 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -55,6 +55,7 @@ struct dm_table {
55 struct dm_target *targets; 55 struct dm_target *targets;
56 56
57 unsigned discards_supported:1; 57 unsigned discards_supported:1;
58 unsigned integrity_supported:1;
58 59
59 /* 60 /*
60 * Indicates the rw permissions for the new logical 61 * Indicates the rw permissions for the new logical
@@ -859,7 +860,7 @@ int dm_table_alloc_md_mempools(struct dm_table *t)
859 return -EINVAL; 860 return -EINVAL;
860 } 861 }
861 862
862 t->mempools = dm_alloc_md_mempools(type); 863 t->mempools = dm_alloc_md_mempools(type, t->integrity_supported);
863 if (!t->mempools) 864 if (!t->mempools)
864 return -ENOMEM; 865 return -ENOMEM;
865 866
@@ -935,8 +936,10 @@ static int dm_table_prealloc_integrity(struct dm_table *t, struct mapped_device
935 struct dm_dev_internal *dd; 936 struct dm_dev_internal *dd;
936 937
937 list_for_each_entry(dd, devices, list) 938 list_for_each_entry(dd, devices, list)
938 if (bdev_get_integrity(dd->dm_dev.bdev)) 939 if (bdev_get_integrity(dd->dm_dev.bdev)) {
940 t->integrity_supported = 1;
939 return blk_integrity_register(dm_disk(md), NULL); 941 return blk_integrity_register(dm_disk(md), NULL);
942 }
940 943
941 return 0; 944 return 0;
942} 945}
@@ -1275,29 +1278,6 @@ int dm_table_any_busy_target(struct dm_table *t)
1275 return 0; 1278 return 0;
1276} 1279}
1277 1280
1278void dm_table_unplug_all(struct dm_table *t)
1279{
1280 struct dm_dev_internal *dd;
1281 struct list_head *devices = dm_table_get_devices(t);
1282 struct dm_target_callbacks *cb;
1283
1284 list_for_each_entry(dd, devices, list) {
1285 struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
1286 char b[BDEVNAME_SIZE];
1287
1288 if (likely(q))
1289 blk_unplug(q);
1290 else
1291 DMWARN_LIMIT("%s: Cannot unplug nonexistent device %s",
1292 dm_device_name(t->md),
1293 bdevname(dd->dm_dev.bdev, b));
1294 }
1295
1296 list_for_each_entry(cb, &t->target_callbacks, list)
1297 if (cb->unplug_fn)
1298 cb->unplug_fn(cb);
1299}
1300
1301struct mapped_device *dm_table_get_md(struct dm_table *t) 1281struct mapped_device *dm_table_get_md(struct dm_table *t)
1302{ 1282{
1303 return t->md; 1283 return t->md;
@@ -1345,4 +1325,3 @@ EXPORT_SYMBOL(dm_table_get_mode);
1345EXPORT_SYMBOL(dm_table_get_md); 1325EXPORT_SYMBOL(dm_table_get_md);
1346EXPORT_SYMBOL(dm_table_put); 1326EXPORT_SYMBOL(dm_table_put);
1347EXPORT_SYMBOL(dm_table_get); 1327EXPORT_SYMBOL(dm_table_get);
1348EXPORT_SYMBOL(dm_table_unplug_all);
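
dm-table now notes whether any member device carries an integrity profile and, via the new integrity_supported bit, lets dm_alloc_md_mempools() size an integrity bioset (see the dm.c hunk below). A sketch of the detection side, using only helpers visible in this hunk; the structures are dm-internal, so this is shown as it would sit inside dm-table.c itself:

static int demo_note_integrity(struct dm_table *t, struct mapped_device *md)
{
        struct dm_dev_internal *dd;

        list_for_each_entry(dd, dm_table_get_devices(t), list)
                if (bdev_get_integrity(dd->dm_dev.bdev)) {
                        t->integrity_supported = 1;
                        return blk_integrity_register(dm_disk(md), NULL);
                }
        return 0;
}

/* later: t->mempools = dm_alloc_md_mempools(type, t->integrity_supported); */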
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index eaa3af0e0632..0cf68b478878 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -477,7 +477,8 @@ static void start_io_acct(struct dm_io *io)
477 cpu = part_stat_lock(); 477 cpu = part_stat_lock();
478 part_round_stats(cpu, &dm_disk(md)->part0); 478 part_round_stats(cpu, &dm_disk(md)->part0);
479 part_stat_unlock(); 479 part_stat_unlock();
480 dm_disk(md)->part0.in_flight[rw] = atomic_inc_return(&md->pending[rw]); 480 atomic_set(&dm_disk(md)->part0.in_flight[rw],
481 atomic_inc_return(&md->pending[rw]));
481} 482}
482 483
483static void end_io_acct(struct dm_io *io) 484static void end_io_acct(struct dm_io *io)
@@ -497,8 +498,8 @@ static void end_io_acct(struct dm_io *io)
497 * After this is decremented the bio must not be touched if it is 498 * After this is decremented the bio must not be touched if it is
498 * a flush. 499 * a flush.
499 */ 500 */
500 dm_disk(md)->part0.in_flight[rw] = pending = 501 pending = atomic_dec_return(&md->pending[rw]);
501 atomic_dec_return(&md->pending[rw]); 502 atomic_set(&dm_disk(md)->part0.in_flight[rw], pending);
502 pending += atomic_read(&md->pending[rw^0x1]); 503 pending += atomic_read(&md->pending[rw^0x1]);
503 504
504 /* nudge anyone waiting on suspend queue */ 505 /* nudge anyone waiting on suspend queue */
@@ -807,8 +808,6 @@ void dm_requeue_unmapped_request(struct request *clone)
807 dm_unprep_request(rq); 808 dm_unprep_request(rq);
808 809
809 spin_lock_irqsave(q->queue_lock, flags); 810 spin_lock_irqsave(q->queue_lock, flags);
810 if (elv_queue_empty(q))
811 blk_plug_device(q);
812 blk_requeue_request(q, rq); 811 blk_requeue_request(q, rq);
813 spin_unlock_irqrestore(q->queue_lock, flags); 812 spin_unlock_irqrestore(q->queue_lock, flags);
814 813
@@ -1613,10 +1612,10 @@ static void dm_request_fn(struct request_queue *q)
1613 * number of in-flight I/Os after the queue is stopped in 1612 * number of in-flight I/Os after the queue is stopped in
1614 * dm_suspend(). 1613 * dm_suspend().
1615 */ 1614 */
1616 while (!blk_queue_plugged(q) && !blk_queue_stopped(q)) { 1615 while (!blk_queue_stopped(q)) {
1617 rq = blk_peek_request(q); 1616 rq = blk_peek_request(q);
1618 if (!rq) 1617 if (!rq)
1619 goto plug_and_out; 1618 goto delay_and_out;
1620 1619
1621 /* always use block 0 to find the target for flushes for now */ 1620 /* always use block 0 to find the target for flushes for now */
1622 pos = 0; 1621 pos = 0;
@@ -1627,7 +1626,7 @@ static void dm_request_fn(struct request_queue *q)
1627 BUG_ON(!dm_target_is_valid(ti)); 1626 BUG_ON(!dm_target_is_valid(ti));
1628 1627
1629 if (ti->type->busy && ti->type->busy(ti)) 1628 if (ti->type->busy && ti->type->busy(ti))
1630 goto plug_and_out; 1629 goto delay_and_out;
1631 1630
1632 blk_start_request(rq); 1631 blk_start_request(rq);
1633 clone = rq->special; 1632 clone = rq->special;
@@ -1647,11 +1646,8 @@ requeued:
1647 BUG_ON(!irqs_disabled()); 1646 BUG_ON(!irqs_disabled());
1648 spin_lock(q->queue_lock); 1647 spin_lock(q->queue_lock);
1649 1648
1650plug_and_out: 1649delay_and_out:
1651 if (!elv_queue_empty(q)) 1650 blk_delay_queue(q, HZ / 10);
1652 /* Some requests still remain, retry later */
1653 blk_plug_device(q);
1654
1655out: 1651out:
1656 dm_table_put(map); 1652 dm_table_put(map);
1657 1653
@@ -1680,20 +1676,6 @@ static int dm_lld_busy(struct request_queue *q)
1680 return r; 1676 return r;
1681} 1677}
1682 1678
1683static void dm_unplug_all(struct request_queue *q)
1684{
1685 struct mapped_device *md = q->queuedata;
1686 struct dm_table *map = dm_get_live_table(md);
1687
1688 if (map) {
1689 if (dm_request_based(md))
1690 generic_unplug_device(q);
1691
1692 dm_table_unplug_all(map);
1693 dm_table_put(map);
1694 }
1695}
1696
1697static int dm_any_congested(void *congested_data, int bdi_bits) 1679static int dm_any_congested(void *congested_data, int bdi_bits)
1698{ 1680{
1699 int r = bdi_bits; 1681 int r = bdi_bits;
@@ -1817,7 +1799,6 @@ static void dm_init_md_queue(struct mapped_device *md)
1817 md->queue->backing_dev_info.congested_data = md; 1799 md->queue->backing_dev_info.congested_data = md;
1818 blk_queue_make_request(md->queue, dm_request); 1800 blk_queue_make_request(md->queue, dm_request);
1819 blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY); 1801 blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
1820 md->queue->unplug_fn = dm_unplug_all;
1821 blk_queue_merge_bvec(md->queue, dm_merge_bvec); 1802 blk_queue_merge_bvec(md->queue, dm_merge_bvec);
1822 blk_queue_flush(md->queue, REQ_FLUSH | REQ_FUA); 1803 blk_queue_flush(md->queue, REQ_FLUSH | REQ_FUA);
1823} 1804}
@@ -2263,8 +2244,6 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
2263 int r = 0; 2244 int r = 0;
2264 DECLARE_WAITQUEUE(wait, current); 2245 DECLARE_WAITQUEUE(wait, current);
2265 2246
2266 dm_unplug_all(md->queue);
2267
2268 add_wait_queue(&md->wait, &wait); 2247 add_wait_queue(&md->wait, &wait);
2269 2248
2270 while (1) { 2249 while (1) {
@@ -2539,7 +2518,6 @@ int dm_resume(struct mapped_device *md)
2539 2518
2540 clear_bit(DMF_SUSPENDED, &md->flags); 2519 clear_bit(DMF_SUSPENDED, &md->flags);
2541 2520
2542 dm_table_unplug_all(map);
2543 r = 0; 2521 r = 0;
2544out: 2522out:
2545 dm_table_put(map); 2523 dm_table_put(map);
@@ -2643,9 +2621,10 @@ int dm_noflush_suspending(struct dm_target *ti)
2643} 2621}
2644EXPORT_SYMBOL_GPL(dm_noflush_suspending); 2622EXPORT_SYMBOL_GPL(dm_noflush_suspending);
2645 2623
2646struct dm_md_mempools *dm_alloc_md_mempools(unsigned type) 2624struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity)
2647{ 2625{
2648 struct dm_md_mempools *pools = kmalloc(sizeof(*pools), GFP_KERNEL); 2626 struct dm_md_mempools *pools = kmalloc(sizeof(*pools), GFP_KERNEL);
2627 unsigned int pool_size = (type == DM_TYPE_BIO_BASED) ? 16 : MIN_IOS;
2649 2628
2650 if (!pools) 2629 if (!pools)
2651 return NULL; 2630 return NULL;
@@ -2662,13 +2641,18 @@ struct dm_md_mempools *dm_alloc_md_mempools(unsigned type)
2662 if (!pools->tio_pool) 2641 if (!pools->tio_pool)
2663 goto free_io_pool_and_out; 2642 goto free_io_pool_and_out;
2664 2643
2665 pools->bs = (type == DM_TYPE_BIO_BASED) ? 2644 pools->bs = bioset_create(pool_size, 0);
2666 bioset_create(16, 0) : bioset_create(MIN_IOS, 0);
2667 if (!pools->bs) 2645 if (!pools->bs)
2668 goto free_tio_pool_and_out; 2646 goto free_tio_pool_and_out;
2669 2647
2648 if (integrity && bioset_integrity_create(pools->bs, pool_size))
2649 goto free_bioset_and_out;
2650
2670 return pools; 2651 return pools;
2671 2652
2653free_bioset_and_out:
2654 bioset_free(pools->bs);
2655
2672free_tio_pool_and_out: 2656free_tio_pool_and_out:
2673 mempool_destroy(pools->tio_pool); 2657 mempool_destroy(pools->tio_pool);
2674 2658
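
With nothing to park requests behind, dm's request_fn backs off with blk_delay_queue() whenever it cannot make progress, either because no request is available or because the target reports busy. A sketch of the loop's new shape; as with the real dm_request_fn(), the block layer calls this with queue_lock held, and demo_target_busy() is a hypothetical stand-in for the ti->type->busy check:

#include <linux/blkdev.h>

static bool demo_target_busy(struct request_queue *q);  /* hypothetical */

static void demo_request_fn(struct request_queue *q)
{
        while (!blk_queue_stopped(q)) {
                struct request *rq = blk_peek_request(q);

                if (!rq || demo_target_busy(q)) {
                        blk_delay_queue(q, HZ / 10);    /* retry a bit later */
                        return;
                }
                blk_start_request(rq);
                /* ...map and dispatch the clone as before... */
        }
}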
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 0c2dd5f4af76..1aaf16746da8 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -149,7 +149,7 @@ void dm_kcopyd_exit(void);
149/* 149/*
150 * Mempool operations 150 * Mempool operations
151 */ 151 */
152struct dm_md_mempools *dm_alloc_md_mempools(unsigned type); 152struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity);
153void dm_free_md_mempools(struct dm_md_mempools *pools); 153void dm_free_md_mempools(struct dm_md_mempools *pools);
154 154
155#endif 155#endif
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 0ed7f6bc2a7f..abfb59a61ede 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -87,22 +87,6 @@ static int linear_mergeable_bvec(struct request_queue *q,
87 return maxsectors << 9; 87 return maxsectors << 9;
88} 88}
89 89
90static void linear_unplug(struct request_queue *q)
91{
92 mddev_t *mddev = q->queuedata;
93 linear_conf_t *conf;
94 int i;
95
96 rcu_read_lock();
97 conf = rcu_dereference(mddev->private);
98
99 for (i=0; i < mddev->raid_disks; i++) {
100 struct request_queue *r_queue = bdev_get_queue(conf->disks[i].rdev->bdev);
101 blk_unplug(r_queue);
102 }
103 rcu_read_unlock();
104}
105
106static int linear_congested(void *data, int bits) 90static int linear_congested(void *data, int bits)
107{ 91{
108 mddev_t *mddev = data; 92 mddev_t *mddev = data;
@@ -224,11 +208,9 @@ static int linear_run (mddev_t *mddev)
224 md_set_array_sectors(mddev, linear_size(mddev, 0, 0)); 208 md_set_array_sectors(mddev, linear_size(mddev, 0, 0));
225 209
226 blk_queue_merge_bvec(mddev->queue, linear_mergeable_bvec); 210 blk_queue_merge_bvec(mddev->queue, linear_mergeable_bvec);
227 mddev->queue->unplug_fn = linear_unplug;
228 mddev->queue->backing_dev_info.congested_fn = linear_congested; 211 mddev->queue->backing_dev_info.congested_fn = linear_congested;
229 mddev->queue->backing_dev_info.congested_data = mddev; 212 mddev->queue->backing_dev_info.congested_data = mddev;
230 md_integrity_register(mddev); 213 return md_integrity_register(mddev);
231 return 0;
232} 214}
233 215
234static void free_conf(struct rcu_head *head) 216static void free_conf(struct rcu_head *head)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index d5ad7723b172..06ecea751a39 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -780,8 +780,7 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
780 bio->bi_end_io = super_written; 780 bio->bi_end_io = super_written;
781 781
782 atomic_inc(&mddev->pending_writes); 782 atomic_inc(&mddev->pending_writes);
783 submit_bio(REQ_WRITE | REQ_SYNC | REQ_UNPLUG | REQ_FLUSH | REQ_FUA, 783 submit_bio(REQ_WRITE | REQ_SYNC | REQ_FLUSH | REQ_FUA, bio);
784 bio);
785} 784}
786 785
787void md_super_wait(mddev_t *mddev) 786void md_super_wait(mddev_t *mddev)
@@ -809,7 +808,7 @@ int sync_page_io(mdk_rdev_t *rdev, sector_t sector, int size,
809 struct completion event; 808 struct completion event;
810 int ret; 809 int ret;
811 810
812 rw |= REQ_SYNC | REQ_UNPLUG; 811 rw |= REQ_SYNC;
813 812
814 bio->bi_bdev = (metadata_op && rdev->meta_bdev) ? 813 bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
815 rdev->meta_bdev : rdev->bdev; 814 rdev->meta_bdev : rdev->bdev;
@@ -1804,8 +1803,12 @@ int md_integrity_register(mddev_t *mddev)
1804 mdname(mddev)); 1803 mdname(mddev));
1805 return -EINVAL; 1804 return -EINVAL;
1806 } 1805 }
1807 printk(KERN_NOTICE "md: data integrity on %s enabled\n", 1806 printk(KERN_NOTICE "md: data integrity enabled on %s\n", mdname(mddev));
1808 mdname(mddev)); 1807 if (bioset_integrity_create(mddev->bio_set, BIO_POOL_SIZE)) {
1808 printk(KERN_ERR "md: failed to create integrity pool for %s\n",
1809 mdname(mddev));
1810 return -EINVAL;
1811 }
1809 return 0; 1812 return 0;
1810} 1813}
1811EXPORT_SYMBOL(md_integrity_register); 1814EXPORT_SYMBOL(md_integrity_register);
@@ -4817,7 +4820,6 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
4817 __md_stop_writes(mddev); 4820 __md_stop_writes(mddev);
4818 md_stop(mddev); 4821 md_stop(mddev);
4819 mddev->queue->merge_bvec_fn = NULL; 4822 mddev->queue->merge_bvec_fn = NULL;
4820 mddev->queue->unplug_fn = NULL;
4821 mddev->queue->backing_dev_info.congested_fn = NULL; 4823 mddev->queue->backing_dev_info.congested_fn = NULL;
4822 4824
4823 /* tell userspace to handle 'inactive' */ 4825 /* tell userspace to handle 'inactive' */
@@ -6692,8 +6694,6 @@ EXPORT_SYMBOL_GPL(md_allow_write);
6692 6694
6693void md_unplug(mddev_t *mddev) 6695void md_unplug(mddev_t *mddev)
6694{ 6696{
6695 if (mddev->queue)
6696 blk_unplug(mddev->queue);
6697 if (mddev->plug) 6697 if (mddev->plug)
6698 mddev->plug->unplug_fn(mddev->plug); 6698 mddev->plug->unplug_fn(mddev->plug);
6699} 6699}
@@ -6876,7 +6876,6 @@ void md_do_sync(mddev_t *mddev)
6876 >= mddev->resync_max - mddev->curr_resync_completed 6876 >= mddev->resync_max - mddev->curr_resync_completed
6877 )) { 6877 )) {
6878 /* time to update curr_resync_completed */ 6878 /* time to update curr_resync_completed */
6879 md_unplug(mddev);
6880 wait_event(mddev->recovery_wait, 6879 wait_event(mddev->recovery_wait,
6881 atomic_read(&mddev->recovery_active) == 0); 6880 atomic_read(&mddev->recovery_active) == 0);
6882 mddev->curr_resync_completed = j; 6881 mddev->curr_resync_completed = j;
@@ -6952,7 +6951,6 @@ void md_do_sync(mddev_t *mddev)
6952 * about not overloading the IO subsystem. (things like an 6951 * about not overloading the IO subsystem. (things like an
6953 * e2fsck being done on the RAID array should execute fast) 6952 * e2fsck being done on the RAID array should execute fast)
6954 */ 6953 */
6955 md_unplug(mddev);
6956 cond_resched(); 6954 cond_resched();
6957 6955
6958 currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2 6956 currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
@@ -6971,8 +6969,6 @@ void md_do_sync(mddev_t *mddev)
6971 * this also signals 'finished resyncing' to md_stop 6969 * this also signals 'finished resyncing' to md_stop
6972 */ 6970 */
6973 out: 6971 out:
6974 md_unplug(mddev);
6975
6976 wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active)); 6972 wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
6977 6973
6978 /* tell personality that we are finished */ 6974 /* tell personality that we are finished */
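
md_integrity_register() can now fail, for instance when the integrity bioset cannot be created, so the personalities stop discarding its return value: linear and raid0 return it straight from run(), while multipath and raid1 unwind on error. A sketch of the calling convention, with the array setup elided:

static int demo_run(mddev_t *mddev)
{
        /* ...size the array, install congested_fn, etc., as before... */
        return md_integrity_register(mddev);
}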
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 3a62d440e27b..c35890990985 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -106,36 +106,6 @@ static void multipath_end_request(struct bio *bio, int error)
106 rdev_dec_pending(rdev, conf->mddev); 106 rdev_dec_pending(rdev, conf->mddev);
107} 107}
108 108
109static void unplug_slaves(mddev_t *mddev)
110{
111 multipath_conf_t *conf = mddev->private;
112 int i;
113
114 rcu_read_lock();
115 for (i=0; i<mddev->raid_disks; i++) {
116 mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
117 if (rdev && !test_bit(Faulty, &rdev->flags)
118 && atomic_read(&rdev->nr_pending)) {
119 struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
120
121 atomic_inc(&rdev->nr_pending);
122 rcu_read_unlock();
123
124 blk_unplug(r_queue);
125
126 rdev_dec_pending(rdev, mddev);
127 rcu_read_lock();
128 }
129 }
130 rcu_read_unlock();
131}
132
133static void multipath_unplug(struct request_queue *q)
134{
135 unplug_slaves(q->queuedata);
136}
137
138
139static int multipath_make_request(mddev_t *mddev, struct bio * bio) 109static int multipath_make_request(mddev_t *mddev, struct bio * bio)
140{ 110{
141 multipath_conf_t *conf = mddev->private; 111 multipath_conf_t *conf = mddev->private;
@@ -345,7 +315,7 @@ static int multipath_remove_disk(mddev_t *mddev, int number)
345 p->rdev = rdev; 315 p->rdev = rdev;
346 goto abort; 316 goto abort;
347 } 317 }
348 md_integrity_register(mddev); 318 err = md_integrity_register(mddev);
349 } 319 }
350abort: 320abort:
351 321
@@ -517,10 +487,12 @@ static int multipath_run (mddev_t *mddev)
517 */ 487 */
518 md_set_array_sectors(mddev, multipath_size(mddev, 0, 0)); 488 md_set_array_sectors(mddev, multipath_size(mddev, 0, 0));
519 489
520 mddev->queue->unplug_fn = multipath_unplug;
521 mddev->queue->backing_dev_info.congested_fn = multipath_congested; 490 mddev->queue->backing_dev_info.congested_fn = multipath_congested;
522 mddev->queue->backing_dev_info.congested_data = mddev; 491 mddev->queue->backing_dev_info.congested_data = mddev;
523 md_integrity_register(mddev); 492
493 if (md_integrity_register(mddev))
494 goto out_free_conf;
495
524 return 0; 496 return 0;
525 497
526out_free_conf: 498out_free_conf:
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index c0ac457f1218..e86bf3682e1e 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -25,21 +25,6 @@
25#include "raid0.h" 25#include "raid0.h"
26#include "raid5.h" 26#include "raid5.h"
27 27
28static void raid0_unplug(struct request_queue *q)
29{
30 mddev_t *mddev = q->queuedata;
31 raid0_conf_t *conf = mddev->private;
32 mdk_rdev_t **devlist = conf->devlist;
33 int raid_disks = conf->strip_zone[0].nb_dev;
34 int i;
35
36 for (i=0; i < raid_disks; i++) {
37 struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev);
38
39 blk_unplug(r_queue);
40 }
41}
42
43static int raid0_congested(void *data, int bits) 28static int raid0_congested(void *data, int bits)
44{ 29{
45 mddev_t *mddev = data; 30 mddev_t *mddev = data;
@@ -272,7 +257,6 @@ static int create_strip_zones(mddev_t *mddev, raid0_conf_t **private_conf)
272 mdname(mddev), 257 mdname(mddev),
273 (unsigned long long)smallest->sectors); 258 (unsigned long long)smallest->sectors);
274 } 259 }
275 mddev->queue->unplug_fn = raid0_unplug;
276 mddev->queue->backing_dev_info.congested_fn = raid0_congested; 260 mddev->queue->backing_dev_info.congested_fn = raid0_congested;
277 mddev->queue->backing_dev_info.congested_data = mddev; 261 mddev->queue->backing_dev_info.congested_data = mddev;
278 262
@@ -395,8 +379,7 @@ static int raid0_run(mddev_t *mddev)
395 379
396 blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec); 380 blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec);
397 dump_zones(mddev); 381 dump_zones(mddev);
398 md_integrity_register(mddev); 382 return md_integrity_register(mddev);
399 return 0;
400} 383}
401 384
402static int raid0_stop(mddev_t *mddev) 385static int raid0_stop(mddev_t *mddev)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 06cd712807d0..c2a21ae56d97 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -52,23 +52,16 @@
52#define NR_RAID1_BIOS 256 52#define NR_RAID1_BIOS 256
53 53
54 54
55static void unplug_slaves(mddev_t *mddev);
56
57static void allow_barrier(conf_t *conf); 55static void allow_barrier(conf_t *conf);
58static void lower_barrier(conf_t *conf); 56static void lower_barrier(conf_t *conf);
59 57
60static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data) 58static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
61{ 59{
62 struct pool_info *pi = data; 60 struct pool_info *pi = data;
63 r1bio_t *r1_bio;
64 int size = offsetof(r1bio_t, bios[pi->raid_disks]); 61 int size = offsetof(r1bio_t, bios[pi->raid_disks]);
65 62
66 /* allocate a r1bio with room for raid_disks entries in the bios array */ 63 /* allocate a r1bio with room for raid_disks entries in the bios array */
67 r1_bio = kzalloc(size, gfp_flags); 64 return kzalloc(size, gfp_flags);
68 if (!r1_bio && pi->mddev)
69 unplug_slaves(pi->mddev);
70
71 return r1_bio;
72} 65}
73 66
74static void r1bio_pool_free(void *r1_bio, void *data) 67static void r1bio_pool_free(void *r1_bio, void *data)
@@ -91,10 +84,8 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
91 int i, j; 84 int i, j;
92 85
93 r1_bio = r1bio_pool_alloc(gfp_flags, pi); 86 r1_bio = r1bio_pool_alloc(gfp_flags, pi);
94 if (!r1_bio) { 87 if (!r1_bio)
95 unplug_slaves(pi->mddev);
96 return NULL; 88 return NULL;
97 }
98 89
99 /* 90 /*
100 * Allocate bios : 1 for reading, n-1 for writing 91 * Allocate bios : 1 for reading, n-1 for writing
@@ -520,37 +511,6 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
520 return new_disk; 511 return new_disk;
521} 512}
522 513
523static void unplug_slaves(mddev_t *mddev)
524{
525 conf_t *conf = mddev->private;
526 int i;
527
528 rcu_read_lock();
529 for (i=0; i<mddev->raid_disks; i++) {
530 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
531 if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
532 struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
533
534 atomic_inc(&rdev->nr_pending);
535 rcu_read_unlock();
536
537 blk_unplug(r_queue);
538
539 rdev_dec_pending(rdev, mddev);
540 rcu_read_lock();
541 }
542 }
543 rcu_read_unlock();
544}
545
546static void raid1_unplug(struct request_queue *q)
547{
548 mddev_t *mddev = q->queuedata;
549
550 unplug_slaves(mddev);
551 md_wakeup_thread(mddev->thread);
552}
553
554static int raid1_congested(void *data, int bits) 514static int raid1_congested(void *data, int bits)
555{ 515{
556 mddev_t *mddev = data; 516 mddev_t *mddev = data;
@@ -580,23 +540,16 @@ static int raid1_congested(void *data, int bits)
580} 540}
581 541
582 542
583static int flush_pending_writes(conf_t *conf) 543static void flush_pending_writes(conf_t *conf)
584{ 544{
585 /* Any writes that have been queued but are awaiting 545 /* Any writes that have been queued but are awaiting
586 * bitmap updates get flushed here. 546 * bitmap updates get flushed here.
587 * We return 1 if any requests were actually submitted.
588 */ 547 */
589 int rv = 0;
590
591 spin_lock_irq(&conf->device_lock); 548 spin_lock_irq(&conf->device_lock);
592 549
593 if (conf->pending_bio_list.head) { 550 if (conf->pending_bio_list.head) {
594 struct bio *bio; 551 struct bio *bio;
595 bio = bio_list_get(&conf->pending_bio_list); 552 bio = bio_list_get(&conf->pending_bio_list);
596 /* Only take the spinlock to quiet a warning */
597 spin_lock(conf->mddev->queue->queue_lock);
598 blk_remove_plug(conf->mddev->queue);
599 spin_unlock(conf->mddev->queue->queue_lock);
600 spin_unlock_irq(&conf->device_lock); 553 spin_unlock_irq(&conf->device_lock);
601 /* flush any pending bitmap writes to 554 /* flush any pending bitmap writes to
602 * disk before proceeding w/ I/O */ 555 * disk before proceeding w/ I/O */
@@ -608,10 +561,14 @@ static int flush_pending_writes(conf_t *conf)
608 generic_make_request(bio); 561 generic_make_request(bio);
609 bio = next; 562 bio = next;
610 } 563 }
611 rv = 1;
612 } else 564 } else
613 spin_unlock_irq(&conf->device_lock); 565 spin_unlock_irq(&conf->device_lock);
614 return rv; 566}
567
568static void md_kick_device(mddev_t *mddev)
569{
570 blk_flush_plug(current);
571 md_wakeup_thread(mddev->thread);
615} 572}
616 573
617/* Barriers.... 574/* Barriers....
@@ -643,8 +600,7 @@ static void raise_barrier(conf_t *conf)
643 600
644 /* Wait until no block IO is waiting */ 601 /* Wait until no block IO is waiting */
645 wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting, 602 wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
646 conf->resync_lock, 603 conf->resync_lock, md_kick_device(conf->mddev));
647 raid1_unplug(conf->mddev->queue));
648 604
649 /* block any new IO from starting */ 605 /* block any new IO from starting */
650 conf->barrier++; 606 conf->barrier++;
@@ -652,8 +608,7 @@ static void raise_barrier(conf_t *conf)
652 /* Now wait for all pending IO to complete */ 608 /* Now wait for all pending IO to complete */
653 wait_event_lock_irq(conf->wait_barrier, 609 wait_event_lock_irq(conf->wait_barrier,
654 !conf->nr_pending && conf->barrier < RESYNC_DEPTH, 610 !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
655 conf->resync_lock, 611 conf->resync_lock, md_kick_device(conf->mddev));
656 raid1_unplug(conf->mddev->queue));
657 612
658 spin_unlock_irq(&conf->resync_lock); 613 spin_unlock_irq(&conf->resync_lock);
659} 614}
@@ -675,7 +630,7 @@ static void wait_barrier(conf_t *conf)
675 conf->nr_waiting++; 630 conf->nr_waiting++;
676 wait_event_lock_irq(conf->wait_barrier, !conf->barrier, 631 wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
677 conf->resync_lock, 632 conf->resync_lock,
678 raid1_unplug(conf->mddev->queue)); 633 md_kick_device(conf->mddev));
679 conf->nr_waiting--; 634 conf->nr_waiting--;
680 } 635 }
681 conf->nr_pending++; 636 conf->nr_pending++;
@@ -712,7 +667,7 @@ static void freeze_array(conf_t *conf)
712 conf->nr_pending == conf->nr_queued+1, 667 conf->nr_pending == conf->nr_queued+1,
713 conf->resync_lock, 668 conf->resync_lock,
714 ({ flush_pending_writes(conf); 669 ({ flush_pending_writes(conf);
715 raid1_unplug(conf->mddev->queue); })); 670 md_kick_device(conf->mddev); }));
716 spin_unlock_irq(&conf->resync_lock); 671 spin_unlock_irq(&conf->resync_lock);
717} 672}
718static void unfreeze_array(conf_t *conf) 673static void unfreeze_array(conf_t *conf)
@@ -962,7 +917,6 @@ static int make_request(mddev_t *mddev, struct bio * bio)
962 atomic_inc(&r1_bio->remaining); 917 atomic_inc(&r1_bio->remaining);
963 spin_lock_irqsave(&conf->device_lock, flags); 918 spin_lock_irqsave(&conf->device_lock, flags);
964 bio_list_add(&conf->pending_bio_list, mbio); 919 bio_list_add(&conf->pending_bio_list, mbio);
965 blk_plug_device_unlocked(mddev->queue);
966 spin_unlock_irqrestore(&conf->device_lock, flags); 920 spin_unlock_irqrestore(&conf->device_lock, flags);
967 } 921 }
968 r1_bio_write_done(r1_bio, bio->bi_vcnt, behind_pages, behind_pages != NULL); 922 r1_bio_write_done(r1_bio, bio->bi_vcnt, behind_pages, behind_pages != NULL);
@@ -971,7 +925,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
971 /* In case raid1d snuck in to freeze_array */ 925 /* In case raid1d snuck in to freeze_array */
972 wake_up(&conf->wait_barrier); 926 wake_up(&conf->wait_barrier);
973 927
974 if (do_sync) 928 if (do_sync || !bitmap)
975 md_wakeup_thread(mddev->thread); 929 md_wakeup_thread(mddev->thread);
976 930
977 return 0; 931 return 0;
@@ -1178,7 +1132,7 @@ static int raid1_remove_disk(mddev_t *mddev, int number)
1178 p->rdev = rdev; 1132 p->rdev = rdev;
1179 goto abort; 1133 goto abort;
1180 } 1134 }
1181 md_integrity_register(mddev); 1135 err = md_integrity_register(mddev);
1182 } 1136 }
1183abort: 1137abort:
1184 1138
@@ -1561,7 +1515,6 @@ static void raid1d(mddev_t *mddev)
1561 unsigned long flags; 1515 unsigned long flags;
1562 conf_t *conf = mddev->private; 1516 conf_t *conf = mddev->private;
1563 struct list_head *head = &conf->retry_list; 1517 struct list_head *head = &conf->retry_list;
1564 int unplug=0;
1565 mdk_rdev_t *rdev; 1518 mdk_rdev_t *rdev;
1566 1519
1567 md_check_recovery(mddev); 1520 md_check_recovery(mddev);
@@ -1569,7 +1522,7 @@ static void raid1d(mddev_t *mddev)
1569 for (;;) { 1522 for (;;) {
1570 char b[BDEVNAME_SIZE]; 1523 char b[BDEVNAME_SIZE];
1571 1524
1572 unplug += flush_pending_writes(conf); 1525 flush_pending_writes(conf);
1573 1526
1574 spin_lock_irqsave(&conf->device_lock, flags); 1527 spin_lock_irqsave(&conf->device_lock, flags);
1575 if (list_empty(head)) { 1528 if (list_empty(head)) {
@@ -1583,10 +1536,9 @@ static void raid1d(mddev_t *mddev)
1583 1536
1584 mddev = r1_bio->mddev; 1537 mddev = r1_bio->mddev;
1585 conf = mddev->private; 1538 conf = mddev->private;
1586 if (test_bit(R1BIO_IsSync, &r1_bio->state)) { 1539 if (test_bit(R1BIO_IsSync, &r1_bio->state))
1587 sync_request_write(mddev, r1_bio); 1540 sync_request_write(mddev, r1_bio);
1588 unplug = 1; 1541 else {
1589 } else {
1590 int disk; 1542 int disk;
1591 1543
1592 /* we got a read error. Maybe the drive is bad. Maybe just 1544 /* we got a read error. Maybe the drive is bad. Maybe just
@@ -1636,14 +1588,11 @@ static void raid1d(mddev_t *mddev)
1636 bio->bi_end_io = raid1_end_read_request; 1588 bio->bi_end_io = raid1_end_read_request;
1637 bio->bi_rw = READ | do_sync; 1589 bio->bi_rw = READ | do_sync;
1638 bio->bi_private = r1_bio; 1590 bio->bi_private = r1_bio;
1639 unplug = 1;
1640 generic_make_request(bio); 1591 generic_make_request(bio);
1641 } 1592 }
1642 } 1593 }
1643 cond_resched(); 1594 cond_resched();
1644 } 1595 }
1645 if (unplug)
1646 unplug_slaves(mddev);
1647} 1596}
1648 1597
1649 1598
@@ -2066,11 +2015,9 @@ static int run(mddev_t *mddev)
2066 2015
2067 md_set_array_sectors(mddev, raid1_size(mddev, 0, 0)); 2016 md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
2068 2017
2069 mddev->queue->unplug_fn = raid1_unplug;
2070 mddev->queue->backing_dev_info.congested_fn = raid1_congested; 2018 mddev->queue->backing_dev_info.congested_fn = raid1_congested;
2071 mddev->queue->backing_dev_info.congested_data = mddev; 2019 mddev->queue->backing_dev_info.congested_data = mddev;
2072 md_integrity_register(mddev); 2020 return md_integrity_register(mddev);
2073 return 0;
2074} 2021}
2075 2022
2076static int stop(mddev_t *mddev) 2023static int stop(mddev_t *mddev)
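
raid1 (and raid10 just below) swap their queue unplug callbacks for a small helper used as the wait_event_lock_irq() side effect: flush the calling task's on-stack plug so queued writes actually reach the member devices, then wake the raid thread. The helper as it appears in the patch, with one illustrative wait site:

static void md_kick_device(mddev_t *mddev)
{
        blk_flush_plug(current);
        md_wakeup_thread(mddev->thread);
}

/* typical use, while waiting for the barrier to drop:
 *      wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
 *                          conf->resync_lock, md_kick_device(conf->mddev));
 */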
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 747d061d8e05..f7b62370b374 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -57,23 +57,16 @@
57 */ 57 */
58#define NR_RAID10_BIOS 256 58#define NR_RAID10_BIOS 256
59 59
60static void unplug_slaves(mddev_t *mddev);
61
62static void allow_barrier(conf_t *conf); 60static void allow_barrier(conf_t *conf);
63static void lower_barrier(conf_t *conf); 61static void lower_barrier(conf_t *conf);
64 62
65static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data) 63static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
66{ 64{
67 conf_t *conf = data; 65 conf_t *conf = data;
68 r10bio_t *r10_bio;
69 int size = offsetof(struct r10bio_s, devs[conf->copies]); 66 int size = offsetof(struct r10bio_s, devs[conf->copies]);
70 67
71 /* allocate a r10bio with room for raid_disks entries in the bios array */ 68 /* allocate a r10bio with room for raid_disks entries in the bios array */
72 r10_bio = kzalloc(size, gfp_flags); 69 return kzalloc(size, gfp_flags);
73 if (!r10_bio && conf->mddev)
74 unplug_slaves(conf->mddev);
75
76 return r10_bio;
77} 70}
78 71
79static void r10bio_pool_free(void *r10_bio, void *data) 72static void r10bio_pool_free(void *r10_bio, void *data)
@@ -106,10 +99,8 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
106 int nalloc; 99 int nalloc;
107 100
108 r10_bio = r10bio_pool_alloc(gfp_flags, conf); 101 r10_bio = r10bio_pool_alloc(gfp_flags, conf);
109 if (!r10_bio) { 102 if (!r10_bio)
110 unplug_slaves(conf->mddev);
111 return NULL; 103 return NULL;
112 }
113 104
114 if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery)) 105 if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
115 nalloc = conf->copies; /* resync */ 106 nalloc = conf->copies; /* resync */
@@ -597,37 +588,6 @@ rb_out:
597 return disk; 588 return disk;
598} 589}
599 590
600static void unplug_slaves(mddev_t *mddev)
601{
602 conf_t *conf = mddev->private;
603 int i;
604
605 rcu_read_lock();
606 for (i=0; i < conf->raid_disks; i++) {
607 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
608 if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
609 struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
610
611 atomic_inc(&rdev->nr_pending);
612 rcu_read_unlock();
613
614 blk_unplug(r_queue);
615
616 rdev_dec_pending(rdev, mddev);
617 rcu_read_lock();
618 }
619 }
620 rcu_read_unlock();
621}
622
623static void raid10_unplug(struct request_queue *q)
624{
625 mddev_t *mddev = q->queuedata;
626
627 unplug_slaves(q->queuedata);
628 md_wakeup_thread(mddev->thread);
629}
630
631static int raid10_congested(void *data, int bits) 591static int raid10_congested(void *data, int bits)
632{ 592{
633 mddev_t *mddev = data; 593 mddev_t *mddev = data;
@@ -649,23 +609,16 @@ static int raid10_congested(void *data, int bits)
649 return ret; 609 return ret;
650} 610}
651 611
652static int flush_pending_writes(conf_t *conf) 612static void flush_pending_writes(conf_t *conf)
653{ 613{
654 /* Any writes that have been queued but are awaiting 614 /* Any writes that have been queued but are awaiting
655 * bitmap updates get flushed here. 615 * bitmap updates get flushed here.
656 * We return 1 if any requests were actually submitted.
657 */ 616 */
658 int rv = 0;
659
660 spin_lock_irq(&conf->device_lock); 617 spin_lock_irq(&conf->device_lock);
661 618
662 if (conf->pending_bio_list.head) { 619 if (conf->pending_bio_list.head) {
663 struct bio *bio; 620 struct bio *bio;
664 bio = bio_list_get(&conf->pending_bio_list); 621 bio = bio_list_get(&conf->pending_bio_list);
665 /* Spinlock only taken to quiet a warning */
666 spin_lock(conf->mddev->queue->queue_lock);
667 blk_remove_plug(conf->mddev->queue);
668 spin_unlock(conf->mddev->queue->queue_lock);
669 spin_unlock_irq(&conf->device_lock); 622 spin_unlock_irq(&conf->device_lock);
670 /* flush any pending bitmap writes to disk 623 /* flush any pending bitmap writes to disk
671 * before proceeding w/ I/O */ 624 * before proceeding w/ I/O */
@@ -677,11 +630,16 @@ static int flush_pending_writes(conf_t *conf)
677 generic_make_request(bio); 630 generic_make_request(bio);
678 bio = next; 631 bio = next;
679 } 632 }
680 rv = 1;
681 } else 633 } else
682 spin_unlock_irq(&conf->device_lock); 634 spin_unlock_irq(&conf->device_lock);
683 return rv;
684} 635}
636
637static void md_kick_device(mddev_t *mddev)
638{
639 blk_flush_plug(current);
640 md_wakeup_thread(mddev->thread);
641}
642
685/* Barriers.... 643/* Barriers....
686 * Sometimes we need to suspend IO while we do something else, 644 * Sometimes we need to suspend IO while we do something else,
687 * either some resync/recovery, or reconfigure the array. 645 * either some resync/recovery, or reconfigure the array.
@@ -711,8 +669,7 @@ static void raise_barrier(conf_t *conf, int force)
711 669
712 /* Wait until no block IO is waiting (unless 'force') */ 670 /* Wait until no block IO is waiting (unless 'force') */
713 wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting, 671 wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
714 conf->resync_lock, 672 conf->resync_lock, md_kick_device(conf->mddev));
715 raid10_unplug(conf->mddev->queue));
716 673
717 /* block any new IO from starting */ 674 /* block any new IO from starting */
718 conf->barrier++; 675 conf->barrier++;
@@ -720,8 +677,7 @@ static void raise_barrier(conf_t *conf, int force)
720 /* No wait for all pending IO to complete */ 677 /* No wait for all pending IO to complete */
721 wait_event_lock_irq(conf->wait_barrier, 678 wait_event_lock_irq(conf->wait_barrier,
722 !conf->nr_pending && conf->barrier < RESYNC_DEPTH, 679 !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
723 conf->resync_lock, 680 conf->resync_lock, md_kick_device(conf->mddev));
724 raid10_unplug(conf->mddev->queue));
725 681
726 spin_unlock_irq(&conf->resync_lock); 682 spin_unlock_irq(&conf->resync_lock);
727} 683}
@@ -742,7 +698,7 @@ static void wait_barrier(conf_t *conf)
742 conf->nr_waiting++; 698 conf->nr_waiting++;
743 wait_event_lock_irq(conf->wait_barrier, !conf->barrier, 699 wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
744 conf->resync_lock, 700 conf->resync_lock,
745 raid10_unplug(conf->mddev->queue)); 701 md_kick_device(conf->mddev));
746 conf->nr_waiting--; 702 conf->nr_waiting--;
747 } 703 }
748 conf->nr_pending++; 704 conf->nr_pending++;
@@ -779,7 +735,7 @@ static void freeze_array(conf_t *conf)
779 conf->nr_pending == conf->nr_queued+1, 735 conf->nr_pending == conf->nr_queued+1,
780 conf->resync_lock, 736 conf->resync_lock,
781 ({ flush_pending_writes(conf); 737 ({ flush_pending_writes(conf);
782 raid10_unplug(conf->mddev->queue); })); 738 md_kick_device(conf->mddev); }));
783 spin_unlock_irq(&conf->resync_lock); 739 spin_unlock_irq(&conf->resync_lock);
784} 740}
785 741
@@ -974,7 +930,6 @@ static int make_request(mddev_t *mddev, struct bio * bio)
974 atomic_inc(&r10_bio->remaining); 930 atomic_inc(&r10_bio->remaining);
975 spin_lock_irqsave(&conf->device_lock, flags); 931 spin_lock_irqsave(&conf->device_lock, flags);
976 bio_list_add(&conf->pending_bio_list, mbio); 932 bio_list_add(&conf->pending_bio_list, mbio);
977 blk_plug_device_unlocked(mddev->queue);
978 spin_unlock_irqrestore(&conf->device_lock, flags); 933 spin_unlock_irqrestore(&conf->device_lock, flags);
979 } 934 }
980 935
@@ -991,7 +946,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
991 /* In case raid10d snuck in to freeze_array */ 946 /* In case raid10d snuck in to freeze_array */
992 wake_up(&conf->wait_barrier); 947 wake_up(&conf->wait_barrier);
993 948
994 if (do_sync) 949 if (do_sync || !mddev->bitmap)
995 md_wakeup_thread(mddev->thread); 950 md_wakeup_thread(mddev->thread);
996 951
997 return 0; 952 return 0;
@@ -1233,7 +1188,7 @@ static int raid10_remove_disk(mddev_t *mddev, int number)
1233 p->rdev = rdev; 1188 p->rdev = rdev;
1234 goto abort; 1189 goto abort;
1235 } 1190 }
1236 md_integrity_register(mddev); 1191 err = md_integrity_register(mddev);
1237 } 1192 }
1238abort: 1193abort:
1239 1194
@@ -1684,7 +1639,6 @@ static void raid10d(mddev_t *mddev)
1684 unsigned long flags; 1639 unsigned long flags;
1685 conf_t *conf = mddev->private; 1640 conf_t *conf = mddev->private;
1686 struct list_head *head = &conf->retry_list; 1641 struct list_head *head = &conf->retry_list;
1687 int unplug=0;
1688 mdk_rdev_t *rdev; 1642 mdk_rdev_t *rdev;
1689 1643
1690 md_check_recovery(mddev); 1644 md_check_recovery(mddev);
@@ -1692,7 +1646,7 @@ static void raid10d(mddev_t *mddev)
1692 for (;;) { 1646 for (;;) {
1693 char b[BDEVNAME_SIZE]; 1647 char b[BDEVNAME_SIZE];
1694 1648
1695 unplug += flush_pending_writes(conf); 1649 flush_pending_writes(conf);
1696 1650
1697 spin_lock_irqsave(&conf->device_lock, flags); 1651 spin_lock_irqsave(&conf->device_lock, flags);
1698 if (list_empty(head)) { 1652 if (list_empty(head)) {
@@ -1706,13 +1660,11 @@ static void raid10d(mddev_t *mddev)
1706 1660
1707 mddev = r10_bio->mddev; 1661 mddev = r10_bio->mddev;
1708 conf = mddev->private; 1662 conf = mddev->private;
1709 if (test_bit(R10BIO_IsSync, &r10_bio->state)) { 1663 if (test_bit(R10BIO_IsSync, &r10_bio->state))
1710 sync_request_write(mddev, r10_bio); 1664 sync_request_write(mddev, r10_bio);
1711 unplug = 1; 1665 else if (test_bit(R10BIO_IsRecover, &r10_bio->state))
1712 } else if (test_bit(R10BIO_IsRecover, &r10_bio->state)) {
1713 recovery_request_write(mddev, r10_bio); 1666 recovery_request_write(mddev, r10_bio);
1714 unplug = 1; 1667 else {
1715 } else {
1716 int mirror; 1668 int mirror;
1717 /* we got a read error. Maybe the drive is bad. Maybe just 1669 /* we got a read error. Maybe the drive is bad. Maybe just
1718 * the block and we can fix it. 1670 * the block and we can fix it.
@@ -1759,14 +1711,11 @@ static void raid10d(mddev_t *mddev)
1759 bio->bi_rw = READ | do_sync; 1711 bio->bi_rw = READ | do_sync;
1760 bio->bi_private = r10_bio; 1712 bio->bi_private = r10_bio;
1761 bio->bi_end_io = raid10_end_read_request; 1713 bio->bi_end_io = raid10_end_read_request;
1762 unplug = 1;
1763 generic_make_request(bio); 1714 generic_make_request(bio);
1764 } 1715 }
1765 } 1716 }
1766 cond_resched(); 1717 cond_resched();
1767 } 1718 }
1768 if (unplug)
1769 unplug_slaves(mddev);
1770} 1719}
1771 1720
1772 1721
@@ -2377,7 +2326,6 @@ static int run(mddev_t *mddev)
2377 md_set_array_sectors(mddev, size); 2326 md_set_array_sectors(mddev, size);
2378 mddev->resync_max_sectors = size; 2327 mddev->resync_max_sectors = size;
2379 2328
2380 mddev->queue->unplug_fn = raid10_unplug;
2381 mddev->queue->backing_dev_info.congested_fn = raid10_congested; 2329 mddev->queue->backing_dev_info.congested_fn = raid10_congested;
2382 mddev->queue->backing_dev_info.congested_data = mddev; 2330 mddev->queue->backing_dev_info.congested_data = mddev;
2383 2331
@@ -2395,7 +2343,10 @@ static int run(mddev_t *mddev)
2395 2343
2396 if (conf->near_copies < conf->raid_disks) 2344 if (conf->near_copies < conf->raid_disks)
2397 blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec); 2345 blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);
2398 md_integrity_register(mddev); 2346
2347 if (md_integrity_register(mddev))
2348 goto out_free_conf;
2349
2399 return 0; 2350 return 0;
2400 2351
2401out_free_conf: 2352out_free_conf:
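
The raid10 hunks above remove the per-queue unplug machinery (unplug_slaves(), raid10_unplug(), blk_plug_device_unlocked()) and instead route every waiter through the small md_kick_device() helper added here. A minimal, simplified sketch of the resulting pattern, assuming the four-argument wait_event_lock_irq(wq, cond, lock, cmd) macro from drivers/md/md.h; example_wait_barrier() is illustrative only and omits the nr_waiting/nr_pending bookkeeping the real wait_barrier() keeps:

#include <linux/blkdev.h>
#include "md.h"
#include "raid10.h"

/* flush the calling task's on-stack plug and poke the md thread */
static void md_kick_device(mddev_t *mddev)
{
	blk_flush_plug(current);
	md_wakeup_thread(mddev->thread);
}

/* waiters pass the helper as the "cmd" that runs while sleeping on the barrier */
static void example_wait_barrier(conf_t *conf)
{
	spin_lock_irq(&conf->resync_lock);
	wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
			    conf->resync_lock,
			    md_kick_device(conf->mddev));
	spin_unlock_irq(&conf->resync_lock);
}
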
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 78536fdbd87f..e867ee42b152 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -433,8 +433,6 @@ static int has_failed(raid5_conf_t *conf)
433 return 0; 433 return 0;
434} 434}
435 435
436static void unplug_slaves(mddev_t *mddev);
437
438static struct stripe_head * 436static struct stripe_head *
439get_active_stripe(raid5_conf_t *conf, sector_t sector, 437get_active_stripe(raid5_conf_t *conf, sector_t sector,
440 int previous, int noblock, int noquiesce) 438 int previous, int noblock, int noquiesce)
@@ -463,8 +461,7 @@ get_active_stripe(raid5_conf_t *conf, sector_t sector,
463 < (conf->max_nr_stripes *3/4) 461 < (conf->max_nr_stripes *3/4)
464 || !conf->inactive_blocked), 462 || !conf->inactive_blocked),
465 conf->device_lock, 463 conf->device_lock,
466 md_raid5_unplug_device(conf) 464 md_raid5_kick_device(conf));
467 );
468 conf->inactive_blocked = 0; 465 conf->inactive_blocked = 0;
469 } else 466 } else
470 init_stripe(sh, sector, previous); 467 init_stripe(sh, sector, previous);
@@ -1473,8 +1470,7 @@ static int resize_stripes(raid5_conf_t *conf, int newsize)
1473 wait_event_lock_irq(conf->wait_for_stripe, 1470 wait_event_lock_irq(conf->wait_for_stripe,
1474 !list_empty(&conf->inactive_list), 1471 !list_empty(&conf->inactive_list),
1475 conf->device_lock, 1472 conf->device_lock,
1476 unplug_slaves(conf->mddev) 1473 blk_flush_plug(current));
1477 );
1478 osh = get_free_stripe(conf); 1474 osh = get_free_stripe(conf);
1479 spin_unlock_irq(&conf->device_lock); 1475 spin_unlock_irq(&conf->device_lock);
1480 atomic_set(&nsh->count, 1); 1476 atomic_set(&nsh->count, 1);
@@ -3645,58 +3641,19 @@ static void activate_bit_delay(raid5_conf_t *conf)
3645 } 3641 }
3646} 3642}
3647 3643
3648static void unplug_slaves(mddev_t *mddev) 3644void md_raid5_kick_device(raid5_conf_t *conf)
3649{ 3645{
3650 raid5_conf_t *conf = mddev->private; 3646 blk_flush_plug(current);
3651 int i; 3647 raid5_activate_delayed(conf);
3652 int devs = max(conf->raid_disks, conf->previous_raid_disks);
3653
3654 rcu_read_lock();
3655 for (i = 0; i < devs; i++) {
3656 mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
3657 if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
3658 struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
3659
3660 atomic_inc(&rdev->nr_pending);
3661 rcu_read_unlock();
3662
3663 blk_unplug(r_queue);
3664
3665 rdev_dec_pending(rdev, mddev);
3666 rcu_read_lock();
3667 }
3668 }
3669 rcu_read_unlock();
3670}
3671
3672void md_raid5_unplug_device(raid5_conf_t *conf)
3673{
3674 unsigned long flags;
3675
3676 spin_lock_irqsave(&conf->device_lock, flags);
3677
3678 if (plugger_remove_plug(&conf->plug)) {
3679 conf->seq_flush++;
3680 raid5_activate_delayed(conf);
3681 }
3682 md_wakeup_thread(conf->mddev->thread); 3648 md_wakeup_thread(conf->mddev->thread);
3683
3684 spin_unlock_irqrestore(&conf->device_lock, flags);
3685
3686 unplug_slaves(conf->mddev);
3687} 3649}
3688EXPORT_SYMBOL_GPL(md_raid5_unplug_device); 3650EXPORT_SYMBOL_GPL(md_raid5_kick_device);
3689 3651
3690static void raid5_unplug(struct plug_handle *plug) 3652static void raid5_unplug(struct plug_handle *plug)
3691{ 3653{
3692 raid5_conf_t *conf = container_of(plug, raid5_conf_t, plug); 3654 raid5_conf_t *conf = container_of(plug, raid5_conf_t, plug);
3693 md_raid5_unplug_device(conf);
3694}
3695 3655
3696static void raid5_unplug_queue(struct request_queue *q) 3656 md_raid5_kick_device(conf);
3697{
3698 mddev_t *mddev = q->queuedata;
3699 md_raid5_unplug_device(mddev->private);
3700} 3657}
3701 3658
3702int md_raid5_congested(mddev_t *mddev, int bits) 3659int md_raid5_congested(mddev_t *mddev, int bits)
@@ -4100,7 +4057,7 @@ static int make_request(mddev_t *mddev, struct bio * bi)
4100 * add failed due to overlap. Flush everything 4057 * add failed due to overlap. Flush everything
4101 * and wait a while 4058 * and wait a while
4102 */ 4059 */
4103 md_raid5_unplug_device(conf); 4060 md_raid5_kick_device(conf);
4104 release_stripe(sh); 4061 release_stripe(sh);
4105 schedule(); 4062 schedule();
4106 goto retry; 4063 goto retry;
@@ -4365,7 +4322,6 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski
4365 4322
4366 if (sector_nr >= max_sector) { 4323 if (sector_nr >= max_sector) {
4367 /* just being told to finish up .. nothing much to do */ 4324 /* just being told to finish up .. nothing much to do */
4368 unplug_slaves(mddev);
4369 4325
4370 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { 4326 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
4371 end_reshape(conf); 4327 end_reshape(conf);
@@ -4569,7 +4525,6 @@ static void raid5d(mddev_t *mddev)
4569 spin_unlock_irq(&conf->device_lock); 4525 spin_unlock_irq(&conf->device_lock);
4570 4526
4571 async_tx_issue_pending_all(); 4527 async_tx_issue_pending_all();
4572 unplug_slaves(mddev);
4573 4528
4574 pr_debug("--- raid5d inactive\n"); 4529 pr_debug("--- raid5d inactive\n");
4575} 4530}
@@ -5204,7 +5159,7 @@ static int run(mddev_t *mddev)
5204 5159
5205 mddev->queue->backing_dev_info.congested_data = mddev; 5160 mddev->queue->backing_dev_info.congested_data = mddev;
5206 mddev->queue->backing_dev_info.congested_fn = raid5_congested; 5161 mddev->queue->backing_dev_info.congested_fn = raid5_congested;
5207 mddev->queue->unplug_fn = raid5_unplug_queue; 5162 mddev->queue->queue_lock = &conf->device_lock;
5208 5163
5209 chunk_size = mddev->chunk_sectors << 9; 5164 chunk_size = mddev->chunk_sectors << 9;
5210 blk_queue_io_min(mddev->queue, chunk_size); 5165 blk_queue_io_min(mddev->queue, chunk_size);
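
In raid5 the same conversion collapses unplug_slaves() and md_raid5_unplug_device() into md_raid5_kick_device(), which no longer takes device_lock or touches a plug_handle: it just flushes the caller's on-stack plug, activates delayed stripes and wakes raid5d. Callers such as the overlap-retry path in make_request() and the wait in get_active_stripe() use it in place of the old unplug call. An annotated restatement of the new helper, as a reading aid for this diff rather than additional patch content:

void md_raid5_kick_device(raid5_conf_t *conf)
{
	blk_flush_plug(current);		/* submit bios held on this task's plug */
	raid5_activate_delayed(conf);		/* move delayed stripes onto handle_list */
	md_wakeup_thread(conf->mddev->thread);	/* let raid5d process them */
}
EXPORT_SYMBOL_GPL(md_raid5_kick_device);
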
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 2ace0582b409..8d563a4f022a 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -503,6 +503,6 @@ static inline int algorithm_is_DDF(int layout)
503} 503}
504 504
505extern int md_raid5_congested(mddev_t *mddev, int bits); 505extern int md_raid5_congested(mddev_t *mddev, int bits);
506extern void md_raid5_unplug_device(raid5_conf_t *conf); 506extern void md_raid5_kick_device(raid5_conf_t *conf);
507extern int raid5_set_cache_size(mddev_t *mddev, int size); 507extern int raid5_set_cache_size(mddev_t *mddev, int size);
508#endif 508#endif
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index ae7cad185898..47ec5bc0ed21 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -695,20 +695,22 @@ static int i2o_block_ioctl(struct block_device *bdev, fmode_t mode,
695}; 695};
696 696
697/** 697/**
698 * i2o_block_media_changed - Have we seen a media change? 698 * i2o_block_check_events - Have we seen a media change?
699 * @disk: gendisk which should be verified 699 * @disk: gendisk which should be verified
700 * @clearing: events being cleared
700 * 701 *
701 * Verifies if the media has changed. 702 * Verifies if the media has changed.
702 * 703 *
703 * Returns 1 if the media was changed or 0 otherwise. 704 * Returns 1 if the media was changed or 0 otherwise.
704 */ 705 */
705static int i2o_block_media_changed(struct gendisk *disk) 706static unsigned int i2o_block_check_events(struct gendisk *disk,
707 unsigned int clearing)
706{ 708{
707 struct i2o_block_device *p = disk->private_data; 709 struct i2o_block_device *p = disk->private_data;
708 710
709 if (p->media_change_flag) { 711 if (p->media_change_flag) {
710 p->media_change_flag = 0; 712 p->media_change_flag = 0;
711 return 1; 713 return DISK_EVENT_MEDIA_CHANGE;
712 } 714 }
713 return 0; 715 return 0;
714} 716}
@@ -895,11 +897,7 @@ static void i2o_block_request_fn(struct request_queue *q)
895{ 897{
896 struct request *req; 898 struct request *req;
897 899
898 while (!blk_queue_plugged(q)) { 900 while ((req = blk_peek_request(q)) != NULL) {
899 req = blk_peek_request(q);
900 if (!req)
901 break;
902
903 if (req->cmd_type == REQ_TYPE_FS) { 901 if (req->cmd_type == REQ_TYPE_FS) {
904 struct i2o_block_delayed_request *dreq; 902 struct i2o_block_delayed_request *dreq;
905 struct i2o_block_request *ireq = req->special; 903 struct i2o_block_request *ireq = req->special;
@@ -950,7 +948,7 @@ static const struct block_device_operations i2o_block_fops = {
950 .ioctl = i2o_block_ioctl, 948 .ioctl = i2o_block_ioctl,
951 .compat_ioctl = i2o_block_ioctl, 949 .compat_ioctl = i2o_block_ioctl,
952 .getgeo = i2o_block_getgeo, 950 .getgeo = i2o_block_getgeo,
953 .media_changed = i2o_block_media_changed 951 .check_events = i2o_block_check_events,
954}; 952};
955 953
956/** 954/**
@@ -1002,6 +1000,7 @@ static struct i2o_block_device *i2o_block_device_alloc(void)
1002 gd->major = I2O_MAJOR; 1000 gd->major = I2O_MAJOR;
1003 gd->queue = queue; 1001 gd->queue = queue;
1004 gd->fops = &i2o_block_fops; 1002 gd->fops = &i2o_block_fops;
1003 gd->events = DISK_EVENT_MEDIA_CHANGE;
1005 gd->private_data = dev; 1004 gd->private_data = dev;
1006 1005
1007 dev->gd = gd; 1006 dev->gd = gd;
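
i2o_block is one of several drivers in this series converted from ->media_changed to ->check_events: the method now returns a mask of DISK_EVENT_* bits rather than 0/1, and the disk must advertise which events it can report through gendisk->events. A minimal, hypothetical driver sketch of that contract (the foo_* names and the media_changed bit are illustrative, not part of this patch):

#include <linux/module.h>
#include <linux/genhd.h>
#include <linux/blkdev.h>
#include <linux/bitops.h>

struct foo_dev {
	unsigned long	media_changed;	/* bit 0 set by the driver's notify path */
};

static unsigned int foo_check_events(struct gendisk *disk,
				     unsigned int clearing)
{
	struct foo_dev *p = disk->private_data;

	if (test_and_clear_bit(0, &p->media_changed))
		return DISK_EVENT_MEDIA_CHANGE;	/* an event mask, not 0/1 */
	return 0;
}

static const struct block_device_operations foo_fops = {
	.owner		= THIS_MODULE,
	.check_events	= foo_check_events,	/* replaces .media_changed */
};

At alloc_disk() time such a driver also sets gd->events = DISK_EVENT_MEDIA_CHANGE, as the i2o_block, tape_block, blkvsc and westbridge hunks do; without that, the block layer will not poll check_events for the disk.
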
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 4e42d030e097..2ae727568df9 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -55,8 +55,7 @@ static int mmc_queue_thread(void *d)
55 55
56 spin_lock_irq(q->queue_lock); 56 spin_lock_irq(q->queue_lock);
57 set_current_state(TASK_INTERRUPTIBLE); 57 set_current_state(TASK_INTERRUPTIBLE);
58 if (!blk_queue_plugged(q)) 58 req = blk_fetch_request(q);
59 req = blk_fetch_request(q);
60 mq->req = req; 59 mq->req = req;
61 spin_unlock_irq(q->queue_lock); 60 spin_unlock_irq(q->queue_lock);
62 61
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 794bfd962266..4d2df2f76ea0 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1917,7 +1917,7 @@ static void __dasd_process_request_queue(struct dasd_block *block)
1917 return; 1917 return;
1918 } 1918 }
1919 /* Now we try to fetch requests from the request queue */ 1919 /* Now we try to fetch requests from the request queue */
1920 while (!blk_queue_plugged(queue) && (req = blk_peek_request(queue))) { 1920 while ((req = blk_peek_request(queue))) {
1921 if (basedev->features & DASD_FEATURE_READONLY && 1921 if (basedev->features & DASD_FEATURE_READONLY &&
1922 rq_data_dir(req) == WRITE) { 1922 rq_data_dir(req) == WRITE) {
1923 DBF_DEV_EVENT(DBF_ERR, basedev, 1923 DBF_DEV_EVENT(DBF_ERR, basedev,
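
With per-queue plugging gone, blk_queue_plugged() no longer exists, so request-fetching loops like the dasd one above (and the mmc, tape_block, FC bsg and SAS hunks nearby) simply drain the queue until blk_peek_request() or blk_fetch_request() returns NULL. A hypothetical driver sketch of the simplified loop; foo_request_fn() and foo_dispatch() are illustrative names:

#include <linux/blkdev.h>

static void foo_dispatch(struct request *req);	/* driver-specific, illustrative */

/* called with q->queue_lock held, as any ->request_fn is */
static void foo_request_fn(struct request_queue *q)
{
	struct request *req;

	while ((req = blk_fetch_request(q)) != NULL) {
		/* no "is the queue plugged?" gate any more; just keep draining */
		foo_dispatch(req);
	}
}
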
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index 55d2d0f4eabc..83cea9a55e2f 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -48,14 +48,14 @@
48static DEFINE_MUTEX(tape_block_mutex); 48static DEFINE_MUTEX(tape_block_mutex);
49static int tapeblock_open(struct block_device *, fmode_t); 49static int tapeblock_open(struct block_device *, fmode_t);
50static int tapeblock_release(struct gendisk *, fmode_t); 50static int tapeblock_release(struct gendisk *, fmode_t);
51static int tapeblock_medium_changed(struct gendisk *); 51static unsigned int tapeblock_check_events(struct gendisk *, unsigned int);
52static int tapeblock_revalidate_disk(struct gendisk *); 52static int tapeblock_revalidate_disk(struct gendisk *);
53 53
54static const struct block_device_operations tapeblock_fops = { 54static const struct block_device_operations tapeblock_fops = {
55 .owner = THIS_MODULE, 55 .owner = THIS_MODULE,
56 .open = tapeblock_open, 56 .open = tapeblock_open,
57 .release = tapeblock_release, 57 .release = tapeblock_release,
58 .media_changed = tapeblock_medium_changed, 58 .check_events = tapeblock_check_events,
59 .revalidate_disk = tapeblock_revalidate_disk, 59 .revalidate_disk = tapeblock_revalidate_disk,
60}; 60};
61 61
@@ -161,7 +161,6 @@ tapeblock_requeue(struct work_struct *work) {
161 161
162 spin_lock_irq(&device->blk_data.request_queue_lock); 162 spin_lock_irq(&device->blk_data.request_queue_lock);
163 while ( 163 while (
164 !blk_queue_plugged(queue) &&
165 blk_peek_request(queue) && 164 blk_peek_request(queue) &&
166 nr_queued < TAPEBLOCK_MIN_REQUEUE 165 nr_queued < TAPEBLOCK_MIN_REQUEUE
167 ) { 166 ) {
@@ -237,6 +236,7 @@ tapeblock_setup_device(struct tape_device * device)
237 disk->major = tapeblock_major; 236 disk->major = tapeblock_major;
238 disk->first_minor = device->first_minor; 237 disk->first_minor = device->first_minor;
239 disk->fops = &tapeblock_fops; 238 disk->fops = &tapeblock_fops;
239 disk->events = DISK_EVENT_MEDIA_CHANGE;
240 disk->private_data = tape_get_device(device); 240 disk->private_data = tape_get_device(device);
241 disk->queue = blkdat->request_queue; 241 disk->queue = blkdat->request_queue;
242 set_capacity(disk, 0); 242 set_capacity(disk, 0);
@@ -340,8 +340,8 @@ tapeblock_revalidate_disk(struct gendisk *disk)
340 return 0; 340 return 0;
341} 341}
342 342
343static int 343static unsigned int
344tapeblock_medium_changed(struct gendisk *disk) 344tapeblock_check_events(struct gendisk *disk, unsigned int clearing)
345{ 345{
346 struct tape_device *device; 346 struct tape_device *device;
347 347
@@ -349,7 +349,7 @@ tapeblock_medium_changed(struct gendisk *disk)
349 DBF_LH(6, "tapeblock_medium_changed(%p) = %d\n", 349 DBF_LH(6, "tapeblock_medium_changed(%p) = %d\n",
350 device, device->blk_data.medium_changed); 350 device, device->blk_data.medium_changed);
351 351
352 return device->blk_data.medium_changed; 352 return device->blk_data.medium_changed ? DISK_EVENT_MEDIA_CHANGE : 0;
353} 353}
354 354
355/* 355/*
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 2d63c8ad1442..6d5c7ff43f5b 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -67,6 +67,13 @@ static struct scsi_host_sg_pool scsi_sg_pools[] = {
67 67
68struct kmem_cache *scsi_sdb_cache; 68struct kmem_cache *scsi_sdb_cache;
69 69
70/*
71 * When to reinvoke queueing after a resource shortage. It's 3 msecs to
 72 * not change behaviour from the previous unplug mechanism; experimentation
73 * may prove this needs changing.
74 */
75#define SCSI_QUEUE_DELAY 3
76
70static void scsi_run_queue(struct request_queue *q); 77static void scsi_run_queue(struct request_queue *q);
71 78
72/* 79/*
@@ -149,14 +156,7 @@ static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
149 /* 156 /*
150 * Requeue this command. It will go before all other commands 157 * Requeue this command. It will go before all other commands
151 * that are already in the queue. 158 * that are already in the queue.
152 * 159 */
153 * NOTE: there is magic here about the way the queue is plugged if
154 * we have no outstanding commands.
155 *
156 * Although we *don't* plug the queue, we call the request
157 * function. The SCSI request function detects the blocked condition
158 * and plugs the queue appropriately.
159 */
160 spin_lock_irqsave(q->queue_lock, flags); 160 spin_lock_irqsave(q->queue_lock, flags);
161 blk_requeue_request(q, cmd->request); 161 blk_requeue_request(q, cmd->request);
162 spin_unlock_irqrestore(q->queue_lock, flags); 162 spin_unlock_irqrestore(q->queue_lock, flags);
@@ -1226,11 +1226,11 @@ int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
1226 case BLKPREP_DEFER: 1226 case BLKPREP_DEFER:
1227 /* 1227 /*
1228 * If we defer, the blk_peek_request() returns NULL, but the 1228 * If we defer, the blk_peek_request() returns NULL, but the
1229 * queue must be restarted, so we plug here if no returning 1229 * queue must be restarted, so we schedule a callback to happen
1230 * command will automatically do that. 1230 * shortly.
1231 */ 1231 */
1232 if (sdev->device_busy == 0) 1232 if (sdev->device_busy == 0)
1233 blk_plug_device(q); 1233 blk_delay_queue(q, SCSI_QUEUE_DELAY);
1234 break; 1234 break;
1235 default: 1235 default:
1236 req->cmd_flags |= REQ_DONTPREP; 1236 req->cmd_flags |= REQ_DONTPREP;
@@ -1269,7 +1269,7 @@ static inline int scsi_dev_queue_ready(struct request_queue *q,
1269 sdev_printk(KERN_INFO, sdev, 1269 sdev_printk(KERN_INFO, sdev,
1270 "unblocking device at zero depth\n")); 1270 "unblocking device at zero depth\n"));
1271 } else { 1271 } else {
1272 blk_plug_device(q); 1272 blk_delay_queue(q, SCSI_QUEUE_DELAY);
1273 return 0; 1273 return 0;
1274 } 1274 }
1275 } 1275 }
@@ -1499,7 +1499,7 @@ static void scsi_request_fn(struct request_queue *q)
1499 * the host is no longer able to accept any more requests. 1499 * the host is no longer able to accept any more requests.
1500 */ 1500 */
1501 shost = sdev->host; 1501 shost = sdev->host;
1502 while (!blk_queue_plugged(q)) { 1502 for (;;) {
1503 int rtn; 1503 int rtn;
1504 /* 1504 /*
1505 * get next queueable request. We do this early to make sure 1505 * get next queueable request. We do this early to make sure
@@ -1578,15 +1578,8 @@ static void scsi_request_fn(struct request_queue *q)
1578 */ 1578 */
1579 rtn = scsi_dispatch_cmd(cmd); 1579 rtn = scsi_dispatch_cmd(cmd);
1580 spin_lock_irq(q->queue_lock); 1580 spin_lock_irq(q->queue_lock);
1581 if(rtn) { 1581 if (rtn)
1582 /* we're refusing the command; because of 1582 goto out_delay;
1583 * the way locks get dropped, we need to
1584 * check here if plugging is required */
1585 if(sdev->device_busy == 0)
1586 blk_plug_device(q);
1587
1588 break;
1589 }
1590 } 1583 }
1591 1584
1592 goto out; 1585 goto out;
@@ -1605,9 +1598,10 @@ static void scsi_request_fn(struct request_queue *q)
1605 spin_lock_irq(q->queue_lock); 1598 spin_lock_irq(q->queue_lock);
1606 blk_requeue_request(q, req); 1599 blk_requeue_request(q, req);
1607 sdev->device_busy--; 1600 sdev->device_busy--;
1608 if(sdev->device_busy == 0) 1601out_delay:
1609 blk_plug_device(q); 1602 if (sdev->device_busy == 0)
1610 out: 1603 blk_delay_queue(q, SCSI_QUEUE_DELAY);
1604out:
1611 /* must be careful here...if we trigger the ->remove() function 1605 /* must be careful here...if we trigger the ->remove() function
1612 * we cannot be holding the q lock */ 1606 * we cannot be holding the q lock */
1613 spin_unlock_irq(q->queue_lock); 1607 spin_unlock_irq(q->queue_lock);
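
The SCSI core previously used blk_plug_device() as a "run this queue again soon" mechanism when a command had to be deferred or the device was busy; with plugging removed, that job falls to an explicit timer-based restart via blk_delay_queue(q, SCSI_QUEUE_DELAY). A sketch of the requeue-and-retry-later path under that assumption; scsi_foo_defer() is an illustrative name, and SCSI_QUEUE_DELAY is the 3 ms constant added above:

#include <linux/blkdev.h>
#include <scsi/scsi_device.h>

#define SCSI_QUEUE_DELAY 3	/* msecs, mirroring the definition above */

static void scsi_foo_defer(struct request_queue *q, struct request *req,
			   struct scsi_device *sdev)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, req);	/* put the command back at the head */
	if (sdev->device_busy == 0)
		/* nothing in flight will re-run the queue, so schedule a kick */
		blk_delay_queue(q, SCSI_QUEUE_DELAY);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
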
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 5c3ccfc6b622..2941d2d92c94 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3913,7 +3913,7 @@ fc_bsg_request_handler(struct request_queue *q, struct Scsi_Host *shost,
3913 if (!get_device(dev)) 3913 if (!get_device(dev))
3914 return; 3914 return;
3915 3915
3916 while (!blk_queue_plugged(q)) { 3916 while (1) {
3917 if (rport && (rport->port_state == FC_PORTSTATE_BLOCKED) && 3917 if (rport && (rport->port_state == FC_PORTSTATE_BLOCKED) &&
3918 !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT)) 3918 !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT))
3919 break; 3919 break;
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 927e99cb7225..c6fcf76cade5 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -173,11 +173,7 @@ static void sas_smp_request(struct request_queue *q, struct Scsi_Host *shost,
173 int ret; 173 int ret;
174 int (*handler)(struct Scsi_Host *, struct sas_rphy *, struct request *); 174 int (*handler)(struct Scsi_Host *, struct sas_rphy *, struct request *);
175 175
176 while (!blk_queue_plugged(q)) { 176 while ((req = blk_fetch_request(q)) != NULL) {
177 req = blk_fetch_request(q);
178 if (!req)
179 break;
180
181 spin_unlock_irq(q->queue_lock); 177 spin_unlock_irq(q->queue_lock);
182 178
183 handler = to_sas_internal(shost->transportt)->f->smp_handler; 179 handler = to_sas_internal(shost->transportt)->f->smp_handler;
diff --git a/drivers/staging/hv/blkvsc_drv.c b/drivers/staging/hv/blkvsc_drv.c
index 6e02f1b0c46f..af789937be4e 100644
--- a/drivers/staging/hv/blkvsc_drv.c
+++ b/drivers/staging/hv/blkvsc_drv.c
@@ -124,7 +124,8 @@ static void blkvsc_shutdown(struct device *device);
124 124
125static int blkvsc_open(struct block_device *bdev, fmode_t mode); 125static int blkvsc_open(struct block_device *bdev, fmode_t mode);
126static int blkvsc_release(struct gendisk *disk, fmode_t mode); 126static int blkvsc_release(struct gendisk *disk, fmode_t mode);
127static int blkvsc_media_changed(struct gendisk *gd); 127static unsigned int blkvsc_check_events(struct gendisk *gd,
128 unsigned int clearing);
128static int blkvsc_revalidate_disk(struct gendisk *gd); 129static int blkvsc_revalidate_disk(struct gendisk *gd);
129static int blkvsc_getgeo(struct block_device *bd, struct hd_geometry *hg); 130static int blkvsc_getgeo(struct block_device *bd, struct hd_geometry *hg);
130static int blkvsc_ioctl(struct block_device *bd, fmode_t mode, 131static int blkvsc_ioctl(struct block_device *bd, fmode_t mode,
@@ -155,7 +156,7 @@ static const struct block_device_operations block_ops = {
155 .owner = THIS_MODULE, 156 .owner = THIS_MODULE,
156 .open = blkvsc_open, 157 .open = blkvsc_open,
157 .release = blkvsc_release, 158 .release = blkvsc_release,
158 .media_changed = blkvsc_media_changed, 159 .check_events = blkvsc_check_events,
159 .revalidate_disk = blkvsc_revalidate_disk, 160 .revalidate_disk = blkvsc_revalidate_disk,
160 .getgeo = blkvsc_getgeo, 161 .getgeo = blkvsc_getgeo,
161 .ioctl = blkvsc_ioctl, 162 .ioctl = blkvsc_ioctl,
@@ -357,6 +358,7 @@ static int blkvsc_probe(struct device *device)
357 else 358 else
358 blkdev->gd->first_minor = 0; 359 blkdev->gd->first_minor = 0;
359 blkdev->gd->fops = &block_ops; 360 blkdev->gd->fops = &block_ops;
361 blkdev->gd->events = DISK_EVENT_MEDIA_CHANGE;
360 blkdev->gd->private_data = blkdev; 362 blkdev->gd->private_data = blkdev;
361 blkdev->gd->driverfs_dev = &(blkdev->device_ctx->device); 363 blkdev->gd->driverfs_dev = &(blkdev->device_ctx->device);
362 sprintf(blkdev->gd->disk_name, "hd%c", 'a' + devnum); 364 sprintf(blkdev->gd->disk_name, "hd%c", 'a' + devnum);
@@ -1337,10 +1339,11 @@ static int blkvsc_release(struct gendisk *disk, fmode_t mode)
1337 return 0; 1339 return 0;
1338} 1340}
1339 1341
1340static int blkvsc_media_changed(struct gendisk *gd) 1342static unsigned int blkvsc_check_events(struct gendisk *gd,
1343 unsigned int clearing)
1341{ 1344{
1342 DPRINT_DBG(BLKVSC_DRV, "- enter\n"); 1345 DPRINT_DBG(BLKVSC_DRV, "- enter\n");
1343 return 1; 1346 return DISK_EVENT_MEDIA_CHANGE;
1344} 1347}
1345 1348
1346static int blkvsc_revalidate_disk(struct gendisk *gd) 1349static int blkvsc_revalidate_disk(struct gendisk *gd)
diff --git a/drivers/staging/westbridge/astoria/block/cyasblkdev_block.c b/drivers/staging/westbridge/astoria/block/cyasblkdev_block.c
index e1851f00be56..842cd9214a5e 100644
--- a/drivers/staging/westbridge/astoria/block/cyasblkdev_block.c
+++ b/drivers/staging/westbridge/astoria/block/cyasblkdev_block.c
@@ -381,10 +381,10 @@ static int cyasblkdev_blk_ioctl(
381 return -ENOTTY; 381 return -ENOTTY;
382} 382}
383 383
 384/* Media_changed block_device opp 384/* check_events block_device op
385 * this one is called by kernel to confirm if the media really changed 385 * this one is called by kernel to confirm if the media really changed
386 * as we indicated by issuing check_disk_change() call */ 386 * as we indicated by issuing check_disk_change() call */
387int cyasblkdev_media_changed(struct gendisk *gd) 387unsigned int cyasblkdev_check_events(struct gendisk *gd, unsigned int clearing)
388{ 388{
389 struct cyasblkdev_blk_data *bd; 389 struct cyasblkdev_blk_data *bd;
390 390
@@ -402,7 +402,7 @@ int cyasblkdev_media_changed(struct gendisk *gd)
402 #endif 402 #endif
403 } 403 }
404 404
405 /* return media change state "1" yes, 0 no */ 405 /* return media change state - DISK_EVENT_MEDIA_CHANGE yes, 0 no */
406 return 0; 406 return 0;
407} 407}
408 408
@@ -432,7 +432,7 @@ static struct block_device_operations cyasblkdev_bdops = {
432 .ioctl = cyasblkdev_blk_ioctl, 432 .ioctl = cyasblkdev_blk_ioctl,
433 /* .getgeo = cyasblkdev_blk_getgeo, */ 433 /* .getgeo = cyasblkdev_blk_getgeo, */
434 /* added to support media removal( real and simulated) media */ 434 /* added to support media removal( real and simulated) media */
435 .media_changed = cyasblkdev_media_changed, 435 .check_events = cyasblkdev_check_events,
436 /* added to support media removal( real and simulated) media */ 436 /* added to support media removal( real and simulated) media */
437 .revalidate_disk = cyasblkdev_revalidate_disk, 437 .revalidate_disk = cyasblkdev_revalidate_disk,
438 .owner = THIS_MODULE, 438 .owner = THIS_MODULE,
@@ -1090,6 +1090,7 @@ static int cyasblkdev_add_disks(int bus_num,
1090 bd->user_disk_0->first_minor = devidx << CYASBLKDEV_SHIFT; 1090 bd->user_disk_0->first_minor = devidx << CYASBLKDEV_SHIFT;
1091 bd->user_disk_0->minors = 8; 1091 bd->user_disk_0->minors = 8;
1092 bd->user_disk_0->fops = &cyasblkdev_bdops; 1092 bd->user_disk_0->fops = &cyasblkdev_bdops;
1093 bd->user_disk_0->events = DISK_EVENT_MEDIA_CHANGE;
1093 bd->user_disk_0->private_data = bd; 1094 bd->user_disk_0->private_data = bd;
1094 bd->user_disk_0->queue = bd->queue.queue; 1095 bd->user_disk_0->queue = bd->queue.queue;
1095 bd->dbgprn_flags = DBGPRN_RD_RQ; 1096 bd->dbgprn_flags = DBGPRN_RD_RQ;
@@ -1190,6 +1191,7 @@ static int cyasblkdev_add_disks(int bus_num,
1190 bd->user_disk_1->first_minor = (devidx + 1) << CYASBLKDEV_SHIFT; 1191 bd->user_disk_1->first_minor = (devidx + 1) << CYASBLKDEV_SHIFT;
1191 bd->user_disk_1->minors = 8; 1192 bd->user_disk_1->minors = 8;
1192 bd->user_disk_1->fops = &cyasblkdev_bdops; 1193 bd->user_disk_1->fops = &cyasblkdev_bdops;
 1194 bd->user_disk_1->events = DISK_EVENT_MEDIA_CHANGE;
1193 bd->user_disk_1->private_data = bd; 1195 bd->user_disk_1->private_data = bd;
1194 bd->user_disk_1->queue = bd->queue.queue; 1196 bd->user_disk_1->queue = bd->queue.queue;
1195 bd->dbgprn_flags = DBGPRN_RD_RQ; 1197 bd->dbgprn_flags = DBGPRN_RD_RQ;
@@ -1278,6 +1280,7 @@ static int cyasblkdev_add_disks(int bus_num,
1278 (devidx + 2) << CYASBLKDEV_SHIFT; 1280 (devidx + 2) << CYASBLKDEV_SHIFT;
1279 bd->system_disk->minors = 8; 1281 bd->system_disk->minors = 8;
1280 bd->system_disk->fops = &cyasblkdev_bdops; 1282 bd->system_disk->fops = &cyasblkdev_bdops;
1283 bd->system_disk->events = DISK_EVENT_MEDIA_CHANGE;
1281 bd->system_disk->private_data = bd; 1284 bd->system_disk->private_data = bd;
1282 bd->system_disk->queue = bd->queue.queue; 1285 bd->system_disk->queue = bd->queue.queue;
1283 /* don't search for vfat 1286 /* don't search for vfat
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 3df570db0e4f..eb0afec046e1 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -391,9 +391,8 @@ static int iblock_do_task(struct se_task *task)
391{ 391{
392 struct se_device *dev = task->task_se_cmd->se_dev; 392 struct se_device *dev = task->task_se_cmd->se_dev;
393 struct iblock_req *req = IBLOCK_REQ(task); 393 struct iblock_req *req = IBLOCK_REQ(task);
394 struct iblock_dev *ibd = (struct iblock_dev *)req->ib_dev;
395 struct request_queue *q = bdev_get_queue(ibd->ibd_bd);
396 struct bio *bio = req->ib_bio, *nbio = NULL; 394 struct bio *bio = req->ib_bio, *nbio = NULL;
395 struct blk_plug plug;
397 int rw; 396 int rw;
398 397
399 if (task->task_data_direction == DMA_TO_DEVICE) { 398 if (task->task_data_direction == DMA_TO_DEVICE) {
@@ -411,6 +410,7 @@ static int iblock_do_task(struct se_task *task)
411 rw = READ; 410 rw = READ;
412 } 411 }
413 412
413 blk_start_plug(&plug);
414 while (bio) { 414 while (bio) {
415 nbio = bio->bi_next; 415 nbio = bio->bi_next;
416 bio->bi_next = NULL; 416 bio->bi_next = NULL;
@@ -420,9 +420,8 @@ static int iblock_do_task(struct se_task *task)
420 submit_bio(rw, bio); 420 submit_bio(rw, bio);
421 bio = nbio; 421 bio = nbio;
422 } 422 }
423 blk_finish_plug(&plug);
423 424
424 if (q->unplug_fn)
425 q->unplug_fn(q);
426 return PYX_TRANSPORT_SENT_TO_TRANSPORT; 425 return PYX_TRANSPORT_SENT_TO_TRANSPORT;
427} 426}
428 427
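
The target_core_iblock hunk shows the replacement for poking q->unplug_fn by hand: batch submissions inside an on-stack blk_plug, which the block layer flushes at blk_finish_plug() (or whenever the task sleeps). A minimal sketch of that pattern; submit_chain() and the singly linked bio chain mirror the loop above but are illustrative, not part of the patch:

#include <linux/blkdev.h>
#include <linux/bio.h>

static void submit_chain(struct bio *bio, int rw)
{
	struct blk_plug plug;

	blk_start_plug(&plug);			/* start batching on this task */
	while (bio) {
		struct bio *next = bio->bi_next;

		bio->bi_next = NULL;
		submit_bio(rw, bio);		/* held/merged on the plug where possible */
		bio = next;
	}
	blk_finish_plug(&plug);			/* flush the whole batch to the queues */
}
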