author    Jens Axboe <axboe@kernel.dk>  2012-07-30 03:03:10 -0400
committer Jens Axboe <axboe@kernel.dk>  2012-07-30 03:03:10 -0400
commit    72ea1f74fcdf874cca6d2c0962379523bbd99e2c (patch)
tree      4c67be6c73356086ff44ef1b8b1c9479702689ca /drivers/block
parent    b1af9be5ef77898c05667bb9dbf3b180d91d3292 (diff)
parent    a73ff3231df59a4b92ccd0dd4e73897c5822489b (diff)
Merge branch 'for-jens' of git://git.drbd.org/linux-drbd into for-3.6/drivers
Diffstat (limited to 'drivers/block')
-rw-r--r--  drivers/block/drbd/drbd_actlog.c   |  8
-rw-r--r--  drivers/block/drbd/drbd_bitmap.c   |  4
-rw-r--r--  drivers/block/drbd/drbd_int.h      | 44
-rw-r--r--  drivers/block/drbd/drbd_main.c     | 65
-rw-r--r--  drivers/block/drbd/drbd_nl.c       | 36
-rw-r--r--  drivers/block/drbd/drbd_proc.c     |  3
-rw-r--r--  drivers/block/drbd/drbd_receiver.c | 38
-rw-r--r--  drivers/block/drbd/drbd_req.c      |  9
-rw-r--r--  drivers/block/drbd/drbd_worker.c   | 12
-rw-r--r--  drivers/block/floppy.c             |  1
-rw-r--r--  drivers/block/loop.c               |  8
-rw-r--r--  drivers/block/rbd.c                |  4
12 files changed, 159 insertions(+), 73 deletions(-)
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index e54e31b02b88..3fbef018ce55 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -411,7 +411,7 @@ w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused)
                 + mdev->ldev->md.al_offset + mdev->al_tr_pos;
 
         if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE))
-                drbd_chk_io_error(mdev, 1, true);
+                drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
 
         if (++mdev->al_tr_pos >
             div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT))
@@ -876,7 +876,11 @@ int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size,
         unsigned int enr, count = 0;
         struct lc_element *e;
 
-        if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
+        /* this should be an empty REQ_FLUSH */
+        if (size == 0)
+                return 0;
+
+        if (size < 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
                 dev_err(DEV, "sector: %llus, size: %d\n",
                         (unsigned long long)sector, size);
                 return 0;
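
Note on the hunk above: the old single `size <= 0` rejection is split so that a zero-size request, which can now legitimately arrive as an empty REQ_FLUSH, is accepted as a no-op, while negative, unaligned, or oversized requests are still rejected. A minimal user-space sketch of the new validation order; request_size_ok() is a hypothetical name, and the constant matches 1U<<(9+HT_SHIFT) with HT_SHIFT = 8:

    #include <stdbool.h>

    #define DRBD_MAX_BIO_SIZE (1U << 17)        /* 128 KiB */

    static bool request_size_ok(int size)
    {
            if (size == 0)  /* empty REQ_FLUSH: nothing to mark out of sync */
                    return true;
            /* reject negative, non-sector-aligned, or oversized requests */
            if (size < 0 || (size & 0x1ff) != 0 ||
                (unsigned int)size > DRBD_MAX_BIO_SIZE)
                    return false;
            return true;
    }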
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index fcb956bb4b4c..ba91b408abad 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -1096,7 +1096,7 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_w
 
         if (ctx->error) {
                 dev_alert(DEV, "we had at least one MD IO ERROR during bitmap IO\n");
-                drbd_chk_io_error(mdev, 1, true);
+                drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
                 err = -EIO; /* ctx->error ? */
         }
 
@@ -1212,7 +1212,7 @@ int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(loc
         wait_until_done_or_disk_failure(mdev, mdev->ldev, &ctx->done);
 
         if (ctx->error)
-                drbd_chk_io_error(mdev, 1, true);
+                drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
         /* that should force detach, so the in memory bitmap will be
          * gone in a moment as well. */
 
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 02f013a073a7..b2ca143d0053 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -813,7 +813,6 @@ enum {
         SIGNAL_ASENDER,         /* whether asender wants to be interrupted */
         SEND_PING,              /* whether asender should send a ping asap */
 
-        UNPLUG_QUEUED,          /* only relevant with kernel 2.4 */
         UNPLUG_REMOTE,          /* sending a "UnplugRemote" could help */
         MD_DIRTY,               /* current uuids and flags not yet on disk */
         DISCARD_CONCURRENT,     /* Set on one node, cleared on the peer! */
@@ -824,7 +823,6 @@ enum {
         CRASHED_PRIMARY,        /* This node was a crashed primary.
                                  * Gets cleared when the state.conn
                                  * goes into C_CONNECTED state. */
-        NO_BARRIER_SUPP,        /* underlying block device doesn't implement barriers */
         CONSIDER_RESYNC,
 
         MD_NO_FUA,              /* Users wants us to not use FUA/FLUSH on meta data dev */
@@ -834,6 +832,7 @@ enum {
         BITMAP_IO_QUEUED,       /* Started bitmap IO */
         GO_DISKLESS,            /* Disk is being detached, on io-error or admin request. */
         WAS_IO_ERROR,           /* Local disk failed returned IO error */
+        FORCE_DETACH,           /* Force-detach from local disk, aborting any pending local IO */
         RESYNC_AFTER_NEG,       /* Resync after online grow after the attach&negotiate finished. */
         NET_CONGESTED,          /* The data socket is congested */
 
@@ -851,6 +850,13 @@ enum {
         AL_SUSPENDED,           /* Activity logging is currently suspended. */
         AHEAD_TO_SYNC_SOURCE,   /* Ahead -> SyncSource queued */
         STATE_SENT,             /* Do not change state/UUIDs while this is set */
+
+        CALLBACK_PENDING,       /* Whether we have a call_usermodehelper(, UMH_WAIT_PROC)
+                                 * pending, from drbd worker context.
+                                 * If set, bdi_write_congested() returns true,
+                                 * so shrink_page_list() would not recurse into,
+                                 * and potentially deadlock on, this drbd worker.
+                                 */
 };
 
 struct drbd_bitmap; /* opaque for drbd_conf */
@@ -1130,8 +1136,8 @@ struct drbd_conf {
         int rs_in_flight; /* resync sectors in flight (to proxy, in proxy and from proxy) */
         int rs_planed;    /* resync sectors already planned */
         atomic_t ap_in_flight; /* App sectors in flight (waiting for ack) */
-        int peer_max_bio_size;
-        int local_max_bio_size;
+        unsigned int peer_max_bio_size;
+        unsigned int local_max_bio_size;
 };
 
 static inline struct drbd_conf *minor_to_mdev(unsigned int minor)
@@ -1435,9 +1441,9 @@ struct bm_extent {
  * hash table. */
 #define HT_SHIFT 8
 #define DRBD_MAX_BIO_SIZE (1U<<(9+HT_SHIFT))
-#define DRBD_MAX_BIO_SIZE_SAFE (1 << 12)   /* Works always = 4k */
+#define DRBD_MAX_BIO_SIZE_SAFE (1U << 12)   /* Works always = 4k */
 
-#define DRBD_MAX_SIZE_H80_PACKET (1 << 15) /* The old header only allows packets up to 32Kib data */
+#define DRBD_MAX_SIZE_H80_PACKET (1U << 15) /* The old header only allows packets up to 32Kib data */
 
 /* Number of elements in the app_reads_hash */
 #define APP_R_HSIZE 15
@@ -1840,12 +1846,20 @@ static inline int drbd_request_state(struct drbd_conf *mdev,
         return _drbd_request_state(mdev, mask, val, CS_VERBOSE + CS_ORDERED);
 }
 
+enum drbd_force_detach_flags {
+        DRBD_IO_ERROR,
+        DRBD_META_IO_ERROR,
+        DRBD_FORCE_DETACH,
+};
+
 #define __drbd_chk_io_error(m,f) __drbd_chk_io_error_(m,f, __func__)
-static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, int forcedetach, const char *where)
+static inline void __drbd_chk_io_error_(struct drbd_conf *mdev,
+        enum drbd_force_detach_flags forcedetach,
+        const char *where)
 {
         switch (mdev->ldev->dc.on_io_error) {
         case EP_PASS_ON:
-                if (!forcedetach) {
+                if (forcedetach == DRBD_IO_ERROR) {
                         if (__ratelimit(&drbd_ratelimit_state))
                                 dev_err(DEV, "Local IO failed in %s.\n", where);
                         if (mdev->state.disk > D_INCONSISTENT)
@@ -1856,6 +1870,8 @@ static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, int forcedetach,
         case EP_DETACH:
         case EP_CALL_HELPER:
                 set_bit(WAS_IO_ERROR, &mdev->flags);
+                if (forcedetach == DRBD_FORCE_DETACH)
+                        set_bit(FORCE_DETACH, &mdev->flags);
                 if (mdev->state.disk > D_FAILED) {
                         _drbd_set_state(_NS(mdev, disk, D_FAILED), CS_HARD, NULL);
                         dev_err(DEV,
@@ -1875,7 +1891,7 @@ static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, int forcedetach,
  */
 #define drbd_chk_io_error(m,e,f) drbd_chk_io_error_(m,e,f, __func__)
 static inline void drbd_chk_io_error_(struct drbd_conf *mdev,
-        int error, int forcedetach, const char *where)
+        int error, enum drbd_force_detach_flags forcedetach, const char *where)
 {
         if (error) {
                 unsigned long flags;
@@ -2405,15 +2421,17 @@ static inline void dec_ap_bio(struct drbd_conf *mdev)
         int ap_bio = atomic_dec_return(&mdev->ap_bio_cnt);
 
         D_ASSERT(ap_bio >= 0);
+
+        if (ap_bio == 0 && test_bit(BITMAP_IO, &mdev->flags)) {
+                if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
+                        drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
+        }
+
         /* this currently does wake_up for every dec_ap_bio!
          * maybe rather introduce some type of hysteresis?
          * e.g. (ap_bio == mxb/2 || ap_bio == 0) ? */
         if (ap_bio < mxb)
                 wake_up(&mdev->misc_wait);
-        if (ap_bio == 0 && test_bit(BITMAP_IO, &mdev->flags)) {
-                if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
-                        drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
-        }
 }
 
 static inline int drbd_set_ed_uuid(struct drbd_conf *mdev, u64 val)
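
Note on the drbd_int.h changes above: the old boolean `forcedetach` becomes a three-way `enum drbd_force_detach_flags`, so callers can distinguish a plain data IO error from a meta-data IO error and from an explicit force-detach. A minimal user-space sketch of that classification; the names mirror the patch, but the EP_* stand-ins and the printfs are illustrative, not the kernel code:

    #include <stdio.h>

    enum drbd_force_detach_flags {
            DRBD_IO_ERROR,          /* plain data IO error */
            DRBD_META_IO_ERROR,     /* meta-data IO error: must detach */
            DRBD_FORCE_DETACH,      /* detach and abort pending local IO */
    };

    enum { EP_PASS_ON, EP_DETACH }; /* stand-in for the on_io_error policy */

    static void chk_io_error(int policy, enum drbd_force_detach_flags f)
    {
            /* Under EP_PASS_ON only a plain data IO error is passed on to
             * the upper layers; meta-data errors and force-detach still
             * fail the disk, as the switch in __drbd_chk_io_error_() does. */
            if (policy == EP_PASS_ON && f == DRBD_IO_ERROR) {
                    printf("pass error on, stay attached\n");
                    return;
            }
            if (f == DRBD_FORCE_DETACH)
                    printf("set FORCE_DETACH, abort pending local IO\n");
            printf("disk -> D_FAILED (detach)\n");
    }

    int main(void)
    {
            chk_io_error(EP_PASS_ON, DRBD_IO_ERROR);
            chk_io_error(EP_PASS_ON, DRBD_META_IO_ERROR);
            chk_io_error(EP_DETACH, DRBD_FORCE_DETACH);
            return 0;
    }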
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 920ede2829d6..2e0e7fc1dbba 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -1514,6 +1514,13 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
 
         /* Do not change the order of the if above and the two below... */
         if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) {      /* attach on the peer */
+                /* we probably will start a resync soon.
+                 * make sure those things are properly reset. */
+                mdev->rs_total = 0;
+                mdev->rs_failed = 0;
+                atomic_set(&mdev->rs_pending_cnt, 0);
+                drbd_rs_cancel_all(mdev);
+
                 drbd_send_uuids(mdev);
                 drbd_send_state(mdev, ns);
         }
@@ -1630,9 +1637,24 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
                 eh = mdev->ldev->dc.on_io_error;
                 was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
 
-                /* Immediately allow completion of all application IO, that waits
-                   for completion from the local disk. */
-                tl_abort_disk_io(mdev);
+                if (was_io_error && eh == EP_CALL_HELPER)
+                        drbd_khelper(mdev, "local-io-error");
+
+                /* Immediately allow completion of all application IO,
+                 * that waits for completion from the local disk,
+                 * if this was a force-detach due to disk_timeout
+                 * or administrator request (drbdsetup detach --force).
+                 * Do NOT abort otherwise.
+                 * Aborting local requests may cause serious problems,
+                 * if requests are completed to upper layers already,
+                 * and then later the already submitted local bio completes.
+                 * This can cause DMA into former bio pages that meanwhile
+                 * have been re-used for other things.
+                 * So aborting local requests may cause crashes,
+                 * or even worse, silent data corruption.
+                 */
+                if (test_and_clear_bit(FORCE_DETACH, &mdev->flags))
+                        tl_abort_disk_io(mdev);
 
                 /* current state still has to be D_FAILED,
                  * there is only one way out: to D_DISKLESS,
@@ -1653,9 +1675,6 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
                         drbd_md_sync(mdev);
                 }
                 put_ldev(mdev);
-
-                if (was_io_error && eh == EP_CALL_HELPER)
-                        drbd_khelper(mdev, "local-io-error");
         }
 
         /* second half of local IO error, failure to attach,
@@ -1669,10 +1688,6 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
                                 "ASSERT FAILED: disk is %s while going diskless\n",
                                 drbd_disk_str(mdev->state.disk));
 
-                mdev->rs_total = 0;
-                mdev->rs_failed = 0;
-                atomic_set(&mdev->rs_pending_cnt, 0);
-
                 if (ns.conn >= C_CONNECTED)
                         drbd_send_state(mdev, ns);
 
@@ -2194,7 +2209,8 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl
 {
         struct p_sizes p;
         sector_t d_size, u_size;
-        int q_order_type, max_bio_size;
+        int q_order_type;
+        unsigned int max_bio_size;
         int ok;
 
         if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
@@ -2203,7 +2219,7 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl
                 u_size = mdev->ldev->dc.disk_size;
                 q_order_type = drbd_queue_order_type(mdev);
                 max_bio_size = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
-                max_bio_size = min_t(int, max_bio_size, DRBD_MAX_BIO_SIZE);
+                max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE);
                 put_ldev(mdev);
         } else {
                 d_size = 0;
@@ -2214,7 +2230,7 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl
 
         /* Never allow old drbd (up to 8.3.7) to see more than 32KiB */
         if (mdev->agreed_pro_version <= 94)
-                max_bio_size = min_t(int, max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
+                max_bio_size = min(max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
 
         p.d_size = cpu_to_be64(d_size);
         p.u_size = cpu_to_be64(u_size);
@@ -3541,6 +3557,22 @@ static int drbd_congested(void *congested_data, int bdi_bits)
                 goto out;
         }
 
+        if (test_bit(CALLBACK_PENDING, &mdev->flags)) {
+                r |= (1 << BDI_async_congested);
+                /* Without good local data, we would need to read from remote,
+                 * and that would need the worker thread as well, which is
+                 * currently blocked waiting for that usermode helper to
+                 * finish.
+                 */
+                if (!get_ldev_if_state(mdev, D_UP_TO_DATE))
+                        r |= (1 << BDI_sync_congested);
+                else
+                        put_ldev(mdev);
+                r &= bdi_bits;
+                reason = 'c';
+                goto out;
+        }
+
         if (get_ldev(mdev)) {
                 q = bdev_get_queue(mdev->ldev->backing_bdev);
                 r = bdi_congested(&q->backing_dev_info, bdi_bits);
@@ -3604,6 +3636,7 @@ struct drbd_conf *drbd_new_device(unsigned int minor)
         q->backing_dev_info.congested_data = mdev;
 
         blk_queue_make_request(q, drbd_make_request);
+        blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
         /* Setting the max_hw_sectors to an odd value of 8kibyte here
            This triggers a max_bio_size message upon first attach or connect */
         blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
@@ -3870,7 +3903,7 @@ void drbd_md_sync(struct drbd_conf *mdev)
         if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
                 /* this was a try anyways ... */
                 dev_err(DEV, "meta data update failed!\n");
-                drbd_chk_io_error(mdev, 1, true);
+                drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
         }
 
         /* Update mdev->ldev->md.la_size_sect,
@@ -3950,9 +3983,9 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
 
         spin_lock_irq(&mdev->req_lock);
         if (mdev->state.conn < C_CONNECTED) {
-                int peer;
+                unsigned int peer;
                 peer = be32_to_cpu(buffer->la_peer_max_bio_size);
-                peer = max_t(int, peer, DRBD_MAX_BIO_SIZE_SAFE);
+                peer = max(peer, DRBD_MAX_BIO_SIZE_SAFE);
                 mdev->peer_max_bio_size = peer;
         }
         spin_unlock_irq(&mdev->req_lock);
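
Note on the drbd_congested() hunk above: it is the reporting half of the CALLBACK_PENDING deadlock avoidance. While a usermode helper invoked from the worker thread is pending, the device reports itself congested, so memory reclaim will not queue more writeback against it and then block on the very worker that is waiting for the helper. A small user-space sketch of the decision, with made-up bit values standing in for the real BDI_async_congested/BDI_sync_congested from <linux/backing-dev.h>:

    #include <stdbool.h>

    #define SKETCH_ASYNC_CONGESTED (1 << 0) /* stand-in bit values */
    #define SKETCH_SYNC_CONGESTED  (1 << 1)

    /* callback_pending: a helper is running from worker context;
     * have_good_local_data: get_ldev_if_state(mdev, D_UP_TO_DATE)
     * would succeed */
    static int congested_bits(bool callback_pending,
                              bool have_good_local_data, int bdi_bits)
    {
            int r = 0;

            if (callback_pending) {
                    r |= SKETCH_ASYNC_CONGESTED; /* hold back async writeback */
                    if (!have_good_local_data)
                            r |= SKETCH_SYNC_CONGESTED; /* reads would need the
                                                         * remote, i.e. the
                                                         * blocked worker */
            }
            return r & bdi_bits; /* report only the bits the caller asked for */
    }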
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 6d4de6a72e80..fb9dce8daa24 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -147,6 +147,9 @@ int drbd_khelper(struct drbd_conf *mdev, char *cmd)
         char *argv[] = {usermode_helper, cmd, mb, NULL };
         int ret;
 
+        if (current == mdev->worker.task)
+                set_bit(CALLBACK_PENDING, &mdev->flags);
+
         snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));
 
         if (get_net_conf(mdev)) {
@@ -189,6 +192,9 @@ int drbd_khelper(struct drbd_conf *mdev, char *cmd)
                         usermode_helper, cmd, mb,
                         (ret >> 8) & 0xff, ret);
 
+        if (current == mdev->worker.task)
+                clear_bit(CALLBACK_PENDING, &mdev->flags);
+
         if (ret < 0) /* Ignore any ERRNOs we got. */
                 ret = 0;
 
@@ -795,8 +801,8 @@ static int drbd_check_al_size(struct drbd_conf *mdev)
 static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size)
 {
         struct request_queue * const q = mdev->rq_queue;
-        int max_hw_sectors = max_bio_size >> 9;
-        int max_segments = 0;
+        unsigned int max_hw_sectors = max_bio_size >> 9;
+        unsigned int max_segments = 0;
 
         if (get_ldev_if_state(mdev, D_ATTACHING)) {
                 struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
@@ -829,7 +835,7 @@ static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_
 
 void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
 {
-        int now, new, local, peer;
+        unsigned int now, new, local, peer;
 
         now = queue_max_hw_sectors(mdev->rq_queue) << 9;
         local = mdev->local_max_bio_size; /* Eventually last known value, from volatile memory */
@@ -840,13 +846,14 @@ void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
                 mdev->local_max_bio_size = local;
                 put_ldev(mdev);
         }
+        local = min(local, DRBD_MAX_BIO_SIZE);
 
         /* We may ignore peer limits if the peer is modern enough.
            Because new from 8.3.8 onwards the peer can use multiple
            BIOs for a single peer_request */
         if (mdev->state.conn >= C_CONNECTED) {
                 if (mdev->agreed_pro_version < 94) {
-                        peer = min_t(int, mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
+                        peer = min(mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
                         /* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
                 } else if (mdev->agreed_pro_version == 94)
                         peer = DRBD_MAX_SIZE_H80_PACKET;
@@ -854,10 +861,10 @@ void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
                         peer = DRBD_MAX_BIO_SIZE;
         }
 
-        new = min_t(int, local, peer);
+        new = min(local, peer);
 
         if (mdev->state.role == R_PRIMARY && new < now)
-                dev_err(DEV, "ASSERT FAILED new < now; (%d < %d)\n", new, now);
+                dev_err(DEV, "ASSERT FAILED new < now; (%u < %u)\n", new, now);
 
         if (new != now)
                 dev_info(DEV, "max BIO size = %u\n", new);
@@ -950,6 +957,14 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
          * to realize a "hot spare" feature (not that I'd recommend that) */
         wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
 
+        /* make sure there is no leftover from previous force-detach attempts */
+        clear_bit(FORCE_DETACH, &mdev->flags);
+
+        /* and no leftover from previously aborted resync or verify, either */
+        mdev->rs_total = 0;
+        mdev->rs_failed = 0;
+        atomic_set(&mdev->rs_pending_cnt, 0);
+
         /* allocation not in the IO path, cqueue thread context */
         nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
         if (!nbc) {
@@ -1345,6 +1360,7 @@ static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
         }
 
         if (dt.detach_force) {
+                set_bit(FORCE_DETACH, &mdev->flags);
                 drbd_force_state(mdev, NS(disk, D_FAILED));
                 reply->ret_code = SS_SUCCESS;
                 goto out;
@@ -1962,9 +1978,11 @@ static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
         int retcode;
 
         /* If there is still bitmap IO pending, probably because of a previous
-         * resync just being finished, wait for it before requesting a new resync. */
+         * resync just being finished, wait for it before requesting a new resync.
+         * Also wait for it's after_state_ch(). */
         drbd_suspend_io(mdev);
         wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
+        drbd_flush_workqueue(mdev);
 
         retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);
 
@@ -2003,9 +2021,11 @@ static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_re
         int retcode;
 
         /* If there is still bitmap IO pending, probably because of a previous
-         * resync just being finished, wait for it before requesting a new resync. */
+         * resync just being finished, wait for it before requesting a new resync.
+         * Also wait for it's after_state_ch(). */
         drbd_suspend_io(mdev);
         wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
+        drbd_flush_workqueue(mdev);
 
         retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED);
 
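
Note on the two drbd_khelper() hunks above: they bracket the usermode-helper call, setting CALLBACK_PENDING only when the helper is invoked from the worker thread itself and always clearing it on the same path. A minimal sketch of that set/clear bracket; run_helper() is a hypothetical stand-in for the call_usermodehelper() invocation, and a C11 atomic stands in for the kernel's set_bit/clear_bit:

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_bool callback_pending;

    /* from_worker: true when called on the worker thread, i.e. the
     * kernel's `current == mdev->worker.task` test */
    static int khelper(bool from_worker, int (*run_helper)(void))
    {
            int ret;

            if (from_worker)
                    atomic_store(&callback_pending, true);

            ret = run_helper();     /* may block for a long time */

            if (from_worker)
                    atomic_store(&callback_pending, false);

            return ret < 0 ? 0 : ret; /* ignore ERRNOs, as the patch does */
    }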
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c
index 869bada2ed06..5496104f90b9 100644
--- a/drivers/block/drbd/drbd_proc.c
+++ b/drivers/block/drbd/drbd_proc.c
@@ -245,6 +245,9 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
                    mdev->state.role == R_SECONDARY) {
                         seq_printf(seq, "%2d: cs:Unconfigured\n", i);
                 } else {
+                        /* reset mdev->congestion_reason */
+                        bdi_rw_congested(&mdev->rq_queue->backing_dev_info);
+
                         seq_printf(seq,
                            "%2d: cs:%s ro:%s/%s ds:%s/%s %c %c%c%c%c%c%c\n"
                            "    ns:%u nr:%u dw:%u dr:%u al:%u bm:%u "
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index ea4836e0ae98..c74ca2df7431 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -277,6 +277,9 @@ static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net)
         atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
         int i;
 
+        if (page == NULL)
+                return;
+
         if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE)*minor_count)
                 i = page_chain_free(page);
         else {
@@ -316,7 +319,7 @@ struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
                      gfp_t gfp_mask) __must_hold(local)
 {
         struct drbd_epoch_entry *e;
-        struct page *page;
+        struct page *page = NULL;
         unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT;
 
         if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))
@@ -329,9 +332,11 @@ struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
                 return NULL;
         }
 
-        page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
-        if (!page)
-                goto fail;
+        if (data_size) {
+                page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
+                if (!page)
+                        goto fail;
+        }
 
         INIT_HLIST_NODE(&e->collision);
         e->epoch = NULL;
@@ -1270,7 +1275,6 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __
 
         data_size -= dgs;
 
-        ERR_IF(data_size == 0) return NULL;
         ERR_IF(data_size & 0x1ff) return NULL;
         ERR_IF(data_size > DRBD_MAX_BIO_SIZE) return NULL;
 
@@ -1291,6 +1295,9 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __
         if (!e)
                 return NULL;
 
+        if (!data_size)
+                return e;
+
         ds = data_size;
         page = e->pages;
         page_chain_for_each(page) {
@@ -1715,6 +1722,10 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 
         dp_flags = be32_to_cpu(p->dp_flags);
         rw |= wire_flags_to_bio(mdev, dp_flags);
+        if (e->pages == NULL) {
+                D_ASSERT(e->size == 0);
+                D_ASSERT(dp_flags & DP_FLUSH);
+        }
 
         if (dp_flags & DP_MAY_SET_IN_SYNC)
                 e->flags |= EE_MAY_SET_IN_SYNC;
@@ -3801,11 +3812,18 @@ void drbd_free_tl_hash(struct drbd_conf *mdev)
         mdev->ee_hash = NULL;
         mdev->ee_hash_s = 0;
 
-        /* paranoia code */
-        for (h = mdev->tl_hash; h < mdev->tl_hash + mdev->tl_hash_s; h++)
-                if (h->first)
-                        dev_err(DEV, "ASSERT FAILED tl_hash[%u] == %p, expected NULL\n",
-                                (int)(h - mdev->tl_hash), h->first);
+        /* We may not have had the chance to wait for all locally pending
+         * application requests. The hlist_add_fake() prevents access after
+         * free on master bio completion. */
+        for (h = mdev->tl_hash; h < mdev->tl_hash + mdev->tl_hash_s; h++) {
+                struct drbd_request *req;
+                struct hlist_node *pos, *n;
+                hlist_for_each_entry_safe(req, pos, n, h, collision) {
+                        hlist_del_init(&req->collision);
+                        hlist_add_fake(&req->collision);
+                }
+        }
+
         kfree(mdev->tl_hash);
         mdev->tl_hash = NULL;
         mdev->tl_hash_s = 0;
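
Note on the drbd_free_tl_hash() hunk above: it replaces a paranoia assertion with a real fix. Requests still hashed when the table is freed get their list nodes re-pointed at themselves, so a later hlist_del() on master-bio completion touches only the node, never the freed buckets. A sketch of the trick, with the two list helpers reimplemented in user space the way include/linux/list.h defines them:

    #include <stddef.h>

    struct hlist_node {
            struct hlist_node *next, **pprev;
    };

    /* make an unhashed node look hashed, but pointing only at itself,
     * so a later hlist_del() is entirely self-contained */
    static void hlist_add_fake(struct hlist_node *n)
    {
            n->pprev = &n->next;
    }

    static void hlist_del(struct hlist_node *n)
    {
            struct hlist_node *next = n->next;
            struct hlist_node **pprev = n->pprev;

            *pprev = next;          /* with a fake node this writes n->next */
            if (next)
                    next->pprev = pprev;
    }

    int main(void)
    {
            struct hlist_node req = { NULL, NULL };

            hlist_add_fake(&req);   /* table about to be freed */
            hlist_del(&req);        /* later completion touches no freed memory */
            return 0;
    }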
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 8e93a6ac9bb6..910335c30927 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -455,7 +455,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
                 req->rq_state |= RQ_LOCAL_COMPLETED;
                 req->rq_state &= ~RQ_LOCAL_PENDING;
 
-                __drbd_chk_io_error(mdev, false);
+                __drbd_chk_io_error(mdev, DRBD_IO_ERROR);
                 _req_may_be_done_not_susp(req, m);
                 break;
 
@@ -477,7 +477,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
                         break;
                 }
 
-                __drbd_chk_io_error(mdev, false);
+                __drbd_chk_io_error(mdev, DRBD_IO_ERROR);
 
         goto_queue_for_net_read:
 
@@ -1111,13 +1111,12 @@ void drbd_make_request(struct request_queue *q, struct bio *bio)
         /*
          * what we "blindly" assume:
          */
-        D_ASSERT(bio->bi_size > 0);
         D_ASSERT((bio->bi_size & 0x1ff) == 0);
 
         /* to make some things easier, force alignment of requests within the
          * granularity of our hash tables */
         s_enr = bio->bi_sector >> HT_SHIFT;
-        e_enr = (bio->bi_sector+(bio->bi_size>>9)-1) >> HT_SHIFT;
+        e_enr = bio->bi_size ? (bio->bi_sector+(bio->bi_size>>9)-1) >> HT_SHIFT : s_enr;
 
         if (likely(s_enr == e_enr)) {
                 do {
@@ -1275,7 +1274,7 @@ void request_timer_fn(unsigned long data)
                     time_after(now, req->start_time + dt) &&
                     !time_in_range(now, mdev->last_reattach_jif, mdev->last_reattach_jif + dt)) {
                         dev_warn(DEV, "Local backing device failed to meet the disk-timeout\n");
-                        __drbd_chk_io_error(mdev, 1);
+                        __drbd_chk_io_error(mdev, DRBD_FORCE_DETACH);
                 }
                 nt = (time_after(now, req->start_time + et) ? now : req->start_time) + et;
                 spin_unlock_irq(&mdev->req_lock);
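
Note on the drbd_make_request() hunk above: zero-size bios (empty REQ_FLUSH requests) are now accepted, so the end-extent computation needs a guard. With bi_size == 0 there is no last sector, and (bi_size>>9)-1 would underflow. A sketch of the guarded arithmetic; user-space stand-in, with HT_SHIFT matching the drbd_int.h define:

    #define HT_SHIFT 8      /* one extent = 2^HT_SHIFT sectors */

    /* Returns the hash-table extent of the last sector touched by the
     * request. For an empty flush (size == 0) there is no last sector,
     * so fall back to the start extent instead of sector + (0 >> 9) - 1. */
    static unsigned long end_extent(unsigned long sector, unsigned int size)
    {
            unsigned long s_enr = sector >> HT_SHIFT;

            return size ? (sector + (size >> 9) - 1) >> HT_SHIFT : s_enr;
    }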
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 620c70ff2231..6bce2cc179d4 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -111,7 +111,7 @@ void drbd_endio_read_sec_final(struct drbd_epoch_entry *e) __releases(local)
         if (list_empty(&mdev->read_ee))
                 wake_up(&mdev->ee_wait);
         if (test_bit(__EE_WAS_ERROR, &e->flags))
-                __drbd_chk_io_error(mdev, false);
+                __drbd_chk_io_error(mdev, DRBD_IO_ERROR);
         spin_unlock_irqrestore(&mdev->req_lock, flags);
 
         drbd_queue_work(&mdev->data.work, &e->w);
@@ -154,7 +154,7 @@ static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(lo
                 : list_empty(&mdev->active_ee);
 
         if (test_bit(__EE_WAS_ERROR, &e->flags))
-                __drbd_chk_io_error(mdev, false);
+                __drbd_chk_io_error(mdev, DRBD_IO_ERROR);
         spin_unlock_irqrestore(&mdev->req_lock, flags);
 
         if (is_syncer_req)
@@ -1501,14 +1501,6 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
                 return;
         }
 
-        if (mdev->state.conn < C_AHEAD) {
-                /* In case a previous resync run was aborted by an IO error/detach on the peer. */
-                drbd_rs_cancel_all(mdev);
-                /* This should be done when we abort the resync. We definitely do not
-                   want to have this for connections going back and forth between
-                   Ahead/Behind and SyncSource/SyncTarget */
-        }
-
         if (side == C_SYNC_TARGET) {
                 /* Since application IO was locked out during C_WF_BITMAP_T and
                    C_WF_SYNC_UUID we are still unmodified. Before going to C_SYNC_TARGET
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 1347ba8b8377..8d4afc83e05f 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -672,6 +672,7 @@ static void __reschedule_timeout(int drive, const char *message)
 
         if (drive == current_reqD)
                 drive = current_drive;
+        __cancel_delayed_work(&fd_timeout);
 
         if (drive < 0 || drive >= N_DRIVE) {
                 delay = 20UL * HZ;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index bbca966f8f66..3bba65510d23 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1597,14 +1597,12 @@ static int loop_add(struct loop_device **l, int i)
         struct gendisk *disk;
         int err;
 
+        err = -ENOMEM;
         lo = kzalloc(sizeof(*lo), GFP_KERNEL);
-        if (!lo) {
-                err = -ENOMEM;
+        if (!lo)
                 goto out;
-        }
 
-        err = idr_pre_get(&loop_index_idr, GFP_KERNEL);
-        if (err < 0)
+        if (!idr_pre_get(&loop_index_idr, GFP_KERNEL))
                 goto out_free_dev;
 
         if (i >= 0) {
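
Note on the loop_add() hunk above: it fixes a broken error check. idr_pre_get() returns 1 on success and 0 on failure, not a negative errno, so the old `if (err < 0)` could never fire and a failed preallocation went unnoticed; the rewrite also hoists `err = -ENOMEM` so both early failure paths return it. A user-space sketch of the corrected pattern, with hypothetical stubs standing in for the kernel calls:

    #include <errno.h>
    #include <stdlib.h>

    /* Sketch only: models idr_pre_get()'s 1-on-success convention. */
    static int idr_pre_get_stub(void) { return 1; /* 0 on alloc failure */ }

    static int loop_add_sketch(void)
    {
            int err = -ENOMEM;      /* covers both failure paths below */
            void *lo = malloc(64);  /* stand-in for kzalloc(sizeof(*lo), ...) */

            if (!lo)
                    goto out;

            if (!idr_pre_get_stub())        /* 0 means failure, never < 0 */
                    goto out_free_dev;

            free(lo);       /* sketch ends; the real code goes on to add the disk */
            return 0;

    out_free_dev:
            free(lo);
    out:
            return err;
    }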
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 65665c9c42c6..8f428a8ab003 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -499,7 +499,7 @@ static int rbd_header_from_disk(struct rbd_image_header *header,
                  / sizeof (*ondisk))
                 return -EINVAL;
         header->snapc = kmalloc(sizeof(struct ceph_snap_context) +
-                                snap_count * sizeof (*ondisk),
+                                snap_count * sizeof(u64),
                                 gfp_flags);
         if (!header->snapc)
                 return -ENOMEM;
@@ -977,7 +977,7 @@ static void rbd_req_cb(struct ceph_osd_request *req, struct ceph_msg *msg)
         op = (void *)(replyhead + 1);
         rc = le32_to_cpu(replyhead->result);
         bytes = le64_to_cpu(op->extent.length);
-        read_op = (le32_to_cpu(op->op) == CEPH_OSD_OP_READ);
+        read_op = (le16_to_cpu(op->op) == CEPH_OSD_OP_READ);
 
         dout("rbd_req_cb bytes=%lld readop=%d rc=%d\n", bytes, read_op, rc);
 
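
Note on the two rbd hunks above: both are size-correctness fixes. The snap context stores one 64-bit snapshot id per entry, so the allocation must scale by sizeof(u64) rather than by the size of the whole on-disk header, and op->op is a 16-bit wire field, so it must be decoded with le16_to_cpu() rather than le32_to_cpu(). A user-space sketch of the corrected allocation sizing; snap_context_sketch is a stand-in for struct ceph_snap_context, not the real layout:

    #include <stdint.h>
    #include <stdlib.h>

    struct snap_context_sketch {
            uint32_t seq;
            uint32_t num_snaps;
            uint64_t snaps[];       /* one 64-bit snapshot id per entry */
    };

    static struct snap_context_sketch *snapc_alloc(uint32_t snap_count)
    {
            /* header plus snap_count ids; the old code multiplied snap_count
             * by the size of the entire on-disk header, over-allocating */
            return malloc(sizeof(struct snap_context_sketch)
                          + snap_count * sizeof(uint64_t));
    }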