Diffstat (limited to 'drivers')
-rw-r--r--  drivers/block/DAC960.c             |  1
-rw-r--r--  drivers/block/drbd/drbd_actlog.c   | 19
-rw-r--r--  drivers/block/drbd/drbd_bitmap.c   | 10
-rw-r--r--  drivers/block/drbd/drbd_int.h      | 12
-rw-r--r--  drivers/block/drbd/drbd_main.c     | 20
-rw-r--r--  drivers/block/drbd/drbd_nl.c       | 44
-rw-r--r--  drivers/block/drbd/drbd_receiver.c | 34
-rw-r--r--  drivers/block/drbd/drbd_worker.c   | 18
-rw-r--r--  drivers/block/loop.c               |  2
-rw-r--r--  drivers/block/paride/pcd.c         |  4
-rw-r--r--  drivers/block/paride/pf.c          |  4
-rw-r--r--  drivers/block/paride/pt.c          |  4
-rw-r--r--  drivers/block/virtio_blk.c         |  5
-rw-r--r--  drivers/scsi/sd.c                  |  2
14 files changed, 121 insertions(+), 58 deletions(-)
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 459f1bc25a7..c5f22bb0a48 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -2533,7 +2533,6 @@ static bool DAC960_RegisterBlockDevice(DAC960_Controller_T *Controller)
 	Controller->RequestQueue[n] = RequestQueue;
 	blk_queue_bounce_limit(RequestQueue, Controller->BounceBufferLimit);
 	RequestQueue->queuedata = Controller;
-	blk_queue_max_hw_segments(RequestQueue, Controller->DriverScatterGatherLimit);
 	blk_queue_max_segments(RequestQueue, Controller->DriverScatterGatherLimit);
 	blk_queue_max_hw_sectors(RequestQueue, Controller->MaxBlocksPerCommand);
 	disk->queue = RequestQueue;
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 17956ff6a08..df018990c42 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -536,7 +536,9 @@ static void atodb_endio(struct bio *bio, int error)
 	put_ldev(mdev);
 }
 
+/* sector to word */
 #define S2W(s) ((s)<<(BM_EXT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL))
+
 /* activity log to on disk bitmap -- prepare bio unless that sector
  * is already covered by previously prepared bios */
 static int atodb_prepare_unless_covered(struct drbd_conf *mdev,
@@ -546,13 +548,20 @@ static int atodb_prepare_unless_covered(struct drbd_conf *mdev,
 {
 	struct bio *bio;
 	struct page *page;
-	sector_t on_disk_sector = enr + mdev->ldev->md.md_offset
-		+ mdev->ldev->md.bm_offset;
+	sector_t on_disk_sector;
 	unsigned int page_offset = PAGE_SIZE;
 	int offset;
 	int i = 0;
 	int err = -ENOMEM;
 
+	/* We always write aligned, full 4k blocks,
+	 * so we can ignore the logical_block_size (for now) */
+	enr &= ~7U;
+	on_disk_sector = enr + mdev->ldev->md.md_offset
+		+ mdev->ldev->md.bm_offset;
+
+	D_ASSERT(!(on_disk_sector & 7U));
+
 	/* Check if that enr is already covered by an already created bio.
 	 * Caution, bios[] is not NULL terminated,
 	 * but only initialized to all NULL.
@@ -588,7 +597,7 @@ static int atodb_prepare_unless_covered(struct drbd_conf *mdev,
 
 	offset = S2W(enr);
 	drbd_bm_get_lel(mdev, offset,
-			min_t(size_t, S2W(1), drbd_bm_words(mdev) - offset),
+			min_t(size_t, S2W(8), drbd_bm_words(mdev) - offset),
 			kmap(page) + page_offset);
 	kunmap(page);
 
@@ -597,7 +606,7 @@ static int atodb_prepare_unless_covered(struct drbd_conf *mdev,
 	bio->bi_bdev = mdev->ldev->md_bdev;
 	bio->bi_sector = on_disk_sector;
 
-	if (bio_add_page(bio, page, MD_SECTOR_SIZE, page_offset) != MD_SECTOR_SIZE)
+	if (bio_add_page(bio, page, 4096, page_offset) != 4096)
 		goto out_put_page;
 
 	atomic_inc(&wc->count);
@@ -1327,7 +1336,7 @@ int drbd_rs_del_all(struct drbd_conf *mdev)
 	/* ok, ->resync is there. */
 	for (i = 0; i < mdev->resync->nr_elements; i++) {
 		e = lc_element_by_index(mdev->resync, i);
-		bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
+		bm_ext = lc_entry(e, struct bm_extent, lce);
 		if (bm_ext->lce.lc_number == LC_FREE)
 			continue;
 		if (bm_ext->lce.lc_number == mdev->resync_wenr) {
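(Aside, not part of the patch: a minimal userspace sketch of the alignment arithmetic introduced above. Rounding the extent number down to a multiple of 8 sectors means 8 x 512 B = 4 KiB, so the metadata write always starts on a 4k boundary; md_offset and bm_offset below are made-up placeholder values, not DRBD's.)

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t enr = 1234567;       /* arbitrary extent number, in sectors */
	uint64_t md_offset = 4096;    /* placeholder metadata start sector */
	uint64_t bm_offset = 8;       /* placeholder bitmap offset, in sectors */
	uint64_t on_disk_sector;

	enr &= ~7ULL;                 /* round down to a multiple of 8 sectors */
	on_disk_sector = enr + md_offset + bm_offset;

	/* 8 sectors x 512 bytes = 4096 bytes: as long as md_offset + bm_offset
	 * is itself a multiple of 8, every write starts on a 4 KiB boundary. */
	assert((on_disk_sector & 7ULL) == 0);
	printf("write starts at sector %llu (byte %llu)\n",
	       (unsigned long long)on_disk_sector,
	       (unsigned long long)(on_disk_sector * 512));
	return 0;
}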
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index 3d6f3d98894..3390716898d 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -67,7 +67,7 @@ struct drbd_bitmap {
 	size_t   bm_words;
 	size_t   bm_number_of_pages;
 	sector_t bm_dev_capacity;
-	struct semaphore bm_change; /* serializes resize operations */
+	struct mutex bm_change; /* serializes resize operations */
 
 	atomic_t bm_async_io;
 	wait_queue_head_t bm_io_wait;
@@ -115,7 +115,7 @@ void drbd_bm_lock(struct drbd_conf *mdev, char *why)
 		return;
 	}
 
-	trylock_failed = down_trylock(&b->bm_change);
+	trylock_failed = !mutex_trylock(&b->bm_change);
 
 	if (trylock_failed) {
 		dev_warn(DEV, "%s going to '%s' but bitmap already locked for '%s' by %s\n",
@@ -126,7 +126,7 @@ void drbd_bm_lock(struct drbd_conf *mdev, char *why)
 			 b->bm_task == mdev->receiver.task ? "receiver" :
 			 b->bm_task == mdev->asender.task ? "asender" :
 			 b->bm_task == mdev->worker.task ? "worker" : "?");
-		down(&b->bm_change);
+		mutex_lock(&b->bm_change);
 	}
 	if (__test_and_set_bit(BM_LOCKED, &b->bm_flags))
 		dev_err(DEV, "FIXME bitmap already locked in bm_lock\n");
@@ -148,7 +148,7 @@ void drbd_bm_unlock(struct drbd_conf *mdev)
 
 	b->bm_why = NULL;
 	b->bm_task = NULL;
-	up(&b->bm_change);
+	mutex_unlock(&b->bm_change);
 }
 
 /* word offset to long pointer */
@@ -296,7 +296,7 @@ int drbd_bm_init(struct drbd_conf *mdev)
 	if (!b)
 		return -ENOMEM;
 	spin_lock_init(&b->bm_lock);
-	init_MUTEX(&b->bm_change);
+	mutex_init(&b->bm_change);
 	init_waitqueue_head(&b->bm_io_wait);
 
 	mdev->bitmap = b;
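(Aside, illustration only: the semaphore-to-mutex conversion above keeps drbd_bm_lock()'s "try first, complain with context, then block" shape. A userspace pthread analogue of that pattern; the function names and message text are ours, not DRBD's. Compile with -pthread.)

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t bm_change = PTHREAD_MUTEX_INITIALIZER;

static void bm_lock(const char *why)
{
	/* kernel mutex_trylock() returns nonzero on success;
	 * pthread_mutex_trylock() returns 0 on success, hence this test. */
	if (pthread_mutex_trylock(&bm_change) != 0) {
		fprintf(stderr, "going to '%s' but lock already held, waiting\n", why);
		pthread_mutex_lock(&bm_change);
	}
}

static void bm_unlock(void)
{
	pthread_mutex_unlock(&bm_change);
}

int main(void)
{
	bm_lock("resize");
	/* ... critical section: resize the bitmap ... */
	bm_unlock();
	return 0;
}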
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index d9301e861d9..e5e86a78182 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -261,6 +261,9 @@ static inline const char *cmdname(enum drbd_packets cmd)
 		[P_OV_REQUEST]		= "OVRequest",
 		[P_OV_REPLY]		= "OVReply",
 		[P_OV_RESULT]		= "OVResult",
+		[P_CSUM_RS_REQUEST]	= "CsumRSRequest",
+		[P_RS_IS_IN_SYNC]	= "CsumRSIsInSync",
+		[P_COMPRESSED_BITMAP]	= "CBitmap",
 		[P_MAX_CMD]		= NULL,
 	};
 
@@ -443,13 +446,18 @@ struct p_rs_param_89 {
 	char csums_alg[SHARED_SECRET_MAX];
 } __packed;
 
+enum drbd_conn_flags {
+	CF_WANT_LOSE = 1,
+	CF_DRY_RUN = 2,
+};
+
 struct p_protocol {
 	struct p_header head;
 	u32 protocol;
 	u32 after_sb_0p;
 	u32 after_sb_1p;
 	u32 after_sb_2p;
-	u32 want_lose;
+	u32 conn_flags;
 	u32 two_primaries;
 
 	/* Since protocol version 87 and higher. */
@@ -791,6 +799,8 @@ enum {
 				 * while this is set. */
 	RESIZE_PENDING,		/* Size change detected locally, waiting for the response from
 				 * the peer, if it changed there as well. */
+	CONN_DRY_RUN,		/* Expect disconnect after resync handshake. */
+	GOT_PING_ACK,		/* set when we receive a ping_ack packet, misc wait gets woken */
 };
 
 struct drbd_bitmap; /* opaque for drbd_conf */
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index ab871e00ffc..67e0fc54224 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -1668,7 +1668,7 @@ int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc)
 int drbd_send_protocol(struct drbd_conf *mdev)
 {
 	struct p_protocol *p;
-	int size, rv;
+	int size, cf, rv;
 
 	size = sizeof(struct p_protocol);
 
@@ -1685,9 +1685,21 @@ int drbd_send_protocol(struct drbd_conf *mdev)
 	p->after_sb_0p = cpu_to_be32(mdev->net_conf->after_sb_0p);
 	p->after_sb_1p = cpu_to_be32(mdev->net_conf->after_sb_1p);
 	p->after_sb_2p = cpu_to_be32(mdev->net_conf->after_sb_2p);
-	p->want_lose = cpu_to_be32(mdev->net_conf->want_lose);
 	p->two_primaries = cpu_to_be32(mdev->net_conf->two_primaries);
 
+	cf = 0;
+	if (mdev->net_conf->want_lose)
+		cf |= CF_WANT_LOSE;
+	if (mdev->net_conf->dry_run) {
+		if (mdev->agreed_pro_version >= 92)
+			cf |= CF_DRY_RUN;
+		else {
+			dev_err(DEV, "--dry-run is not supported by peer");
+			return 0;
+		}
+	}
+	p->conn_flags = cpu_to_be32(cf);
+
 	if (mdev->agreed_pro_version >= 87)
 		strcpy(p->integrity_alg, mdev->net_conf->integrity_alg);
 
@@ -3161,14 +3173,18 @@ void drbd_free_bc(struct drbd_backing_dev *ldev)
 void drbd_free_sock(struct drbd_conf *mdev)
 {
 	if (mdev->data.socket) {
+		mutex_lock(&mdev->data.mutex);
 		kernel_sock_shutdown(mdev->data.socket, SHUT_RDWR);
 		sock_release(mdev->data.socket);
 		mdev->data.socket = NULL;
+		mutex_unlock(&mdev->data.mutex);
 	}
 	if (mdev->meta.socket) {
+		mutex_lock(&mdev->meta.mutex);
 		kernel_sock_shutdown(mdev->meta.socket, SHUT_RDWR);
 		sock_release(mdev->meta.socket);
 		mdev->meta.socket = NULL;
+		mutex_unlock(&mdev->meta.mutex);
 	}
 }
 
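(Aside, sketch only: the p_protocol change above folds the old want_lose field plus the new dry-run option into one big-endian conn_flags word instead of spending a u32 per boolean. The same encode/decode round-trip in plain userspace C, with htonl()/ntohl() standing in for cpu_to_be32()/be32_to_cpu().)

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

enum conn_flags {
	CF_WANT_LOSE = 1,
	CF_DRY_RUN   = 2,
};

int main(void)
{
	int want_lose = 1, dry_run = 1;   /* example option values */
	uint32_t cf = 0;

	if (want_lose)
		cf |= CF_WANT_LOSE;
	if (dry_run)
		cf |= CF_DRY_RUN;

	uint32_t wire = htonl(cf);        /* sender side:   cpu_to_be32() */
	uint32_t decoded = ntohl(wire);   /* receiver side: be32_to_cpu() */

	printf("want_lose=%d dry_run=%d\n",
	       !!(decoded & CF_WANT_LOSE), !!(decoded & CF_DRY_RUN));
	return 0;
}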
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 4df3b40b105..6429d2b19e0 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -285,8 +285,8 @@ int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
 	}
 
 	if (r == SS_NO_UP_TO_DATE_DISK && force &&
-	    (mdev->state.disk == D_INCONSISTENT ||
-	     mdev->state.disk == D_OUTDATED)) {
+	    (mdev->state.disk < D_UP_TO_DATE &&
+	     mdev->state.disk >= D_INCONSISTENT)) {
 		mask.disk = D_MASK;
 		val.disk = D_UP_TO_DATE;
 		forced = 1;
@@ -407,7 +407,7 @@ static int drbd_nl_primary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
 	}
 
 	reply->ret_code =
-		drbd_set_role(mdev, R_PRIMARY, primary_args.overwrite_peer);
+		drbd_set_role(mdev, R_PRIMARY, primary_args.primary_force);
 
 	return 0;
 }
@@ -941,6 +941,25 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 
 	drbd_md_set_sector_offsets(mdev, nbc);
 
+	/* allocate a second IO page if logical_block_size != 512 */
+	logical_block_size = bdev_logical_block_size(nbc->md_bdev);
+	if (logical_block_size == 0)
+		logical_block_size = MD_SECTOR_SIZE;
+
+	if (logical_block_size != MD_SECTOR_SIZE) {
+		if (!mdev->md_io_tmpp) {
+			struct page *page = alloc_page(GFP_NOIO);
+			if (!page)
+				goto force_diskless_dec;
+
+			dev_warn(DEV, "Meta data's bdev logical_block_size = %d != %d\n",
+				 logical_block_size, MD_SECTOR_SIZE);
+			dev_warn(DEV, "Workaround engaged (has performance impact).\n");
+
+			mdev->md_io_tmpp = page;
+		}
+	}
+
 	if (!mdev->bitmap) {
 		if (drbd_bm_init(mdev)) {
 			retcode = ERR_NOMEM;
@@ -980,25 +999,6 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 		goto force_diskless_dec;
 	}
 
-	/* allocate a second IO page if logical_block_size != 512 */
-	logical_block_size = bdev_logical_block_size(nbc->md_bdev);
-	if (logical_block_size == 0)
-		logical_block_size = MD_SECTOR_SIZE;
-
-	if (logical_block_size != MD_SECTOR_SIZE) {
-		if (!mdev->md_io_tmpp) {
-			struct page *page = alloc_page(GFP_NOIO);
-			if (!page)
-				goto force_diskless_dec;
-
-			dev_warn(DEV, "Meta data's bdev logical_block_size = %d != %d\n",
-				 logical_block_size, MD_SECTOR_SIZE);
-			dev_warn(DEV, "Workaround engaged (has performance impact).\n");
-
-			mdev->md_io_tmpp = page;
-		}
-	}
-
 	/* Reset the "barriers don't work" bits here, then force meta data to
 	 * be written, to ensure we determine if barriers are supported. */
 	if (nbc->dc.no_md_flush)
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index d065c646b35..ed9f1de24a7 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -2513,6 +2513,10 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
 	}
 
 	if (hg == -100) {
+		/* FIXME this log message is not correct if we end up here
+		 * after an attempted attach on a diskless node.
+		 * We just refuse to attach -- well, we drop the "connection"
+		 * to that disk, in a way... */
 		dev_alert(DEV, "Split-Brain detected, dropping connection!\n");
 		drbd_khelper(mdev, "split-brain");
 		return C_MASK;
@@ -2538,6 +2542,16 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
 		}
 	}
 
+	if (mdev->net_conf->dry_run || test_bit(CONN_DRY_RUN, &mdev->flags)) {
+		if (hg == 0)
+			dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
+		else
+			dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.",
+				 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
+				 abs(hg) >= 2 ? "full" : "bit-map based");
+		return C_MASK;
+	}
+
 	if (abs(hg) >= 2) {
 		dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
 		if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake"))
@@ -2585,7 +2599,7 @@ static int receive_protocol(struct drbd_conf *mdev, struct p_header *h)
 	struct p_protocol *p = (struct p_protocol *)h;
 	int header_size, data_size;
 	int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
-	int p_want_lose, p_two_primaries;
+	int p_want_lose, p_two_primaries, cf;
 	char p_integrity_alg[SHARED_SECRET_MAX] = "";
 
 	header_size = sizeof(*p) - sizeof(*h);
@@ -2598,8 +2612,14 @@ static int receive_protocol(struct drbd_conf *mdev, struct p_header *h)
 	p_after_sb_0p = be32_to_cpu(p->after_sb_0p);
 	p_after_sb_1p = be32_to_cpu(p->after_sb_1p);
 	p_after_sb_2p = be32_to_cpu(p->after_sb_2p);
-	p_want_lose = be32_to_cpu(p->want_lose);
 	p_two_primaries = be32_to_cpu(p->two_primaries);
+	cf = be32_to_cpu(p->conn_flags);
+	p_want_lose = cf & CF_WANT_LOSE;
+
+	clear_bit(CONN_DRY_RUN, &mdev->flags);
+
+	if (cf & CF_DRY_RUN)
+		set_bit(CONN_DRY_RUN, &mdev->flags);
 
 	if (p_proto != mdev->net_conf->wire_protocol) {
 		dev_err(DEV, "incompatible communication protocols\n");
@@ -3118,13 +3138,16 @@ static int receive_state(struct drbd_conf *mdev, struct p_header *h)
 
 	put_ldev(mdev);
 	if (nconn == C_MASK) {
+		nconn = C_CONNECTED;
 		if (mdev->state.disk == D_NEGOTIATING) {
 			drbd_force_state(mdev, NS(disk, D_DISKLESS));
-			nconn = C_CONNECTED;
 		} else if (peer_state.disk == D_NEGOTIATING) {
 			dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
 			peer_state.disk = D_DISKLESS;
+			real_peer_disk = D_DISKLESS;
 		} else {
+			if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags))
+				return FALSE;
 			D_ASSERT(oconn == C_WF_REPORT_PARAMS);
 			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
 			return FALSE;
@@ -3594,10 +3617,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
 
 	/* asender does not clean up anything. it must not interfere, either */
 	drbd_thread_stop(&mdev->asender);
-
-	mutex_lock(&mdev->data.mutex);
 	drbd_free_sock(mdev);
-	mutex_unlock(&mdev->data.mutex);
 
 	spin_lock_irq(&mdev->req_lock);
 	_drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
@@ -4054,6 +4074,8 @@ static int got_PingAck(struct drbd_conf *mdev, struct p_header *h)
 {
 	/* restore idle timeout */
 	mdev->meta.socket->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
+	if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags))
+		wake_up(&mdev->misc_wait);
 
 	return TRUE;
 }
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index b453c2bca3b..44bf6d11197 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -938,7 +938,8 @@ int w_e_end_csum_rs_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 
 	if (eq) {
 		drbd_set_in_sync(mdev, e->sector, e->size);
-		mdev->rs_same_csum++;
+		/* rs_same_csums unit is BM_BLOCK_SIZE */
+		mdev->rs_same_csum += e->size >> BM_BLOCK_SHIFT;
 		ok = drbd_send_ack(mdev, P_RS_IS_IN_SYNC, e);
 	} else {
 		inc_rs_pending(mdev);
@@ -1288,6 +1289,14 @@ int drbd_alter_sa(struct drbd_conf *mdev, int na)
 	return retcode;
 }
 
+static void ping_peer(struct drbd_conf *mdev)
+{
+	clear_bit(GOT_PING_ACK, &mdev->flags);
+	request_ping(mdev);
+	wait_event(mdev->misc_wait,
+		   test_bit(GOT_PING_ACK, &mdev->flags) || mdev->state.conn < C_CONNECTED);
+}
+
 /**
  * drbd_start_resync() - Start the resync process
  * @mdev:	DRBD device.
@@ -1371,7 +1380,6 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
 		_drbd_pause_after(mdev);
 	}
 	write_unlock_irq(&global_state_lock);
-	drbd_state_unlock(mdev);
 	put_ldev(mdev);
 
 	if (r == SS_SUCCESS) {
@@ -1382,11 +1390,8 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
 
 	if (mdev->rs_total == 0) {
 		/* Peer still reachable? Beware of failing before-resync-target handlers! */
-		request_ping(mdev);
-		__set_current_state(TASK_INTERRUPTIBLE);
-		schedule_timeout(mdev->net_conf->ping_timeo*HZ/9); /* 9 instead 10 */
+		ping_peer(mdev);
 		drbd_resync_finished(mdev);
-		return;
 	}
 
 	/* ns.conn may already be != mdev->state.conn,
@@ -1398,6 +1403,7 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
 
 		drbd_md_sync(mdev);
 	}
+	drbd_state_unlock(mdev);
 }
 
 int drbd_worker(struct drbd_thread *thi)
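(Aside, userspace analogue under stated assumptions -- a pthreads condition variable instead of the kernel's wait_event()/wake_up(): the ping_peer() helper added above clears GOT_PING_ACK, sends the ping, and sleeps until got_PingAck() sets the flag or the connection drops. A minimal sketch of that handshake; compile with -pthread.)

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t misc_wait = PTHREAD_COND_INITIALIZER;
static bool got_ping_ack;
static bool connected = true;

static void *peer_thread(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	got_ping_ack = true;              /* what got_PingAck() does */
	pthread_cond_broadcast(&misc_wait);
	pthread_mutex_unlock(&lock);
	return NULL;
}

static void ping_peer(void)
{
	pthread_t peer;

	pthread_mutex_lock(&lock);
	got_ping_ack = false;             /* clear_bit(GOT_PING_ACK, ...) */
	pthread_mutex_unlock(&lock);

	/* request_ping(): here the "ack" arrives from another thread */
	pthread_create(&peer, NULL, peer_thread, NULL);

	pthread_mutex_lock(&lock);
	while (!got_ping_ack && connected)
		pthread_cond_wait(&misc_wait, &lock);
	pthread_mutex_unlock(&lock);

	pthread_join(peer, NULL);
}

int main(void)
{
	ping_peer();
	printf("ping acknowledged\n");
	return 0;
}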
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index cb69929d917..8546d123b9a 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -237,6 +237,8 @@ static int do_lo_send_aops(struct loop_device *lo, struct bio_vec *bvec,
 		if (ret)
 			goto fail;
 
+		file_update_time(file);
+
 		transfer_result = lo_do_transfer(lo, WRITE, page, offset,
 				bvec->bv_page, bv_offs, size, IV);
 		copied = size;
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index 8866ca369d5..71acf4e5335 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -341,11 +341,11 @@ static int pcd_wait(struct pcd_unit *cd, int go, int stop, char *fun, char *msg)
 	       && (j++ < PCD_SPIN))
 		udelay(PCD_DELAY);
 
-	if ((r & (IDE_ERR & stop)) || (j >= PCD_SPIN)) {
+	if ((r & (IDE_ERR & stop)) || (j > PCD_SPIN)) {
 		s = read_reg(cd, 7);
 		e = read_reg(cd, 1);
 		p = read_reg(cd, 2);
-		if (j >= PCD_SPIN)
+		if (j > PCD_SPIN)
 			e |= 0x100;
 		if (fun)
 			printk("%s: %s %s: alt=0x%x stat=0x%x err=0x%x"
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index ddb4f9abd48..c059aab3006 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -391,11 +391,11 @@ static int pf_wait(struct pf_unit *pf, int go, int stop, char *fun, char *msg)
 	       && (j++ < PF_SPIN))
 		udelay(PF_SPIN_DEL);
 
-	if ((r & (STAT_ERR & stop)) || (j >= PF_SPIN)) {
+	if ((r & (STAT_ERR & stop)) || (j > PF_SPIN)) {
 		s = read_reg(pf, 7);
 		e = read_reg(pf, 1);
 		p = read_reg(pf, 2);
-		if (j >= PF_SPIN)
+		if (j > PF_SPIN)
 			e |= 0x100;
 		if (fun)
 			printk("%s: %s %s: alt=0x%x stat=0x%x err=0x%x"
diff --git a/drivers/block/paride/pt.c b/drivers/block/paride/pt.c
index 1e4006e18f0..bc5825fdeaa 100644
--- a/drivers/block/paride/pt.c
+++ b/drivers/block/paride/pt.c
@@ -274,11 +274,11 @@ static int pt_wait(struct pt_unit *tape, int go, int stop, char *fun, char *msg)
 	       && (j++ < PT_SPIN))
 		udelay(PT_SPIN_DEL);
 
-	if ((r & (STAT_ERR & stop)) || (j >= PT_SPIN)) {
+	if ((r & (STAT_ERR & stop)) || (j > PT_SPIN)) {
 		s = read_reg(pi, 7);
 		e = read_reg(pi, 1);
 		p = read_reg(pi, 2);
-		if (j >= PT_SPIN)
+		if (j > PT_SPIN)
 			e |= 0x100;
 		if (fun)
 			printk("%s: %s %s: alt=0x%x stat=0x%x err=0x%x"
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 4b12b820c9a..2138a7ae050 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -348,14 +348,13 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
 	set_capacity(vblk->disk, cap);
 
 	/* We can handle whatever the host told us to handle. */
-	blk_queue_max_phys_segments(q, vblk->sg_elems-2);
-	blk_queue_max_hw_segments(q, vblk->sg_elems-2);
+	blk_queue_max_segments(q, vblk->sg_elems-2);
 
 	/* No need to bounce any requests */
 	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
 
 	/* No real sector limit. */
-	blk_queue_max_sectors(q, -1U);
+	blk_queue_max_hw_sectors(q, -1U);
 
 	/* Host can optionally specify maximum segment size and number of
 	 * segments. */
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 58c62ff42ab..8b827f37b03 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2186,7 +2186,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
 	blk_queue_prep_rq(sdp->request_queue, sd_prep_fn);
 
 	gd->driverfs_dev = &sdp->sdev_gendev;
-	gd->flags = GENHD_FL_EXT_DEVT | GENHD_FL_DRIVERFS;
+	gd->flags = GENHD_FL_EXT_DEVT;
 	if (sdp->removable)
 		gd->flags |= GENHD_FL_REMOVABLE;
 