author	Linus Torvalds <torvalds@linux-foundation.org>	2010-01-21 10:32:11 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-01-21 10:32:11 -0500
commit	456eac94789e1b512515e6974e091ef655f343de (patch)
tree	79f277d02e5e480a373c129bdce7564421d360c7
parent	dedd0c2a48d1eb779373de5eddd04a3e059ce540 (diff)
parent	f06f135d8642e2f6812cfcb4ea8e4e9122d4d58c (diff)
Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block
* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
  fs/bio.c: fix shadows sparse warning
  drbd: The kernel code is now equivalent to out of tree release 8.3.7
  drbd: Allow online resizing of DRBD devices while peer not reachable (needs to be explicitly forced)
  drbd: Don't go into StandAlone mode when authentification failes because of network error
  drivers/block/drbd/drbd_receiver.c: correct NULL test
  cfq-iosched: Respect ioprio_class when preempting
  genhd: overlapping variable definition
  block: removed unused as_io_context
  DM: Fix device mapper topology stacking
  block: bdev_stack_limits wrapper
  block: Fix discard alignment calculation and printing
  block: Correct handling of bottom device misaligment
  drbd: check on CONFIG_LBDAF, not LBD
  drivers/block/drbd: Correct NULL test
  drbd: Silenced an assert that could triggered after changing write ordering method
  drbd: Kconfig fix
  drbd: Fix for a race between IO and a detach operation [Bugz 262]
  drbd: Use drbd_crypto_is_hash() instead of an open coded check
-rw-r--r--	block/blk-ioc.c	5
-rw-r--r--	block/blk-settings.c	39
-rw-r--r--	block/cfq-iosched.c	6
-rw-r--r--	block/genhd.c	2
-rw-r--r--	drivers/block/drbd/Kconfig	2
-rw-r--r--	drivers/block/drbd/drbd_int.h	7
-rw-r--r--	drivers/block/drbd/drbd_main.c	1
-rw-r--r--	drivers/block/drbd/drbd_nl.c	19
-rw-r--r--	drivers/block/drbd/drbd_receiver.c	46
-rw-r--r--	drivers/md/dm-table.c	20
-rw-r--r--	fs/bio.c	2
-rw-r--r--	include/linux/blkdev.h	9
-rw-r--r--	include/linux/drbd.h	2
-rw-r--r--	include/linux/drbd_nl.h	1
-rw-r--r--	include/linux/genhd.h	6
-rw-r--r--	include/linux/iocontext.h	27
16 files changed, 107 insertions, 87 deletions
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index cbdabb0dd6d7..98e6bf61b0ac 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -39,8 +39,6 @@ int put_io_context(struct io_context *ioc)
 
 	if (atomic_long_dec_and_test(&ioc->refcount)) {
 		rcu_read_lock();
-		if (ioc->aic && ioc->aic->dtor)
-			ioc->aic->dtor(ioc->aic);
 		cfq_dtor(ioc);
 		rcu_read_unlock();
 
@@ -76,8 +74,6 @@ void exit_io_context(struct task_struct *task)
 	task_unlock(task);
 
 	if (atomic_dec_and_test(&ioc->nr_tasks)) {
-		if (ioc->aic && ioc->aic->exit)
-			ioc->aic->exit(ioc->aic);
 		cfq_exit(ioc);
 
 	}
@@ -97,7 +93,6 @@ struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
 	ret->ioprio = 0;
 	ret->last_waited = jiffies; /* doesn't matter... */
 	ret->nr_batch_requests = 0; /* because this is 0 */
-	ret->aic = NULL;
 	INIT_RADIX_TREE(&ret->radix_root, GFP_ATOMIC | __GFP_HIGH);
 	INIT_HLIST_HEAD(&ret->cic_list);
 	ret->ioc_data = NULL;
diff --git a/block/blk-settings.c b/block/blk-settings.c
index d52d4adc440b..5eeb9e0d256e 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -528,7 +528,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 		     sector_t offset)
 {
 	sector_t alignment;
-	unsigned int top, bottom;
+	unsigned int top, bottom, ret = 0;
 
 	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
 	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
@@ -546,6 +546,8 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 	t->max_segment_size = min_not_zero(t->max_segment_size,
 					   b->max_segment_size);
 
+	t->misaligned |= b->misaligned;
+
 	alignment = queue_limit_alignment_offset(b, offset);
 
 	/* Bottom device has different alignment.  Check that it is
@@ -558,8 +560,10 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 		bottom = max(b->physical_block_size, b->io_min) + alignment;
 
 		/* Verify that top and bottom intervals line up */
-		if (max(top, bottom) & (min(top, bottom) - 1))
+		if (max(top, bottom) & (min(top, bottom) - 1)) {
 			t->misaligned = 1;
+			ret = -1;
+		}
 	}
 
 	t->logical_block_size = max(t->logical_block_size,
@@ -578,18 +582,21 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 	if (t->physical_block_size & (t->logical_block_size - 1)) {
 		t->physical_block_size = t->logical_block_size;
 		t->misaligned = 1;
+		ret = -1;
 	}
 
 	/* Minimum I/O a multiple of the physical block size? */
 	if (t->io_min & (t->physical_block_size - 1)) {
 		t->io_min = t->physical_block_size;
 		t->misaligned = 1;
+		ret = -1;
 	}
 
 	/* Optimal I/O a multiple of the physical block size? */
 	if (t->io_opt & (t->physical_block_size - 1)) {
 		t->io_opt = 0;
 		t->misaligned = 1;
+		ret = -1;
 	}
 
 	/* Find lowest common alignment_offset */
@@ -597,8 +604,10 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 		& (max(t->physical_block_size, t->io_min) - 1);
 
 	/* Verify that new alignment_offset is on a logical block boundary */
-	if (t->alignment_offset & (t->logical_block_size - 1))
+	if (t->alignment_offset & (t->logical_block_size - 1)) {
 		t->misaligned = 1;
+		ret = -1;
+	}
 
 	/* Discard alignment and granularity */
 	if (b->discard_granularity) {
@@ -626,11 +635,33 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 			(t->discard_granularity - 1);
 	}
 
-	return t->misaligned ? -1 : 0;
+	return ret;
 }
 EXPORT_SYMBOL(blk_stack_limits);
 
 /**
+ * bdev_stack_limits - adjust queue limits for stacked drivers
+ * @t:	the stacking driver limits (top device)
+ * @bdev:  the component block_device (bottom)
+ * @start:  first data sector within component device
+ *
+ * Description:
+ *    Merges queue limits for a top device and a block_device.  Returns
+ *    0 if alignment didn't change.  Returns -1 if adding the bottom
+ *    device caused misalignment.
+ */
+int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
+		      sector_t start)
+{
+	struct request_queue *bq = bdev_get_queue(bdev);
+
+	start += get_start_sect(bdev);
+
+	return blk_stack_limits(t, &bq->limits, start << 9);
+}
+EXPORT_SYMBOL(bdev_stack_limits);
+
+/**
 * disk_stack_limits - adjust queue limits for stacked drivers
 * @disk:  MD/DM gendisk (top)
 * @bdev:  the underlying block device (bottom)
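The bdev_stack_limits() helper added above folds the partition offset
(get_start_sect()) into the byte offset before delegating to
blk_stack_limits(), so stacking drivers no longer repeat that arithmetic
themselves. A minimal caller sketch, with a hypothetical function name
(only bdev_stack_limits() and get_start_sect() are real):

	/* Hypothetical stacking-driver helper merging one component
	 * device's limits into the top-level limits. 'start' is the
	 * first data sector within the component device, in 512-byte
	 * sectors, exactly as dm-table.c passes it below. */
	static int example_stack_component(struct queue_limits *top,
					   struct block_device *component,
					   sector_t start)
	{
		/* bdev_stack_limits() adds get_start_sect(component)
		 * and converts sectors to bytes with << 9 itself. */
		if (bdev_stack_limits(top, component, start) < 0)
			return -1;	/* bottom device misaligned the stack */
		return 0;
	}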
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 918c7fd9aeb1..ee130f14d1fc 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -3077,6 +3077,12 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
 		return true;
 
 	/*
+	 * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
+	 */
+	if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
+		return false;
+
+	/*
 	 * if the new request is sync, but the currently running queue is
 	 * not, let the sync request have priority.
 	 */
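The early return added above means a queue of lower ioprio class can
never kick a real-time queue off its active timeslice, which is what
"Respect ioprio_class when preempting" fixes. A condensed sketch of the
policy (illustrative only; the real cfq_should_preempt() interleaves
these tests with several other conditions and in a different order):

	/* Preemption policy sketch, not the literal cfq code. */
	static bool should_preempt_sketch(bool cur_is_rt, bool new_is_rt,
					  bool cur_is_sync, bool new_is_sync)
	{
		if (cur_is_rt && !new_is_rt)
			return false;	/* new rule: BE never preempts RT */
		if (new_is_rt && !cur_is_rt)
			return true;	/* RT class beats best-effort */
		if (new_is_sync && !cur_is_sync)
			return true;	/* sync beats async */
		return false;
	}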
diff --git a/block/genhd.c b/block/genhd.c
index b11a4ad7d571..d13ba76a169c 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -867,7 +867,7 @@ static ssize_t disk_discard_alignment_show(struct device *dev,
 {
 	struct gendisk *disk = dev_to_disk(dev);
 
-	return sprintf(buf, "%u\n", queue_discard_alignment(disk->queue));
+	return sprintf(buf, "%d\n", queue_discard_alignment(disk->queue));
 }
 
 static DEVICE_ATTR(range, S_IRUGO, disk_range_show, NULL);
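The %u to %d switch matters because queue_discard_alignment() returns a
signed int (see its declaration in the include/linux/blkdev.h hunk
below), and after "block: Fix discard alignment calculation and
printing" that value can be negative for a misaligned device. Printed
unsigned, a -1 surfaces in sysfs as a huge bogus number; a trivial
standalone illustration:

	#include <stdio.h>

	int main(void)
	{
		int align = -1;		/* a misaligned device */
		printf("%u\n", align);	/* 4294967295 -- the old, wrong output */
		printf("%d\n", align);	/* -1 -- the corrected output */
		return 0;
	}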
diff --git a/drivers/block/drbd/Kconfig b/drivers/block/drbd/Kconfig
index f4acd04ebeef..df0983787390 100644
--- a/drivers/block/drbd/Kconfig
+++ b/drivers/block/drbd/Kconfig
@@ -3,7 +3,7 @@
 #
 
 comment "DRBD disabled because PROC_FS, INET or CONNECTOR not selected"
-	depends on !PROC_FS || !INET || !CONNECTOR
+	depends on PROC_FS='n' || INET='n' || CONNECTOR='n'
 
 config BLK_DEV_DRBD
 	tristate "DRBD Distributed Replicated Block Device support"
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index c97558763430..2bf3a6ef3684 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1275,7 +1275,7 @@ struct bm_extent {
 #if DRBD_MAX_SECTORS_BM < DRBD_MAX_SECTORS_32
 #define DRBD_MAX_SECTORS      DRBD_MAX_SECTORS_BM
 #define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_BM
-#elif !defined(CONFIG_LBD) && BITS_PER_LONG == 32
+#elif !defined(CONFIG_LBDAF) && BITS_PER_LONG == 32
 #define DRBD_MAX_SECTORS      DRBD_MAX_SECTORS_32
 #define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_32
 #else
@@ -1371,10 +1371,9 @@ extern int is_valid_ar_handle(struct drbd_request *, sector_t);
 extern void drbd_suspend_io(struct drbd_conf *mdev);
 extern void drbd_resume_io(struct drbd_conf *mdev);
 extern char *ppsize(char *buf, unsigned long long size);
-extern sector_t drbd_new_dev_size(struct drbd_conf *,
-		struct drbd_backing_dev *);
+extern sector_t drbd_new_dev_size(struct drbd_conf *, struct drbd_backing_dev *, int);
 enum determine_dev_size { dev_size_error = -1, unchanged = 0, shrunk = 1, grew = 2 };
-extern enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *) __must_hold(local);
+extern enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *, int force) __must_hold(local);
 extern void resync_after_online_grow(struct drbd_conf *);
 extern void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int) __must_hold(local);
 extern int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role,
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 9348f33f6242..e898ad9eb1c3 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -1298,6 +1298,7 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
 			dev_err(DEV, "Sending state in drbd_io_error() failed\n");
 		}
 
+		wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
 		lc_destroy(mdev->resync);
 		mdev->resync = NULL;
 		lc_destroy(mdev->act_log);
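This one-line wait is the fix for the race between IO and a detach
operation [Bugz 262]: the lc_destroy() calls below it must not free the
resync and activity-log state while an IO path still holds a reference
to the backing device. A sketch of the refcount discipline involved
(the function is hypothetical; get_ldev(), put_ldev(), local_cnt and
misc_wait are the real DRBD names):

	/* IO submitters pin the backing device via local_cnt. */
	static int example_io_path(struct drbd_conf *mdev)
	{
		if (!get_ldev(mdev))	/* takes a local_cnt reference */
			return -EIO;	/* disk already detached */
		/* ... submit IO against mdev->ldev ... */
		put_ldev(mdev);		/* drops it and wakes misc_wait */
		return 0;
	}

With the added wait_event(), teardown only proceeds once local_cnt has
drained to zero, so no such reference can still be live.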
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 4e0726aa53b0..1292e0620663 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -510,7 +510,7 @@ void drbd_resume_io(struct drbd_conf *mdev)
 * Returns 0 on success, negative return values indicate errors.
 * You should call drbd_md_sync() after calling this function.
 */
-enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev) __must_hold(local)
+enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev, int force) __must_hold(local)
 {
 	sector_t prev_first_sect, prev_size; /* previous meta location */
 	sector_t la_size;
@@ -541,7 +541,7 @@ enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev) __must_ho
 	/* TODO: should only be some assert here, not (re)init... */
 	drbd_md_set_sector_offsets(mdev, mdev->ldev);
 
-	size = drbd_new_dev_size(mdev, mdev->ldev);
+	size = drbd_new_dev_size(mdev, mdev->ldev, force);
 
 	if (drbd_get_capacity(mdev->this_bdev) != size ||
 	    drbd_bm_capacity(mdev) != size) {
@@ -596,7 +596,7 @@ out:
 }
 
 sector_t
-drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
+drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev, int assume_peer_has_space)
 {
 	sector_t p_size = mdev->p_size;  /* partner's disk size. */
 	sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
@@ -606,6 +606,11 @@ drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
 
 	m_size = drbd_get_max_capacity(bdev);
 
+	if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) {
+		dev_warn(DEV, "Resize while not connected was forced by the user!\n");
+		p_size = m_size;
+	}
+
 	if (p_size && m_size) {
 		size = min_t(sector_t, p_size, m_size);
 	} else {
@@ -965,7 +970,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 
 	/* Prevent shrinking of consistent devices ! */
 	if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
-	    drbd_new_dev_size(mdev, nbc) < nbc->md.la_size_sect) {
+	    drbd_new_dev_size(mdev, nbc, 0) < nbc->md.la_size_sect) {
 		dev_warn(DEV, "refusing to truncate a consistent device\n");
 		retcode = ERR_DISK_TO_SMALL;
 		goto force_diskless_dec;
@@ -1052,7 +1057,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 	    !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
 		set_bit(USE_DEGR_WFC_T, &mdev->flags);
 
-	dd = drbd_determin_dev_size(mdev);
+	dd = drbd_determin_dev_size(mdev, 0);
 	if (dd == dev_size_error) {
 		retcode = ERR_NOMEM_BITMAP;
 		goto force_diskless_dec;
@@ -1271,7 +1276,7 @@ static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
 		goto fail;
 	}
 
-	if (crypto_tfm_alg_type(crypto_hash_tfm(tfm)) != CRYPTO_ALG_TYPE_SHASH) {
+	if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
 		retcode = ERR_AUTH_ALG_ND;
 		goto fail;
 	}
@@ -1504,7 +1509,7 @@ static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
 	}
 
 	mdev->ldev->dc.disk_size = (sector_t)rs.resize_size;
-	dd = drbd_determin_dev_size(mdev);
+	dd = drbd_determin_dev_size(mdev, rs.resize_force);
 	drbd_md_sync(mdev);
 	put_ldev(mdev);
 	if (dd == dev_size_error) {
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 259c1351b152..f22a5283128a 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -878,9 +878,13 @@ retry:
 
 	if (mdev->cram_hmac_tfm) {
 		/* drbd_request_state(mdev, NS(conn, WFAuth)); */
-		if (!drbd_do_auth(mdev)) {
+		switch (drbd_do_auth(mdev)) {
+		case -1:
 			dev_err(DEV, "Authentication of peer failed\n");
 			return -1;
+		case 0:
+			dev_err(DEV, "Authentication of peer failed, trying again.\n");
+			return 0;
 		}
 	}
 
@@ -1201,10 +1205,11 @@ static int receive_Barrier(struct drbd_conf *mdev, struct p_header *h)
 
 	case WO_bdev_flush:
 	case WO_drain_io:
-		D_ASSERT(rv == FE_STILL_LIVE);
-		set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
-		drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
-		rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
+		if (rv == FE_STILL_LIVE) {
+			set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
+			drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
+			rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
+		}
 		if (rv == FE_RECYCLED)
 			return TRUE;
 
@@ -2865,7 +2870,7 @@ static int receive_sizes(struct drbd_conf *mdev, struct p_header *h)
 
 	/* Never shrink a device with usable data during connect.
 	   But allow online shrinking if we are connected. */
-	if (drbd_new_dev_size(mdev, mdev->ldev) <
+	if (drbd_new_dev_size(mdev, mdev->ldev, 0) <
 	    drbd_get_capacity(mdev->this_bdev) &&
 	    mdev->state.disk >= D_OUTDATED &&
 	    mdev->state.conn < C_CONNECTED) {
@@ -2880,7 +2885,7 @@ static int receive_sizes(struct drbd_conf *mdev, struct p_header *h)
 #undef min_not_zero
 
 	if (get_ldev(mdev)) {
-		dd = drbd_determin_dev_size(mdev);
+		dd = drbd_determin_dev_size(mdev, 0);
 		put_ldev(mdev);
 		if (dd == dev_size_error)
 			return FALSE;
@@ -3830,10 +3835,17 @@ static int drbd_do_auth(struct drbd_conf *mdev)
 {
 	dev_err(DEV, "This kernel was build without CONFIG_CRYPTO_HMAC.\n");
 	dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
-	return 0;
+	return -1;
 }
 #else
 #define CHALLENGE_LEN 	64
+
+/* Return value:
+	1 - auth succeeded,
+	0 - failed, try again (network error),
+	-1 - auth failed, don't try again.
+*/
+
 static int drbd_do_auth(struct drbd_conf *mdev)
 {
 	char my_challenge[CHALLENGE_LEN];  /* 64 Bytes... */
@@ -3854,7 +3866,7 @@ static int drbd_do_auth(struct drbd_conf *mdev)
 		(u8 *)mdev->net_conf->shared_secret, key_len);
 	if (rv) {
 		dev_err(DEV, "crypto_hash_setkey() failed with %d\n", rv);
-		rv = 0;
+		rv = -1;
 		goto fail;
 	}
 
@@ -3877,14 +3889,14 @@ static int drbd_do_auth(struct drbd_conf *mdev)
 
 	if (p.length > CHALLENGE_LEN*2) {
 		dev_err(DEV, "expected AuthChallenge payload too big.\n");
-		rv = 0;
+		rv = -1;
 		goto fail;
 	}
 
 	peers_ch = kmalloc(p.length, GFP_NOIO);
 	if (peers_ch == NULL) {
 		dev_err(DEV, "kmalloc of peers_ch failed\n");
-		rv = 0;
+		rv = -1;
 		goto fail;
 	}
 
@@ -3900,7 +3912,7 @@ static int drbd_do_auth(struct drbd_conf *mdev)
 	response = kmalloc(resp_size, GFP_NOIO);
 	if (response == NULL) {
 		dev_err(DEV, "kmalloc of response failed\n");
-		rv = 0;
+		rv = -1;
 		goto fail;
 	}
 
@@ -3910,7 +3922,7 @@ static int drbd_do_auth(struct drbd_conf *mdev)
 	rv = crypto_hash_digest(&desc, &sg, sg.length, response);
 	if (rv) {
 		dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
-		rv = 0;
+		rv = -1;
 		goto fail;
 	}
 
@@ -3944,9 +3956,9 @@ static int drbd_do_auth(struct drbd_conf *mdev)
 	}
 
 	right_response = kmalloc(resp_size, GFP_NOIO);
-	if (response == NULL) {
+	if (right_response == NULL) {
 		dev_err(DEV, "kmalloc of right_response failed\n");
-		rv = 0;
+		rv = -1;
 		goto fail;
 	}
 
@@ -3955,7 +3967,7 @@ static int drbd_do_auth(struct drbd_conf *mdev)
 	rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
 	if (rv) {
 		dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
-		rv = 0;
+		rv = -1;
 		goto fail;
 	}
 
@@ -3964,6 +3976,8 @@ static int drbd_do_auth(struct drbd_conf *mdev)
 	if (rv)
 		dev_info(DEV, "Peer authenticated using %d bytes of '%s' HMAC\n",
 		     resp_size, mdev->net_conf->cram_hmac_alg);
+	else
+		rv = -1;
 
 fail:
 	kfree(peers_ch);
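Two separate fixes meet in this file. The NULL-test correction is a
copy-paste slip: right_response was allocated, but response was
re-checked. The larger change gives drbd_do_auth() the tri-state return
documented in the new comment, so the connect path can distinguish a
network hiccup from a genuine credential mismatch; restated as a caller
sketch (mirroring the new switch in the first hunk above):

	switch (drbd_do_auth(mdev)) {
	case 1:
		break;		/* authenticated, continue the handshake */
	case 0:
		return 0;	/* transient network failure: retry connect */
	default:		/* -1 */
		return -1;	/* auth really failed: go StandAlone */
	}

Changing the !CONFIG_CRYPTO_HMAC stub to return -1 instead of 0
likewise keeps a kernel that cannot do CRAM-HMAC from retrying the
connection forever.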
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index be625475cf6d..4b22feb01a0c 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -503,16 +503,15 @@ int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
 		return 0;
 	}
 
-	if (blk_stack_limits(limits, &q->limits, start << 9) < 0)
-		DMWARN("%s: target device %s is misaligned: "
+	if (bdev_stack_limits(limits, bdev, start) < 0)
+		DMWARN("%s: adding target device %s caused an alignment inconsistency: "
 		       "physical_block_size=%u, logical_block_size=%u, "
 		       "alignment_offset=%u, start=%llu",
 		       dm_device_name(ti->table->md), bdevname(bdev, b),
 		       q->limits.physical_block_size,
 		       q->limits.logical_block_size,
 		       q->limits.alignment_offset,
-		       (unsigned long long) start << 9);
-
+		       (unsigned long long) start << SECTOR_SHIFT);
 
 	/*
 	 * Check if merge fn is supported.
@@ -1026,9 +1025,9 @@ combine_limits:
 	 * for the table.
 	 */
 	if (blk_stack_limits(limits, &ti_limits, 0) < 0)
-		DMWARN("%s: target device "
+		DMWARN("%s: adding target device "
 		       "(start sect %llu len %llu) "
-		       "is misaligned",
+		       "caused an alignment inconsistency",
 		       dm_device_name(table->md),
 		       (unsigned long long) ti->begin,
 		       (unsigned long long) ti->len);
@@ -1080,15 +1079,6 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 			       struct queue_limits *limits)
 {
 	/*
-	 * Each target device in the table has a data area that should normally
-	 * be aligned such that the DM device's alignment_offset is 0.
-	 * FIXME: Propagate alignment_offsets up the stack and warn of
-	 * sub-optimal or inconsistent settings.
-	 */
-	limits->alignment_offset = 0;
-	limits->misaligned = 0;
-
-	/*
 	 * Copy table's limits to the DM device's request_queue
 	 */
 	q->limits = *limits;
diff --git a/fs/bio.c b/fs/bio.c
index 76e6713abf94..12429c9553eb 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -78,7 +78,7 @@ static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
 
 	i = 0;
 	while (i < bio_slab_nr) {
-		struct bio_slab *bslab = &bio_slabs[i];
+		bslab = &bio_slabs[i];
 
 		if (!bslab->slab && entry == -1)
 			entry = i;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 9b98173a8184..5c8018977efa 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -938,6 +938,8 @@ extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
 extern void blk_set_default_limits(struct queue_limits *lim);
 extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 			    sector_t offset);
+extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
+			     sector_t offset);
 extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
 			      sector_t offset);
 extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
@@ -1148,8 +1150,11 @@ static inline int queue_discard_alignment(struct request_queue *q)
 static inline int queue_sector_discard_alignment(struct request_queue *q,
 						 sector_t sector)
 {
-	return ((sector << 9) - q->limits.discard_alignment)
-		& (q->limits.discard_granularity - 1);
+	struct queue_limits *lim = &q->limits;
+	unsigned int alignment = (sector << 9) & (lim->discard_granularity - 1);
+
+	return (lim->discard_granularity + lim->discard_alignment - alignment)
+		& (lim->discard_granularity - 1);
 }
 
 static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
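The rewritten queue_sector_discard_alignment() computes
(granularity + discard_alignment - offset) mod granularity, which
always lands in [0, granularity) and effectively gives the distance to
the next aligned discard boundary rather than the distance past the
previous one. A worked example with made-up values (standalone
userspace code, not the kernel inline itself):

	#include <stdio.h>

	int main(void)
	{
		unsigned long long sector = 1;		/* 512 bytes into the device */
		unsigned int granularity = 1048576;	/* 1 MiB discard granule */
		unsigned int discard_alignment = 0;

		/* Offset of this sector within its discard granule. */
		unsigned int offset = (sector << 9) & (granularity - 1);

		/* Bytes from here to the next aligned discard boundary;
		 * mirrors the new inline above. */
		unsigned int pad = (granularity + discard_alignment - offset)
				   & (granularity - 1);

		printf("%u bytes to the next discard boundary\n", pad);
		return 0;	/* prints: 1048064 */
	}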
diff --git a/include/linux/drbd.h b/include/linux/drbd.h
index e84f4733cb55..78962272338a 100644
--- a/include/linux/drbd.h
+++ b/include/linux/drbd.h
@@ -53,7 +53,7 @@
 
 
 extern const char *drbd_buildtag(void);
-#define REL_VERSION "8.3.6"
+#define REL_VERSION "8.3.7"
 #define API_VERSION 88
 #define PRO_VERSION_MIN 86
 #define PRO_VERSION_MAX 91
diff --git a/include/linux/drbd_nl.h b/include/linux/drbd_nl.h
index db5721ad50d1..a4d82f895994 100644
--- a/include/linux/drbd_nl.h
+++ b/include/linux/drbd_nl.h
@@ -69,6 +69,7 @@ NL_PACKET(disconnect, 6, )
 
 NL_PACKET(resize, 7,
 	NL_INT64(	29,	T_MAY_IGNORE,	resize_size)
+	NL_BIT(		68,	T_MAY_IGNORE,	resize_force)
 )
 
 NL_PACKET(syncer_conf, 8,
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index c6c0c41af35f..9717081c75ad 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -256,9 +256,9 @@ extern struct hd_struct *disk_map_sector_rcu(struct gendisk *disk,
 #define part_stat_read(part, field)					\
 ({									\
 	typeof((part)->dkstats->field) res = 0;				\
-	int i;								\
-	for_each_possible_cpu(i)					\
-		res += per_cpu_ptr((part)->dkstats, i)->field;		\
+	unsigned int _cpu;						\
+	for_each_possible_cpu(_cpu)					\
+		res += per_cpu_ptr((part)->dkstats, _cpu)->field;	\
 	res;								\
 })
 
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
index a63235996309..78ef023227d4 100644
--- a/include/linux/iocontext.h
+++ b/include/linux/iocontext.h
@@ -4,32 +4,6 @@
 #include <linux/radix-tree.h>
 #include <linux/rcupdate.h>
 
-/*
- * This is the per-process anticipatory I/O scheduler state.
- */
-struct as_io_context {
-	spinlock_t lock;
-
-	void (*dtor)(struct as_io_context *aic); /* destructor */
-	void (*exit)(struct as_io_context *aic); /* called on task exit */
-
-	unsigned long state;
-	atomic_t nr_queued; /* queued reads & sync writes */
-	atomic_t nr_dispatched; /* number of requests gone to the drivers */
-
-	/* IO History tracking */
-	/* Thinktime */
-	unsigned long last_end_request;
-	unsigned long ttime_total;
-	unsigned long ttime_samples;
-	unsigned long ttime_mean;
-	/* Layout pattern */
-	unsigned int seek_samples;
-	sector_t last_request_pos;
-	u64 seek_total;
-	sector_t seek_mean;
-};
-
 struct cfq_queue;
 struct cfq_io_context {
 	void *key;
@@ -78,7 +52,6 @@ struct io_context {
 	unsigned long last_waited; /* Time last woken after wait for request */
 	int nr_batch_requests;     /* Number of requests left in the batch */
 
-	struct as_io_context *aic;
 	struct radix_tree_root radix_root;
 	struct hlist_head cic_list;
 	void *ioc_data;