author     Philipp Reisner <philipp.reisner@linbit.com>  2013-11-22 07:22:13 -0500
committer  Philipp Reisner <philipp.reisner@linbit.com>  2014-07-10 09:22:20 -0400
commit     8fe39aac0578cbb0abf27e1be70ff581e0c1d836 (patch)
tree       3a3f8c724dec46a06ecf2d490905a3f6fbd80045 /drivers/block/drbd
parent     e952658020c5150ad4987d313e25e8e2fb38d529 (diff)
drbd: device->ldev is not guaranteed on a D_ATTACHING disk
Some parts of the code assumed that get_ldev_if_state(device, D_ATTACHING)
is sufficient to access the ldev member of the device object. That was
wrong. ldev may not be there, or might be freed at any time, while the
device has a disk state of D_ATTACHING.

bm_rw()
  Documented that drbd_bm_read() is only called from drbd_adm_attach().
  drbd_bm_write() is only called when a reference is held, and it is
  documented that a caller has to hold a reference before calling
  drbd_bm_write().

drbd_bm_write_page()
  Use get_ldev() instead of get_ldev_if_state(device, D_ATTACHING).

drbd_bmio_set_n_write()
  No longer use get_ldev_if_state(device, D_ATTACHING). All callers
  hold a reference to ldev now.

drbd_bmio_clear_n_write()
  All callers were holding a reference to ldev anyway. Remove the
  misleading get_ldev_if_state(device, D_ATTACHING).

drbd_reconsider_max_bio_size()
  Removed the get_ldev_if_state(device, D_ATTACHING). All callers now
  pass a struct drbd_backing_dev * when they hold a proper reference,
  or a NULL pointer. Before this fix, the receiver could trigger a
  NULL pointer dereference in drbd_reconsider_max_bio_size().

drbd_bump_write_ordering()
  Used get_ldev_if_state(device, D_ATTACHING) under the same wrong
  assumption. Remove it, and allow the caller to pass in a struct
  drbd_backing_dev * when the caller knows that accessing this bdev
  is safe.

Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
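To illustrate the reference rule this patch enforces, here is a minimal
user-space sketch. The fake_* names are hypothetical stand-ins, not the
kernel API; the real get_ldev()/put_ldev() also interact with the disk
state machine. The point is only that ldev may be dereferenced exclusively
between a successful get and the matching put, because the last put frees
it.

/* Illustrative sketch only; fake_* names are not the kernel API. */
#include <stdio.h>
#include <stdlib.h>

struct fake_backing_dev { int dummy; };

struct fake_device {
	int local_cnt;                 /* analogous to device->local_cnt */
	struct fake_backing_dev *ldev; /* freed when the count drops to 0 */
};

/* Take a reference iff the backing device is still there. */
static int fake_get_ldev(struct fake_device *device)
{
	if (!device->ldev)
		return 0;
	device->local_cnt++;
	return 1;
}

/* Drop a reference; the last one frees ldev. */
static void fake_put_ldev(struct fake_device *device)
{
	if (--device->local_cnt == 0) {
		free(device->ldev);
		device->ldev = NULL;
	}
}

/* Safe pattern: only touch device->ldev between get and put. */
static void safe_worker(struct fake_device *device)
{
	if (!fake_get_ldev(device))
		return; /* no disk: bail out instead of dereferencing */
	printf("ldev is pinned at %p\n", (void *)device->ldev);
	fake_put_ldev(device);
}

int main(void)
{
	struct fake_device dev = {
		.local_cnt = 1, /* attach holds the initial reference */
		.ldev = malloc(sizeof(struct fake_backing_dev)),
	};
	safe_worker(&dev);
	fake_put_ldev(&dev); /* detach drops the last reference */
	safe_worker(&dev);   /* now a no-op: ldev is gone */
	return 0;
}

The second safe_worker() call is a no-op because the last reference has
already freed ldev; that is exactly the window which relying on
get_ldev_if_state(device, D_ATTACHING) left open.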
Diffstat (limited to 'drivers/block/drbd')
-rw-r--r--  drivers/block/drbd/drbd_bitmap.c   |  4
-rw-r--r--  drivers/block/drbd/drbd_int.h      |  9
-rw-r--r--  drivers/block/drbd/drbd_main.c     | 36
-rw-r--r--  drivers/block/drbd/drbd_nl.c       | 41
-rw-r--r--  drivers/block/drbd/drbd_receiver.c | 43
5 files changed, 78 insertions, 55 deletions
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index 1aa29f8fdfe1..ed310415020b 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -1085,6 +1085,8 @@ static int bm_rw(struct drbd_device *device, int rw, unsigned flags, unsigned la
 		kfree(ctx);
 		return -ENODEV;
 	}
+	/* Here D_ATTACHING is sufficient since drbd_bm_read() is called only from
+	   drbd_adm_attach(), after device->ldev was assigned. */
 
 	if (!ctx->flags)
 		WARN_ON(!(BM_LOCKED_MASK & b->bm_flags));
@@ -1260,7 +1262,7 @@ int drbd_bm_write_page(struct drbd_device *device, unsigned int idx) __must_hold
 		.kref = { ATOMIC_INIT(2) },
 	};
 
-	if (!get_ldev_if_state(device, D_ATTACHING)) {  /* put is in bm_aio_ctx_destroy() */
+	if (!get_ldev(device)) {  /* put is in bm_aio_ctx_destroy() */
 		drbd_err(device, "ASSERT FAILED: get_ldev_if_state() == 1 in drbd_bm_write_page()\n");
 		kfree(ctx);
 		return -ENODEV;
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 1ef2474e8f11..c87bc8e8fd82 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -984,8 +984,8 @@ extern int drbd_bitmap_io(struct drbd_device *device,
 extern int drbd_bitmap_io_from_worker(struct drbd_device *device,
 		int (*io_fn)(struct drbd_device *),
 		char *why, enum bm_flag flags);
-extern int drbd_bmio_set_n_write(struct drbd_device *device);
-extern int drbd_bmio_clear_n_write(struct drbd_device *device);
+extern int drbd_bmio_set_n_write(struct drbd_device *device) __must_hold(local);
+extern int drbd_bmio_clear_n_write(struct drbd_device *device) __must_hold(local);
 extern void drbd_ldev_destroy(struct drbd_device *device);
 
 /* Meta data layout
@@ -1313,7 +1313,7 @@ enum determine_dev_size {
 extern enum determine_dev_size
 drbd_determine_dev_size(struct drbd_device *, enum dds_flags, struct resize_parms *) __must_hold(local);
 extern void resync_after_online_grow(struct drbd_device *);
-extern void drbd_reconsider_max_bio_size(struct drbd_device *device);
+extern void drbd_reconsider_max_bio_size(struct drbd_device *device, struct drbd_backing_dev *bdev);
 extern enum drbd_state_rv drbd_set_role(struct drbd_device *device,
 					enum drbd_role new_role,
 					int force);
@@ -1479,7 +1479,8 @@ static inline void drbd_generic_make_request(struct drbd_device *device,
 	generic_make_request(bio);
 }
 
-void drbd_bump_write_ordering(struct drbd_resource *resource, enum write_ordering_e wo);
+void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backing_dev *bdev,
+			      enum write_ordering_e wo);
 
 /* drbd_proc.c */
 extern struct proc_dir_entry *drbd_proc;
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 17b9a237f2e6..a6af93528d57 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -3466,23 +3466,19 @@ void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local)
  *
  * Sets all bits in the bitmap and writes the whole bitmap to stable storage.
  */
-int drbd_bmio_set_n_write(struct drbd_device *device)
+int drbd_bmio_set_n_write(struct drbd_device *device) __must_hold(local)
 {
 	int rv = -EIO;
 
-	if (get_ldev_if_state(device, D_ATTACHING)) {
-		drbd_md_set_flag(device, MDF_FULL_SYNC);
-		drbd_md_sync(device);
-		drbd_bm_set_all(device);
-
-		rv = drbd_bm_write(device);
+	drbd_md_set_flag(device, MDF_FULL_SYNC);
+	drbd_md_sync(device);
+	drbd_bm_set_all(device);
 
-		if (!rv) {
-			drbd_md_clear_flag(device, MDF_FULL_SYNC);
-			drbd_md_sync(device);
-		}
+	rv = drbd_bm_write(device);
 
-		put_ldev(device);
+	if (!rv) {
+		drbd_md_clear_flag(device, MDF_FULL_SYNC);
+		drbd_md_sync(device);
 	}
 
 	return rv;
@@ -3494,18 +3490,11 @@ int drbd_bmio_set_n_write(struct drbd_device *device)
  *
  * Clears all bits in the bitmap and writes the whole bitmap to stable storage.
  */
-int drbd_bmio_clear_n_write(struct drbd_device *device)
+int drbd_bmio_clear_n_write(struct drbd_device *device) __must_hold(local)
 {
-	int rv = -EIO;
-
 	drbd_resume_al(device);
-	if (get_ldev_if_state(device, D_ATTACHING)) {
-		drbd_bm_clear_all(device);
-		rv = drbd_bm_write(device);
-		put_ldev(device);
-	}
-
-	return rv;
+	drbd_bm_clear_all(device);
+	return drbd_bm_write(device);
 }
 
 static int w_bitmap_io(struct drbd_work *w, int unused)
@@ -3603,6 +3592,9 @@ static int w_go_diskless(struct drbd_work *w, int unused)
  * that drbd_set_out_of_sync() can not be called. This function MAY ONLY be
  * called from worker context. It MUST NOT be used while a previous such
  * work is still pending!
+ *
+ * Its worker function encloses the call of io_fn() by get_ldev() and
+ * put_ldev().
  */
 void drbd_queue_bitmap_io(struct drbd_device *device,
 			  int (*io_fn)(struct drbd_device *),
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 43fad2c1ba01..25f4b6f67c21 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -1110,15 +1110,16 @@ static int drbd_check_al_size(struct drbd_device *device, struct disk_conf *dc)
 	return 0;
 }
 
-static void drbd_setup_queue_param(struct drbd_device *device, unsigned int max_bio_size)
+static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backing_dev *bdev,
+				   unsigned int max_bio_size)
 {
 	struct request_queue * const q = device->rq_queue;
 	unsigned int max_hw_sectors = max_bio_size >> 9;
 	unsigned int max_segments = 0;
 	struct request_queue *b = NULL;
 
-	if (get_ldev_if_state(device, D_ATTACHING)) {
-		b = device->ldev->backing_bdev->bd_disk->queue;
+	if (bdev) {
+		b = bdev->backing_bdev->bd_disk->queue;
 
 		max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
 		rcu_read_lock();
@@ -1163,11 +1164,10 @@ static void drbd_setup_queue_param(struct drbd_device *device, unsigned int max_
 				 b->backing_dev_info.ra_pages);
 			q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
 		}
-		put_ldev(device);
 	}
 }
 
-void drbd_reconsider_max_bio_size(struct drbd_device *device)
+void drbd_reconsider_max_bio_size(struct drbd_device *device, struct drbd_backing_dev *bdev)
 {
 	unsigned int now, new, local, peer;
 
@@ -1175,10 +1175,9 @@ void drbd_reconsider_max_bio_size(struct drbd_device *device)
 	local = device->local_max_bio_size; /* Eventually last known value, from volatile memory */
 	peer = device->peer_max_bio_size; /* Eventually last known value, from meta data */
 
-	if (get_ldev_if_state(device, D_ATTACHING)) {
-		local = queue_max_hw_sectors(device->ldev->backing_bdev->bd_disk->queue) << 9;
+	if (bdev) {
+		local = queue_max_hw_sectors(bdev->backing_bdev->bd_disk->queue) << 9;
 		device->local_max_bio_size = local;
-		put_ldev(device);
 	}
 	local = min(local, DRBD_MAX_BIO_SIZE);
 
@@ -1211,7 +1210,7 @@ void drbd_reconsider_max_bio_size(struct drbd_device *device)
 	if (new != now)
 		drbd_info(device, "max BIO size = %u\n", new);
 
-	drbd_setup_queue_param(device, new);
+	drbd_setup_queue_param(device, bdev, new);
 }
 
 /* Starts the worker thread */
@@ -1399,7 +1398,7 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
 	else
 		set_bit(MD_NO_FUA, &device->flags);
 
-	drbd_bump_write_ordering(device->resource, WO_bdev_flush);
+	drbd_bump_write_ordering(device->resource, NULL, WO_bdev_flush);
 
 	drbd_md_sync(device);
 
@@ -1704,7 +1703,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
 	new_disk_conf = NULL;
 	new_plan = NULL;
 
-	drbd_bump_write_ordering(device->resource, WO_bdev_flush);
+	drbd_bump_write_ordering(device->resource, device->ldev, WO_bdev_flush);
 
 	if (drbd_md_test_flag(device->ldev, MDF_CRASHED_PRIMARY))
 		set_bit(CRASHED_PRIMARY, &device->flags);
@@ -1720,7 +1719,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
 	device->read_cnt = 0;
 	device->writ_cnt = 0;
 
-	drbd_reconsider_max_bio_size(device);
+	drbd_reconsider_max_bio_size(device, device->ldev);
 
 	/* If I am currently not R_PRIMARY,
 	 * but meta data primary indicator is set,
@@ -2648,8 +2647,13 @@ int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
 	if (retcode != NO_ERROR)
 		goto out;
 
-	mutex_lock(&adm_ctx.resource->adm_mutex);
 	device = adm_ctx.device;
+	if (!get_ldev(device)) {
+		retcode = ERR_NO_DISK;
+		goto out;
+	}
+
+	mutex_lock(&adm_ctx.resource->adm_mutex);
 
 	/* If there is still bitmap IO pending, probably because of a previous
 	 * resync just being finished, wait for it before requesting a new resync.
@@ -2673,6 +2677,7 @@ int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
 		retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_T));
 	drbd_resume_io(device);
 	mutex_unlock(&adm_ctx.resource->adm_mutex);
+	put_ldev(device);
 out:
 	drbd_adm_finish(&adm_ctx, info, retcode);
 	return 0;
@@ -2698,7 +2703,7 @@ out:
 	return 0;
 }
 
-static int drbd_bmio_set_susp_al(struct drbd_device *device)
+static int drbd_bmio_set_susp_al(struct drbd_device *device) __must_hold(local)
 {
 	int rv;
 
@@ -2719,8 +2724,13 @@ int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
 	if (retcode != NO_ERROR)
 		goto out;
 
-	mutex_lock(&adm_ctx.resource->adm_mutex);
 	device = adm_ctx.device;
+	if (!get_ldev(device)) {
+		retcode = ERR_NO_DISK;
+		goto out;
+	}
+
+	mutex_lock(&adm_ctx.resource->adm_mutex);
 
 	/* If there is still bitmap IO pending, probably because of a previous
 	 * resync just being finished, wait for it before requesting a new resync.
@@ -2747,6 +2757,7 @@ int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
 		retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_S));
 	drbd_resume_io(device);
 	mutex_unlock(&adm_ctx.resource->adm_mutex);
+	put_ldev(device);
 out:
 	drbd_adm_finish(&adm_ctx, info, retcode);
 	return 0;
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index c7084188c2ae..be0c3761cdc6 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1168,7 +1168,7 @@ static void drbd_flush(struct drbd_connection *connection)
 			/* would rather check on EOPNOTSUPP, but that is not reliable.
 			 * don't try again for ANY return value != 0
 			 * if (rv == -EOPNOTSUPP) */
-			drbd_bump_write_ordering(connection->resource, WO_drain_io);
+			drbd_bump_write_ordering(connection->resource, NULL, WO_drain_io);
 		}
 		put_ldev(device);
 		kref_put(&device->kref, drbd_destroy_device);
@@ -1257,14 +1257,29 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connectio
 	return rv;
 }
 
+static enum write_ordering_e
+max_allowed_wo(struct drbd_backing_dev *bdev, enum write_ordering_e wo)
+{
+	struct disk_conf *dc;
+
+	dc = rcu_dereference(bdev->disk_conf);
+
+	if (wo == WO_bdev_flush && !dc->disk_flushes)
+		wo = WO_drain_io;
+	if (wo == WO_drain_io && !dc->disk_drain)
+		wo = WO_none;
+
+	return wo;
+}
+
 /**
  * drbd_bump_write_ordering() - Fall back to an other write ordering method
  * @connection:	DRBD connection.
  * @wo:		Write ordering method to try.
  */
-void drbd_bump_write_ordering(struct drbd_resource *resource, enum write_ordering_e wo)
+void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backing_dev *bdev,
+			      enum write_ordering_e wo)
 {
-	struct disk_conf *dc;
 	struct drbd_device *device;
 	enum write_ordering_e pwo;
 	int vnr;
@@ -1278,17 +1293,18 @@ void drbd_bump_write_ordering(struct drbd_resource *resource, enum write_orderin
 	wo = min(pwo, wo);
 	rcu_read_lock();
 	idr_for_each_entry(&resource->devices, device, vnr) {
-		if (!get_ldev_if_state(device, D_ATTACHING))
-			continue;
-		dc = rcu_dereference(device->ldev->disk_conf);
-
-		if (wo == WO_bdev_flush && !dc->disk_flushes)
-			wo = WO_drain_io;
-		if (wo == WO_drain_io && !dc->disk_drain)
-			wo = WO_none;
-		put_ldev(device);
+		if (get_ldev(device)) {
+			wo = max_allowed_wo(device->ldev, wo);
+			if (device->ldev == bdev)
+				bdev = NULL;
+			put_ldev(device);
+		}
 	}
 	rcu_read_unlock();
+
+	if (bdev)
+		wo = max_allowed_wo(bdev, wo);
+
 	resource->write_ordering = wo;
 	if (pwo != resource->write_ordering || wo == WO_bdev_flush)
 		drbd_info(resource, "Method to ensure write ordering: %s\n", write_ordering_str[resource->write_ordering]);
@@ -3709,7 +3725,6 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info
 	}
 
 	device->peer_max_bio_size = be32_to_cpu(p->max_bio_size);
-	drbd_reconsider_max_bio_size(device);
 	/* Leave drbd_reconsider_max_bio_size() before drbd_determine_dev_size().
 	   In case we cleared the QUEUE_FLAG_DISCARD from our queue in
 	   drbd_reconsider_max_bio_size(), we can be sure that after
@@ -3717,6 +3732,7 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info
 
 	ddsf = be16_to_cpu(p->dds_flags);
 	if (get_ldev(device)) {
+		drbd_reconsider_max_bio_size(device, device->ldev);
 		dd = drbd_determine_dev_size(device, ddsf, NULL);
 		put_ldev(device);
 		if (dd == DS_ERROR)
@@ -3724,6 +3740,7 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info
 		drbd_md_sync(device);
 	} else {
 		/* I am diskless, need to accept the peer's size. */
+		drbd_reconsider_max_bio_size(device, NULL);
 		drbd_set_my_capacity(device, p_size);
 	}
 