author     Lars Ellenberg <lars.ellenberg@linbit.com>  2016-06-13 18:26:20 -0400
committer  Jens Axboe <axboe@fb.com>                   2016-06-13 23:43:05 -0400
commit     dd4f699da674010c58d7a2534215b4ca1ff13b13 (patch)
tree       1886d16dad1ae7100f2842238d74e9a548ad8f51
parent     f9ff0da564376d24d76c73d8a83b9bd7b723358f (diff)
drbd: when receiving P_TRIM, zero-out partial unaligned chunks
We can avoid spurious data divergence caused by partially-ignored
discards on certain backends with discard_zeroes_data=0, if we
translate partial unaligned discard requests into explicit zero-out.

The relevant use case is LVM/DM thin.

If DRBD is backed by devices with differing discard characteristics
on different nodes, discards may lead to data divergence (old data or
garbage left over on one backend, zeroes due to unmapped areas on the
other backend). Online verify would then potentially report tons of
spurious differences.

While probably harmless for most use cases (fstrim on a file system),
DRBD cannot have that: it would violate our promise to upper layers
that our data instances on the nodes are identical.

To be correct and play safe (make sure data is identical on both
copies), we would have to disable discard support if our local backend
(on a Primary) does not support "discard_zeroes_data=true". We would
also have to translate discards to explicit zero-out on the receiving
(typically: Secondary) side, unless the receiving side supports
"discard_zeroes_data=true". Both would allocate those blocks instead
of unmapping them, contrary to expectations.

LVM/DM thin sets discard_zeroes_data=0 because it silently ignores
discards to partial chunks. We can work around this by checking the
alignment first. For unaligned (wrt. alignment and granularity) or
too small discards, we zero-out the initial and/or trailing unaligned
partial chunks, but discard all the aligned full chunks.

At least for LVM/DM thin, the result is effectively
"discard_zeroes_data=1". Arguably it should behave this way
internally, by default, and we'll try to make that happen. But our
workaround is still valid for already deployed setups, and for other
devices that may behave this way.

Setting discard-zeroes-if-aligned=yes will allow DRBD to use discards,
and to announce discard_zeroes_data=true, even on backends that
announce discard_zeroes_data=false.

Setting discard-zeroes-if-aligned=no will cause DRBD to always fall
back to zero-out on the receiving side, and to not even announce
discard capabilities on the Primary, if the respective backend
announces discard_zeroes_data=false.

We used to ignore the discard_zeroes_data setting completely. To not
break established and expected behaviour, and suddenly cause fstrim on
thin-provisioned LVs to run out of space instead of freeing up space,
the default value is "yes".

Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
-rw-r--r--  drivers/block/drbd/drbd_int.h       |   2
-rw-r--r--  drivers/block/drbd/drbd_nl.c        |  15
-rw-r--r--  drivers/block/drbd/drbd_receiver.c  | 140
-rw-r--r--  include/linux/drbd_genl.h           |   1
-rw-r--r--  include/linux/drbd_limits.h         |   6
5 files changed, 134 insertions(+), 30 deletions(-)
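The core of the patch is the head/tail splitting performed by the new
drbd_issue_discard_or_zero_out() in drbd_receiver.c (full hunk below).
What follows is a minimal userspace C sketch of that arithmetic, not
the patch's code: zero_out() and discard() are hypothetical stand-ins
for blkdev_issue_zeroout() and blkdev_issue_discard(), and the chunk
size, alignment, and sector numbers are illustrative.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for blkdev_issue_zeroout() /
     * blkdev_issue_discard(): they only print the range acted on. */
    static void zero_out(uint64_t start, uint64_t nr)
    {
        printf("zero-out [%llu, %llu)\n",
               (unsigned long long)start, (unsigned long long)(start + nr));
    }

    static void discard(uint64_t start, uint64_t nr)
    {
        printf("discard  [%llu, %llu)\n",
               (unsigned long long)start, (unsigned long long)(start + nr));
    }

    /* Zero-out the unaligned head and tail of [start, start + nr_sectors),
     * discard only the full chunks aligned to 'granularity' (offset by
     * 'alignment', with alignment < granularity). */
    static void discard_or_zero_out(uint64_t start, uint64_t nr_sectors,
                                    uint64_t granularity, uint64_t alignment)
    {
        if (nr_sectors < granularity) {
            /* Too small to contain a full chunk: zero-out everything. */
            zero_out(start, nr_sectors);
            return;
        }

        if (start % granularity != alignment) {
            /* start + gran - (start + gran - align) % gran, as in the
             * patch: the first aligned chunk boundary above start. */
            uint64_t next = start + granularity -
                            (start + granularity - alignment) % granularity;
            zero_out(start, next - start);
            nr_sectors -= next - start;
            start = next;
        }

        /* Discard all full, aligned chunks in the middle. */
        uint64_t full = nr_sectors - nr_sectors % granularity;
        if (full)
            discard(start, full);
        start += full;
        nr_sectors -= full;

        /* Zero-out the unaligned tail, if any. */
        if (nr_sectors)
            zero_out(start, nr_sectors);
    }

    int main(void)
    {
        /* Illustrative numbers: 128-sector (64 KiB) chunks, no offset.
         * A 600-sector discard starting at sector 100 becomes:
         *   zero-out [100, 128), discard [128, 640), zero-out [640, 700). */
        discard_or_zero_out(100, 600, 128, 0);
        return 0;
    }

With these example numbers, only the 28-sector head and 60-sector tail
are explicitly zeroed; the 512 aligned sectors in between are still
unmapped by a real discard, which is the whole point of the workaround.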
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 9e338ecca7dd..f49ff861bfd8 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1488,7 +1488,7 @@ enum determine_dev_size {
 extern enum determine_dev_size
 drbd_determine_dev_size(struct drbd_device *, enum dds_flags, struct resize_parms *) __must_hold(local);
 extern void resync_after_online_grow(struct drbd_device *);
-extern void drbd_reconsider_max_bio_size(struct drbd_device *device, struct drbd_backing_dev *bdev);
+extern void drbd_reconsider_queue_parameters(struct drbd_device *device, struct drbd_backing_dev *bdev);
 extern enum drbd_state_rv drbd_set_role(struct drbd_device *device,
                                 enum drbd_role new_role,
                                 int force);
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 3643f9ca5a38..8d757d6f21e7 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -1161,13 +1161,17 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
         unsigned int max_hw_sectors = max_bio_size >> 9;
         unsigned int max_segments = 0;
         struct request_queue *b = NULL;
+        struct disk_conf *dc;
+        bool discard_zeroes_if_aligned = true;
 
         if (bdev) {
                 b = bdev->backing_bdev->bd_disk->queue;
 
                 max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
                 rcu_read_lock();
-                max_segments = rcu_dereference(device->ldev->disk_conf)->max_bio_bvecs;
+                dc = rcu_dereference(device->ldev->disk_conf);
+                max_segments = dc->max_bio_bvecs;
+                discard_zeroes_if_aligned = dc->discard_zeroes_if_aligned;
                 rcu_read_unlock();
 
                 blk_set_stacking_limits(&q->limits);
@@ -1185,7 +1189,7 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
 
                 blk_queue_max_discard_sectors(q, DRBD_MAX_DISCARD_SECTORS);
 
-                if (blk_queue_discard(b) &&
+                if (blk_queue_discard(b) && (b->limits.discard_zeroes_data || discard_zeroes_if_aligned) &&
                     (connection->cstate < C_CONNECTED || connection->agreed_features & FF_TRIM)) {
                         /* We don't care, stacking below should fix it for the local device.
                          * Whether or not it is a suitable granularity on the remote device
@@ -1216,7 +1220,7 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
         }
 }
 
-void drbd_reconsider_max_bio_size(struct drbd_device *device, struct drbd_backing_dev *bdev)
+void drbd_reconsider_queue_parameters(struct drbd_device *device, struct drbd_backing_dev *bdev)
 {
         unsigned int now, new, local, peer;
 
@@ -1488,6 +1492,9 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
         if (write_ordering_changed(old_disk_conf, new_disk_conf))
                 drbd_bump_write_ordering(device->resource, NULL, WO_BDEV_FLUSH);
 
+        if (old_disk_conf->discard_zeroes_if_aligned != new_disk_conf->discard_zeroes_if_aligned)
+                drbd_reconsider_queue_parameters(device, device->ldev);
+
         drbd_md_sync(device);
 
         if (device->state.conn >= C_CONNECTED) {
@@ -1866,7 +1873,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
         device->read_cnt = 0;
         device->writ_cnt = 0;
 
-        drbd_reconsider_max_bio_size(device, device->ldev);
+        drbd_reconsider_queue_parameters(device, device->ldev);
 
         /* If I am currently not R_PRIMARY,
          * but meta data primary indicator is set,
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 15b2a0dab0e5..cb80fb406361 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1443,6 +1443,108 @@ void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backin
         drbd_info(resource, "Method to ensure write ordering: %s\n", write_ordering_str[resource->write_ordering]);
 }
 
+/*
+ * We *may* ignore the discard-zeroes-data setting, if so configured.
+ *
+ * Assumption is that "discard_zeroes_data=0" is only set because the backend
+ * may ignore partial unaligned discards.
+ *
+ * LVM/DM thin as of at least
+ *   LVM version:     2.02.115(2)-RHEL7 (2015-01-28)
+ *   Library version: 1.02.93-RHEL7 (2015-01-28)
+ *   Driver version:  4.29.0
+ * still behaves this way.
+ *
+ * For unaligned (wrt. alignment and granularity) or too small discards,
+ * we zero-out the initial and/or trailing unaligned partial chunks,
+ * but discard all the aligned full chunks.
+ *
+ * At least for LVM/DM thin, the result is effectively "discard_zeroes_data=1".
+ */
+int drbd_issue_discard_or_zero_out(struct drbd_device *device, sector_t start, unsigned int nr_sectors, bool discard)
+{
+        struct block_device *bdev = device->ldev->backing_bdev;
+        struct request_queue *q = bdev_get_queue(bdev);
+        sector_t tmp, nr;
+        unsigned int max_discard_sectors, granularity;
+        int alignment;
+        int err = 0;
+
+        if (!discard)
+                goto zero_out;
+
+        /* Zero-sector (unknown) and one-sector granularities are the same. */
+        granularity = max(q->limits.discard_granularity >> 9, 1U);
+        alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
+
+        max_discard_sectors = min(q->limits.max_discard_sectors, (1U << 22));
+        max_discard_sectors -= max_discard_sectors % granularity;
+        if (unlikely(!max_discard_sectors))
+                goto zero_out;
+
+        if (nr_sectors < granularity)
+                goto zero_out;
+
+        tmp = start;
+        if (sector_div(tmp, granularity) != alignment) {
+                if (nr_sectors < 2*granularity)
+                        goto zero_out;
+                /* start + gran - (start + gran - align) % gran */
+                tmp = start + granularity - alignment;
+                tmp = start + granularity - sector_div(tmp, granularity);
+
+                nr = tmp - start;
+                err |= blkdev_issue_zeroout(bdev, start, nr, GFP_NOIO, 0);
+                nr_sectors -= nr;
+                start = tmp;
+        }
+        while (nr_sectors >= granularity) {
+                nr = min_t(sector_t, nr_sectors, max_discard_sectors);
+                err |= blkdev_issue_discard(bdev, start, nr, GFP_NOIO, 0);
+                nr_sectors -= nr;
+                start += nr;
+        }
+ zero_out:
+        if (nr_sectors) {
+                err |= blkdev_issue_zeroout(bdev, start, nr_sectors, GFP_NOIO, 0);
+        }
+        return err != 0;
+}
+
+static bool can_do_reliable_discards(struct drbd_device *device)
+{
+        struct request_queue *q = bdev_get_queue(device->ldev->backing_bdev);
+        struct disk_conf *dc;
+        bool can_do;
+
+        if (!blk_queue_discard(q))
+                return false;
+
+        if (q->limits.discard_zeroes_data)
+                return true;
+
+        rcu_read_lock();
+        dc = rcu_dereference(device->ldev->disk_conf);
+        can_do = dc->discard_zeroes_if_aligned;
+        rcu_read_unlock();
+        return can_do;
+}
+
+void drbd_issue_peer_discard(struct drbd_device *device, struct drbd_peer_request *peer_req)
+{
+        /* If the backend cannot discard, or does not guarantee
+         * read-back zeroes in discarded ranges, we fall back to
+         * zero-out.  Unless configuration specifically requested
+         * otherwise. */
+        if (!can_do_reliable_discards(device))
+                peer_req->flags |= EE_IS_TRIM_USE_ZEROOUT;
+
+        if (drbd_issue_discard_or_zero_out(device, peer_req->i.sector,
+            peer_req->i.size >> 9, !(peer_req->flags & EE_IS_TRIM_USE_ZEROOUT)))
+                peer_req->flags |= EE_WAS_ERROR;
+        drbd_endio_write_sec_final(peer_req);
+}
+
 /**
  * drbd_submit_peer_request()
  * @device:	DRBD device.
@@ -1474,7 +1576,13 @@ int drbd_submit_peer_request(struct drbd_device *device,
         unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT;
         int err = -ENOMEM;
 
-        if (peer_req->flags & EE_IS_TRIM_USE_ZEROOUT) {
+        /* TRIM/DISCARD: for now, always use the helper function
+         * blkdev_issue_zeroout(..., discard=true).
+         * It's synchronous, but it does the right thing wrt. bio splitting.
+         * Correctness first, performance later.  Next step is to code an
+         * asynchronous variant of the same.
+         */
+        if (peer_req->flags & EE_IS_TRIM) {
                 /* wait for all pending IO completions, before we start
                  * zeroing things out. */
                 conn_wait_active_ee_empty(peer_req->peer_device->connection);
@@ -1491,19 +1599,10 @@ int drbd_submit_peer_request(struct drbd_device *device,
                 spin_unlock_irq(&device->resource->req_lock);
         }
 
-                if (blkdev_issue_zeroout(device->ldev->backing_bdev,
-                        sector, data_size >> 9, GFP_NOIO, false))
-                        peer_req->flags |= EE_WAS_ERROR;
-                drbd_endio_write_sec_final(peer_req);
+                drbd_issue_peer_discard(device, peer_req);
                 return 0;
         }
 
-        /* Discards don't have any payload.
-         * But the scsi layer still expects a bio_vec it can use internally,
-         * see sd_setup_discard_cmnd() and blk_add_request_payload(). */
-        if (peer_req->flags & EE_IS_TRIM)
-                nr_pages = 1;
-
         /* In most cases, we will only need one bio. But in case the lower
          * level restrictions happen to be different at this offset on this
          * side than those of the sending peer, we may need to submit the
@@ -1529,11 +1628,6 @@ next_bio:
         bios = bio;
         ++n_bios;
 
-        if (op == REQ_OP_DISCARD) {
-                bio->bi_iter.bi_size = data_size;
-                goto submit;
-        }
-
         page_chain_for_each(page) {
                 unsigned len = min_t(unsigned, data_size, PAGE_SIZE);
                 if (!bio_add_page(bio, page, len, 0)) {
@@ -1555,7 +1649,6 @@ next_bio:
                 --nr_pages;
         }
         D_ASSERT(device, data_size == 0);
-submit:
         D_ASSERT(device, page == NULL);
 
         atomic_set(&peer_req->pending_bios, n_bios);
@@ -2424,10 +2517,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
         op = wire_flags_to_bio_op(dp_flags);
         op_flags = wire_flags_to_bio_flags(dp_flags);
         if (pi->cmd == P_TRIM) {
-                struct request_queue *q = bdev_get_queue(device->ldev->backing_bdev);
                 peer_req->flags |= EE_IS_TRIM;
-                if (!blk_queue_discard(q))
-                        peer_req->flags |= EE_IS_TRIM_USE_ZEROOUT;
                 D_ASSERT(peer_device, peer_req->i.size > 0);
                 D_ASSERT(peer_device, op == REQ_OP_DISCARD);
                 D_ASSERT(peer_device, peer_req->pages == NULL);
@@ -2498,7 +2588,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
          * and we wait for all pending requests, respectively wait for
          * active_ee to become empty in drbd_submit_peer_request();
          * better not add ourselves here. */
-        if ((peer_req->flags & EE_IS_TRIM_USE_ZEROOUT) == 0)
+        if ((peer_req->flags & EE_IS_TRIM) == 0)
                 list_add_tail(&peer_req->w.list, &device->active_ee);
         spin_unlock_irq(&device->resource->req_lock);
 
@@ -3916,14 +4006,14 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info
         }
 
         device->peer_max_bio_size = be32_to_cpu(p->max_bio_size);
-        /* Leave drbd_reconsider_max_bio_size() before drbd_determine_dev_size().
+        /* Leave drbd_reconsider_queue_parameters() before drbd_determine_dev_size().
            In case we cleared the QUEUE_FLAG_DISCARD from our queue in
-           drbd_reconsider_max_bio_size(), we can be sure that after
+           drbd_reconsider_queue_parameters(), we can be sure that after
            drbd_determine_dev_size() no REQ_DISCARDs are in the queue. */
 
         ddsf = be16_to_cpu(p->dds_flags);
         if (get_ldev(device)) {
-                drbd_reconsider_max_bio_size(device, device->ldev);
+                drbd_reconsider_queue_parameters(device, device->ldev);
                 dd = drbd_determine_dev_size(device, ddsf, NULL);
                 put_ldev(device);
                 if (dd == DS_ERROR)
@@ -3943,7 +4033,7 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info
          * However, if he sends a zero current size,
          * take his (user-capped or) backing disk size anyways.
          */
-        drbd_reconsider_max_bio_size(device, NULL);
+        drbd_reconsider_queue_parameters(device, NULL);
         drbd_set_my_capacity(device, p_csize ?: p_usize ?: p_size);
 }
 
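The net receive-side policy of the two helpers added above,
can_do_reliable_discards() and drbd_issue_peer_discard(), reduces to a
single predicate. A minimal sketch with an illustrative function name;
in the patch the three inputs come from blk_queue_discard(q),
q->limits.discard_zeroes_data, and disk_conf->discard_zeroes_if_aligned:

    #include <stdbool.h>

    /* Illustrative distillation: may the receiving side issue real
     * discards for P_TRIM, or must it fall back to explicit zero-out? */
    bool use_real_discard(bool backend_can_discard,
                          bool discard_zeroes_data,
                          bool discard_zeroes_if_aligned)
    {
        if (!backend_can_discard)
            return false;  /* no discard support at all: zero-out */
        if (discard_zeroes_data)
            return true;   /* backend already guarantees zeroed reads */
        /* Trust the aligned-discard workaround only if the admin left
         * discard-zeroes-if-aligned at its default of "yes". */
        return discard_zeroes_if_aligned;
    }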
diff --git a/include/linux/drbd_genl.h b/include/linux/drbd_genl.h
index ab649d8e9ed2..c934d3a96b5e 100644
--- a/include/linux/drbd_genl.h
+++ b/include/linux/drbd_genl.h
@@ -132,6 +132,7 @@ GENL_struct(DRBD_NLA_DISK_CONF, 3, disk_conf,
         __flg_field_def(18, DRBD_GENLA_F_MANDATORY, disk_drain, DRBD_DISK_DRAIN_DEF)
         __flg_field_def(19, DRBD_GENLA_F_MANDATORY, md_flushes, DRBD_MD_FLUSHES_DEF)
         __flg_field_def(23, 0 /* OPTIONAL */, al_updates, DRBD_AL_UPDATES_DEF)
+        __flg_field_def(24, 0 /* OPTIONAL */, discard_zeroes_if_aligned, DRBD_DISCARD_ZEROES_IF_ALIGNED)
 )
 
 GENL_struct(DRBD_NLA_RESOURCE_OPTS, 4, res_opts,
diff --git a/include/linux/drbd_limits.h b/include/linux/drbd_limits.h
index 2e4aad8d8ae8..a351c40087cd 100644
--- a/include/linux/drbd_limits.h
+++ b/include/linux/drbd_limits.h
@@ -210,6 +210,12 @@
 #define DRBD_MD_FLUSHES_DEF	1
 #define DRBD_TCP_CORK_DEF	1
 #define DRBD_AL_UPDATES_DEF	1
+/* We used to ignore the discard_zeroes_data setting.
+ * To not change established (and expected) behaviour,
+ * by default assume that, for discard_zeroes_data=0,
+ * we can make that an effective discard_zeroes_data=1,
+ * if we only explicitly zero-out unaligned partial chunks. */
+#define DRBD_DISCARD_ZEROES_IF_ALIGNED 1
 
 #define DRBD_ALLOW_TWO_PRIMARIES_DEF	0
 #define DRBD_ALWAYS_ASBP_DEF	0