Diffstat (limited to 'drivers/block/drbd/drbd_nl.c')
 drivers/block/drbd/drbd_nl.c | 110 ++++++++++++++++++++------------
 1 file changed, 66 insertions(+), 44 deletions(-)
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 3f2e16738080..1cd47df44bda 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -23,6 +23,8 @@
 
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/drbd.h>
 #include <linux/in.h>
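Note on the pr_fmt hunk above: defining pr_fmt() before the printk helpers are expanded makes every message from this file carry the module-name prefix automatically, which is what lets the next hunk shrink printk(KERN_ERR "drbd: ...") down to a bare pr_err(). A minimal userspace sketch of the same macro technique (the pr_err/pr_info stand-ins below are mocks, not the kernel's definitions):

#include <stdio.h>

/* Must be defined before the helper macros are used, mirroring how the
 * kernel expects pr_fmt to be defined before printk.h is pulled in. */
#define pr_fmt(fmt) "drbd: " fmt

/* Simplified stand-ins for the kernel's pr_err()/pr_info() helpers. */
#define pr_err(fmt, ...)  fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)
#define pr_info(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	pr_err("error sending genl reply\n"); /* prints "drbd: error sending genl reply" */
	return 0;
}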
@@ -85,7 +87,7 @@ static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
 {
 	genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb))));
 	if (genlmsg_reply(skb, info))
-		printk(KERN_ERR "drbd: error sending genl reply\n");
+		pr_err("error sending genl reply\n");
 }
 
 /* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: The only
@@ -558,8 +560,10 @@ void conn_try_outdate_peer_async(struct drbd_connection *connection)
 }
 
 enum drbd_state_rv
-drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force)
+drbd_set_role(struct drbd_device *const device, enum drbd_role new_role, int force)
 {
+	struct drbd_peer_device *const peer_device = first_peer_device(device);
+	struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
 	const int max_tries = 4;
 	enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
 	struct net_conf *nc;
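The two new locals cache the peer device and its connection once at entry, so the call sites below can use `connection` directly instead of re-evaluating first_peer_device(device)->connection each time, and a device without a peer yields a NULL connection instead of dereferencing a NULL peer_device. A self-contained mock of the pattern (the struct layouts are simplified stand-ins, not the real drbd definitions):

#include <stddef.h>

struct drbd_connection { int id; };
struct drbd_peer_device { struct drbd_connection *connection; };
struct drbd_device { struct drbd_peer_device *peer; };

static struct drbd_peer_device *first_peer_device(struct drbd_device *device)
{
	return device->peer; /* the real drbd walks a list; mocked here */
}

int main(void)
{
	struct drbd_device device = { .peer = NULL };

	/* Resolve once, guard against a missing peer, then reuse: */
	struct drbd_peer_device *const peer_device = first_peer_device(&device);
	struct drbd_connection *const connection =
		peer_device ? peer_device->connection : NULL;

	return connection ? 0 : 1; /* exits 1: no peer configured */
}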
@@ -607,7 +611,7 @@ drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force)
 	    device->state.disk == D_CONSISTENT && mask.pdsk == 0) {
 		D_ASSERT(device, device->state.pdsk == D_UNKNOWN);
 
-		if (conn_try_outdate_peer(first_peer_device(device)->connection)) {
+		if (conn_try_outdate_peer(connection)) {
 			val.disk = D_UP_TO_DATE;
 			mask.disk = D_MASK;
 		}
@@ -617,7 +621,7 @@ drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force)
 		if (rv == SS_NOTHING_TO_DO)
 			goto out;
 		if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
-			if (!conn_try_outdate_peer(first_peer_device(device)->connection) && force) {
+			if (!conn_try_outdate_peer(connection) && force) {
 				drbd_warn(device, "Forced into split brain situation!\n");
 				mask.pdsk = D_MASK;
 				val.pdsk = D_OUTDATED;
@@ -630,7 +634,7 @@ drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force)
 			   retry at most once more in this case. */
 			int timeo;
 			rcu_read_lock();
-			nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
+			nc = rcu_dereference(connection->net_conf);
 			timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
 			rcu_read_unlock();
 			schedule_timeout_interruptible(timeo);
@@ -659,19 +663,17 @@ drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force)
 	/* FIXME also wait for all pending P_BARRIER_ACK? */
 
 	if (new_role == R_SECONDARY) {
-		set_disk_ro(device->vdisk, true);
 		if (get_ldev(device)) {
 			device->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
 			put_ldev(device);
 		}
 	} else {
-		/* Called from drbd_adm_set_role only.
-		 * We are still holding the conf_update mutex. */
-		nc = first_peer_device(device)->connection->net_conf;
+		mutex_lock(&device->resource->conf_update);
+		nc = connection->net_conf;
 		if (nc)
 			nc->discard_my_data = 0; /* without copy; single bit op is atomic */
+		mutex_unlock(&device->resource->conf_update);
 
-		set_disk_ro(device->vdisk, false);
 		if (get_ldev(device)) {
 			if (((device->state.conn < C_CONNECTED ||
 			       device->state.pdsk <= D_FAILED)
@@ -689,12 +691,12 @@ drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force)
 	if (device->state.conn >= C_WF_REPORT_PARAMS) {
 		/* if this was forced, we should consider sync */
 		if (forced)
-			drbd_send_uuids(first_peer_device(device));
-		drbd_send_current_state(first_peer_device(device));
+			drbd_send_uuids(peer_device);
+		drbd_send_current_state(peer_device);
 	}
 
 	drbd_md_sync(device);
-
+	set_disk_ro(device->vdisk, new_role == R_SECONDARY);
 	kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE);
 out:
 	mutex_unlock(device->state_mutex);
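The two hunks above also consolidate set_disk_ro(): the per-branch calls that flipped the flag before the role change was committed are removed, and a single call after drbd_md_sync() derives the flag from the target role. A trivial sketch of the consolidated call (set_disk_ro() is mocked here; the real one toggles the gendisk's read-only policy):

#include <stdbool.h>
#include <stdio.h>

enum drbd_role { R_PRIMARY, R_SECONDARY };

/* Mock of set_disk_ro(); the real function flips the block device's
 * read-only flag. */
static void set_disk_ro(const char *disk, bool ro)
{
	printf("%s -> %s\n", disk, ro ? "read-only" : "read-write");
}

int main(void)
{
	enum drbd_role new_role = R_SECONDARY;

	/* One call derived from the target role, replacing the two
	 * per-branch calls the hunk removes, and placed after the
	 * metadata sync so the flag flips only once the new role has
	 * been persisted. */
	set_disk_ro("drbd0", new_role == R_SECONDARY);
	return 0;
}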
@@ -891,7 +893,7 @@ drbd_determine_dev_size(struct drbd_device *device, enum dds_flags flags, struct
 	 * still lock the act_log to not trigger ASSERTs there.
 	 */
 	drbd_suspend_io(device);
-	buffer = drbd_md_get_buffer(device); /* Lock meta-data IO */
+	buffer = drbd_md_get_buffer(device, __func__); /* Lock meta-data IO */
 	if (!buffer) {
 		drbd_resume_io(device);
 		return DS_ERROR;
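drbd_md_get_buffer() grows a second argument here and in adm_detach() further down: the caller passes __func__ so diagnostics can name who is holding the meta-data buffer. A userspace sketch of the idea (the holder bookkeeping is an assumption for illustration; the real function also handles reference counting and timeouts):

#include <stdio.h>

/* Remembered so a later "who has the buffer?" style warning can name
 * the holder. */
static const char *md_buffer_holder;

static void *drbd_md_get_buffer(const char *intent)
{
	static char buffer[4096];

	md_buffer_holder = intent; /* recorded for diagnostics */
	return buffer;
}

int main(void)
{
	void *buffer = drbd_md_get_buffer(__func__); /* holder = "main" */

	if (buffer)
		printf("md buffer locked by %s\n", md_buffer_holder);
	return 0;
}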
@@ -971,6 +973,10 @@ drbd_determine_dev_size(struct drbd_device *device, enum dds_flags flags, struct
 	if (la_size_changed || md_moved || rs) {
 		u32 prev_flags;
 
+		/* We do some synchronous IO below, which may take some time.
+		 * Clear the timer, to avoid scary "timer expired!" messages,
+		 * "Superblock" is written out at least twice below, anyways. */
+		del_timer(&device->md_sync_timer);
 		drbd_al_shrink(device); /* All extents inactive. */
 
 		prev_flags = md->flags;
@@ -1116,15 +1122,16 @@ static int drbd_check_al_size(struct drbd_device *device, struct disk_conf *dc)
 	return 0;
 }
 
-static void drbd_setup_queue_param(struct drbd_device *device, unsigned int max_bio_size)
+static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backing_dev *bdev,
+				   unsigned int max_bio_size)
 {
 	struct request_queue * const q = device->rq_queue;
 	unsigned int max_hw_sectors = max_bio_size >> 9;
 	unsigned int max_segments = 0;
 	struct request_queue *b = NULL;
 
-	if (get_ldev_if_state(device, D_ATTACHING)) {
-		b = device->ldev->backing_bdev->bd_disk->queue;
+	if (bdev) {
+		b = bdev->backing_bdev->bd_disk->queue;
 
 		max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
 		rcu_read_lock();
@@ -1169,11 +1176,10 @@ static void drbd_setup_queue_param(struct drbd_device *device, unsigned int max_
 				 b->backing_dev_info.ra_pages);
 			q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
 		}
-		put_ldev(device);
 	}
 }
 
-void drbd_reconsider_max_bio_size(struct drbd_device *device)
+void drbd_reconsider_max_bio_size(struct drbd_device *device, struct drbd_backing_dev *bdev)
 {
 	unsigned int now, new, local, peer;
 
@@ -1181,10 +1187,9 @@ void drbd_reconsider_max_bio_size(struct drbd_device *device)
 	local = device->local_max_bio_size; /* Eventually last known value, from volatile memory */
 	peer = device->peer_max_bio_size; /* Eventually last known value, from meta data */
 
-	if (get_ldev_if_state(device, D_ATTACHING)) {
-		local = queue_max_hw_sectors(device->ldev->backing_bdev->bd_disk->queue) << 9;
+	if (bdev) {
+		local = queue_max_hw_sectors(bdev->backing_bdev->bd_disk->queue) << 9;
 		device->local_max_bio_size = local;
-		put_ldev(device);
 	}
 	local = min(local, DRBD_MAX_BIO_SIZE);
 
@@ -1217,7 +1222,7 @@ void drbd_reconsider_max_bio_size(struct drbd_device *device)
 	if (new != now)
 		drbd_info(device, "max BIO size = %u\n", new);
 
-	drbd_setup_queue_param(device, new);
+	drbd_setup_queue_param(device, bdev, new);
 }
 
 /* Starts the worker thread */
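The drbd_setup_queue_param()/drbd_reconsider_max_bio_size() hunks all follow one refactor: instead of each helper pinning the local disk via get_ldev_if_state()/put_ldev(), the caller passes the backing device down explicitly, with NULL meaning "currently diskless". A simplified mock of that calling convention (the types are stand-ins, not the real drbd_int.h definitions):

#include <stdio.h>

struct queue { unsigned int max_hw_sectors; };
struct backing_dev { struct queue q; };

/* After the refactor the caller decides whether a backing device is
 * pinned and hands it down (possibly NULL), instead of every helper
 * taking and dropping its own reference. */
static unsigned int local_max_bio(struct backing_dev *bdev,
				  unsigned int fallback)
{
	return bdev ? bdev->q.max_hw_sectors << 9 : fallback;
}

int main(void)
{
	struct backing_dev bdev = { .q = { .max_hw_sectors = 255 } };

	printf("attached: %u\n", local_max_bio(&bdev, 4096));
	printf("diskless: %u\n", local_max_bio(NULL, 4096));
	return 0;
}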
@@ -1299,6 +1304,13 @@ static unsigned int drbd_al_extents_max(struct drbd_backing_dev *bdev)
 	return (al_size_4k - 1) * AL_CONTEXT_PER_TRANSACTION;
 }
 
+static bool write_ordering_changed(struct disk_conf *a, struct disk_conf *b)
+{
+	return a->disk_barrier != b->disk_barrier ||
+	       a->disk_flushes != b->disk_flushes ||
+	       a->disk_drain != b->disk_drain;
+}
+
 int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
 {
 	struct drbd_config_context adm_ctx;
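The new write_ordering_changed() helper exists so the disk-options handler (next hunk) only re-evaluates write ordering when one of the three relevant knobs actually changed. A compilable mock showing how the helper gates the call (struct disk_conf is reduced here to just those three fields):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for drbd's struct disk_conf; only the write-ordering knobs
 * the helper compares are mocked. */
struct disk_conf {
	bool disk_barrier;
	bool disk_flushes;
	bool disk_drain;
};

static bool write_ordering_changed(struct disk_conf *a, struct disk_conf *b)
{
	return a->disk_barrier != b->disk_barrier ||
	       a->disk_flushes != b->disk_flushes ||
	       a->disk_drain != b->disk_drain;
}

int main(void)
{
	struct disk_conf old_conf = { true, true, true };
	struct disk_conf new_conf = { true, false, true };

	/* Mirrors the next hunk: only re-evaluate write ordering when a
	 * relevant option actually changed. */
	if (write_ordering_changed(&old_conf, &new_conf))
		puts("would call drbd_bump_write_ordering()");
	return 0;
}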
@@ -1405,7 +1417,8 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
 	else
 		set_bit(MD_NO_FUA, &device->flags);
 
-	drbd_bump_write_ordering(first_peer_device(device)->connection, WO_bdev_flush);
+	if (write_ordering_changed(old_disk_conf, new_disk_conf))
+		drbd_bump_write_ordering(device->resource, NULL, WO_bdev_flush);
 
 	drbd_md_sync(device);
 
@@ -1440,6 +1453,8 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
 {
 	struct drbd_config_context adm_ctx;
 	struct drbd_device *device;
+	struct drbd_peer_device *peer_device;
+	struct drbd_connection *connection;
 	int err;
 	enum drbd_ret_code retcode;
 	enum determine_dev_size dd;
@@ -1462,7 +1477,9 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
 
 	device = adm_ctx.device;
 	mutex_lock(&adm_ctx.resource->adm_mutex);
-	conn_reconfig_start(first_peer_device(device)->connection);
+	peer_device = first_peer_device(device);
+	connection = peer_device ? peer_device->connection : NULL;
+	conn_reconfig_start(connection);
 
 	/* if you want to reconfigure, please tear down first */
 	if (device->state.disk > D_DISKLESS) {
@@ -1473,7 +1490,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
 	 * drbd_ldev_destroy is done already, we may end up here very fast,
 	 * e.g. if someone calls attach from the on-io-error handler,
 	 * to realize a "hot spare" feature (not that I'd recommend that) */
-	wait_event(device->misc_wait, !atomic_read(&device->local_cnt));
+	wait_event(device->misc_wait, !test_bit(GOING_DISKLESS, &device->flags));
 
 	/* make sure there is no leftover from previous force-detach attempts */
 	clear_bit(FORCE_DETACH, &device->flags);
@@ -1529,7 +1546,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
 		goto fail;
 
 	rcu_read_lock();
-	nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
+	nc = rcu_dereference(connection->net_conf);
 	if (nc) {
 		if (new_disk_conf->fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) {
 			rcu_read_unlock();
@@ -1649,7 +1666,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
 	 */
 	wait_event(device->misc_wait, !atomic_read(&device->ap_pending_cnt) || drbd_suspended(device));
 	/* and for any other previously queued work */
-	drbd_flush_workqueue(&first_peer_device(device)->connection->sender_work);
+	drbd_flush_workqueue(&connection->sender_work);
 
 	rv = _drbd_request_state(device, NS(disk, D_ATTACHING), CS_VERBOSE);
 	retcode = rv; /* FIXME: Type mismatch. */
@@ -1710,7 +1727,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
 	new_disk_conf = NULL;
 	new_plan = NULL;
 
-	drbd_bump_write_ordering(first_peer_device(device)->connection, WO_bdev_flush);
+	drbd_bump_write_ordering(device->resource, device->ldev, WO_bdev_flush);
 
 	if (drbd_md_test_flag(device->ldev, MDF_CRASHED_PRIMARY))
 		set_bit(CRASHED_PRIMARY, &device->flags);
@@ -1726,7 +1743,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
 	device->read_cnt = 0;
 	device->writ_cnt = 0;
 
-	drbd_reconsider_max_bio_size(device);
+	drbd_reconsider_max_bio_size(device, device->ldev);
 
 	/* If I am currently not R_PRIMARY,
 	 * but meta data primary indicator is set,
@@ -1845,7 +1862,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
 
 	kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE);
 	put_ldev(device);
-	conn_reconfig_done(first_peer_device(device)->connection);
+	conn_reconfig_done(connection);
 	mutex_unlock(&adm_ctx.resource->adm_mutex);
 	drbd_adm_finish(&adm_ctx, info, retcode);
 	return 0;
@@ -1856,7 +1873,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
 	drbd_force_state(device, NS(disk, D_DISKLESS));
 	drbd_md_sync(device);
  fail:
-	conn_reconfig_done(first_peer_device(device)->connection);
+	conn_reconfig_done(connection);
 	if (nbc) {
 		if (nbc->backing_bdev)
 			blkdev_put(nbc->backing_bdev,
@@ -1888,7 +1905,7 @@ static int adm_detach(struct drbd_device *device, int force)
 	}
 
 	drbd_suspend_io(device); /* so no-one is stuck in drbd_al_begin_io */
-	drbd_md_get_buffer(device); /* make sure there is no in-flight meta-data IO */
+	drbd_md_get_buffer(device, __func__); /* make sure there is no in-flight meta-data IO */
 	retcode = drbd_request_state(device, NS(disk, D_FAILED));
 	drbd_md_put_buffer(device);
 	/* D_FAILED will transition to DISKLESS. */
@@ -2654,8 +2671,13 @@ int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
 	if (retcode != NO_ERROR)
 		goto out;
 
-	mutex_lock(&adm_ctx.resource->adm_mutex);
 	device = adm_ctx.device;
+	if (!get_ldev(device)) {
+		retcode = ERR_NO_DISK;
+		goto out;
+	}
+
+	mutex_lock(&adm_ctx.resource->adm_mutex);
 
 	/* If there is still bitmap IO pending, probably because of a previous
 	 * resync just being finished, wait for it before requesting a new resync.
@@ -2679,6 +2701,7 @@ int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
 	retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_T));
 	drbd_resume_io(device);
 	mutex_unlock(&adm_ctx.resource->adm_mutex);
+	put_ldev(device);
 out:
 	drbd_adm_finish(&adm_ctx, info, retcode);
 	return 0;
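Both invalidate handlers now bracket the whole operation in get_ldev()/put_ldev(): the reference is taken before the admin mutex, failure maps to ERR_NO_DISK, and the release happens only after the state request, so the local disk cannot detach in the middle of setting up a resync. A userspace mock of the try-acquire/release pattern (simplified; the real get_ldev() also checks device->state.disk):

#include <stdbool.h>
#include <stdio.h>

static int local_cnt;
static bool disk_attached = true;

/* Pin the local disk; fails when it is gone, mirroring the
 * ERR_NO_DISK bail-out in the hunks above. */
static bool get_ldev(void)
{
	if (!disk_attached)
		return false;
	local_cnt++;
	return true;
}

static void put_ldev(void)
{
	local_cnt--;
}

int main(void)
{
	if (!get_ldev()) {
		fprintf(stderr, "no local disk\n");
		return 1;
	}
	/* ... do resync setup while the disk cannot go away ... */
	put_ldev();
	return 0;
}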
@@ -2704,7 +2727,7 @@ out:
 	return 0;
 }
 
-static int drbd_bmio_set_susp_al(struct drbd_device *device)
+static int drbd_bmio_set_susp_al(struct drbd_device *device) __must_hold(local)
 {
 	int rv;
 
@@ -2725,8 +2748,13 @@ int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
 	if (retcode != NO_ERROR)
 		goto out;
 
-	mutex_lock(&adm_ctx.resource->adm_mutex);
 	device = adm_ctx.device;
+	if (!get_ldev(device)) {
+		retcode = ERR_NO_DISK;
+		goto out;
+	}
+
+	mutex_lock(&adm_ctx.resource->adm_mutex);
 
 	/* If there is still bitmap IO pending, probably because of a previous
 	 * resync just being finished, wait for it before requesting a new resync.
@@ -2753,6 +2781,7 @@ int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
 	retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_S));
 	drbd_resume_io(device);
 	mutex_unlock(&adm_ctx.resource->adm_mutex);
+	put_ldev(device);
 out:
 	drbd_adm_finish(&adm_ctx, info, retcode);
 	return 0;
@@ -2892,7 +2921,7 @@ static struct drbd_connection *the_only_connection(struct drbd_resource *resourc
 	return list_first_entry(&resource->connections, struct drbd_connection, connections);
 }
 
-int nla_put_status_info(struct sk_buff *skb, struct drbd_device *device,
+static int nla_put_status_info(struct sk_buff *skb, struct drbd_device *device,
 		const struct sib_info *sib)
 {
 	struct drbd_resource *resource = device->resource;
@@ -3622,13 +3651,6 @@ void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib)
 	unsigned seq;
 	int err = -ENOMEM;
 
-	if (sib->sib_reason == SIB_SYNC_PROGRESS) {
-		if (time_after(jiffies, device->rs_last_bcast + HZ))
-			device->rs_last_bcast = jiffies;
-		else
-			return;
-	}
-
 	seq = atomic_inc_return(&drbd_genl_seq);
 	msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
 	if (!msg)
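For reference, the block deleted above implemented a once-per-second throttle on SIB_SYNC_PROGRESS broadcasts; removing it means every sync-progress event reaches netlink again (presumably any rate limiting now lives elsewhere; this diff alone does not show where). A userspace sketch of the jiffies/time_after() pattern the deleted lines used (jiffies and HZ are mocked):

#include <stdbool.h>
#include <stdio.h>

/* Mocked tick counter; the kernel's jiffies advances in interrupt
 * context, here we bump it by hand. */
static unsigned long jiffies;
#define HZ 100
#define time_after(a, b) ((long)((b) - (a)) < 0)

static unsigned long rs_last_bcast;

/* The throttle the deleted hunk implemented: let at most one
 * sync-progress broadcast through per HZ ticks. */
static bool should_broadcast(void)
{
	if (time_after(jiffies, rs_last_bcast + HZ)) {
		rs_last_bcast = jiffies;
		return true;
	}
	return false;
}

int main(void)
{
	for (jiffies = 0; jiffies < 350; jiffies += 50)
		printf("t=%3lu: %s\n", jiffies,
		       should_broadcast() ? "broadcast" : "suppressed");
	return 0;
}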