author    Lars Ellenberg <lars.ellenberg@linbit.com>    2011-03-23 09:31:09 -0400
committer Philipp Reisner <philipp.reisner@linbit.com>  2012-11-08 10:44:20 -0500
commit    f399002e68e626e7bc443e6fcab1772704cc197f (patch)
tree      0d6abf88f3ebf5c619994c929d5baf728a3f579c /drivers/block
parent    6b75dced005c7f06b81934167e36bcfc690cc3a7 (diff)
drbd: distribute former syncer_conf settings to disk, connection, and resource level
This commit breaks the API again.

Move per-volume former syncer options into disk_conf.
Move per-connection former syncer options into net_conf.
Rename the remaining sync_conf to res_opts.

Syncer settings have been changeable at runtime, so we need to prepare
for these settings to be runtime-changeable in their new home as well.
Introduce new configuration operations, and share the netlink attribute
between "attach" (create new disk) and "disk-opts" (change options).
Same for "connect" and "net-opts".

Some fields cannot be changed at runtime, however.
Introduce a new flag GENLA_F_INVARIANT to be able to trigger on that
in the generated validation and assignment functions.

Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
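[Editor's note] A minimal sketch of how a GENLA_F_INVARIANT check could behave inside a
generated *_from_attrs_for_change() validator. The helper name and flag plumbing below
are illustrative assumptions, not the actual genl_magic_func.h output; only the mapping
of -EEXIST to "can not change invariant setting" comes from the patch itself:

	/* Illustrative sketch, not the generated code. The idea: when parsing
	 * attributes for a "change options" operation, a field tagged
	 * GENLA_F_INVARIANT may be present, but its value must not differ
	 * from the currently configured one. */
	static int check_invariant(const struct nlattr *nla, unsigned int flags,
				   bool is_change, const void *cur, int len)
	{
		if (!nla)
			return 0;	/* attribute absent: nothing to validate */
		if (is_change && (flags & GENLA_F_INVARIANT) &&
		    memcmp(nla_data(nla), cur, len))
			return -EEXIST;	/* "can not change invariant setting" */
		return 0;
	}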
Diffstat (limited to 'drivers/block')
-rw-r--r--  drivers/block/drbd/drbd_int.h       |  10
-rw-r--r--  drivers/block/drbd/drbd_main.c      |  72
-rw-r--r--  drivers/block/drbd/drbd_nl.c        | 550
-rw-r--r--  drivers/block/drbd/drbd_receiver.c  |  51
-rw-r--r--  drivers/block/drbd/drbd_state.c     |   4
-rw-r--r--  drivers/block/drbd/drbd_worker.c    |  50
6 files changed, 449 insertions, 288 deletions
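[Editor's note] The new "-opts" handlers in this patch (disk-opts, net-opts, resource-opts)
all follow the same copy/validate/swap shape. A distilled sketch of the pattern, with error
handling abbreviated and the function name invented for illustration (see drbd_adm_disk_opts()
in the diff below for the real thing):

	/* Sketch only: work on a full copy of the live configuration, apply
	 * and clip the netlink attributes on that copy, then assign it back
	 * in one struct assignment, so readers never observe a half-updated
	 * struct (modulo the locking FIXMEs left in the patch). */
	static int change_disk_options_sketch(struct drbd_conf *mdev,
					      struct genl_info *info)
	{
		struct disk_conf *ndc;
		int err;

		ndc = kmalloc(sizeof(*ndc), GFP_KERNEL);
		if (!ndc)
			return -ENOMEM;

		memcpy(ndc, &mdev->ldev->dc, sizeof(*ndc));	  /* start from current values */
		err = disk_conf_from_attrs_for_change(ndc, info); /* overlay changed attrs */
		if (!err)
			mdev->ldev->dc = *ndc;			  /* one-shot struct assignment */

		kfree(ndc);
		return err;
	}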
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index d6e7e657e7a4..bc265f3733c6 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -860,7 +860,7 @@ struct drbd_md {
 	s32 bm_offset;	/* signed relative sector offset to bitmap */
 
 	/* u32 al_nr_extents;	   important for restoring the AL
-	 * is stored into sync_conf.al_extents, which in turn
+	 * is stored into ldev->dc.al_extents, which in turn
 	 * gets applied to act_log->nr_elements
 	 */
 };
@@ -929,6 +929,7 @@ struct drbd_tconn { /* is a resource from the config file */
 	atomic_t net_cnt;	/* Users of net_conf */
 	wait_queue_head_t net_cnt_wait;
 	wait_queue_head_t ping_wait;	/* Woken upon reception of a ping, and a state change */
+	struct res_opts res_opts;
 
 	struct drbd_socket data;	/* data/barrier/cstate/parameter packets */
 	struct drbd_socket meta;	/* ping/ack (metadata) packets */
@@ -945,6 +946,8 @@ struct drbd_tconn { /* is a resource from the config file */
 	struct crypto_hash *cram_hmac_tfm;
 	struct crypto_hash *integrity_w_tfm; /* to be used by the worker thread */
 	struct crypto_hash *integrity_r_tfm; /* to be used by the receiver thread */
+	struct crypto_hash *csums_tfm;
+	struct crypto_hash *verify_tfm;
 	void *int_dig_out;
 	void *int_dig_in;
 	void *int_dig_vv;
@@ -963,7 +966,6 @@ struct drbd_conf {
 	unsigned long flags;
 
 	/* configured by drbdsetup */
-	struct syncer_conf sync_conf;
 	struct drbd_backing_dev *ldev __protected_by(local);
 
 	sector_t p_size;     /* partner's disk size */
@@ -1037,8 +1039,6 @@ struct drbd_conf {
 	/* size of out-of-sync range in sectors. */
 	sector_t ov_last_oos_size;
 	unsigned long ov_left; /* in bits */
-	struct crypto_hash *csums_tfm;
-	struct crypto_hash *verify_tfm;
 
 	struct drbd_bitmap *bitmap;
 	unsigned long bm_resync_fo; /* bit offset for drbd_bm_find_next */
@@ -1188,7 +1188,7 @@ extern int conn_send_cmd2(struct drbd_tconn *tconn, enum drbd_packet cmd,
 			   char *data, size_t size);
 #define USE_DATA_SOCKET 1
 #define USE_META_SOCKET 0
-extern int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc);
+extern int drbd_send_sync_param(struct drbd_conf *mdev);
 extern int drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr,
 			   u32 set_size);
 extern int drbd_send_ack(struct drbd_conf *, enum drbd_packet,
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 79a0e042252f..bdb12723585e 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -784,7 +784,7 @@ int conn_send_cmd2(struct drbd_tconn *tconn, enum drbd_packet cmd, char *data,
 	return ok;
 }
 
-int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc)
+int drbd_send_sync_param(struct drbd_conf *mdev)
 {
 	struct p_rs_param_95 *p;
 	struct socket *sock;
@@ -793,7 +793,7 @@ int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc)
 
 	size = apv <= 87 ? sizeof(struct p_rs_param)
 		: apv == 88 ? sizeof(struct p_rs_param)
-			+ strlen(mdev->sync_conf.verify_alg) + 1
+			+ strlen(mdev->tconn->net_conf->verify_alg) + 1
 		: apv <= 94 ? sizeof(struct p_rs_param_89)
 		: /* apv >= 95 */ sizeof(struct p_rs_param_95);
 
@@ -812,16 +812,25 @@ int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc)
 		/* initialize verify_alg and csums_alg */
 		memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
 
-		p->rate = cpu_to_be32(sc->rate);
-		p->c_plan_ahead = cpu_to_be32(sc->c_plan_ahead);
-		p->c_delay_target = cpu_to_be32(sc->c_delay_target);
-		p->c_fill_target = cpu_to_be32(sc->c_fill_target);
-		p->c_max_rate = cpu_to_be32(sc->c_max_rate);
+		if (get_ldev(mdev)) {
+			p->rate = cpu_to_be32(mdev->ldev->dc.resync_rate);
+			p->c_plan_ahead = cpu_to_be32(mdev->ldev->dc.c_plan_ahead);
+			p->c_delay_target = cpu_to_be32(mdev->ldev->dc.c_delay_target);
+			p->c_fill_target = cpu_to_be32(mdev->ldev->dc.c_fill_target);
+			p->c_max_rate = cpu_to_be32(mdev->ldev->dc.c_max_rate);
+			put_ldev(mdev);
+		} else {
+			p->rate = cpu_to_be32(DRBD_RATE_DEF);
+			p->c_plan_ahead = cpu_to_be32(DRBD_C_PLAN_AHEAD_DEF);
+			p->c_delay_target = cpu_to_be32(DRBD_C_DELAY_TARGET_DEF);
+			p->c_fill_target = cpu_to_be32(DRBD_C_FILL_TARGET_DEF);
+			p->c_max_rate = cpu_to_be32(DRBD_C_MAX_RATE_DEF);
+		}
 
 		if (apv >= 88)
-			strcpy(p->verify_alg, mdev->sync_conf.verify_alg);
+			strcpy(p->verify_alg, mdev->tconn->net_conf->verify_alg);
 		if (apv >= 89)
-			strcpy(p->csums_alg, mdev->sync_conf.csums_alg);
+			strcpy(p->csums_alg, mdev->tconn->net_conf->csums_alg);
 
 		rv = _drbd_send_cmd(mdev, sock, cmd, &p->head, size, 0);
 	} else
@@ -1043,7 +1052,7 @@ int fill_bitmap_rle_bits(struct drbd_conf *mdev,
 	int bits;
 
 	/* may we use this feature? */
-	if ((mdev->sync_conf.use_rle == 0) ||
+	if ((mdev->tconn->net_conf->use_rle == 0) ||
 		(mdev->tconn->agreed_pro_version < 90))
 		return 0;
 
@@ -1790,26 +1799,8 @@ static int drbd_release(struct gendisk *gd, fmode_t mode)
 
 static void drbd_set_defaults(struct drbd_conf *mdev)
 {
-	/* This way we get a compile error when sync_conf grows,
-	   and we forgot to initialize it here */
-	mdev->sync_conf = (struct syncer_conf) {
-		/* .rate = */		DRBD_RATE_DEF,
-		/* .after = */		DRBD_AFTER_DEF,
-		/* .al_extents = */	DRBD_AL_EXTENTS_DEF,
-		/* .verify_alg = */	{}, 0,
-		/* .cpu_mask = */	{}, 0,
-		/* .csums_alg = */	{}, 0,
-		/* .use_rle = */	0,
-		/* .on_no_data = */	DRBD_ON_NO_DATA_DEF,
-		/* .c_plan_ahead = */	DRBD_C_PLAN_AHEAD_DEF,
-		/* .c_delay_target = */ DRBD_C_DELAY_TARGET_DEF,
-		/* .c_fill_target = */	DRBD_C_FILL_TARGET_DEF,
-		/* .c_max_rate = */	DRBD_C_MAX_RATE_DEF,
-		/* .c_min_rate = */	DRBD_C_MIN_RATE_DEF
-	};
-
-	/* Have to use that way, because the layout differs between
-	   big endian and little endian */
+	/* Beware! The actual layout differs
+	 * between big endian and little endian */
 	mdev->state = (union drbd_state) {
 		{ .role = R_SECONDARY,
 		  .peer = R_UNKNOWN,
@@ -2286,6 +2277,11 @@ struct drbd_tconn *drbd_new_tconn(const char *name)
 	drbd_thread_init(tconn, &tconn->worker, drbd_worker, "worker");
 	drbd_thread_init(tconn, &tconn->asender, drbd_asender, "asender");
 
+	tconn->res_opts = (struct res_opts) {
+		{}, 0, /* cpu_mask */
+		DRBD_ON_NO_DATA_DEF, /* on_no_data */
+	};
+
 	mutex_lock(&drbd_cfg_mutex);
 	list_add_tail(&tconn->all_tconn, &drbd_tconns);
 	mutex_unlock(&drbd_cfg_mutex);
@@ -2559,10 +2555,10 @@ void drbd_free_sock(struct drbd_tconn *tconn)
 
 void drbd_free_resources(struct drbd_conf *mdev)
 {
-	crypto_free_hash(mdev->csums_tfm);
-	mdev->csums_tfm = NULL;
-	crypto_free_hash(mdev->verify_tfm);
-	mdev->verify_tfm = NULL;
+	crypto_free_hash(mdev->tconn->csums_tfm);
+	mdev->tconn->csums_tfm = NULL;
+	crypto_free_hash(mdev->tconn->verify_tfm);
+	mdev->tconn->verify_tfm = NULL;
 	crypto_free_hash(mdev->tconn->cram_hmac_tfm);
 	mdev->tconn->cram_hmac_tfm = NULL;
 	crypto_free_hash(mdev->tconn->integrity_w_tfm);
@@ -2589,7 +2585,7 @@ struct meta_data_on_disk {
 	u32 md_size_sect;
 	u32 al_offset;         /* offset to this block */
 	u32 al_nr_extents;     /* important for restoring the AL */
-	      /* `-- act_log->nr_elements <-- sync_conf.al_extents */
+	      /* `-- act_log->nr_elements <-- ldev->dc.al_extents */
 	u32 bm_offset;         /* offset to the bitmap, from here */
 	u32 bm_bytes_per_bit;  /* BM_BLOCK_SIZE */
 	u32 la_peer_max_bio_size;   /* last peer max_bio_size */
@@ -2715,7 +2711,7 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
 	for (i = UI_CURRENT; i < UI_SIZE; i++)
 		bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
 	bdev->md.flags = be32_to_cpu(buffer->flags);
-	mdev->sync_conf.al_extents = be32_to_cpu(buffer->al_nr_extents);
+	bdev->dc.al_extents = be32_to_cpu(buffer->al_nr_extents);
 	bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);
 
 	spin_lock_irq(&mdev->tconn->req_lock);
@@ -2727,8 +2723,8 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
 	}
 	spin_unlock_irq(&mdev->tconn->req_lock);
 
-	if (mdev->sync_conf.al_extents < 7)
-		mdev->sync_conf.al_extents = 127;
+	if (bdev->dc.al_extents < 7)
+		bdev->dc.al_extents = 127;
 
  err:
 	mutex_unlock(&mdev->md_io_mutex);
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index ac0a175e778c..18cd2ed4e8ca 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -53,8 +53,10 @@ int drbd_adm_down(struct sk_buff *skb, struct genl_info *info);
 
 int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
 int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info);
 int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info);
 int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info);
 int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info);
 int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info);
 int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info);
@@ -66,7 +68,7 @@ int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info);
 int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info);
 int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info);
 int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info);
-int drbd_adm_syncer(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info);
 int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
 int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);
 /* .dumpit */
@@ -170,7 +172,7 @@ static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
 	if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
 		struct nlattr *nla;
 		/* parse and validate only */
-		err = drbd_cfg_context_from_attrs(NULL, info->attrs);
+		err = drbd_cfg_context_from_attrs(NULL, info);
 		if (err)
 			goto fail;
 
@@ -616,6 +618,7 @@ static const char *from_attrs_err_to_txt(int err)
 {
 	return	err == -ENOMSG ? "required attribute missing" :
 		err == -EOPNOTSUPP ? "unknown mandatory attribute" :
+		err == -EEXIST ? "can not change invariant setting" :
 		"invalid attribute value";
 }
 
@@ -633,7 +636,7 @@ int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
 
 	memset(&parms, 0, sizeof(parms));
 	if (info->attrs[DRBD_NLA_SET_ROLE_PARMS]) {
-		err = set_role_parms_from_attrs(&parms, info->attrs);
+		err = set_role_parms_from_attrs(&parms, info);
 		if (err) {
 			retcode = ERR_MANDATORY_TAG;
 			drbd_msg_put_info(from_attrs_err_to_txt(err));
@@ -898,24 +901,24 @@ drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev, int ass
  * failed, and 0 on success. You should call drbd_md_sync() after you called
  * this function.
  */
-static int drbd_check_al_size(struct drbd_conf *mdev)
+static int drbd_check_al_size(struct drbd_conf *mdev, struct disk_conf *dc)
 {
 	struct lru_cache *n, *t;
 	struct lc_element *e;
 	unsigned int in_use;
 	int i;
 
-	if (!expect(mdev->sync_conf.al_extents >= DRBD_AL_EXTENTS_MIN))
-		mdev->sync_conf.al_extents = DRBD_AL_EXTENTS_MIN;
+	if (!expect(dc->al_extents >= DRBD_AL_EXTENTS_MIN))
+		dc->al_extents = DRBD_AL_EXTENTS_MIN;
 
 	if (mdev->act_log &&
-	    mdev->act_log->nr_elements == mdev->sync_conf.al_extents)
+	    mdev->act_log->nr_elements == dc->al_extents)
 		return 0;
 
 	in_use = 0;
 	t = mdev->act_log;
 	n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
-		mdev->sync_conf.al_extents, sizeof(struct lc_element), 0);
+		dc->al_extents, sizeof(struct lc_element), 0);
 
 	if (n == NULL) {
 		dev_err(DEV, "Cannot allocate act_log lru!\n");
@@ -1069,6 +1072,114 @@ static void drbd_suspend_al(struct drbd_conf *mdev)
 		dev_info(DEV, "Suspended AL updates\n");
 }
 
+int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
+{
+	enum drbd_ret_code retcode;
+	struct drbd_conf *mdev;
+	struct disk_conf *ndc; /* new disk conf */
+	int err, fifo_size;
+	int *rs_plan_s = NULL;
+
+	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+	if (!adm_ctx.reply_skb)
+		return retcode;
+	if (retcode != NO_ERROR)
+		goto out;
+
+	mdev = adm_ctx.mdev;
+
+	/* we also need a disk
+	 * to change the options on */
+	if (!get_ldev(mdev)) {
+		retcode = ERR_NO_DISK;
+		goto out;
+	}
+
+/* FIXME freeze IO, cluster wide.
+ *
+ * We should make sure no-one uses
+ * some half-updated struct when we
+ * assign it later. */
+
+	ndc = kmalloc(sizeof(*ndc), GFP_KERNEL);
+	if (!ndc) {
+		retcode = ERR_NOMEM;
+		goto fail;
+	}
+
+	memcpy(ndc, &mdev->ldev->dc, sizeof(*ndc));
+	err = disk_conf_from_attrs_for_change(ndc, info);
+	if (err) {
+		retcode = ERR_MANDATORY_TAG;
+		drbd_msg_put_info(from_attrs_err_to_txt(err));
+	}
+
+	if (!expect(ndc->resync_rate >= 1))
+		ndc->resync_rate = 1;
+
+	/* clip to allowed range */
+	if (!expect(ndc->al_extents >= DRBD_AL_EXTENTS_MIN))
+		ndc->al_extents = DRBD_AL_EXTENTS_MIN;
+	if (!expect(ndc->al_extents <= DRBD_AL_EXTENTS_MAX))
+		ndc->al_extents = DRBD_AL_EXTENTS_MAX;
+
+	/* most sanity checks done, try to assign the new sync-after
+	 * dependency.  need to hold the global lock in there,
+	 * to avoid a race in the dependency loop check. */
+	retcode = drbd_alter_sa(mdev, ndc->resync_after);
+	if (retcode != NO_ERROR)
+		goto fail;
+
+	fifo_size = (ndc->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
+	if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
+		rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
+		if (!rs_plan_s) {
+			dev_err(DEV, "kmalloc of fifo_buffer failed");
+			retcode = ERR_NOMEM;
+			goto fail;
+		}
+	}
+
+	if (fifo_size != mdev->rs_plan_s.size) {
+		kfree(mdev->rs_plan_s.values);
+		mdev->rs_plan_s.values = rs_plan_s;
+		mdev->rs_plan_s.size = fifo_size;
+		mdev->rs_planed = 0;
+		rs_plan_s = NULL;
+	}
+
+	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
+	drbd_al_shrink(mdev);
+	err = drbd_check_al_size(mdev, ndc);
+	lc_unlock(mdev->act_log);
+	wake_up(&mdev->al_wait);
+
+	if (err) {
+		retcode = ERR_NOMEM;
+		goto fail;
+	}
+
+	/* FIXME
+	 * To avoid someone looking at a half-updated struct, we probably
+	 * should have a rw-semaphor on net_conf and disk_conf.
+	 */
+	mdev->ldev->dc = *ndc;
+
+	drbd_md_sync(mdev);
+
+
+	if (mdev->state.conn >= C_CONNECTED)
+		drbd_send_sync_param(mdev);
+
+ fail:
+	put_ldev(mdev);
+	kfree(ndc);
+	kfree(rs_plan_s);
+ out:
+	drbd_adm_finish(info, retcode);
+	return 0;
+}
+
 int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
 {
 	struct drbd_conf *mdev;
@@ -1111,12 +1222,29 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
 		goto fail;
 	}
 
-	nbc->dc.disk_size     = DRBD_DISK_SIZE_SECT_DEF;
-	nbc->dc.on_io_error   = DRBD_ON_IO_ERROR_DEF;
-	nbc->dc.fencing       = DRBD_FENCING_DEF;
-	nbc->dc.max_bio_bvecs = DRBD_MAX_BIO_BVECS_DEF;
-
-	err = disk_conf_from_attrs(&nbc->dc, info->attrs);
+	nbc->dc = (struct disk_conf) {
+		{}, 0, /* backing_dev */
+		{}, 0, /* meta_dev */
+		0, /* meta_dev_idx */
+		DRBD_DISK_SIZE_SECT_DEF, /* disk_size */
+		DRBD_MAX_BIO_BVECS_DEF, /* max_bio_bvecs */
+		DRBD_ON_IO_ERROR_DEF, /* on_io_error */
+		DRBD_FENCING_DEF, /* fencing */
+		DRBD_RATE_DEF, /* resync_rate */
+		DRBD_AFTER_DEF, /* resync_after */
+		DRBD_AL_EXTENTS_DEF, /* al_extents */
+		DRBD_C_PLAN_AHEAD_DEF, /* c_plan_ahead */
+		DRBD_C_DELAY_TARGET_DEF, /* c_delay_target */
+		DRBD_C_FILL_TARGET_DEF, /* c_fill_target */
+		DRBD_C_MAX_RATE_DEF, /* c_max_rate */
+		DRBD_C_MIN_RATE_DEF, /* c_min_rate */
+		0, /* no_disk_barrier */
+		0, /* no_disk_flush */
+		0, /* no_disk_drain */
+		0, /* no_md_flush */
+	};
+
+	err = disk_conf_from_attrs(&nbc->dc, info);
 	if (err) {
 		retcode = ERR_MANDATORY_TAG;
 		drbd_msg_put_info(from_attrs_err_to_txt(err));
@@ -1267,7 +1395,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
 	}
 
 	/* Since we are diskless, fix the activity log first... */
-	if (drbd_check_al_size(mdev)) {
+	if (drbd_check_al_size(mdev, &nbc->dc)) {
 		retcode = ERR_NOMEM;
 		goto force_diskless_dec;
 	}
@@ -1498,6 +1626,158 @@ out:
 	return 0;
 }
 
+static bool conn_resync_running(struct drbd_tconn *tconn)
+{
+	struct drbd_conf *mdev;
+	int vnr;
+
+	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+		if (mdev->state.conn == C_SYNC_SOURCE ||
+		    mdev->state.conn == C_SYNC_TARGET ||
+		    mdev->state.conn == C_PAUSED_SYNC_S ||
+		    mdev->state.conn == C_PAUSED_SYNC_T)
+			return true;
+	}
+	return false;
+}
+
+static bool conn_ov_running(struct drbd_tconn *tconn)
+{
+	struct drbd_conf *mdev;
+	int vnr;
+
+	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+		if (mdev->state.conn == C_VERIFY_S ||
+		    mdev->state.conn == C_VERIFY_T)
+			return true;
+	}
+	return false;
+}
+
+int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
+{
+	enum drbd_ret_code retcode;
+	struct drbd_tconn *tconn;
+	struct net_conf *new_conf = NULL;
+	int err;
+	int ovr; /* online verify running */
+	int rsr; /* re-sync running */
+	struct crypto_hash *verify_tfm = NULL;
+	struct crypto_hash *csums_tfm = NULL;
+
+
+	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
+	if (!adm_ctx.reply_skb)
+		return retcode;
+	if (retcode != NO_ERROR)
+		goto out;
+
+	tconn = adm_ctx.tconn;
+
+	new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
+	if (!new_conf) {
+		retcode = ERR_NOMEM;
+		goto out;
+	}
+
+	/* we also need a net config
+	 * to change the options on */
+	if (!get_net_conf(tconn)) {
+		drbd_msg_put_info("net conf missing, try connect");
+		retcode = ERR_INVALID_REQUEST;
+		goto out;
+	}
+
+	conn_reconfig_start(tconn);
+
+	memcpy(new_conf, tconn->net_conf, sizeof(*new_conf));
+	err = net_conf_from_attrs_for_change(new_conf, info);
+	if (err) {
+		retcode = ERR_MANDATORY_TAG;
+		drbd_msg_put_info(from_attrs_err_to_txt(err));
+		goto fail;
+	}
+
+	/* re-sync running */
+	rsr = conn_resync_running(tconn);
+	if (rsr && strcmp(new_conf->csums_alg, tconn->net_conf->csums_alg)) {
+		retcode = ERR_CSUMS_RESYNC_RUNNING;
+		goto fail;
+	}
+
+	if (!rsr && new_conf->csums_alg[0]) {
+		csums_tfm = crypto_alloc_hash(new_conf->csums_alg, 0, CRYPTO_ALG_ASYNC);
+		if (IS_ERR(csums_tfm)) {
+			csums_tfm = NULL;
+			retcode = ERR_CSUMS_ALG;
+			goto fail;
+		}
+
+		if (!drbd_crypto_is_hash(crypto_hash_tfm(csums_tfm))) {
+			retcode = ERR_CSUMS_ALG_ND;
+			goto fail;
+		}
+	}
+
+	/* online verify running */
+	ovr = conn_ov_running(tconn);
+	if (ovr) {
+		if (strcmp(new_conf->verify_alg, tconn->net_conf->verify_alg)) {
+			retcode = ERR_VERIFY_RUNNING;
+			goto fail;
+		}
+	}
+
+	if (!ovr && new_conf->verify_alg[0]) {
+		verify_tfm = crypto_alloc_hash(new_conf->verify_alg, 0, CRYPTO_ALG_ASYNC);
+		if (IS_ERR(verify_tfm)) {
+			verify_tfm = NULL;
+			retcode = ERR_VERIFY_ALG;
+			goto fail;
+		}
+
+		if (!drbd_crypto_is_hash(crypto_hash_tfm(verify_tfm))) {
+			retcode = ERR_VERIFY_ALG_ND;
+			goto fail;
+		}
+	}
+
+
+	/* For now, use struct assignment, not pointer assignment.
+	 * We don't have any means to determine who might still
+	 * keep a local alias into the struct,
+	 * so we cannot just free it and hope for the best :(
+	 * FIXME
+	 * To avoid someone looking at a half-updated struct, we probably
+	 * should have a rw-semaphor on net_conf and disk_conf.
+	 */
+	*tconn->net_conf = *new_conf;
+
+	if (!rsr) {
+		crypto_free_hash(tconn->csums_tfm);
+		tconn->csums_tfm = csums_tfm;
+		csums_tfm = NULL;
+	}
+	if (!ovr) {
+		crypto_free_hash(tconn->verify_tfm);
+		tconn->verify_tfm = verify_tfm;
+		verify_tfm = NULL;
+	}
+
+	if (tconn->cstate >= C_WF_REPORT_PARAMS)
+		drbd_send_sync_param(minor_to_mdev(conn_lowest_minor(tconn)));
+
+ fail:
+	crypto_free_hash(csums_tfm);
+	crypto_free_hash(verify_tfm);
+	kfree(new_conf);
+	put_net_conf(tconn);
+	conn_reconfig_done(tconn);
+ out:
+	drbd_adm_finish(info, retcode);
+	return 0;
+}
+
 int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
 {
 	char hmac_name[CRYPTO_MAX_ALG_NAME];
@@ -1531,33 +1811,47 @@ int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
 	}
 
 	/* allocation not in the IO path, cqueue thread context */
-	new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
+	new_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL);
 	if (!new_conf) {
 		retcode = ERR_NOMEM;
 		goto fail;
 	}
 
-	new_conf->timeout	   = DRBD_TIMEOUT_DEF;
-	new_conf->try_connect_int  = DRBD_CONNECT_INT_DEF;
-	new_conf->ping_int	   = DRBD_PING_INT_DEF;
-	new_conf->max_epoch_size   = DRBD_MAX_EPOCH_SIZE_DEF;
-	new_conf->max_buffers	   = DRBD_MAX_BUFFERS_DEF;
-	new_conf->unplug_watermark = DRBD_UNPLUG_WATERMARK_DEF;
-	new_conf->sndbuf_size	   = DRBD_SNDBUF_SIZE_DEF;
-	new_conf->rcvbuf_size	   = DRBD_RCVBUF_SIZE_DEF;
-	new_conf->ko_count	   = DRBD_KO_COUNT_DEF;
-	new_conf->after_sb_0p	   = DRBD_AFTER_SB_0P_DEF;
-	new_conf->after_sb_1p	   = DRBD_AFTER_SB_1P_DEF;
-	new_conf->after_sb_2p	   = DRBD_AFTER_SB_2P_DEF;
-	new_conf->want_lose	   = 0;
-	new_conf->two_primaries    = 0;
-	new_conf->wire_protocol    = DRBD_PROT_C;
-	new_conf->ping_timeo	   = DRBD_PING_TIMEO_DEF;
-	new_conf->rr_conflict	   = DRBD_RR_CONFLICT_DEF;
-	new_conf->on_congestion    = DRBD_ON_CONGESTION_DEF;
-	new_conf->cong_extents     = DRBD_CONG_EXTENTS_DEF;
-
-	err = net_conf_from_attrs(new_conf, info->attrs);
+	*new_conf = (struct net_conf) {
+		{}, 0, /* my_addr */
+		{}, 0, /* peer_addr */
+		{}, 0, /* shared_secret */
+		{}, 0, /* cram_hmac_alg */
+		{}, 0, /* integrity_alg */
+		{}, 0, /* verify_alg */
+		{}, 0, /* csums_alg */
+		DRBD_PROTOCOL_DEF, /* wire_protocol */
+		DRBD_CONNECT_INT_DEF, /* try_connect_int */
+		DRBD_TIMEOUT_DEF, /* timeout */
+		DRBD_PING_INT_DEF, /* ping_int */
+		DRBD_PING_TIMEO_DEF, /* ping_timeo */
+		DRBD_SNDBUF_SIZE_DEF, /* sndbuf_size */
+		DRBD_RCVBUF_SIZE_DEF, /* rcvbuf_size */
+		DRBD_KO_COUNT_DEF, /* ko_count */
+		DRBD_MAX_BUFFERS_DEF, /* max_buffers */
+		DRBD_MAX_EPOCH_SIZE_DEF, /* max_epoch_size */
+		DRBD_UNPLUG_WATERMARK_DEF, /* unplug_watermark */
+		DRBD_AFTER_SB_0P_DEF, /* after_sb_0p */
+		DRBD_AFTER_SB_1P_DEF, /* after_sb_1p */
+		DRBD_AFTER_SB_2P_DEF, /* after_sb_2p */
+		DRBD_RR_CONFLICT_DEF, /* rr_conflict */
+		DRBD_ON_CONGESTION_DEF, /* on_congestion */
+		DRBD_CONG_FILL_DEF, /* cong_fill */
+		DRBD_CONG_EXTENTS_DEF, /* cong_extents */
+		0, /* two_primaries */
+		0, /* want_lose */
+		0, /* no_cork */
+		0, /* always_asbp */
+		0, /* dry_run */
+		0, /* use_rle */
+	};
+
+	err = net_conf_from_attrs(new_conf, info);
 	if (err) {
 		retcode = ERR_MANDATORY_TAG;
 		drbd_msg_put_info(from_attrs_err_to_txt(err));
@@ -1789,7 +2083,7 @@ int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
 	tconn = adm_ctx.tconn;
 	memset(&parms, 0, sizeof(parms));
 	if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
-		err = disconnect_parms_from_attrs(&parms, info->attrs);
+		err = disconnect_parms_from_attrs(&parms, info);
 		if (err) {
 			retcode = ERR_MANDATORY_TAG;
 			drbd_msg_put_info(from_attrs_err_to_txt(err));
@@ -1848,7 +2142,7 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
 
 	memset(&rs, 0, sizeof(struct resize_parms));
 	if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
-		err = resize_parms_from_attrs(&rs, info->attrs);
+		err = resize_parms_from_attrs(&rs, info);
 		if (err) {
 			retcode = ERR_MANDATORY_TAG;
 			drbd_msg_put_info(from_attrs_err_to_txt(err));
@@ -1904,26 +2198,21 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
 	return 0;
 }
 
-int drbd_adm_syncer(struct sk_buff *skb, struct genl_info *info)
+int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
 {
-	struct drbd_conf *mdev;
 	enum drbd_ret_code retcode;
-	int err;
-	int ovr; /* online verify running */
-	int rsr; /* re-sync running */
-	struct crypto_hash *verify_tfm = NULL;
-	struct crypto_hash *csums_tfm = NULL;
-	struct syncer_conf sc;
 	cpumask_var_t new_cpu_mask;
+	struct drbd_tconn *tconn;
 	int *rs_plan_s = NULL;
-	int fifo_size;
+	struct res_opts sc;
+	int err;
 
-	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
 	if (!adm_ctx.reply_skb)
 		return retcode;
 	if (retcode != NO_ERROR)
 		goto fail;
-	mdev = adm_ctx.mdev;
+	tconn = adm_ctx.tconn;
 
 	if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL)) {
 		retcode = ERR_NOMEM;
@@ -1933,172 +2222,43 @@ int drbd_adm_syncer(struct sk_buff *skb, struct genl_info *info)
 
 	if (((struct drbd_genlmsghdr*)info->userhdr)->flags
 		& DRBD_GENL_F_SET_DEFAULTS) {
-		memset(&sc, 0, sizeof(struct syncer_conf));
-		sc.rate       = DRBD_RATE_DEF;
-		sc.after      = DRBD_AFTER_DEF;
-		sc.al_extents = DRBD_AL_EXTENTS_DEF;
+		memset(&sc, 0, sizeof(struct res_opts));
 		sc.on_no_data = DRBD_ON_NO_DATA_DEF;
-		sc.c_plan_ahead = DRBD_C_PLAN_AHEAD_DEF;
-		sc.c_delay_target = DRBD_C_DELAY_TARGET_DEF;
-		sc.c_fill_target = DRBD_C_FILL_TARGET_DEF;
-		sc.c_max_rate = DRBD_C_MAX_RATE_DEF;
-		sc.c_min_rate = DRBD_C_MIN_RATE_DEF;
 	} else
-		memcpy(&sc, &mdev->sync_conf, sizeof(struct syncer_conf));
+		sc = tconn->res_opts;
 
-	err = syncer_conf_from_attrs(&sc, info->attrs);
+	err = res_opts_from_attrs(&sc, info);
 	if (err) {
 		retcode = ERR_MANDATORY_TAG;
 		drbd_msg_put_info(from_attrs_err_to_txt(err));
 		goto fail;
 	}
 
-	/* re-sync running */
-	rsr = (	mdev->state.conn == C_SYNC_SOURCE ||
-		mdev->state.conn == C_SYNC_TARGET ||
-		mdev->state.conn == C_PAUSED_SYNC_S ||
-		mdev->state.conn == C_PAUSED_SYNC_T );
-
-	if (rsr && strcmp(sc.csums_alg, mdev->sync_conf.csums_alg)) {
-		retcode = ERR_CSUMS_RESYNC_RUNNING;
-		goto fail;
-	}
-
-	if (!rsr && sc.csums_alg[0]) {
-		csums_tfm = crypto_alloc_hash(sc.csums_alg, 0, CRYPTO_ALG_ASYNC);
-		if (IS_ERR(csums_tfm)) {
-			csums_tfm = NULL;
-			retcode = ERR_CSUMS_ALG;
-			goto fail;
-		}
-
-		if (!drbd_crypto_is_hash(crypto_hash_tfm(csums_tfm))) {
-			retcode = ERR_CSUMS_ALG_ND;
-			goto fail;
-		}
-	}
-
-	/* online verify running */
-	ovr = (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T);
-
-	if (ovr) {
-		if (strcmp(sc.verify_alg, mdev->sync_conf.verify_alg)) {
-			retcode = ERR_VERIFY_RUNNING;
-			goto fail;
-		}
-	}
-
-	if (!ovr && sc.verify_alg[0]) {
-		verify_tfm = crypto_alloc_hash(sc.verify_alg, 0, CRYPTO_ALG_ASYNC);
-		if (IS_ERR(verify_tfm)) {
-			verify_tfm = NULL;
-			retcode = ERR_VERIFY_ALG;
-			goto fail;
-		}
-
-		if (!drbd_crypto_is_hash(crypto_hash_tfm(verify_tfm))) {
-			retcode = ERR_VERIFY_ALG_ND;
-			goto fail;
-		}
-	}
-
 	/* silently ignore cpu mask on UP kernel */
 	if (nr_cpu_ids > 1 && sc.cpu_mask[0] != 0) {
 		err = __bitmap_parse(sc.cpu_mask, 32, 0,
 				cpumask_bits(new_cpu_mask), nr_cpu_ids);
 		if (err) {
-			dev_warn(DEV, "__bitmap_parse() failed with %d\n", err);
+			conn_warn(tconn, "__bitmap_parse() failed with %d\n", err);
 			retcode = ERR_CPU_MASK_PARSE;
 			goto fail;
 		}
 	}
 
-	if (!expect(sc.rate >= 1))
-		sc.rate = 1;
-
-	/* clip to allowed range */
-	if (!expect(sc.al_extents >= DRBD_AL_EXTENTS_MIN))
-		sc.al_extents = DRBD_AL_EXTENTS_MIN;
-	if (!expect(sc.al_extents <= DRBD_AL_EXTENTS_MAX))
-		sc.al_extents = DRBD_AL_EXTENTS_MAX;
-
-	/* most sanity checks done, try to assign the new sync-after
-	 * dependency.  need to hold the global lock in there,
-	 * to avoid a race in the dependency loop check. */
-	retcode = drbd_alter_sa(mdev, sc.after);
-	if (retcode != NO_ERROR)
-		goto fail;
-
-	fifo_size = (sc.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
-	if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
-		rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
-		if (!rs_plan_s) {
-			dev_err(DEV, "kmalloc of fifo_buffer failed");
-			retcode = ERR_NOMEM;
-			goto fail;
-		}
-	}
-
-	/* ok, assign the rest of it as well.
-	 * lock against receive_SyncParam() */
-	spin_lock(&mdev->peer_seq_lock);
-	mdev->sync_conf = sc;
-
-	if (!rsr) {
-		crypto_free_hash(mdev->csums_tfm);
-		mdev->csums_tfm = csums_tfm;
-		csums_tfm = NULL;
-	}
-
-	if (!ovr) {
-		crypto_free_hash(mdev->verify_tfm);
-		mdev->verify_tfm = verify_tfm;
-		verify_tfm = NULL;
-	}
-
-	if (fifo_size != mdev->rs_plan_s.size) {
-		kfree(mdev->rs_plan_s.values);
-		mdev->rs_plan_s.values = rs_plan_s;
-		mdev->rs_plan_s.size = fifo_size;
-		mdev->rs_planed = 0;
-		rs_plan_s = NULL;
-	}
-
-	spin_unlock(&mdev->peer_seq_lock);
 
-	if (get_ldev(mdev)) {
-		wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
-		drbd_al_shrink(mdev);
-		err = drbd_check_al_size(mdev);
-		lc_unlock(mdev->act_log);
-		wake_up(&mdev->al_wait);
+	tconn->res_opts = sc;
 
-		put_ldev(mdev);
-		drbd_md_sync(mdev);
-
-		if (err) {
-			retcode = ERR_NOMEM;
-			goto fail;
-		}
-	}
-
-	if (mdev->state.conn >= C_CONNECTED)
-		drbd_send_sync_param(mdev, &sc);
-
-	if (!cpumask_equal(mdev->tconn->cpu_mask, new_cpu_mask)) {
-		cpumask_copy(mdev->tconn->cpu_mask, new_cpu_mask);
-		drbd_calc_cpu_mask(mdev->tconn);
-		mdev->tconn->receiver.reset_cpu_mask = 1;
-		mdev->tconn->asender.reset_cpu_mask = 1;
-		mdev->tconn->worker.reset_cpu_mask = 1;
+	if (!cpumask_equal(tconn->cpu_mask, new_cpu_mask)) {
+		cpumask_copy(tconn->cpu_mask, new_cpu_mask);
+		drbd_calc_cpu_mask(tconn);
+		tconn->receiver.reset_cpu_mask = 1;
+		tconn->asender.reset_cpu_mask = 1;
+		tconn->worker.reset_cpu_mask = 1;
 	}
 
-	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
 fail:
 	kfree(rs_plan_s);
 	free_cpumask_var(new_cpu_mask);
-	crypto_free_hash(csums_tfm);
-	crypto_free_hash(verify_tfm);
 
 	drbd_adm_finish(info, retcode);
 	return 0;
@@ -2307,6 +2467,9 @@ int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
 	if (nla_put_drbd_cfg_context(skb, mdev->tconn->name, mdev->vnr))
 		goto nla_put_failure;
 
+	if (res_opts_to_skb(skb, &mdev->tconn->res_opts, exclude_sensitive))
+		goto nla_put_failure;
+
 	if (got_ldev)
 		if (disk_conf_to_skb(skb, &mdev->ldev->dc, exclude_sensitive))
 			goto nla_put_failure;
@@ -2314,9 +2477,6 @@ int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
 	if (net_conf_to_skb(skb, mdev->tconn->net_conf, exclude_sensitive))
 		goto nla_put_failure;
 
-	if (syncer_conf_to_skb(skb, &mdev->sync_conf, exclude_sensitive))
-		goto nla_put_failure;
-
 	nla = nla_nest_start(skb, DRBD_NLA_STATE_INFO);
 	if (!nla)
 		goto nla_put_failure;
@@ -2532,7 +2692,7 @@ int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
 		/* resume from last known position, if possible */
 		struct start_ov_parms parms =
 			{ .ov_start_sector = mdev->ov_start_sector };
-		int err = start_ov_parms_from_attrs(&parms, info->attrs);
+		int err = start_ov_parms_from_attrs(&parms, info);
 		if (err) {
 			retcode = ERR_MANDATORY_TAG;
 			drbd_msg_put_info(from_attrs_err_to_txt(err));
@@ -2568,7 +2728,7 @@ int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
 	mdev = adm_ctx.mdev;
 	memset(&args, 0, sizeof(args));
 	if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
-		err = new_c_uuid_parms_from_attrs(&args, info->attrs);
+		err = new_c_uuid_parms_from_attrs(&args, info);
 		if (err) {
 			retcode = ERR_MANDATORY_TAG;
 			drbd_msg_put_info(from_attrs_err_to_txt(err));
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 50c52712715e..c8c826b2444f 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -763,7 +763,7 @@ int drbd_connected(int vnr, void *p, void *data)
 		&mdev->tconn->cstate_mutex :
 		&mdev->own_state_mutex;
 
-	ok &= drbd_send_sync_param(mdev, &mdev->sync_conf);
+	ok &= drbd_send_sync_param(mdev);
 	ok &= drbd_send_sizes(mdev, 0, 0);
 	ok &= drbd_send_uuids(mdev);
 	ok &= drbd_send_state(mdev);
@@ -2085,7 +2085,7 @@ int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
 	int throttle = 0;
 
 	/* feature disabled? */
-	if (mdev->sync_conf.c_min_rate == 0)
+	if (mdev->ldev->dc.c_min_rate == 0)
 		return 0;
 
 	spin_lock_irq(&mdev->al_lock);
@@ -2125,7 +2125,7 @@ int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
 		db = mdev->rs_mark_left[i] - rs_left;
 		dbdt = Bit2KB(db/dt);
 
-		if (dbdt > mdev->sync_conf.c_min_rate)
+		if (dbdt > mdev->ldev->dc.c_min_rate)
 			throttle = 1;
 	}
 	return throttle;
@@ -3001,7 +3001,10 @@ static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packet cmd,
 	if (drbd_recv(mdev->tconn, &p->head.payload, header_size) != header_size)
 		return false;
 
-	mdev->sync_conf.rate = be32_to_cpu(p->rate);
+	if (get_ldev(mdev)) {
+		mdev->ldev->dc.resync_rate = be32_to_cpu(p->rate);
+		put_ldev(mdev);
+	}
 
 	if (apv >= 88) {
 		if (apv == 88) {
@@ -3029,10 +3032,10 @@ static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packet cmd,
 			p->csums_alg[SHARED_SECRET_MAX-1] = 0;
 		}
 
-		if (strcmp(mdev->sync_conf.verify_alg, p->verify_alg)) {
+		if (strcmp(mdev->tconn->net_conf->verify_alg, p->verify_alg)) {
 			if (mdev->state.conn == C_WF_REPORT_PARAMS) {
 				dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
-				    mdev->sync_conf.verify_alg, p->verify_alg);
+				    mdev->tconn->net_conf->verify_alg, p->verify_alg);
 				goto disconnect;
 			}
 			verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
@@ -3043,10 +3046,10 @@ static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packet cmd,
 			}
 		}
 
-		if (apv >= 89 && strcmp(mdev->sync_conf.csums_alg, p->csums_alg)) {
+		if (apv >= 89 && strcmp(mdev->tconn->net_conf->csums_alg, p->csums_alg)) {
 			if (mdev->state.conn == C_WF_REPORT_PARAMS) {
 				dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
-				    mdev->sync_conf.csums_alg, p->csums_alg);
+				    mdev->tconn->net_conf->csums_alg, p->csums_alg);
 				goto disconnect;
 			}
 			csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
@@ -3057,37 +3060,39 @@ static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packet cmd,
 			}
 		}
 
-		if (apv > 94) {
-			mdev->sync_conf.rate = be32_to_cpu(p->rate);
-			mdev->sync_conf.c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
-			mdev->sync_conf.c_delay_target = be32_to_cpu(p->c_delay_target);
-			mdev->sync_conf.c_fill_target = be32_to_cpu(p->c_fill_target);
-			mdev->sync_conf.c_max_rate = be32_to_cpu(p->c_max_rate);
+		if (apv > 94 && get_ldev(mdev)) {
+			mdev->ldev->dc.resync_rate = be32_to_cpu(p->rate);
+			mdev->ldev->dc.c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
+			mdev->ldev->dc.c_delay_target = be32_to_cpu(p->c_delay_target);
+			mdev->ldev->dc.c_fill_target = be32_to_cpu(p->c_fill_target);
+			mdev->ldev->dc.c_max_rate = be32_to_cpu(p->c_max_rate);
 
-			fifo_size = (mdev->sync_conf.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
+			fifo_size = (mdev->ldev->dc.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
 			if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
 				rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
 				if (!rs_plan_s) {
 					dev_err(DEV, "kmalloc of fifo_buffer failed");
+					put_ldev(mdev);
 					goto disconnect;
 				}
 			}
+			put_ldev(mdev);
 		}
 
 		spin_lock(&mdev->peer_seq_lock);
 		/* lock against drbd_nl_syncer_conf() */
 		if (verify_tfm) {
-			strcpy(mdev->sync_conf.verify_alg, p->verify_alg);
-			mdev->sync_conf.verify_alg_len = strlen(p->verify_alg) + 1;
-			crypto_free_hash(mdev->verify_tfm);
-			mdev->verify_tfm = verify_tfm;
+			strcpy(mdev->tconn->net_conf->verify_alg, p->verify_alg);
+			mdev->tconn->net_conf->verify_alg_len = strlen(p->verify_alg) + 1;
+			crypto_free_hash(mdev->tconn->verify_tfm);
+			mdev->tconn->verify_tfm = verify_tfm;
 			dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
 		}
 		if (csums_tfm) {
-			strcpy(mdev->sync_conf.csums_alg, p->csums_alg);
-			mdev->sync_conf.csums_alg_len = strlen(p->csums_alg) + 1;
-			crypto_free_hash(mdev->csums_tfm);
-			mdev->csums_tfm = csums_tfm;
+			strcpy(mdev->tconn->net_conf->csums_alg, p->csums_alg);
+			mdev->tconn->net_conf->csums_alg_len = strlen(p->csums_alg) + 1;
+			crypto_free_hash(mdev->tconn->csums_tfm);
+			mdev->tconn->csums_tfm = csums_tfm;
 			dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
 		}
 		if (fifo_size != mdev->rs_plan_s.size) {
diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c
index 11685658659e..77fad527fb1d 100644
--- a/drivers/block/drbd/drbd_state.c
+++ b/drivers/block/drbd/drbd_state.c
@@ -402,7 +402,7 @@ is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
 			rv = SS_CONNECTED_OUTDATES;
 
 		else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
-			 (mdev->sync_conf.verify_alg[0] == 0))
+			 (mdev->tconn->net_conf->verify_alg[0] == 0))
 			rv = SS_NO_VERIFY_ALG;
 
 		else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
@@ -668,7 +668,7 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state
 	    (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED))
 		ns.susp_fen = 1; /* Suspend IO while fence-peer handler runs (peer lost) */
 
-	if (mdev->sync_conf.on_no_data == OND_SUSPEND_IO &&
+	if (mdev->tconn->res_opts.on_no_data == OND_SUSPEND_IO &&
 	    (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
 		ns.susp_nod = 1; /* Suspend IO while no data available (no accessible data available) */
 
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index a730520e468e..005876b32f74 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -310,12 +310,12 @@ static int w_e_send_csum(struct drbd_work *w, int cancel)
 	if (unlikely((peer_req->flags & EE_WAS_ERROR) != 0))
 		goto out;
 
-	digest_size = crypto_hash_digestsize(mdev->csums_tfm);
+	digest_size = crypto_hash_digestsize(mdev->tconn->csums_tfm);
 	digest = kmalloc(digest_size, GFP_NOIO);
 	if (digest) {
 		sector_t sector = peer_req->i.sector;
 		unsigned int size = peer_req->i.size;
-		drbd_csum_ee(mdev, mdev->csums_tfm, peer_req, digest);
+		drbd_csum_ee(mdev, mdev->tconn->csums_tfm, peer_req, digest);
 		/* Free peer_req and pages before send.
 		 * In case we block on congestion, we could otherwise run into
 		 * some distributed deadlock, if the other side blocks on
@@ -451,13 +451,13 @@ static int drbd_rs_controller(struct drbd_conf *mdev)
 
 	spin_lock(&mdev->peer_seq_lock); /* get an atomic view on mdev->rs_plan_s */
 
-	steps = mdev->rs_plan_s.size; /* (mdev->sync_conf.c_plan_ahead * 10 * SLEEP_TIME) / HZ; */
+	steps = mdev->rs_plan_s.size; /* (mdev->ldev->dc.c_plan_ahead * 10 * SLEEP_TIME) / HZ; */
 
 	if (mdev->rs_in_flight + sect_in == 0) { /* At start of resync */
-		want = ((mdev->sync_conf.rate * 2 * SLEEP_TIME) / HZ) * steps;
+		want = ((mdev->ldev->dc.resync_rate * 2 * SLEEP_TIME) / HZ) * steps;
 	} else { /* normal path */
-		want = mdev->sync_conf.c_fill_target ? mdev->sync_conf.c_fill_target :
-			sect_in * mdev->sync_conf.c_delay_target * HZ / (SLEEP_TIME * 10);
+		want = mdev->ldev->dc.c_fill_target ? mdev->ldev->dc.c_fill_target :
+			sect_in * mdev->ldev->dc.c_delay_target * HZ / (SLEEP_TIME * 10);
 	}
 
 	correction = want - mdev->rs_in_flight - mdev->rs_planed;
@@ -476,7 +476,7 @@ static int drbd_rs_controller(struct drbd_conf *mdev)
 	if (req_sect < 0)
 		req_sect = 0;
 
-	max_sect = (mdev->sync_conf.c_max_rate * 2 * SLEEP_TIME) / HZ;
+	max_sect = (mdev->ldev->dc.c_max_rate * 2 * SLEEP_TIME) / HZ;
 	if (req_sect > max_sect)
 		req_sect = max_sect;
 
@@ -492,11 +492,11 @@ static int drbd_rs_controller(struct drbd_conf *mdev)
 static int drbd_rs_number_requests(struct drbd_conf *mdev)
 {
 	int number;
-	if (mdev->rs_plan_s.size) { /* mdev->sync_conf.c_plan_ahead */
+	if (mdev->rs_plan_s.size) { /* mdev->ldev->dc.c_plan_ahead */
 		number = drbd_rs_controller(mdev) >> (BM_BLOCK_SHIFT - 9);
 		mdev->c_sync_rate = number * HZ * (BM_BLOCK_SIZE / 1024) / SLEEP_TIME;
 	} else {
-		mdev->c_sync_rate = mdev->sync_conf.rate;
+		mdev->c_sync_rate = mdev->ldev->dc.resync_rate;
 		number = SLEEP_TIME * mdev->c_sync_rate  / ((BM_BLOCK_SIZE / 1024) * HZ);
 	}
 
@@ -619,7 +619,7 @@ next_sector:
 		/* adjust very last sectors, in case we are oddly sized */
 		if (sector + (size>>9) > capacity)
 			size = (capacity-sector)<<9;
-		if (mdev->tconn->agreed_pro_version >= 89 && mdev->csums_tfm) {
+		if (mdev->tconn->agreed_pro_version >= 89 && mdev->tconn->csums_tfm) {
 			switch (read_for_csum(mdev, sector, size)) {
 			case -EIO: /* Disk failure */
 				put_ldev(mdev);
@@ -810,7 +810,7 @@ int drbd_resync_finished(struct drbd_conf *mdev)
 	if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T)
 		khelper_cmd = "after-resync-target";
 
-	if (mdev->csums_tfm && mdev->rs_total) {
+	if (mdev->tconn->csums_tfm && mdev->rs_total) {
 		const unsigned long s = mdev->rs_same_csum;
 		const unsigned long t = mdev->rs_total;
 		const int ratio =
@@ -1019,13 +1019,13 @@ int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
 	/* quick hack to try to avoid a race against reconfiguration.
 	 * a real fix would be much more involved,
 	 * introducing more locking mechanisms */
-	if (mdev->csums_tfm) {
-		digest_size = crypto_hash_digestsize(mdev->csums_tfm);
+	if (mdev->tconn->csums_tfm) {
+		digest_size = crypto_hash_digestsize(mdev->tconn->csums_tfm);
 		D_ASSERT(digest_size == di->digest_size);
 		digest = kmalloc(digest_size, GFP_NOIO);
 	}
 	if (digest) {
-		drbd_csum_ee(mdev, mdev->csums_tfm, peer_req, digest);
+		drbd_csum_ee(mdev, mdev->tconn->csums_tfm, peer_req, digest);
 		eq = !memcmp(digest, di->digest, digest_size);
 		kfree(digest);
 	}
@@ -1069,7 +1069,7 @@ int w_e_end_ov_req(struct drbd_work *w, int cancel)
 	if (unlikely(cancel))
 		goto out;
 
-	digest_size = crypto_hash_digestsize(mdev->verify_tfm);
+	digest_size = crypto_hash_digestsize(mdev->tconn->verify_tfm);
 	digest = kmalloc(digest_size, GFP_NOIO);
 	if (!digest) {
 		ok = 0;	/* terminate the connection in case the allocation failed */
@@ -1077,7 +1077,7 @@ int w_e_end_ov_req(struct drbd_work *w, int cancel)
 	}
 
 	if (likely(!(peer_req->flags & EE_WAS_ERROR)))
-		drbd_csum_ee(mdev, mdev->verify_tfm, peer_req, digest);
+		drbd_csum_ee(mdev, mdev->tconn->verify_tfm, peer_req, digest);
 	else
 		memset(digest, 0, digest_size);
 
@@ -1141,10 +1141,10 @@ int w_e_end_ov_reply(struct drbd_work *w, int cancel)
 	di = peer_req->digest;
 
 	if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
-		digest_size = crypto_hash_digestsize(mdev->verify_tfm);
+		digest_size = crypto_hash_digestsize(mdev->tconn->verify_tfm);
 		digest = kmalloc(digest_size, GFP_NOIO);
 		if (digest) {
-			drbd_csum_ee(mdev, mdev->verify_tfm, peer_req, digest);
+			drbd_csum_ee(mdev, mdev->tconn->verify_tfm, peer_req, digest);
 
 			D_ASSERT(digest_size == di->digest_size);
 			eq = !memcmp(digest, di->digest, digest_size);
@@ -1319,9 +1319,9 @@ static int _drbd_may_sync_now(struct drbd_conf *mdev)
 	struct drbd_conf *odev = mdev;
 
 	while (1) {
-		if (odev->sync_conf.after == -1)
+		if (odev->ldev->dc.resync_after == -1)
 			return 1;
-		odev = minor_to_mdev(odev->sync_conf.after);
+		odev = minor_to_mdev(odev->ldev->dc.resync_after);
 		if (!expect(odev))
 			return 1;
 		if ((odev->state.conn >= C_SYNC_SOURCE &&
@@ -1408,11 +1408,11 @@ static int sync_after_error(struct drbd_conf *mdev, int o_minor)
 			return ERR_SYNC_AFTER_CYCLE;
 
 		/* dependency chain ends here, no cycles. */
-		if (odev->sync_conf.after == -1)
+		if (odev->ldev->dc.resync_after == -1)
 			return NO_ERROR;
 
 		/* follow the dependency chain */
-		odev = minor_to_mdev(odev->sync_conf.after);
+		odev = minor_to_mdev(odev->ldev->dc.resync_after);
 	}
 }
 
@@ -1424,7 +1424,7 @@ int drbd_alter_sa(struct drbd_conf *mdev, int na)
 	write_lock_irq(&global_state_lock);
 	retcode = sync_after_error(mdev, na);
 	if (retcode == NO_ERROR) {
-		mdev->sync_conf.after = na;
+		mdev->ldev->dc.resync_after = na;
 		do {
 			changes  = _drbd_pause_after(mdev);
 			changes |= _drbd_resume_next(mdev);
@@ -1637,7 +1637,7 @@ int drbd_worker(struct drbd_thread *thi)
 	struct drbd_work *w = NULL;
 	struct drbd_conf *mdev;
 	LIST_HEAD(work_list);
-	int minor, intr = 0;
+	int vnr, intr = 0;
 
 	while (get_t_state(thi) == RUNNING) {
 		drbd_thread_current_set_cpu(thi);
@@ -1722,7 +1722,7 @@ int drbd_worker(struct drbd_thread *thi)
 	spin_unlock_irq(&tconn->data.work.q_lock);
 
 	drbd_thread_stop(&tconn->receiver);
-	idr_for_each_entry(&tconn->volumes, mdev, minor) {
+	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
 		D_ASSERT(mdev->state.disk == D_DISKLESS && mdev->state.conn == C_STANDALONE);
 		/* _drbd_set_state only uses stop_nowait.
 		 * wait here for the exiting receiver. */