Diffstat (limited to 'drivers/block/drbd/drbd_nl.c')

 -rw-r--r--  drivers/block/drbd/drbd_nl.c | 200
 1 file changed, 124 insertions(+), 76 deletions(-)
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 2af26fc95280..9e3f441e7e84 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -696,37 +696,52 @@ out:
 	return 0;
 }
 
-/* initializes the md.*_offset members, so we are able to find
- * the on disk meta data */
+/* Initializes the md.*_offset members, so we are able to find
+ * the on disk meta data.
+ *
+ * We currently have two possible layouts:
+ * external:
+ *   |----------- md_size_sect ------------------|
+ *   [ 4k superblock ][ activity log ][  Bitmap  ]
+ *   | al_offset == 8 |
+ *   | bm_offset = al_offset + X      |
+ *  ==> bitmap sectors = md_size_sect - bm_offset
+ *
+ * internal:
+ *            |----------- md_size_sect ------------------|
+ * [data.....][  Bitmap  ][ activity log ][ 4k superblock ]
+ *                        | al_offset < 0 |
+ *            | bm_offset = al_offset - Y |
+ *  ==> bitmap sectors = Y = al_offset - bm_offset
+ *
+ * Activity log size used to be fixed 32kB,
+ * but is about to become configurable.
+ */
 static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
 				       struct drbd_backing_dev *bdev)
 {
 	sector_t md_size_sect = 0;
-	int meta_dev_idx;
+	unsigned int al_size_sect = bdev->md.al_size_4k * 8;
 
-	rcu_read_lock();
-	meta_dev_idx = rcu_dereference(bdev->disk_conf)->meta_dev_idx;
+	bdev->md.md_offset = drbd_md_ss(bdev);
 
-	switch (meta_dev_idx) {
+	switch (bdev->md.meta_dev_idx) {
 	default:
 		/* v07 style fixed size indexed meta data */
-		bdev->md.md_size_sect = MD_RESERVED_SECT;
-		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
-		bdev->md.al_offset = MD_AL_OFFSET;
-		bdev->md.bm_offset = MD_BM_OFFSET;
+		bdev->md.md_size_sect = MD_128MB_SECT;
+		bdev->md.al_offset = MD_4kB_SECT;
+		bdev->md.bm_offset = MD_4kB_SECT + al_size_sect;
 		break;
 	case DRBD_MD_INDEX_FLEX_EXT:
 		/* just occupy the full device; unit: sectors */
 		bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
-		bdev->md.md_offset = 0;
-		bdev->md.al_offset = MD_AL_OFFSET;
-		bdev->md.bm_offset = MD_BM_OFFSET;
+		bdev->md.al_offset = MD_4kB_SECT;
+		bdev->md.bm_offset = MD_4kB_SECT + al_size_sect;
 		break;
 	case DRBD_MD_INDEX_INTERNAL:
 	case DRBD_MD_INDEX_FLEX_INT:
-		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
 		/* al size is still fixed */
-		bdev->md.al_offset = -MD_AL_SECTORS;
+		bdev->md.al_offset = -al_size_sect;
 		/* we need (slightly less than) ~ this much bitmap sectors: */
 		md_size_sect = drbd_get_capacity(bdev->backing_bdev);
 		md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
@@ -735,14 +750,13 @@ static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
 
 		/* plus the "drbd meta data super block",
 		 * and the activity log; */
-		md_size_sect += MD_BM_OFFSET;
+		md_size_sect += MD_4kB_SECT + al_size_sect;
 
 		bdev->md.md_size_sect = md_size_sect;
 		/* bitmap offset is adjusted by 'super' block size */
-		bdev->md.bm_offset = -md_size_sect + MD_AL_OFFSET;
+		bdev->md.bm_offset = -md_size_sect + MD_4kB_SECT;
 		break;
 	}
-	rcu_read_unlock();
 }
 
 /* input size is expected to be in KB */
@@ -805,7 +819,7 @@ void drbd_resume_io(struct drbd_conf *mdev)
 enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
 {
 	sector_t prev_first_sect, prev_size; /* previous meta location */
-	sector_t la_size, u_size;
+	sector_t la_size_sect, u_size;
 	sector_t size;
 	char ppb[10];
 
@@ -828,7 +842,7 @@ enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds
 
 	prev_first_sect = drbd_md_first_sector(mdev->ldev);
 	prev_size = mdev->ldev->md.md_size_sect;
-	la_size = mdev->ldev->md.la_size_sect;
+	la_size_sect = mdev->ldev->md.la_size_sect;
 
 	/* TODO: should only be some assert here, not (re)init... */
 	drbd_md_set_sector_offsets(mdev, mdev->ldev);
@@ -864,7 +878,7 @@ enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds
 	if (rv == dev_size_error)
 		goto out;
 
-	la_size_changed = (la_size != mdev->ldev->md.la_size_sect);
+	la_size_changed = (la_size_sect != mdev->ldev->md.la_size_sect);
 
 	md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev)
 		|| prev_size != mdev->ldev->md.md_size_sect;
@@ -886,9 +900,9 @@ enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds
 		drbd_md_mark_dirty(mdev);
 	}
 
-	if (size > la_size)
+	if (size > la_size_sect)
 		rv = grew;
-	if (size < la_size)
+	if (size < la_size_sect)
 		rv = shrunk;
 out:
 	lc_unlock(mdev->act_log);
@@ -903,7 +917,7 @@ drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
 		sector_t u_size, int assume_peer_has_space)
 {
 	sector_t p_size = mdev->p_size;  /* partner's disk size. */
-	sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
+	sector_t la_size_sect = bdev->md.la_size_sect; /* last agreed size. */
 	sector_t m_size; /* my size */
 	sector_t size = 0;
 
@@ -917,8 +931,8 @@ drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
 	if (p_size && m_size) {
 		size = min_t(sector_t, p_size, m_size);
 	} else {
-		if (la_size) {
-			size = la_size;
+		if (la_size_sect) {
+			size = la_size_sect;
 			if (m_size && m_size < size)
 				size = m_size;
 			if (p_size && p_size < size)
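
The la_size to la_size_sect change in drbd_new_dev_size() is a pure rename for unit clarity; the negotiation itself is unchanged: if both the peer's size (p_size) and our size (m_size) are known, agree on the minimum, otherwise fall back to the last agreed size, clipped by whichever size is known. A simplified sketch of just that decision, mirroring the visible part of the hunk (types and surrounding context reduced for illustration):

    /* Sketch of the size negotiation; sector_t simplified to a typedef,
     * surrounding context (assume_peer_has_space etc.) omitted. */
    typedef unsigned long long sector_t;

    static sector_t negotiated_size(sector_t p_size, sector_t m_size,
                                    sector_t la_size_sect)
    {
            sector_t size = 0;

            if (p_size && m_size) {
                    /* both sizes known: agree on the smaller one */
                    size = p_size < m_size ? p_size : m_size;
            } else if (la_size_sect) {
                    /* peer unknown: start from the last agreed size,
                     * but never exceed a size we do know about */
                    size = la_size_sect;
                    if (m_size && m_size < size)
                            size = m_size;
                    if (p_size && p_size < size)
                            size = p_size;
            }
            return size;
    }
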
@@ -1127,15 +1141,32 @@ static bool should_set_defaults(struct genl_info *info)
 	return 0 != (flags & DRBD_GENL_F_SET_DEFAULTS);
 }
 
-static void enforce_disk_conf_limits(struct disk_conf *dc)
+static unsigned int drbd_al_extents_max(struct drbd_backing_dev *bdev)
 {
-	if (dc->al_extents < DRBD_AL_EXTENTS_MIN)
-		dc->al_extents = DRBD_AL_EXTENTS_MIN;
-	if (dc->al_extents > DRBD_AL_EXTENTS_MAX)
-		dc->al_extents = DRBD_AL_EXTENTS_MAX;
+	/* This is limited by 16 bit "slot" numbers,
+	 * and by available on-disk context storage.
+	 *
+	 * Also (u16)~0 is special (denotes a "free" extent).
+	 *
+	 * One transaction occupies one 4kB on-disk block,
+	 * we have n such blocks in the on disk ring buffer,
+	 * the "current" transaction may fail (n-1),
+	 * and there are 919 slot numbers of context information per transaction.
+	 *
+	 * 72 transaction blocks amounts to more than 2**16 context slots,
+	 * so cap there first.
+	 */
+	const unsigned int max_al_nr = DRBD_AL_EXTENTS_MAX;
+	const unsigned int sufficient_on_disk =
+		(max_al_nr + AL_CONTEXT_PER_TRANSACTION - 1)
+		/ AL_CONTEXT_PER_TRANSACTION;
+
+	unsigned int al_size_4k = bdev->md.al_size_4k;
+
+	if (al_size_4k > sufficient_on_disk)
+		return max_al_nr;
 
-	if (dc->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
-		dc->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
+	return (al_size_4k - 1) * AL_CONTEXT_PER_TRANSACTION;
 }
 
 int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
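
The numbers in the new comment check out: with 919 context slots per 4kB transaction block, 72 blocks hold 72 * 919 = 66168 slots, already more than the 2**16 = 65536 values a 16-bit slot number can take. A quick sketch of the capping arithmetic follows; DRBD_AL_EXTENTS_MAX is assumed to be 65534 here (since (u16)~0 is reserved), the authoritative value lives in the drbd headers:

    /* Sketch verifying the cap arithmetic of drbd_al_extents_max();
     * constant values are assumptions for illustration. */
    #include <stdio.h>

    #define AL_CONTEXT_PER_TRANSACTION 919
    #define DRBD_AL_EXTENTS_MAX 65534 /* assumed: (u16)~0 denotes "free" */

    int main(void)
    {
            unsigned int sufficient_on_disk =
                    (DRBD_AL_EXTENTS_MAX + AL_CONTEXT_PER_TRANSACTION - 1)
                    / AL_CONTEXT_PER_TRANSACTION;
            unsigned int al_size_4k = 16; /* hypothetical 16-block AL ring */

            printf("sufficient_on_disk = %u\n", sufficient_on_disk); /* 72 */
            /* one block is the in-flight transaction, so (n-1) are usable: */
            printf("max extents = %u\n",
                   (al_size_4k - 1) * AL_CONTEXT_PER_TRANSACTION); /* 13785 */
            return 0;
    }
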
@@ -1182,7 +1213,13 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
 	if (!expect(new_disk_conf->resync_rate >= 1))
 		new_disk_conf->resync_rate = 1;
 
-	enforce_disk_conf_limits(new_disk_conf);
+	if (new_disk_conf->al_extents < DRBD_AL_EXTENTS_MIN)
+		new_disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
+	if (new_disk_conf->al_extents > drbd_al_extents_max(mdev->ldev))
+		new_disk_conf->al_extents = drbd_al_extents_max(mdev->ldev);
+
+	if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
+		new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
 
 	fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
 	if (fifo_size != mdev->rs_plan_s->size) {
@@ -1330,7 +1367,8 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
 		goto fail;
 	}
 
-	enforce_disk_conf_limits(new_disk_conf);
+	if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
+		new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
 
 	new_plan = fifo_alloc((new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ);
 	if (!new_plan) {
@@ -1343,6 +1381,12 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
 		goto fail;
 	}
 
+	write_lock_irq(&global_state_lock);
+	retcode = drbd_resync_after_valid(mdev, new_disk_conf->resync_after);
+	write_unlock_irq(&global_state_lock);
+	if (retcode != NO_ERROR)
+		goto fail;
+
 	rcu_read_lock();
 	nc = rcu_dereference(mdev->tconn->net_conf);
 	if (nc) {
@@ -1399,8 +1443,16 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
 		goto fail;
 	}
 
-	/* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */
-	drbd_md_set_sector_offsets(mdev, nbc);
+	/* Read our meta data super block early.
+	 * This also sets other on-disk offsets. */
+	retcode = drbd_md_read(mdev, nbc);
+	if (retcode != NO_ERROR)
+		goto fail;
+
+	if (new_disk_conf->al_extents < DRBD_AL_EXTENTS_MIN)
+		new_disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
+	if (new_disk_conf->al_extents > drbd_al_extents_max(nbc))
+		new_disk_conf->al_extents = drbd_al_extents_max(nbc);
 
 	if (drbd_get_max_capacity(nbc) < new_disk_conf->disk_size) {
 		dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
@@ -1416,7 +1468,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
 		min_md_device_sectors = (2<<10);
 	} else {
 		max_possible_sectors = DRBD_MAX_SECTORS;
-		min_md_device_sectors = MD_RESERVED_SECT * (new_disk_conf->meta_dev_idx + 1);
+		min_md_device_sectors = MD_128MB_SECT * (new_disk_conf->meta_dev_idx + 1);
 	}
 
 	if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
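
MD_RESERVED_SECT to MD_128MB_SECT is a rename that encodes the value in the name: 128MB reserved per indexed meta data slot, expressed in 512-byte sectors. Index n occupies slots 0..n, hence the (meta_dev_idx + 1) factor in the minimum size check. A sketch with the macro value derived from its name (an assumption, not copied from the headers):

    /* Sketch of the v07-style indexed meta data minimum size check. */
    #include <stdio.h>

    #define MD_128MB_SECT (128 * 1024 * 1024 / 512) /* 262144 sectors, assumed */

    int main(void)
    {
            int meta_dev_idx = 3; /* hypothetical index */
            unsigned long long min_md_device_sectors =
                    (unsigned long long)MD_128MB_SECT * (meta_dev_idx + 1);

            /* slots 0..3: 4 * 128MB = 512MB */
            printf("%llu sectors\n", min_md_device_sectors); /* 1048576 */
            return 0;
    }
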
@@ -1467,8 +1519,6 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
 	if (!get_ldev_if_state(mdev, D_ATTACHING))
 		goto force_diskless;
 
-	drbd_md_set_sector_offsets(mdev, nbc);
-
 	if (!mdev->bitmap) {
 		if (drbd_bm_init(mdev)) {
 			retcode = ERR_NOMEM;
@@ -1476,10 +1526,6 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
 		}
 	}
 
-	retcode = drbd_md_read(mdev, nbc);
-	if (retcode != NO_ERROR)
-		goto force_diskless_dec;
-
 	if (mdev->state.conn < C_CONNECTED &&
 	    mdev->state.role == R_PRIMARY &&
 	    (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
@@ -2158,8 +2204,11 @@ static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool for
 		return SS_SUCCESS;
 	case SS_PRIMARY_NOP:
 		/* Our state checking code wants to see the peer outdated. */
-		rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
-						pdsk, D_OUTDATED), CS_VERBOSE);
+		rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING, pdsk, D_OUTDATED), 0);
+
+		if (rv == SS_OUTDATE_WO_CONN) /* lost connection before graceful disconnect succeeded */
+			rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_VERBOSE);
+
 		break;
 	case SS_CW_FAILED_BY_PEER:
 		/* The peer probably wants to see us outdated. */
@@ -2406,22 +2455,19 @@ int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
 	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
 	drbd_flush_workqueue(mdev);
 
-	retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);
-
-	if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION)
-		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
-
-	while (retcode == SS_NEED_CONNECTION) {
-		spin_lock_irq(&mdev->tconn->req_lock);
-		if (mdev->state.conn < C_CONNECTED)
-			retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL);
-		spin_unlock_irq(&mdev->tconn->req_lock);
-
-		if (retcode != SS_NEED_CONNECTION)
-			break;
-
+	/* If we happen to be C_STANDALONE R_SECONDARY, just change to
+	 * D_INCONSISTENT, and set all bits in the bitmap.  Otherwise,
+	 * try to start a resync handshake as sync target for full sync.
+	 */
+	if (mdev->state.conn == C_STANDALONE && mdev->state.role == R_SECONDARY) {
+		retcode = drbd_request_state(mdev, NS(disk, D_INCONSISTENT));
+		if (retcode >= SS_SUCCESS) {
+			if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
+				"set_n_write from invalidate", BM_LOCKED_MASK))
+				retcode = ERR_IO_MD_DISK;
+		}
+	} else
 		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
-	}
 	drbd_resume_io(mdev);
 
 out:
@@ -2475,21 +2521,22 @@ int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
 	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
 	drbd_flush_workqueue(mdev);
 
-	retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED);
-	if (retcode < SS_SUCCESS) {
-		if (retcode == SS_NEED_CONNECTION && mdev->state.role == R_PRIMARY) {
-			/* The peer will get a resync upon connect anyways.
-			 * Just make that into a full resync. */
-			retcode = drbd_request_state(mdev, NS(pdsk, D_INCONSISTENT));
-			if (retcode >= SS_SUCCESS) {
-				if (drbd_bitmap_io(mdev, &drbd_bmio_set_susp_al,
-					"set_n_write from invalidate_peer",
-					BM_LOCKED_SET_ALLOWED))
-					retcode = ERR_IO_MD_DISK;
-			}
-		} else
-			retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S));
-	}
+	/* If we happen to be C_STANDALONE R_PRIMARY, just set all bits
+	 * in the bitmap.  Otherwise, try to start a resync handshake
+	 * as sync source for full sync.
+	 */
+	if (mdev->state.conn == C_STANDALONE && mdev->state.role == R_PRIMARY) {
+		/* The peer will get a resync upon connect anyways. Just make that
+		   into a full resync. */
+		retcode = drbd_request_state(mdev, NS(pdsk, D_INCONSISTENT));
+		if (retcode >= SS_SUCCESS) {
+			if (drbd_bitmap_io(mdev, &drbd_bmio_set_susp_al,
+				"set_n_write from invalidate_peer",
+				BM_LOCKED_SET_ALLOWED))
+				retcode = ERR_IO_MD_DISK;
+		}
+	} else
+		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S));
 	drbd_resume_io(mdev);
 
 out:
@@ -3162,6 +3209,7 @@ static enum drbd_ret_code adm_delete_minor(struct drbd_conf *mdev)
 			    CS_VERBOSE + CS_WAIT_COMPLETE);
 	idr_remove(&mdev->tconn->volumes, mdev->vnr);
 	idr_remove(&minors, mdev_to_minor(mdev));
+	destroy_workqueue(mdev->submit.wq);
 	del_gendisk(mdev->vdisk);
 	synchronize_rcu();
 	kref_put(&mdev->kref, &drbd_minor_destroy);
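
The added destroy_workqueue() presumably pairs with a per-device submit workqueue allocated when the minor is created (elsewhere in this series); destroying it before del_gendisk() ensures queued submit work is flushed before the disk goes away. A minimal sketch of that create/destroy pairing, with hypothetical names rather than drbd's actual fields:

    /* Sketch of a per-device workqueue lifecycle; names are hypothetical. */
    #include <linux/errno.h>
    #include <linux/workqueue.h>

    struct my_dev {
            struct workqueue_struct *submit_wq;
    };

    static int my_dev_create(struct my_dev *d, unsigned int minor)
    {
            /* WQ_MEM_RECLAIM: the queue may be needed to make write-out progress */
            d->submit_wq = alloc_workqueue("my_submit%u", WQ_MEM_RECLAIM, 0, minor);
            return d->submit_wq ? 0 : -ENOMEM;
    }

    static void my_dev_destroy(struct my_dev *d)
    {
            /* drains all pending work items, then frees the workqueue */
            destroy_workqueue(d->submit_wq);
    }
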