Diffstat (limited to 'drivers/block/drbd/drbd_receiver.c')
 drivers/block/drbd/drbd_receiver.c | 608
 1 file changed, 383 insertions(+), 225 deletions(-)
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 8e68be939deb..fe1564c7d8b6 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -277,7 +277,7 @@ static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net)
 	atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
 	int i;
 
-	if (drbd_pp_vacant > (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE)*minor_count)
+	if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE)*minor_count)
 		i = page_chain_free(page);
 	else {
 		struct page *tmp;
@@ -319,7 +319,7 @@ struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
 	struct page *page;
 	unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT;
 
-	if (FAULT_ACTIVE(mdev, DRBD_FAULT_AL_EE))
+	if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))
 		return NULL;
 
 	e = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
@@ -725,16 +725,16 @@ static int drbd_socket_okay(struct drbd_conf *mdev, struct socket **sock)
 	char tb[4];
 
 	if (!*sock)
-		return FALSE;
+		return false;
 
 	rr = drbd_recv_short(mdev, *sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);
 
 	if (rr > 0 || rr == -EAGAIN) {
-		return TRUE;
+		return true;
 	} else {
 		sock_release(*sock);
 		*sock = NULL;
-		return FALSE;
+		return false;
 	}
 }
 
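Note: drbd_socket_okay() decides whether a cached socket from an earlier connect attempt is still usable by peeking at the receive queue without consuming anything: a positive return (data queued) or -EAGAIN (idle but open) means the peer is alive, while 0 (orderly shutdown) or any other error condemns the socket. A minimal user-space sketch of the same probe, assuming plain POSIX sockets rather than the in-kernel drbd_recv_short():

	#include <errno.h>
	#include <stdbool.h>
	#include <sys/types.h>
	#include <sys/socket.h>

	/* Peek up to 4 bytes without consuming them: >0 means data is
	 * queued, -1/EAGAIN means idle but alive, 0 means the peer
	 * performed an orderly shutdown. */
	static bool socket_okay(int fd)
	{
		char tb[4];
		ssize_t rr = recv(fd, tb, sizeof(tb), MSG_DONTWAIT | MSG_PEEK);

		return rr > 0 || (rr < 0 && errno == EAGAIN);
	}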
@@ -768,8 +768,7 @@ static int drbd_connect(struct drbd_conf *mdev)
 		if (s || ++try >= 3)
 			break;
 		/* give the other side time to call bind() & listen() */
-		__set_current_state(TASK_INTERRUPTIBLE);
-		schedule_timeout(HZ / 10);
+		schedule_timeout_interruptible(HZ / 10);
 	}
 
 	if (s) {
@@ -788,8 +787,7 @@ static int drbd_connect(struct drbd_conf *mdev)
 		}
 
 		if (sock && msock) {
-			__set_current_state(TASK_INTERRUPTIBLE);
-			schedule_timeout(HZ / 10);
+			schedule_timeout_interruptible(HZ / 10);
 			ok = drbd_socket_okay(mdev, &sock);
 			ok = drbd_socket_okay(mdev, &msock) && ok;
 			if (ok)
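Note: both hunks above fold the open-coded two-line sleep into schedule_timeout_interruptible(), which behaves identically; in kernels of this era the helper is essentially:

	/* kernel/timer.c (paraphrased): set the task state, then sleep */
	signed long __sched schedule_timeout_interruptible(signed long timeout)
	{
		__set_current_state(TASK_INTERRUPTIBLE);
		return schedule_timeout(timeout);
	}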
@@ -906,7 +904,7 @@ retry:
 		put_ldev(mdev);
 	}
 
-	if (!drbd_send_protocol(mdev))
+	if (drbd_send_protocol(mdev) == -1)
 		return -1;
 	drbd_send_sync_param(mdev, &mdev->sync_conf);
 	drbd_send_sizes(mdev, 0, 0);
@@ -914,6 +912,7 @@ retry:
 	drbd_send_state(mdev);
 	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
 	clear_bit(RESIZE_PENDING, &mdev->flags);
+	mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. */
 
 	return 1;
 
@@ -932,8 +931,9 @@ static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsi
 
 	r = drbd_recv(mdev, h, sizeof(*h));
 	if (unlikely(r != sizeof(*h))) {
-		dev_err(DEV, "short read expecting header on sock: r=%d\n", r);
-		return FALSE;
+		if (!signal_pending(current))
+			dev_warn(DEV, "short read expecting header on sock: r=%d\n", r);
+		return false;
 	}
 
 	if (likely(h->h80.magic == BE_DRBD_MAGIC)) {
@@ -947,11 +947,11 @@ static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsi
 			be32_to_cpu(h->h80.magic),
 			be16_to_cpu(h->h80.command),
 			be16_to_cpu(h->h80.length));
-		return FALSE;
+		return false;
 	}
 	mdev->last_received = jiffies;
 
-	return TRUE;
+	return true;
 }
 
 static void drbd_flush(struct drbd_conf *mdev)
@@ -1074,6 +1074,16 @@ void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo)
  * @mdev:	DRBD device.
  * @e:		epoch entry
  * @rw:		flag field, see bio->bi_rw
+ *
+ * May spread the pages to multiple bios,
+ * depending on bio_add_page restrictions.
+ *
+ * Returns 0 if all bios have been submitted,
+ * -ENOMEM if we could not allocate enough bios,
+ * -ENOSPC (any better suggestion?) if we have not been able to bio_add_page a
+ *  single page to an empty bio (which should never happen and likely indicates
+ *  that the lower level IO stack is in some way broken). This has been observed
+ *  on certain Xen deployments.
 */
 /* TODO allocate from our own bio_set. */
 int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
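Note: the callers updated later in this patch deliberately do not distinguish the two error codes; any nonzero return from drbd_submit_ee() tears the connection down. The recurring caller pattern (taken verbatim from the recv_resync_read() hunk further below):

	if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0)
		return true;

	/* don't care for the reason here */
	dev_err(DEV, "submit failed, triggering re-connect\n");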
@@ -1086,6 +1096,7 @@ int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
 	unsigned ds = e->size;
 	unsigned n_bios = 0;
 	unsigned nr_pages = (ds + PAGE_SIZE -1) >> PAGE_SHIFT;
+	int err = -ENOMEM;
 
 	/* In most cases, we will only need one bio.  But in case the lower
 	 * level restrictions happen to be different at this offset on this
@@ -1111,8 +1122,17 @@ next_bio:
 	page_chain_for_each(page) {
 		unsigned len = min_t(unsigned, ds, PAGE_SIZE);
 		if (!bio_add_page(bio, page, len, 0)) {
-			/* a single page must always be possible! */
-			BUG_ON(bio->bi_vcnt == 0);
+			/* A single page must always be possible!
+			 * But in case it fails anyways,
+			 * we deal with it, and complain (below). */
+			if (bio->bi_vcnt == 0) {
+				dev_err(DEV,
+					"bio_add_page failed for len=%u, "
+					"bi_vcnt=0 (bi_sector=%llu)\n",
+					len, (unsigned long long)bio->bi_sector);
+				err = -ENOSPC;
+				goto fail;
+			}
 			goto next_bio;
 		}
 		ds -= len;
@@ -1138,7 +1158,7 @@ fail:
 		bios = bios->bi_next;
 		bio_put(bio);
 	}
-	return -ENOMEM;
+	return err;
 }
 
 static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
@@ -1160,7 +1180,7 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
 	switch (mdev->write_ordering) {
 	case WO_none:
 		if (rv == FE_RECYCLED)
-			return TRUE;
+			return true;
 
 		/* receiver context, in the writeout path of the other node.
 		 * avoid potential distributed deadlock */
@@ -1188,10 +1208,10 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
 		D_ASSERT(atomic_read(&epoch->active) == 0);
 		D_ASSERT(epoch->flags == 0);
 
-		return TRUE;
+		return true;
 	default:
 		dev_err(DEV, "Strangeness in mdev->write_ordering %d\n", mdev->write_ordering);
-		return FALSE;
+		return false;
 	}
 
 	epoch->flags = 0;
@@ -1209,7 +1229,7 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
 	}
 	spin_unlock(&mdev->epoch_lock);
 
-	return TRUE;
+	return true;
 }
 
 /* used from receive_RSDataReply (recv_resync_read)
@@ -1231,21 +1251,25 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __
 	if (dgs) {
 		rr = drbd_recv(mdev, dig_in, dgs);
 		if (rr != dgs) {
-			dev_warn(DEV, "short read receiving data digest: read %d expected %d\n",
-			     rr, dgs);
+			if (!signal_pending(current))
+				dev_warn(DEV,
+					"short read receiving data digest: read %d expected %d\n",
+					rr, dgs);
 			return NULL;
 		}
 	}
 
 	data_size -= dgs;
 
+	ERR_IF(data_size == 0) return NULL;
 	ERR_IF(data_size & 0x1ff) return NULL;
-	ERR_IF(data_size > DRBD_MAX_SEGMENT_SIZE) return NULL;
+	ERR_IF(data_size > DRBD_MAX_BIO_SIZE) return NULL;
 
 	/* even though we trust out peer,
 	 * we sometimes have to double check. */
 	if (sector + (data_size>>9) > capacity) {
-		dev_err(DEV, "capacity: %llus < sector: %llus + size: %u\n",
+		dev_err(DEV, "request from peer beyond end of local disk: "
+			"capacity: %llus < sector: %llus + size: %u\n",
 			(unsigned long long)capacity,
 			(unsigned long long)sector, data_size);
 		return NULL;
@@ -1264,15 +1288,16 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __
 		unsigned len = min_t(int, ds, PAGE_SIZE);
 		data = kmap(page);
 		rr = drbd_recv(mdev, data, len);
-		if (FAULT_ACTIVE(mdev, DRBD_FAULT_RECEIVE)) {
+		if (drbd_insert_fault(mdev, DRBD_FAULT_RECEIVE)) {
 			dev_err(DEV, "Fault injection: Corrupting data on receive\n");
 			data[0] = data[0] ^ (unsigned long)-1;
 		}
 		kunmap(page);
 		if (rr != len) {
 			drbd_free_ee(mdev, e);
-			dev_warn(DEV, "short read receiving data: read %d expected %d\n",
-			rr, len);
+			if (!signal_pending(current))
+				dev_warn(DEV, "short read receiving data: read %d expected %d\n",
+				rr, len);
 			return NULL;
 		}
 		ds -= rr;
@@ -1281,7 +1306,8 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __
 	if (dgs) {
 		drbd_csum_ee(mdev, mdev->integrity_r_tfm, e, dig_vv);
 		if (memcmp(dig_in, dig_vv, dgs)) {
-			dev_err(DEV, "Digest integrity check FAILED.\n");
+			dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n",
+				(unsigned long long)sector, data_size);
 			drbd_bcast_ee(mdev, "digest failed",
 					dgs, dig_in, dig_vv, e);
 			drbd_free_ee(mdev, e);
@@ -1302,7 +1328,7 @@ static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
 	void *data;
 
 	if (!data_size)
-		return TRUE;
+		return true;
 
 	page = drbd_pp_alloc(mdev, 1, 1);
 
@@ -1311,8 +1337,10 @@ static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
 		rr = drbd_recv(mdev, data, min_t(int, data_size, PAGE_SIZE));
 		if (rr != min_t(int, data_size, PAGE_SIZE)) {
 			rv = 0;
-			dev_warn(DEV, "short read receiving data: read %d expected %d\n",
-			rr, min_t(int, data_size, PAGE_SIZE));
+			if (!signal_pending(current))
+				dev_warn(DEV,
+					"short read receiving data: read %d expected %d\n",
+					rr, min_t(int, data_size, PAGE_SIZE));
 			break;
 		}
 		data_size -= rr;
@@ -1337,8 +1365,10 @@ static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
 	if (dgs) {
 		rr = drbd_recv(mdev, dig_in, dgs);
 		if (rr != dgs) {
-			dev_warn(DEV, "short read receiving data reply digest: read %d expected %d\n",
-			     rr, dgs);
+			if (!signal_pending(current))
+				dev_warn(DEV,
+					"short read receiving data reply digest: read %d expected %d\n",
+					rr, dgs);
 			return 0;
 		}
 	}
@@ -1359,9 +1389,10 @@ static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
 			     expect);
 		kunmap(bvec->bv_page);
 		if (rr != expect) {
-			dev_warn(DEV, "short read receiving data reply: "
-			     "read %d expected %d\n",
-			     rr, expect);
+			if (!signal_pending(current))
+				dev_warn(DEV, "short read receiving data reply: "
+					"read %d expected %d\n",
+					rr, expect);
 			return 0;
 		}
 		data_size -= rr;
@@ -1425,11 +1456,10 @@ static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_si
 
 	atomic_add(data_size >> 9, &mdev->rs_sect_ev);
 	if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0)
-		return TRUE;
+		return true;
 
-	/* drbd_submit_ee currently fails for one reason only:
-	 * not being able to allocate enough bios.
-	 * Is dropping the connection going to help? */
+	/* don't care for the reason here */
+	dev_err(DEV, "submit failed, triggering re-connect\n");
 	spin_lock_irq(&mdev->req_lock);
 	list_del(&e->w.list);
 	spin_unlock_irq(&mdev->req_lock);
@@ -1437,7 +1467,7 @@ static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_si
 	drbd_free_ee(mdev, e);
fail:
 	put_ldev(mdev);
-	return FALSE;
+	return false;
 }
 
 static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
@@ -1454,7 +1484,7 @@ static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
 	spin_unlock_irq(&mdev->req_lock);
 	if (unlikely(!req)) {
 		dev_err(DEV, "Got a corrupt block_id/sector pair(1).\n");
-		return FALSE;
+		return false;
 	}
 
 	/* hlist_del(&req->colision) is done in _req_may_be_done, to avoid
@@ -1611,15 +1641,15 @@ static int drbd_wait_peer_seq(struct drbd_conf *mdev, const u32 packet_seq)
 	return ret;
 }
 
-static unsigned long write_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
+/* see also bio_flags_to_wire()
+ * DRBD_REQ_*, because we need to semantically map the flags to data packet
+ * flags and back. We may replicate to other kernel versions. */
+static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
 {
-	if (mdev->agreed_pro_version >= 95)
-		return  (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
-			(dpf & DP_FUA ? REQ_FUA : 0) |
-			(dpf & DP_FLUSH ? REQ_FUA : 0) |
-			(dpf & DP_DISCARD ? REQ_DISCARD : 0);
-	else
-		return dpf & DP_RW_SYNC ? REQ_SYNC : 0;
+	return  (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
+		(dpf & DP_FUA ? REQ_FUA : 0) |
+		(dpf & DP_FLUSH ? REQ_FLUSH : 0) |
+		(dpf & DP_DISCARD ? REQ_DISCARD : 0);
 }
 
 /* mirrored write */
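Note: the new comment points at bio_flags_to_wire() as the sending-side counterpart; that function is not part of this hunk. A plausible sketch of its shape, assuming it simply inverts the mapping above (the DP_* and REQ_* names are from this patch; this is not the verbatim in-tree code):

	/* sketch only: map bio flags to wire flags before sending */
	static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
	{
		return  (bi_rw & REQ_SYNC    ? DP_RW_SYNC : 0) |
			(bi_rw & REQ_FUA     ? DP_FUA     : 0) |
			(bi_rw & REQ_FLUSH   ? DP_FLUSH   : 0) |
			(bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
	}

The hunk also fixes a real bug: the old receive-side mapping translated DP_FLUSH to REQ_FUA instead of REQ_FLUSH.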
@@ -1632,9 +1662,6 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 	u32 dp_flags;
 
 	if (!get_ldev(mdev)) {
-		if (__ratelimit(&drbd_ratelimit_state))
-			dev_err(DEV, "Can not write mirrored data block "
-			    "to local disk.\n");
 		spin_lock(&mdev->peer_seq_lock);
 		if (mdev->peer_seq+1 == be32_to_cpu(p->seq_num))
 			mdev->peer_seq++;
@@ -1654,23 +1681,23 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 	e = read_in_block(mdev, p->block_id, sector, data_size);
 	if (!e) {
 		put_ldev(mdev);
-		return FALSE;
+		return false;
 	}
 
 	e->w.cb = e_end_block;
 
+	dp_flags = be32_to_cpu(p->dp_flags);
+	rw |= wire_flags_to_bio(mdev, dp_flags);
+
+	if (dp_flags & DP_MAY_SET_IN_SYNC)
+		e->flags |= EE_MAY_SET_IN_SYNC;
+
 	spin_lock(&mdev->epoch_lock);
 	e->epoch = mdev->current_epoch;
 	atomic_inc(&e->epoch->epoch_size);
 	atomic_inc(&e->epoch->active);
 	spin_unlock(&mdev->epoch_lock);
 
-	dp_flags = be32_to_cpu(p->dp_flags);
-	rw |= write_flags_to_bio(mdev, dp_flags);
-
-	if (dp_flags & DP_MAY_SET_IN_SYNC)
-		e->flags |= EE_MAY_SET_IN_SYNC;
-
 	/* I'm the receiver, I do hold a net_cnt reference. */
 	if (!mdev->net_conf->two_primaries) {
 		spin_lock_irq(&mdev->req_lock);
@@ -1773,7 +1800,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 				put_ldev(mdev);
 				wake_asender(mdev);
 				finish_wait(&mdev->misc_wait, &wait);
-				return TRUE;
+				return true;
 			}
 
 			if (signal_pending(current)) {
@@ -1829,11 +1856,10 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 	}
 
 	if (drbd_submit_ee(mdev, e, rw, DRBD_FAULT_DT_WR) == 0)
-		return TRUE;
+		return true;
 
-	/* drbd_submit_ee currently fails for one reason only:
-	 * not being able to allocate enough bios.
-	 * Is dropping the connection going to help? */
+	/* don't care for the reason here */
+	dev_err(DEV, "submit failed, triggering re-connect\n");
 	spin_lock_irq(&mdev->req_lock);
 	list_del(&e->w.list);
 	hlist_del_init(&e->colision);
@@ -1842,12 +1868,10 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 		drbd_al_complete_io(mdev, e->sector);
 
out_interrupted:
-	/* yes, the epoch_size now is imbalanced.
-	 * but we drop the connection anyways, so we don't have a chance to
-	 * receive a barrier... atomic_inc(&mdev->epoch_size); */
+	drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + EV_CLEANUP);
 	put_ldev(mdev);
 	drbd_free_ee(mdev, e);
-	return FALSE;
+	return false;
 }
 
 /* We may throttle resync, if the lower device seems to be busy,
@@ -1861,10 +1885,11 @@ out_interrupted:
  * The current sync rate used here uses only the most recent two step marks,
 * to have a short time average so we can react faster.
 */
-int drbd_rs_should_slow_down(struct drbd_conf *mdev)
+int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
 {
 	struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk;
 	unsigned long db, dt, dbdt;
+	struct lc_element *tmp;
 	int curr_events;
 	int throttle = 0;
 
@@ -1872,9 +1897,22 @@ int drbd_rs_should_slow_down(struct drbd_conf *mdev)
 	if (mdev->sync_conf.c_min_rate == 0)
 		return 0;
 
+	spin_lock_irq(&mdev->al_lock);
+	tmp = lc_find(mdev->resync, BM_SECT_TO_EXT(sector));
+	if (tmp) {
+		struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
+		if (test_bit(BME_PRIORITY, &bm_ext->flags)) {
+			spin_unlock_irq(&mdev->al_lock);
+			return 0;
+		}
+		/* Do not slow down if app IO is already waiting for this extent */
+	}
+	spin_unlock_irq(&mdev->al_lock);
+
 	curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
 		      (int)part_stat_read(&disk->part0, sectors[1]) -
 			atomic_read(&mdev->rs_sect_ev);
+
 	if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) {
 		unsigned long rs_left;
 		int i;
@@ -1883,8 +1921,12 @@ int drbd_rs_should_slow_down(struct drbd_conf *mdev)
 
 		/* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
 		 * approx. */
-		i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-2) % DRBD_SYNC_MARKS;
-		rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;
+		i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;
+
+		if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
+			rs_left = mdev->ov_left;
+		else
+			rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;
 
 		dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ;
 		if (!dt)
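Note: with the mark index corrected to the most recent mark (DRBD_SYNC_MARKS-1 instead of -2) and rs_left now taken from ov_left during online verify, the rest of the function (not shown in this hunk) can compute a meaningful short-term rate against c_min_rate. A sketch of that tail, assuming it follows the usual drbd pattern (db, dt, dbdt, i, rs_left and throttle are the locals declared in the hunks above; Bit2KB is drbd's bit-to-kilobyte macro):

	/* sketch: resync progress since the chosen mark, per second */
	dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ;
	if (!dt)
		dt++;				/* avoid division by zero */
	db = mdev->rs_mark_left[i] - rs_left;	/* bits resynced since the mark */
	dbdt = Bit2KB(db/dt);			/* convert to kB/s */

	if (dbdt > mdev->sync_conf.c_min_rate)
		throttle = 1;			/* resync is fast enough; yield to app IO */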
@@ -1912,15 +1954,15 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un
 	sector = be64_to_cpu(p->sector);
 	size = be32_to_cpu(p->blksize);
 
-	if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) {
+	if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
 		dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
 				(unsigned long long)sector, size);
-		return FALSE;
+		return false;
 	}
 	if (sector + (size>>9) > capacity) {
 		dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
 				(unsigned long long)sector, size);
-		return FALSE;
+		return false;
 	}
 
 	if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
@@ -1957,7 +1999,7 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un
 	e = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_NOIO);
 	if (!e) {
 		put_ldev(mdev);
-		return FALSE;
+		return false;
 	}
 
 	switch (cmd) {
@@ -1970,6 +2012,8 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un
 	case P_RS_DATA_REQUEST:
 		e->w.cb = w_e_end_rsdata_req;
 		fault_type = DRBD_FAULT_RS_RD;
+		/* used in the sector offset progress display */
+		mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
 		break;
 
 	case P_OV_REPLY:
@@ -1991,7 +2035,11 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un
 		if (cmd == P_CSUM_RS_REQUEST) {
 			D_ASSERT(mdev->agreed_pro_version >= 89);
 			e->w.cb = w_e_end_csum_rs_req;
+			/* used in the sector offset progress display */
+			mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
 		} else if (cmd == P_OV_REPLY) {
+			/* track progress, we may need to throttle */
+			atomic_add(size >> 9, &mdev->rs_sect_in);
 			e->w.cb = w_e_end_ov_reply;
 			dec_rs_pending(mdev);
 			/* drbd_rs_begin_io done when we sent this request,
@@ -2003,9 +2051,16 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un
 	case P_OV_REQUEST:
 		if (mdev->ov_start_sector == ~(sector_t)0 &&
 		    mdev->agreed_pro_version >= 90) {
+			unsigned long now = jiffies;
+			int i;
 			mdev->ov_start_sector = sector;
 			mdev->ov_position = sector;
-			mdev->ov_left = mdev->rs_total - BM_SECT_TO_BIT(sector);
+			mdev->ov_left = drbd_bm_bits(mdev) - BM_SECT_TO_BIT(sector);
+			mdev->rs_total = mdev->ov_left;
+			for (i = 0; i < DRBD_SYNC_MARKS; i++) {
+				mdev->rs_mark_left[i] = mdev->ov_left;
+				mdev->rs_mark_time[i] = now;
+			}
 			dev_info(DEV, "Online Verify start sector: %llu\n",
 					(unsigned long long)sector);
 		}
@@ -2042,9 +2097,9 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un
 	 * we would also throttle its application reads.
 	 * In that case, throttling is done on the SyncTarget only.
 	 */
-	if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev))
-		msleep(100);
-	if (drbd_rs_begin_io(mdev, e->sector))
+	if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev, sector))
+		schedule_timeout_uninterruptible(HZ/10);
+	if (drbd_rs_begin_io(mdev, sector))
 		goto out_free_e;
 
submit_for_resync:
@@ -2057,11 +2112,10 @@ submit:
 	spin_unlock_irq(&mdev->req_lock);
 
 	if (drbd_submit_ee(mdev, e, READ, fault_type) == 0)
-		return TRUE;
+		return true;
 
-	/* drbd_submit_ee currently fails for one reason only:
-	 * not being able to allocate enough bios.
-	 * Is dropping the connection going to help? */
+	/* don't care for the reason here */
+	dev_err(DEV, "submit failed, triggering re-connect\n");
 	spin_lock_irq(&mdev->req_lock);
 	list_del(&e->w.list);
 	spin_unlock_irq(&mdev->req_lock);
@@ -2070,7 +2124,7 @@ submit:
out_free_e:
 	put_ldev(mdev);
 	drbd_free_ee(mdev, e);
-	return FALSE;
+	return false;
 }
 
 static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
@@ -2147,10 +2201,7 @@ static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
 
 static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
 {
-	int self, peer, hg, rv = -100;
-
-	self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
-	peer = mdev->p_uuid[UI_BITMAP] & 1;
+	int hg, rv = -100;
 
 	switch (mdev->net_conf->after_sb_1p) {
 	case ASB_DISCARD_YOUNGER_PRI:
@@ -2177,12 +2228,14 @@ static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
 	case ASB_CALL_HELPER:
 		hg = drbd_asb_recover_0p(mdev);
 		if (hg == -1 && mdev->state.role == R_PRIMARY) {
-			self = drbd_set_role(mdev, R_SECONDARY, 0);
+			enum drbd_state_rv rv2;
+
+			drbd_set_role(mdev, R_SECONDARY, 0);
 			/* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
 			 * we might be here in C_WF_REPORT_PARAMS which is transient.
 			 * we do not need to wait for the after state change work either. */
-			self = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
-			if (self != SS_SUCCESS) {
+			rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
+			if (rv2 != SS_SUCCESS) {
 				drbd_khelper(mdev, "pri-lost-after-sb");
 			} else {
 				dev_warn(DEV, "Successfully gave up primary role.\n");
@@ -2197,10 +2250,7 @@ static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
 
 static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
 {
-	int self, peer, hg, rv = -100;
-
-	self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
-	peer = mdev->p_uuid[UI_BITMAP] & 1;
+	int hg, rv = -100;
 
 	switch (mdev->net_conf->after_sb_2p) {
 	case ASB_DISCARD_YOUNGER_PRI:
@@ -2220,11 +2270,13 @@ static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
 	case ASB_CALL_HELPER:
 		hg = drbd_asb_recover_0p(mdev);
 		if (hg == -1) {
+			enum drbd_state_rv rv2;
+
 			/* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
 			 * we might be here in C_WF_REPORT_PARAMS which is transient.
 			 * we do not need to wait for the after state change work either. */
-			self = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
-			if (self != SS_SUCCESS) {
+			rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
+			if (rv2 != SS_SUCCESS) {
 				drbd_khelper(mdev, "pri-lost-after-sb");
 			} else {
 				dev_warn(DEV, "Successfully gave up primary role.\n");
@@ -2263,6 +2315,8 @@ static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
  -2	C_SYNC_TARGET set BitMap
  -100	after split brain, disconnect
 -1000	unrelated data
+-1091   requires proto 91
+-1096   requires proto 96
 */
 static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
 {
@@ -2292,7 +2346,7 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
 	if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {
 
 		if (mdev->agreed_pro_version < 91)
-			return -1001;
+			return -1091;
 
 		if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
 		    (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
@@ -2313,7 +2367,7 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
 	if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {
 
 		if (mdev->agreed_pro_version < 91)
-			return -1001;
+			return -1091;
 
 		if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
 		    (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
@@ -2358,17 +2412,22 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
 		*rule_nr = 51;
 		peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
 		if (self == peer) {
-			self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
-			peer = mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1);
-			if (self == peer) {
+			if (mdev->agreed_pro_version < 96 ?
+			    (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
+			    (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
+			    peer + UUID_NEW_BM_OFFSET == (mdev->p_uuid[UI_BITMAP] & ~((u64)1))) {
 				/* The last P_SYNC_UUID did not get though. Undo the last start of
 				   resync as sync source modifications of the peer's UUIDs. */
 
 				if (mdev->agreed_pro_version < 91)
-					return -1001;
+					return -1091;
 
 				mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
 				mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];
+
+				dev_info(DEV, "Did not got last syncUUID packet, corrected:\n");
+				drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
+
 				return -1;
 			}
 		}
@@ -2390,20 +2449,20 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
 		*rule_nr = 71;
 		self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
 		if (self == peer) {
-			self = mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1);
-			peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
-			if (self == peer) {
+			if (mdev->agreed_pro_version < 96 ?
+			    (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
+			    (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
+			    self + UUID_NEW_BM_OFFSET == (mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
 				/* The last P_SYNC_UUID did not get though. Undo the last start of
 				   resync as sync source modifications of our UUIDs. */
 
 				if (mdev->agreed_pro_version < 91)
-					return -1001;
+					return -1091;
 
 				_drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
 				_drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);
 
-				dev_info(DEV, "Undid last start of resync:\n");
-
+				dev_info(DEV, "Last syncUUID did not get through, corrected:\n");
 				drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
 					       mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
@@ -2466,8 +2525,8 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
 		dev_alert(DEV, "Unrelated data, aborting!\n");
 		return C_MASK;
 	}
-	if (hg == -1001) {
-		dev_alert(DEV, "To resolve this both sides have to support at least protocol\n");
+	if (hg < -1000) {
+		dev_alert(DEV, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000);
 		return C_MASK;
 	}
 
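Note: the new error codes compose the required protocol version into the return value of drbd_uuid_compare(), so the single check above can report it generically:

	/* -1091 -> proto 91, -1096 -> proto 96 */
	if (hg < -1000) {
		int proto_required = -hg - 1000;
		dev_alert(DEV, "To resolve this both sides have to support at least protocol %d\n",
			  proto_required);
		return C_MASK;
	}

(The in-tree code computes -hg - 1000 inline, as the hunk shows; the local variable here is only for illustration.)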
@@ -2566,7 +2625,8 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
 
 	if (abs(hg) >= 2) {
 		dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
-		if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake"))
+		if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake",
+					BM_LOCKED_SET_ALLOWED))
 			return C_MASK;
 	}
 
@@ -2660,7 +2720,7 @@ static int receive_protocol(struct drbd_conf *mdev, enum drbd_packets cmd, unsig
 		unsigned char *my_alg = mdev->net_conf->integrity_alg;
 
 		if (drbd_recv(mdev, p_integrity_alg, data_size) != data_size)
-			return FALSE;
+			return false;
 
 		p_integrity_alg[SHARED_SECRET_MAX-1] = 0;
 		if (strcmp(p_integrity_alg, my_alg)) {
@@ -2671,11 +2731,11 @@ static int receive_protocol(struct drbd_conf *mdev, enum drbd_packets cmd, unsig
 			my_alg[0] ? my_alg : (unsigned char *)"<not-used>");
 	}
 
-	return TRUE;
+	return true;
 
disconnect:
 	drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
-	return FALSE;
+	return false;
 }
 
 /* helper function
@@ -2707,7 +2767,7 @@ struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
 
 static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int packet_size)
 {
-	int ok = TRUE;
+	int ok = true;
 	struct p_rs_param_95 *p = &mdev->data.rbuf.rs_param_95;
 	unsigned int header_size, data_size, exp_max_sz;
 	struct crypto_hash *verify_tfm = NULL;
@@ -2725,7 +2785,7 @@ static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
 	if (packet_size > exp_max_sz) {
 		dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
 		    packet_size, exp_max_sz);
-		return FALSE;
+		return false;
 	}
 
 	if (apv <= 88) {
@@ -2745,7 +2805,7 @@ static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
 	memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
 
 	if (drbd_recv(mdev, &p->head.payload, header_size) != header_size)
-		return FALSE;
+		return false;
 
 	mdev->sync_conf.rate = be32_to_cpu(p->rate);
 
@@ -2755,11 +2815,11 @@ static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
 			dev_err(DEV, "verify-alg too long, "
 			    "peer wants %u, accepting only %u byte\n",
 					data_size, SHARED_SECRET_MAX);
-			return FALSE;
+			return false;
 		}
 
 		if (drbd_recv(mdev, p->verify_alg, data_size) != data_size)
-			return FALSE;
+			return false;
 
 		/* we expect NUL terminated string */
 		/* but just in case someone tries to be evil */
@@ -2853,7 +2913,7 @@ disconnect:
 	/* but free the verify_tfm again, if csums_tfm did not work out */
 	crypto_free_hash(verify_tfm);
 	drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
-	return FALSE;
+	return false;
 }
 
 static void drbd_setup_order_type(struct drbd_conf *mdev, int peer)
@@ -2879,7 +2939,7 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 {
 	struct p_sizes *p = &mdev->data.rbuf.sizes;
 	enum determine_dev_size dd = unchanged;
-	unsigned int max_seg_s;
+	unsigned int max_bio_size;
 	sector_t p_size, p_usize, my_usize;
 	int ldsc = 0; /* local disk size changed */
 	enum dds_flags ddsf;
@@ -2890,7 +2950,7 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 	if (p_size == 0 && mdev->state.disk == D_DISKLESS) {
 		dev_err(DEV, "some backing storage is needed\n");
 		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
-		return FALSE;
+		return false;
 	}
 
 	/* just store the peer's disk size for now.
@@ -2927,18 +2987,17 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
 			mdev->ldev->dc.disk_size = my_usize;
 			put_ldev(mdev);
-			return FALSE;
+			return false;
 		}
 		put_ldev(mdev);
 	}
-#undef min_not_zero
 
 	ddsf = be16_to_cpu(p->dds_flags);
 	if (get_ldev(mdev)) {
 		dd = drbd_determin_dev_size(mdev, ddsf);
 		put_ldev(mdev);
 		if (dd == dev_size_error)
-			return FALSE;
+			return false;
 		drbd_md_sync(mdev);
 	} else {
 		/* I am diskless, need to accept the peer's size. */
@@ -2952,14 +3011,14 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 	}
 
 	if (mdev->agreed_pro_version < 94)
-		max_seg_s = be32_to_cpu(p->max_segment_size);
+		max_bio_size = be32_to_cpu(p->max_bio_size);
 	else if (mdev->agreed_pro_version == 94)
-		max_seg_s = DRBD_MAX_SIZE_H80_PACKET;
+		max_bio_size = DRBD_MAX_SIZE_H80_PACKET;
 	else /* drbd 8.3.8 onwards */
-		max_seg_s = DRBD_MAX_SEGMENT_SIZE;
+		max_bio_size = DRBD_MAX_BIO_SIZE;
 
-	if (max_seg_s != queue_max_segment_size(mdev->rq_queue))
-		drbd_setup_queue_param(mdev, max_seg_s);
+	if (max_bio_size != queue_max_hw_sectors(mdev->rq_queue) << 9)
+		drbd_setup_queue_param(mdev, max_bio_size);
 
 	drbd_setup_order_type(mdev, be16_to_cpu(p->queue_order_type));
 	put_ldev(mdev);
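Note: queue_max_hw_sectors() reports the limit in 512-byte sectors while max_bio_size is in bytes, hence the << 9 in the comparison above. A one-line restatement of the check, with a hypothetical local introduced only for clarity:

	unsigned int cur_bytes = queue_max_hw_sectors(mdev->rq_queue) << 9; /* sectors -> bytes */
	if (max_bio_size != cur_bytes)
		drbd_setup_queue_param(mdev, max_bio_size);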
@@ -2985,14 +3044,14 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned | |||
2985 | } | 3044 | } |
2986 | } | 3045 | } |
2987 | 3046 | ||
2988 | return TRUE; | 3047 | return true; |
2989 | } | 3048 | } |
2990 | 3049 | ||
2991 | static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) | 3050 | static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) |
2992 | { | 3051 | { |
2993 | struct p_uuids *p = &mdev->data.rbuf.uuids; | 3052 | struct p_uuids *p = &mdev->data.rbuf.uuids; |
2994 | u64 *p_uuid; | 3053 | u64 *p_uuid; |
2995 | int i; | 3054 | int i, updated_uuids = 0; |
2996 | 3055 | ||
2997 | p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO); | 3056 | p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO); |
2998 | 3057 | ||
@@ -3009,7 +3068,7 @@ static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned | |||
3009 | dev_err(DEV, "Can only connect to data with current UUID=%016llX\n", | 3068 | dev_err(DEV, "Can only connect to data with current UUID=%016llX\n", |
3010 | (unsigned long long)mdev->ed_uuid); | 3069 | (unsigned long long)mdev->ed_uuid); |
3011 | drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); | 3070 | drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); |
3012 | return FALSE; | 3071 | return false; |
3013 | } | 3072 | } |
3014 | 3073 | ||
3015 | if (get_ldev(mdev)) { | 3074 | if (get_ldev(mdev)) { |
@@ -3021,19 +3080,21 @@ static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned | |||
3021 | if (skip_initial_sync) { | 3080 | if (skip_initial_sync) { |
3022 | dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n"); | 3081 | dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n"); |
3023 | drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write, | 3082 | drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write, |
3024 | "clear_n_write from receive_uuids"); | 3083 | "clear_n_write from receive_uuids", |
3084 | BM_LOCKED_TEST_ALLOWED); | ||
3025 | _drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]); | 3085 | _drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]); |
3026 | _drbd_uuid_set(mdev, UI_BITMAP, 0); | 3086 | _drbd_uuid_set(mdev, UI_BITMAP, 0); |
3027 | _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE), | 3087 | _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE), |
3028 | CS_VERBOSE, NULL); | 3088 | CS_VERBOSE, NULL); |
3029 | drbd_md_sync(mdev); | 3089 | drbd_md_sync(mdev); |
3090 | updated_uuids = 1; | ||
3030 | } | 3091 | } |
3031 | put_ldev(mdev); | 3092 | put_ldev(mdev); |
3032 | } else if (mdev->state.disk < D_INCONSISTENT && | 3093 | } else if (mdev->state.disk < D_INCONSISTENT && |
3033 | mdev->state.role == R_PRIMARY) { | 3094 | mdev->state.role == R_PRIMARY) { |
3034 | /* I am a diskless primary, the peer just created a new current UUID | 3095 | /* I am a diskless primary, the peer just created a new current UUID |
3035 | for me. */ | 3096 | for me. */ |
3036 | drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]); | 3097 | updated_uuids = drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]); |
3037 | } | 3098 | } |
3038 | 3099 | ||
3039 | /* Before we test for the disk state, we should wait until an eventually | 3100 | /* Before we test for the disk state, we should wait until an eventually |
@@ -3042,9 +3103,12 @@ static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned | |||
3042 | new disk state... */ | 3103 | new disk state... */ |
3043 | wait_event(mdev->misc_wait, !test_bit(CLUSTER_ST_CHANGE, &mdev->flags)); | 3104 | wait_event(mdev->misc_wait, !test_bit(CLUSTER_ST_CHANGE, &mdev->flags)); |
3044 | if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT) | 3105 | if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT) |
3045 | drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]); | 3106 | updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]); |
3046 | 3107 | ||
3047 | return TRUE; | 3108 | if (updated_uuids) |
3109 | drbd_print_uuids(mdev, "receiver updated UUIDs to"); | ||
3110 | |||
3111 | return true; | ||
3048 | } | 3112 | } |
3049 | 3113 | ||
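
receive_uuids() now records in updated_uuids whether any of its paths actually changed a UUID and emits a single drbd_print_uuids() at the end, instead of logging at each site. A hedged sketch of that accumulate-then-report shape; set_uuid() is a stand-in for drbd_set_ed_uuid(), which this series apparently changes to return whether the value changed:

    #include <stdio.h>

    /* Stand-in for drbd_set_ed_uuid(): returns 1 if the stored value changed. */
    static int set_uuid(unsigned long long *slot, unsigned long long v)
    {
        if (*slot == v)
            return 0;
        *slot = v;
        return 1;
    }

    int main(void)
    {
        unsigned long long ed_uuid = 0x1111;
        int updated_uuids = 0;

        updated_uuids |= set_uuid(&ed_uuid, 0x2222);  /* first path: changes */
        updated_uuids |= set_uuid(&ed_uuid, 0x2222);  /* second path: no-op */

        if (updated_uuids)
            printf("receiver updated UUIDs to %016llX\n", ed_uuid);
        return 0;
    }
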
3050 | /** | 3114 | /** |
@@ -3081,7 +3145,7 @@ static int receive_req_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsi | |||
3081 | { | 3145 | { |
3082 | struct p_req_state *p = &mdev->data.rbuf.req_state; | 3146 | struct p_req_state *p = &mdev->data.rbuf.req_state; |
3083 | union drbd_state mask, val; | 3147 | union drbd_state mask, val; |
3084 | int rv; | 3148 | enum drbd_state_rv rv; |
3085 | 3149 | ||
3086 | mask.i = be32_to_cpu(p->mask); | 3150 | mask.i = be32_to_cpu(p->mask); |
3087 | val.i = be32_to_cpu(p->val); | 3151 | val.i = be32_to_cpu(p->val); |
@@ -3089,7 +3153,7 @@ static int receive_req_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsi | |||
3089 | if (test_bit(DISCARD_CONCURRENT, &mdev->flags) && | 3153 | if (test_bit(DISCARD_CONCURRENT, &mdev->flags) && |
3090 | test_bit(CLUSTER_ST_CHANGE, &mdev->flags)) { | 3154 | test_bit(CLUSTER_ST_CHANGE, &mdev->flags)) { |
3091 | drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG); | 3155 | drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG); |
3092 | return TRUE; | 3156 | return true; |
3093 | } | 3157 | } |
3094 | 3158 | ||
3095 | mask = convert_state(mask); | 3159 | mask = convert_state(mask); |
@@ -3100,7 +3164,7 @@ static int receive_req_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsi | |||
3100 | drbd_send_sr_reply(mdev, rv); | 3164 | drbd_send_sr_reply(mdev, rv); |
3101 | drbd_md_sync(mdev); | 3165 | drbd_md_sync(mdev); |
3102 | 3166 | ||
3103 | return TRUE; | 3167 | return true; |
3104 | } | 3168 | } |
3105 | 3169 | ||
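
receive_req_state() decodes the request into a (mask, val) pair; elsewhere in drbd the new state is computed as ns = (os & ~mask) | val, so the peer can change selected fields of the state union without knowing the rest. The typed enum drbd_state_rv replaces the bare int for the reply code. A two-line demonstration of the mask/val update; the bit layout is invented for the example:

    #include <stdio.h>

    int main(void)
    {
        /* invented 8-bit "state" with a 2-bit field in bits 0-1 */
        unsigned os   = 0xA5;
        unsigned mask = 0x03;   /* select only that field */
        unsigned val  = 0x02;   /* its new value */
        unsigned ns   = (os & ~mask) | val;

        printf("os=0x%02X -> ns=0x%02X\n", os, ns);   /* 0xA5 -> 0xA6 */
        return 0;
    }
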
3106 | static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) | 3170 | static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) |
@@ -3145,7 +3209,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned | |||
3145 | peer_state.conn == C_CONNECTED) { | 3209 | peer_state.conn == C_CONNECTED) { |
3146 | if (drbd_bm_total_weight(mdev) <= mdev->rs_failed) | 3210 | if (drbd_bm_total_weight(mdev) <= mdev->rs_failed) |
3147 | drbd_resync_finished(mdev); | 3211 | drbd_resync_finished(mdev); |
3148 | return TRUE; | 3212 | return true; |
3149 | } | 3213 | } |
3150 | } | 3214 | } |
3151 | 3215 | ||
@@ -3161,6 +3225,9 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned | |||
3161 | if (ns.conn == C_WF_REPORT_PARAMS) | 3225 | if (ns.conn == C_WF_REPORT_PARAMS) |
3162 | ns.conn = C_CONNECTED; | 3226 | ns.conn = C_CONNECTED; |
3163 | 3227 | ||
3228 | if (peer_state.conn == C_AHEAD) | ||
3229 | ns.conn = C_BEHIND; | ||
3230 | |||
3164 | if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING && | 3231 | if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING && |
3165 | get_ldev_if_state(mdev, D_NEGOTIATING)) { | 3232 | get_ldev_if_state(mdev, D_NEGOTIATING)) { |
3166 | int cr; /* consider resync */ | 3233 | int cr; /* consider resync */ |
@@ -3195,10 +3262,10 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned | |||
3195 | real_peer_disk = D_DISKLESS; | 3262 | real_peer_disk = D_DISKLESS; |
3196 | } else { | 3263 | } else { |
3197 | if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags)) | 3264 | if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags)) |
3198 | return FALSE; | 3265 | return false; |
3199 | D_ASSERT(os.conn == C_WF_REPORT_PARAMS); | 3266 | D_ASSERT(os.conn == C_WF_REPORT_PARAMS); |
3200 | drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); | 3267 | drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); |
3201 | return FALSE; | 3268 | return false; |
3202 | } | 3269 | } |
3203 | } | 3270 | } |
3204 | } | 3271 | } |
@@ -3223,7 +3290,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned | |||
3223 | drbd_uuid_new_current(mdev); | 3290 | drbd_uuid_new_current(mdev); |
3224 | clear_bit(NEW_CUR_UUID, &mdev->flags); | 3291 | clear_bit(NEW_CUR_UUID, &mdev->flags); |
3225 | drbd_force_state(mdev, NS2(conn, C_PROTOCOL_ERROR, susp, 0)); | 3292 | drbd_force_state(mdev, NS2(conn, C_PROTOCOL_ERROR, susp, 0)); |
3226 | return FALSE; | 3293 | return false; |
3227 | } | 3294 | } |
3228 | rv = _drbd_set_state(mdev, ns, cs_flags, NULL); | 3295 | rv = _drbd_set_state(mdev, ns, cs_flags, NULL); |
3229 | ns = mdev->state; | 3296 | ns = mdev->state; |
@@ -3231,7 +3298,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned | |||
3231 | 3298 | ||
3232 | if (rv < SS_SUCCESS) { | 3299 | if (rv < SS_SUCCESS) { |
3233 | drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); | 3300 | drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); |
3234 | return FALSE; | 3301 | return false; |
3235 | } | 3302 | } |
3236 | 3303 | ||
3237 | if (os.conn > C_WF_REPORT_PARAMS) { | 3304 | if (os.conn > C_WF_REPORT_PARAMS) { |
@@ -3249,7 +3316,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned | |||
3249 | 3316 | ||
3250 | drbd_md_sync(mdev); /* update connected indicator, la_size, ... */ | 3317 | drbd_md_sync(mdev); /* update connected indicator, la_size, ... */ |
3251 | 3318 | ||
3252 | return TRUE; | 3319 | return true; |
3253 | } | 3320 | } |
3254 | 3321 | ||
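
The two added lines mapping the peer's C_AHEAD to a local C_BEHIND belong to the new congestion mode: an Ahead node stops shipping write payloads and only announces them as out of sync (see receive_out_of_sync() below), while the Behind node waits for the later resync. Consistently, the SyncUUID wait in receive_sync_uuid() below now also accepts C_BEHIND as a valid state in which to receive a sync UUID.
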
3255 | static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) | 3322 | static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) |
@@ -3258,6 +3325,7 @@ static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsi | |||
3258 | 3325 | ||
3259 | wait_event(mdev->misc_wait, | 3326 | wait_event(mdev->misc_wait, |
3260 | mdev->state.conn == C_WF_SYNC_UUID || | 3327 | mdev->state.conn == C_WF_SYNC_UUID || |
3328 | mdev->state.conn == C_BEHIND || | ||
3261 | mdev->state.conn < C_CONNECTED || | 3329 | mdev->state.conn < C_CONNECTED || |
3262 | mdev->state.disk < D_NEGOTIATING); | 3330 | mdev->state.disk < D_NEGOTIATING); |
3263 | 3331 | ||
@@ -3269,32 +3337,42 @@ static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsi | |||
3269 | _drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid)); | 3337 | _drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid)); |
3270 | _drbd_uuid_set(mdev, UI_BITMAP, 0UL); | 3338 | _drbd_uuid_set(mdev, UI_BITMAP, 0UL); |
3271 | 3339 | ||
3340 | drbd_print_uuids(mdev, "updated sync uuid"); | ||
3272 | drbd_start_resync(mdev, C_SYNC_TARGET); | 3341 | drbd_start_resync(mdev, C_SYNC_TARGET); |
3273 | 3342 | ||
3274 | put_ldev(mdev); | 3343 | put_ldev(mdev); |
3275 | } else | 3344 | } else |
3276 | dev_err(DEV, "Ignoring SyncUUID packet!\n"); | 3345 | dev_err(DEV, "Ignoring SyncUUID packet!\n"); |
3277 | 3346 | ||
3278 | return TRUE; | 3347 | return true; |
3279 | } | 3348 | } |
3280 | 3349 | ||
3281 | enum receive_bitmap_ret { OK, DONE, FAILED }; | 3350 | /** |
3282 | 3351 | * receive_bitmap_plain | |
3283 | static enum receive_bitmap_ret | 3352 | * |
3353 | * Return 0 when done, 1 when another iteration is needed, and a negative error | ||
3354 | * code upon failure. | ||
3355 | */ | ||
3356 | static int | ||
3284 | receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size, | 3357 | receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size, |
3285 | unsigned long *buffer, struct bm_xfer_ctx *c) | 3358 | unsigned long *buffer, struct bm_xfer_ctx *c) |
3286 | { | 3359 | { |
3287 | unsigned num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset); | 3360 | unsigned num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset); |
3288 | unsigned want = num_words * sizeof(long); | 3361 | unsigned want = num_words * sizeof(long); |
3362 | int err; | ||
3289 | 3363 | ||
3290 | if (want != data_size) { | 3364 | if (want != data_size) { |
3291 | dev_err(DEV, "%s:want (%u) != data_size (%u)\n", __func__, want, data_size); | 3365 | dev_err(DEV, "%s:want (%u) != data_size (%u)\n", __func__, want, data_size); |
3292 | return FAILED; | 3366 | return -EIO; |
3293 | } | 3367 | } |
3294 | if (want == 0) | 3368 | if (want == 0) |
3295 | return DONE; | 3369 | return 0; |
3296 | if (drbd_recv(mdev, buffer, want) != want) | 3370 | err = drbd_recv(mdev, buffer, want); |
3297 | return FAILED; | 3371 | if (err != want) { |
3372 | if (err >= 0) | ||
3373 | err = -EIO; | ||
3374 | return err; | ||
3375 | } | ||
3298 | 3376 | ||
3299 | drbd_bm_merge_lel(mdev, c->word_offset, num_words, buffer); | 3377 | drbd_bm_merge_lel(mdev, c->word_offset, num_words, buffer); |
3300 | 3378 | ||
@@ -3303,10 +3381,16 @@ receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size, | |||
3303 | if (c->bit_offset > c->bm_bits) | 3381 | if (c->bit_offset > c->bm_bits) |
3304 | c->bit_offset = c->bm_bits; | 3382 | c->bit_offset = c->bm_bits; |
3305 | 3383 | ||
3306 | return OK; | 3384 | return 1; |
3307 | } | 3385 | } |
3308 | 3386 | ||
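
The tri-state enum receive_bitmap_ret is retired in favor of the convention spelled out in the new kernel-doc comments: 0 means done, a positive value means another iteration is needed, a negative value is an -errno. The caller can then drive the transfer with a single loop. A simplified, self-contained sketch of that contract, not the actual receive_bitmap() body:

    #include <stdio.h>
    #include <errno.h>

    /* Toy producer: three chunks arrive, then we are done. */
    static int receive_chunk(int *chunks_left)
    {
        if (*chunks_left < 0)
            return -EIO;        /* failure: negative errno */
        if (*chunks_left == 0)
            return 0;           /* done */
        (*chunks_left)--;
        return 1;               /* another iteration is needed */
    }

    int main(void)
    {
        int chunks_left = 3;
        int err;

        for (;;) {
            err = receive_chunk(&chunks_left);
            if (err <= 0)
                break;          /* 0 = finished, < 0 = error */
        }
        printf("finished, err=%d\n", err);
        return err < 0;
    }
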
3309 | static enum receive_bitmap_ret | 3387 | /** |
3388 | * recv_bm_rle_bits | ||
3389 | * | ||
3390 | * Return 0 when done, 1 when another iteration is needed, and a negative error | ||
3391 | * code upon failure. | ||
3392 | */ | ||
3393 | static int | ||
3310 | recv_bm_rle_bits(struct drbd_conf *mdev, | 3394 | recv_bm_rle_bits(struct drbd_conf *mdev, |
3311 | struct p_compressed_bm *p, | 3395 | struct p_compressed_bm *p, |
3312 | struct bm_xfer_ctx *c) | 3396 | struct bm_xfer_ctx *c) |
@@ -3326,18 +3410,18 @@ recv_bm_rle_bits(struct drbd_conf *mdev, | |||
3326 | 3410 | ||
3327 | bits = bitstream_get_bits(&bs, &look_ahead, 64); | 3411 | bits = bitstream_get_bits(&bs, &look_ahead, 64); |
3328 | if (bits < 0) | 3412 | if (bits < 0) |
3329 | return FAILED; | 3413 | return -EIO; |
3330 | 3414 | ||
3331 | for (have = bits; have > 0; s += rl, toggle = !toggle) { | 3415 | for (have = bits; have > 0; s += rl, toggle = !toggle) { |
3332 | bits = vli_decode_bits(&rl, look_ahead); | 3416 | bits = vli_decode_bits(&rl, look_ahead); |
3333 | if (bits <= 0) | 3417 | if (bits <= 0) |
3334 | return FAILED; | 3418 | return -EIO; |
3335 | 3419 | ||
3336 | if (toggle) { | 3420 | if (toggle) { |
3337 | e = s + rl -1; | 3421 | e = s + rl -1; |
3338 | if (e >= c->bm_bits) { | 3422 | if (e >= c->bm_bits) { |
3339 | dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e); | 3423 | dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e); |
3340 | return FAILED; | 3424 | return -EIO; |
3341 | } | 3425 | } |
3342 | _drbd_bm_set_bits(mdev, s, e); | 3426 | _drbd_bm_set_bits(mdev, s, e); |
3343 | } | 3427 | } |
@@ -3347,14 +3431,14 @@ recv_bm_rle_bits(struct drbd_conf *mdev, | |||
3347 | have, bits, look_ahead, | 3431 | have, bits, look_ahead, |
3348 | (unsigned int)(bs.cur.b - p->code), | 3432 | (unsigned int)(bs.cur.b - p->code), |
3349 | (unsigned int)bs.buf_len); | 3433 | (unsigned int)bs.buf_len); |
3350 | return FAILED; | 3434 | return -EIO; |
3351 | } | 3435 | } |
3352 | look_ahead >>= bits; | 3436 | look_ahead >>= bits; |
3353 | have -= bits; | 3437 | have -= bits; |
3354 | 3438 | ||
3355 | bits = bitstream_get_bits(&bs, &tmp, 64 - have); | 3439 | bits = bitstream_get_bits(&bs, &tmp, 64 - have); |
3356 | if (bits < 0) | 3440 | if (bits < 0) |
3357 | return FAILED; | 3441 | return -EIO; |
3358 | look_ahead |= tmp << have; | 3442 | look_ahead |= tmp << have; |
3359 | have += bits; | 3443 | have += bits; |
3360 | } | 3444 | } |
@@ -3362,10 +3446,16 @@ recv_bm_rle_bits(struct drbd_conf *mdev, | |||
3362 | c->bit_offset = s; | 3446 | c->bit_offset = s; |
3363 | bm_xfer_ctx_bit_to_word_offset(c); | 3447 | bm_xfer_ctx_bit_to_word_offset(c); |
3364 | 3448 | ||
3365 | return (s == c->bm_bits) ? DONE : OK; | 3449 | return (s != c->bm_bits); |
3366 | } | 3450 | } |
3367 | 3451 | ||
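
The final return (s != c->bm_bits) folds the old DONE/OK pair into the same convention: once the decoded bit offset s has reached bm_bits the expression is 0 (done), otherwise it is 1 (another packet is expected); failures were already returned as -EIO above.
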
3368 | static enum receive_bitmap_ret | 3452 | /** |
3453 | * decode_bitmap_c | ||
3454 | * | ||
3455 | * Return 0 when done, 1 when another iteration is needed, and a negative error | ||
3456 | * code upon failure. | ||
3457 | */ | ||
3458 | static int | ||
3369 | decode_bitmap_c(struct drbd_conf *mdev, | 3459 | decode_bitmap_c(struct drbd_conf *mdev, |
3370 | struct p_compressed_bm *p, | 3460 | struct p_compressed_bm *p, |
3371 | struct bm_xfer_ctx *c) | 3461 | struct bm_xfer_ctx *c) |
@@ -3379,7 +3469,7 @@ decode_bitmap_c(struct drbd_conf *mdev, | |||
3379 | 3469 | ||
3380 | dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding); | 3470 | dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding); |
3381 | drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR)); | 3471 | drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR)); |
3382 | return FAILED; | 3472 | return -EIO; |
3383 | } | 3473 | } |
3384 | 3474 | ||
3385 | void INFO_bm_xfer_stats(struct drbd_conf *mdev, | 3475 | void INFO_bm_xfer_stats(struct drbd_conf *mdev, |
@@ -3428,13 +3518,13 @@ static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigne | |||
3428 | { | 3518 | { |
3429 | struct bm_xfer_ctx c; | 3519 | struct bm_xfer_ctx c; |
3430 | void *buffer; | 3520 | void *buffer; |
3431 | enum receive_bitmap_ret ret; | 3521 | int err; |
3432 | int ok = FALSE; | 3522 | int ok = false; |
3433 | struct p_header80 *h = &mdev->data.rbuf.header.h80; | 3523 | struct p_header80 *h = &mdev->data.rbuf.header.h80; |
3434 | 3524 | ||
3435 | wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt)); | 3525 | drbd_bm_lock(mdev, "receive bitmap", BM_LOCKED_SET_ALLOWED); |
3436 | 3526 | /* you are supposed to send additional out-of-sync information | |
3437 | drbd_bm_lock(mdev, "receive bitmap"); | 3527 | * if you actually set bits during this phase */ |
3438 | 3528 | ||
3439 | /* maybe we should use some per thread scratch page, | 3529 | /* maybe we should use some per thread scratch page, |
3440 | * and allocate that during initial device creation? */ | 3530 | * and allocate that during initial device creation? */ |
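
Also note the changed locking at the top of receive_bitmap(): instead of draining ap_bio_cnt before taking the bitmap lock, the lock is now taken with BM_LOCKED_SET_ALLOWED, i.e. setting bits while application I/O is in flight is tolerated, provided, as the new comment says, matching out-of-sync information is sent for bits set during this phase.
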
@@ -3449,9 +3539,9 @@ static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigne | |||
3449 | .bm_words = drbd_bm_words(mdev), | 3539 | .bm_words = drbd_bm_words(mdev), |
3450 | }; | 3540 | }; |
3451 | 3541 | ||
3452 | do { | 3542 | for (;;) {
3453 | if (cmd == P_BITMAP) { | 3543 | if (cmd == P_BITMAP) { |
3454 | ret = receive_bitmap_plain(mdev, data_size, buffer, &c); | 3544 | err = receive_bitmap_plain(mdev, data_size, buffer, &c); |
3455 | } else if (cmd == P_COMPRESSED_BITMAP) { | 3545 | } else if (cmd == P_COMPRESSED_BITMAP) { |
3456 | /* MAYBE: sanity check that we speak proto >= 90, | 3546 | /* MAYBE: sanity check that we speak proto >= 90, |
3457 | * and the feature is enabled! */ | 3547 | * and the feature is enabled! */ |
@@ -3468,9 +3558,9 @@ static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigne | |||
3468 | goto out; | 3558 | goto out; |
3469 | if (data_size <= (sizeof(*p) - sizeof(p->head))) { | 3559 | if (data_size <= (sizeof(*p) - sizeof(p->head))) { |
3470 | dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", data_size); | 3560 | dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", data_size); |
3471 | return FAILED; | 3561 | goto out; |
3472 | } | 3562 | } |
3473 | ret = decode_bitmap_c(mdev, p, &c); | 3563 | err = decode_bitmap_c(mdev, p, &c); |
3474 | } else { | 3564 | } else { |
3475 | dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", cmd); | 3565 | dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", cmd); |
3476 | goto out; | 3566 | goto out; |
@@ -3479,24 +3569,26 @@ static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigne | |||
3479 | c.packets[cmd == P_BITMAP]++; | 3569 | c.packets[cmd == P_BITMAP]++; |
3480 | c.bytes[cmd == P_BITMAP] += sizeof(struct p_header80) + data_size; | 3570 | c.bytes[cmd == P_BITMAP] += sizeof(struct p_header80) + data_size; |
3481 | 3571 | ||
3482 | if (ret != OK) | 3572 | if (err <= 0) { |
3573 | if (err < 0) | ||
3574 | goto out; | ||
3483 | break; | 3575 | break; |
3484 | 3576 | } | |
3485 | if (!drbd_recv_header(mdev, &cmd, &data_size)) | 3577 | if (!drbd_recv_header(mdev, &cmd, &data_size)) |
3486 | goto out; | 3578 | goto out; |
3487 | } while (ret == OK); | 3579 | } |
3488 | if (ret == FAILED) | ||
3489 | goto out; | ||
3490 | 3580 | ||
3491 | INFO_bm_xfer_stats(mdev, "receive", &c); | 3581 | INFO_bm_xfer_stats(mdev, "receive", &c); |
3492 | 3582 | ||
3493 | if (mdev->state.conn == C_WF_BITMAP_T) { | 3583 | if (mdev->state.conn == C_WF_BITMAP_T) { |
3584 | enum drbd_state_rv rv; | ||
3585 | |||
3494 | ok = !drbd_send_bitmap(mdev); | 3586 | ok = !drbd_send_bitmap(mdev); |
3495 | if (!ok) | 3587 | if (!ok) |
3496 | goto out; | 3588 | goto out; |
3497 | /* Omit CS_ORDERED with this state transition to avoid deadlocks. */ | 3589 | /* Omit CS_ORDERED with this state transition to avoid deadlocks. */ |
3498 | ok = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE); | 3590 | rv = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE); |
3499 | D_ASSERT(ok == SS_SUCCESS); | 3591 | D_ASSERT(rv == SS_SUCCESS); |
3500 | } else if (mdev->state.conn != C_WF_BITMAP_S) { | 3592 | } else if (mdev->state.conn != C_WF_BITMAP_S) { |
3501 | /* admin may have requested C_DISCONNECTING, | 3593 | /* admin may have requested C_DISCONNECTING, |
3502 | * other threads may have noticed network errors */ | 3594 | * other threads may have noticed network errors */ |
@@ -3504,7 +3596,7 @@ static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigne | |||
3504 | drbd_conn_str(mdev->state.conn)); | 3596 | drbd_conn_str(mdev->state.conn)); |
3505 | } | 3597 | } |
3506 | 3598 | ||
3507 | ok = TRUE; | 3599 | ok = true; |
3508 | out: | 3600 | out: |
3509 | drbd_bm_unlock(mdev); | 3601 | drbd_bm_unlock(mdev); |
3510 | if (ok && mdev->state.conn == C_WF_BITMAP_S) | 3602 | if (ok && mdev->state.conn == C_WF_BITMAP_S) |
@@ -3538,7 +3630,26 @@ static int receive_UnplugRemote(struct drbd_conf *mdev, enum drbd_packets cmd, u | |||
3538 | * with the data requests being unplugged */ | 3630 | * with the data requests being unplugged */ |
3539 | drbd_tcp_quickack(mdev->data.socket); | 3631 | drbd_tcp_quickack(mdev->data.socket); |
3540 | 3632 | ||
3541 | return TRUE; | 3633 | return true; |
3634 | } | ||
3635 | |||
3636 | static int receive_out_of_sync(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) | ||
3637 | { | ||
3638 | struct p_block_desc *p = &mdev->data.rbuf.block_desc; | ||
3639 | |||
3640 | switch (mdev->state.conn) { | ||
3641 | case C_WF_SYNC_UUID: | ||
3642 | case C_WF_BITMAP_T: | ||
3643 | case C_BEHIND: | ||
3644 | break; | ||
3645 | default: | ||
3646 | dev_err(DEV, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n", | ||
3647 | drbd_conn_str(mdev->state.conn)); | ||
3648 | } | ||
3649 | |||
3650 | drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize)); | ||
3651 | |||
3652 | return true; | ||
3542 | } | 3653 | } |
3543 | 3654 | ||
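
receive_out_of_sync() is the receiving half of the Ahead/Behind mode: instead of write payloads, the peer sends P_OUT_OF_SYNC block descriptors and the local node merely marks the described extent dirty in its bitmap. A toy model of the (sector, blksize) to bitmap-bit arithmetic, assuming DRBD's 4 KiB bitmap granularity (BM_BLOCK_SHIFT = 12) and 512-byte sectors:

    #include <stdio.h>

    #define BM_BLOCK_SHIFT 12   /* 4 KiB per bitmap bit (drbd's granularity) */
    #define SECTOR_SHIFT    9   /* 512-byte sectors */
    #define BITS         1024

    static unsigned char bitmap[BITS / 8];

    static void set_out_of_sync(unsigned long long sector, unsigned int blksize)
    {
        unsigned long long first = sector >> (BM_BLOCK_SHIFT - SECTOR_SHIFT);
        unsigned long long last  = (sector + (blksize >> SECTOR_SHIFT) - 1)
                                   >> (BM_BLOCK_SHIFT - SECTOR_SHIFT);

        for (unsigned long long b = first; b <= last && b < BITS; b++)
            bitmap[b / 8] |= 1u << (b % 8);
    }

    int main(void)
    {
        set_out_of_sync(16, 8192);                   /* sector 16, 8 KiB -> bits 2..3 */
        printf("bitmap[0] = 0x%02X\n", bitmap[0]);   /* prints 0x0C */
        return 0;
    }
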
3544 | typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, enum drbd_packets cmd, unsigned int to_receive); | 3655 | typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, enum drbd_packets cmd, unsigned int to_receive); |
@@ -3571,6 +3682,7 @@ static struct data_cmd drbd_cmd_handler[] = { | |||
3571 | [P_OV_REPLY] = { 1, sizeof(struct p_block_req), receive_DataRequest }, | 3682 | [P_OV_REPLY] = { 1, sizeof(struct p_block_req), receive_DataRequest }, |
3572 | [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest }, | 3683 | [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest }, |
3573 | [P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip }, | 3684 | [P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip }, |
3685 | [P_OUT_OF_SYNC] = { 0, sizeof(struct p_block_desc), receive_out_of_sync }, | ||
3574 | /* anything missing from this table is in | 3686 | /* anything missing from this table is in |
3575 | * the asender_tbl, see get_asender_cmd */ | 3687 | * the asender_tbl, see get_asender_cmd */ |
3576 | [P_MAX_CMD] = { 0, 0, NULL }, | 3688 | [P_MAX_CMD] = { 0, 0, NULL }, |
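
The new packet slots into the table-driven dispatcher used for everything on the data socket: each entry declares whether a payload follows, the size of the fixed part, and the handler; a NULL handler, or a command past P_MAX_CMD, fails the connection. A compact, self-contained illustration of the pattern, with invented names and sizes:

    #include <stdio.h>

    enum pkt { P_PING, P_DATA, P_MAX_CMD };

    struct data_cmd {
        int expect_payload;
        unsigned int pkt_size;
        int (*process)(unsigned int size);
    };

    static int got_ping(unsigned int size) { printf("ping\n"); return 1; }
    static int got_data(unsigned int size) { printf("data, %u bytes\n", size); return 1; }

    static const struct data_cmd handlers[] = {
        [P_PING]    = { 0, 0,  got_ping },
        [P_DATA]    = { 1, 24, got_data },
        [P_MAX_CMD] = { 0, 0,  NULL },
    };

    static int dispatch(unsigned int cmd, unsigned int size)
    {
        if (cmd > P_MAX_CMD || !handlers[cmd].process)
            return 0;                /* unknown packet: fail the connection */
        return handlers[cmd].process(size);
    }

    int main(void)
    {
        return dispatch(P_DATA, 4096) ? 0 : 1;
    }
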
@@ -3610,7 +3722,8 @@ static void drbdd(struct drbd_conf *mdev) | |||
3610 | if (shs) { | 3722 | if (shs) { |
3611 | rv = drbd_recv(mdev, &header->h80.payload, shs); | 3723 | rv = drbd_recv(mdev, &header->h80.payload, shs); |
3612 | if (unlikely(rv != shs)) { | 3724 | if (unlikely(rv != shs)) { |
3613 | dev_err(DEV, "short read while reading sub header: rv=%d\n", rv); | 3725 | if (!signal_pending(current)) |
3726 | dev_warn(DEV, "short read while reading sub header: rv=%d\n", rv); | ||
3614 | goto err_out; | 3727 | goto err_out; |
3615 | } | 3728 | } |
3616 | } | 3729 | } |
@@ -3682,9 +3795,6 @@ static void drbd_disconnect(struct drbd_conf *mdev) | |||
3682 | 3795 | ||
3683 | if (mdev->state.conn == C_STANDALONE) | 3796 | if (mdev->state.conn == C_STANDALONE) |
3684 | return; | 3797 | return; |
3685 | if (mdev->state.conn >= C_WF_CONNECTION) | ||
3686 | dev_err(DEV, "ASSERT FAILED cstate = %s, expected < WFConnection\n", | ||
3687 | drbd_conn_str(mdev->state.conn)); | ||
3688 | 3798 | ||
3689 | /* asender does not clean up anything. it must not interfere, either */ | 3799 | /* asender does not clean up anything. it must not interfere, either */ |
3690 | drbd_thread_stop(&mdev->asender); | 3800 | drbd_thread_stop(&mdev->asender); |
@@ -3713,6 +3823,8 @@ static void drbd_disconnect(struct drbd_conf *mdev) | |||
3713 | atomic_set(&mdev->rs_pending_cnt, 0); | 3823 | atomic_set(&mdev->rs_pending_cnt, 0); |
3714 | wake_up(&mdev->misc_wait); | 3824 | wake_up(&mdev->misc_wait); |
3715 | 3825 | ||
3826 | del_timer(&mdev->request_timer); | ||
3827 | |||
3716 | /* make sure syncer is stopped and w_resume_next_sg queued */ | 3828 | /* make sure syncer is stopped and w_resume_next_sg queued */ |
3717 | del_timer_sync(&mdev->resync_timer); | 3829 | del_timer_sync(&mdev->resync_timer); |
3718 | resync_timer_fn((unsigned long)mdev); | 3830 | resync_timer_fn((unsigned long)mdev); |
@@ -3758,13 +3870,6 @@ static void drbd_disconnect(struct drbd_conf *mdev) | |||
3758 | if (os.conn == C_DISCONNECTING) { | 3870 | if (os.conn == C_DISCONNECTING) { |
3759 | wait_event(mdev->net_cnt_wait, atomic_read(&mdev->net_cnt) == 0); | 3871 | wait_event(mdev->net_cnt_wait, atomic_read(&mdev->net_cnt) == 0); |
3760 | 3872 | ||
3761 | if (!is_susp(mdev->state)) { | ||
3762 | /* we must not free the tl_hash | ||
3763 | * while application io is still on the fly */ | ||
3764 | wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt)); | ||
3765 | drbd_free_tl_hash(mdev); | ||
3766 | } | ||
3767 | |||
3768 | crypto_free_hash(mdev->cram_hmac_tfm); | 3873 | crypto_free_hash(mdev->cram_hmac_tfm); |
3769 | mdev->cram_hmac_tfm = NULL; | 3874 | mdev->cram_hmac_tfm = NULL; |
3770 | 3875 | ||
@@ -3773,6 +3878,10 @@ static void drbd_disconnect(struct drbd_conf *mdev) | |||
3773 | drbd_request_state(mdev, NS(conn, C_STANDALONE)); | 3878 | drbd_request_state(mdev, NS(conn, C_STANDALONE)); |
3774 | } | 3879 | } |
3775 | 3880 | ||
3881 | /* serialize with bitmap writeout triggered by the state change, | ||
3882 | * if any. */ | ||
3883 | wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags)); | ||
3884 | |||
3776 | /* tcp_close and release of sendpage pages can be deferred. I don't | 3885 | /* tcp_close and release of sendpage pages can be deferred. I don't |
3777 | * want to use SO_LINGER, because apparently it can be deferred for | 3886 | * want to use SO_LINGER, because apparently it can be deferred for |
3778 | * more than 20 seconds (longest time I checked). | 3887 | * more than 20 seconds (longest time I checked). |
@@ -3873,7 +3982,8 @@ static int drbd_do_handshake(struct drbd_conf *mdev) | |||
3873 | rv = drbd_recv(mdev, &p->head.payload, expect); | 3982 | rv = drbd_recv(mdev, &p->head.payload, expect); |
3874 | 3983 | ||
3875 | if (rv != expect) { | 3984 | if (rv != expect) { |
3876 | dev_err(DEV, "short read receiving handshake packet: l=%u\n", rv); | 3985 | if (!signal_pending(current)) |
3986 | dev_warn(DEV, "short read receiving handshake packet: l=%u\n", rv); | ||
3877 | return 0; | 3987 | return 0; |
3878 | } | 3988 | } |
3879 | 3989 | ||
@@ -3975,7 +4085,8 @@ static int drbd_do_auth(struct drbd_conf *mdev) | |||
3975 | rv = drbd_recv(mdev, peers_ch, length); | 4085 | rv = drbd_recv(mdev, peers_ch, length); |
3976 | 4086 | ||
3977 | if (rv != length) { | 4087 | if (rv != length) { |
3978 | dev_err(DEV, "short read AuthChallenge: l=%u\n", rv); | 4088 | if (!signal_pending(current)) |
4089 | dev_warn(DEV, "short read AuthChallenge: l=%u\n", rv); | ||
3979 | rv = 0; | 4090 | rv = 0; |
3980 | goto fail; | 4091 | goto fail; |
3981 | } | 4092 | } |
@@ -4022,7 +4133,8 @@ static int drbd_do_auth(struct drbd_conf *mdev) | |||
4022 | rv = drbd_recv(mdev, response , resp_size); | 4133 | rv = drbd_recv(mdev, response , resp_size); |
4023 | 4134 | ||
4024 | if (rv != resp_size) { | 4135 | if (rv != resp_size) { |
4025 | dev_err(DEV, "short read receiving AuthResponse: l=%u\n", rv); | 4136 | if (!signal_pending(current)) |
4137 | dev_warn(DEV, "short read receiving AuthResponse: l=%u\n", rv); | ||
4026 | rv = 0; | 4138 | rv = 0; |
4027 | goto fail; | 4139 | goto fail; |
4028 | } | 4140 | } |
@@ -4074,8 +4186,7 @@ int drbdd_init(struct drbd_thread *thi) | |||
4074 | h = drbd_connect(mdev); | 4186 | h = drbd_connect(mdev); |
4075 | if (h == 0) { | 4187 | if (h == 0) { |
4076 | drbd_disconnect(mdev); | 4188 | drbd_disconnect(mdev); |
4077 | __set_current_state(TASK_INTERRUPTIBLE); | 4189 | schedule_timeout_interruptible(HZ); |
4078 | schedule_timeout(HZ); | ||
4079 | } | 4190 | } |
4080 | if (h == -1) { | 4191 | if (h == -1) { |
4081 | dev_warn(DEV, "Discarding network configuration.\n"); | 4192 | dev_warn(DEV, "Discarding network configuration.\n"); |
@@ -4113,7 +4224,7 @@ static int got_RqSReply(struct drbd_conf *mdev, struct p_header80 *h) | |||
4113 | } | 4224 | } |
4114 | wake_up(&mdev->state_wait); | 4225 | wake_up(&mdev->state_wait); |
4115 | 4226 | ||
4116 | return TRUE; | 4227 | return true; |
4117 | } | 4228 | } |
4118 | 4229 | ||
4119 | static int got_Ping(struct drbd_conf *mdev, struct p_header80 *h) | 4230 | static int got_Ping(struct drbd_conf *mdev, struct p_header80 *h) |
@@ -4129,7 +4240,7 @@ static int got_PingAck(struct drbd_conf *mdev, struct p_header80 *h) | |||
4129 | if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags)) | 4240 | if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags)) |
4130 | wake_up(&mdev->misc_wait); | 4241 | wake_up(&mdev->misc_wait); |
4131 | 4242 | ||
4132 | return TRUE; | 4243 | return true; |
4133 | } | 4244 | } |
4134 | 4245 | ||
4135 | static int got_IsInSync(struct drbd_conf *mdev, struct p_header80 *h) | 4246 | static int got_IsInSync(struct drbd_conf *mdev, struct p_header80 *h) |
@@ -4152,7 +4263,7 @@ static int got_IsInSync(struct drbd_conf *mdev, struct p_header80 *h) | |||
4152 | dec_rs_pending(mdev); | 4263 | dec_rs_pending(mdev); |
4153 | atomic_add(blksize >> 9, &mdev->rs_sect_in); | 4264 | atomic_add(blksize >> 9, &mdev->rs_sect_in); |
4154 | 4265 | ||
4155 | return TRUE; | 4266 | return true; |
4156 | } | 4267 | } |
4157 | 4268 | ||
4158 | /* when we receive the ACK for a write request, | 4269 | /* when we receive the ACK for a write request, |
@@ -4176,8 +4287,6 @@ static struct drbd_request *_ack_id_to_req(struct drbd_conf *mdev, | |||
4176 | return req; | 4287 | return req; |
4177 | } | 4288 | } |
4178 | } | 4289 | } |
4179 | dev_err(DEV, "_ack_id_to_req: failed to find req %p, sector %llus in list\n", | ||
4180 | (void *)(unsigned long)id, (unsigned long long)sector); | ||
4181 | return NULL; | 4290 | return NULL; |
4182 | } | 4291 | } |
4183 | 4292 | ||
@@ -4195,15 +4304,17 @@ static int validate_req_change_req_state(struct drbd_conf *mdev, | |||
4195 | req = validator(mdev, id, sector); | 4304 | req = validator(mdev, id, sector); |
4196 | if (unlikely(!req)) { | 4305 | if (unlikely(!req)) { |
4197 | spin_unlock_irq(&mdev->req_lock); | 4306 | spin_unlock_irq(&mdev->req_lock); |
4198 | dev_err(DEV, "%s: got a corrupt block_id/sector pair\n", func); | 4307 | |
4199 | return FALSE; | 4308 | dev_err(DEV, "%s: failed to find req %p, sector %llus\n", func, |
4309 | (void *)(unsigned long)id, (unsigned long long)sector); | ||
4310 | return false; | ||
4200 | } | 4311 | } |
4201 | __req_mod(req, what, &m); | 4312 | __req_mod(req, what, &m); |
4202 | spin_unlock_irq(&mdev->req_lock); | 4313 | spin_unlock_irq(&mdev->req_lock); |
4203 | 4314 | ||
4204 | if (m.bio) | 4315 | if (m.bio) |
4205 | complete_master_bio(mdev, &m); | 4316 | complete_master_bio(mdev, &m); |
4206 | return TRUE; | 4317 | return true; |
4207 | } | 4318 | } |
4208 | 4319 | ||
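
validate_req_change_req_state() shows the locking shape used by most ack handlers: look the request up and transition it under req_lock, but complete the master bio only after dropping the lock. A generic sketch of that shape, with a pthread mutex standing in for the kernel spinlock:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdbool.h>

    static pthread_mutex_t req_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool req_in_hash = true;     /* toy stand-in for the hash lookup */

    static bool handle_ack(void)
    {
        bool complete_bio;

        pthread_mutex_lock(&req_lock);
        if (!req_in_hash) {
            pthread_mutex_unlock(&req_lock);
            fprintf(stderr, "failed to find req\n");
            return false;
        }
        complete_bio = true;            /* what __req_mod() reports via m.bio */
        req_in_hash = false;
        pthread_mutex_unlock(&req_lock);

        if (complete_bio)               /* complete outside the lock */
            printf("complete_master_bio()\n");
        return true;
    }

    int main(void)
    {
        return handle_ack() ? 0 : 1;
    }
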
4209 | static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h) | 4320 | static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h) |
@@ -4218,7 +4329,7 @@ static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h) | |||
4218 | if (is_syncer_block_id(p->block_id)) { | 4329 | if (is_syncer_block_id(p->block_id)) { |
4219 | drbd_set_in_sync(mdev, sector, blksize); | 4330 | drbd_set_in_sync(mdev, sector, blksize); |
4220 | dec_rs_pending(mdev); | 4331 | dec_rs_pending(mdev); |
4221 | return TRUE; | 4332 | return true; |
4222 | } | 4333 | } |
4223 | switch (be16_to_cpu(h->command)) { | 4334 | switch (be16_to_cpu(h->command)) { |
4224 | case P_RS_WRITE_ACK: | 4335 | case P_RS_WRITE_ACK: |
@@ -4239,7 +4350,7 @@ static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h) | |||
4239 | break; | 4350 | break; |
4240 | default: | 4351 | default: |
4241 | D_ASSERT(0); | 4352 | D_ASSERT(0); |
4242 | return FALSE; | 4353 | return false; |
4243 | } | 4354 | } |
4244 | 4355 | ||
4245 | return validate_req_change_req_state(mdev, p->block_id, sector, | 4356 | return validate_req_change_req_state(mdev, p->block_id, sector, |
@@ -4250,20 +4361,44 @@ static int got_NegAck(struct drbd_conf *mdev, struct p_header80 *h) | |||
4250 | { | 4361 | { |
4251 | struct p_block_ack *p = (struct p_block_ack *)h; | 4362 | struct p_block_ack *p = (struct p_block_ack *)h; |
4252 | sector_t sector = be64_to_cpu(p->sector); | 4363 | sector_t sector = be64_to_cpu(p->sector); |
4253 | 4364 | int size = be32_to_cpu(p->blksize); | |
4254 | if (__ratelimit(&drbd_ratelimit_state)) | 4365 | struct drbd_request *req; |
4255 | dev_warn(DEV, "Got NegAck packet. Peer is in troubles?\n"); | 4366 | struct bio_and_error m; |
4256 | 4367 | ||
4257 | update_peer_seq(mdev, be32_to_cpu(p->seq_num)); | 4368 | update_peer_seq(mdev, be32_to_cpu(p->seq_num)); |
4258 | 4369 | ||
4259 | if (is_syncer_block_id(p->block_id)) { | 4370 | if (is_syncer_block_id(p->block_id)) { |
4260 | int size = be32_to_cpu(p->blksize); | ||
4261 | dec_rs_pending(mdev); | 4371 | dec_rs_pending(mdev); |
4262 | drbd_rs_failed_io(mdev, sector, size); | 4372 | drbd_rs_failed_io(mdev, sector, size); |
4263 | return TRUE; | 4373 | return true; |
4264 | } | 4374 | } |
4265 | return validate_req_change_req_state(mdev, p->block_id, sector, | 4375 | |
4266 | _ack_id_to_req, __func__ , neg_acked); | 4376 | spin_lock_irq(&mdev->req_lock); |
4377 | req = _ack_id_to_req(mdev, p->block_id, sector); | ||
4378 | if (!req) { | ||
4379 | spin_unlock_irq(&mdev->req_lock); | ||
4380 | if (mdev->net_conf->wire_protocol == DRBD_PROT_A || | ||
4381 | mdev->net_conf->wire_protocol == DRBD_PROT_B) { | ||
4382 | /* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs. | ||
4383 | The master bio might already be completed, therefore the | ||
4384 | request is no longer in the collision hash. | ||
4385 | => Do not try to validate block_id as request. */ | ||
4386 | /* In Protocol B we might already have got a P_RECV_ACK | ||
4387 | but then get a P_NEG_ACK afterwards. */ | ||
4388 | drbd_set_out_of_sync(mdev, sector, size); | ||
4389 | return true; | ||
4390 | } else { | ||
4391 | dev_err(DEV, "%s: failed to find req %p, sector %llus\n", __func__, | ||
4392 | (void *)(unsigned long)p->block_id, (unsigned long long)sector); | ||
4393 | return false; | ||
4394 | } | ||
4395 | } | ||
4396 | __req_mod(req, neg_acked, &m); | ||
4397 | spin_unlock_irq(&mdev->req_lock); | ||
4398 | |||
4399 | if (m.bio) | ||
4400 | complete_master_bio(mdev, &m); | ||
4401 | return true; | ||
4267 | } | 4402 | } |
4268 | 4403 | ||
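
With protocol A there are no P_WRITE_ACKs, so the master bio may complete, and the request leave the collision hash, before a P_NEG_ACK arrives; with protocol B a P_RECV_ACK may have completed it already. In both cases got_NegAck() now simply marks the block out of sync instead of treating the missing request as corruption. A sketch of that decision; the enum values are stand-ins:

    #include <stdio.h>
    #include <stdbool.h>

    enum proto { DRBD_PROT_A = 1, DRBD_PROT_B = 2, DRBD_PROT_C = 3 };

    /* If the request is already gone, that is legal only for A and B. */
    static bool neg_ack_without_req_ok(enum proto p)
    {
        return p == DRBD_PROT_A || p == DRBD_PROT_B;
    }

    int main(void)
    {
        for (int p = DRBD_PROT_A; p <= DRBD_PROT_C; p++)
            printf("protocol %c: %s\n", 'A' + p - 1,
                   neg_ack_without_req_ok((enum proto)p)
                   ? "mark block out of sync, carry on"
                   : "corrupt block_id -> report error");
        return 0;
    }
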
4269 | static int got_NegDReply(struct drbd_conf *mdev, struct p_header80 *h) | 4404 | static int got_NegDReply(struct drbd_conf *mdev, struct p_header80 *h) |
@@ -4294,11 +4429,20 @@ static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header80 *h) | |||
4294 | 4429 | ||
4295 | if (get_ldev_if_state(mdev, D_FAILED)) { | 4430 | if (get_ldev_if_state(mdev, D_FAILED)) { |
4296 | drbd_rs_complete_io(mdev, sector); | 4431 | drbd_rs_complete_io(mdev, sector); |
4297 | drbd_rs_failed_io(mdev, sector, size); | 4432 | switch (be16_to_cpu(h->command)) { |
4433 | case P_NEG_RS_DREPLY: | ||
4434 | drbd_rs_failed_io(mdev, sector, size); | ||
4435 | case P_RS_CANCEL: | ||
4436 | break; | ||
4437 | default: | ||
4438 | D_ASSERT(0); | ||
4439 | put_ldev(mdev); | ||
4440 | return false; | ||
4441 | } | ||
4298 | put_ldev(mdev); | 4442 | put_ldev(mdev); |
4299 | } | 4443 | } |
4300 | 4444 | ||
4301 | return TRUE; | 4445 | return true; |
4302 | } | 4446 | } |
4303 | 4447 | ||
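
Note the deliberate fall-through in the new switch above: P_NEG_RS_DREPLY records the failed resync I/O and then shares the completion path with P_RS_CANCEL, which merely cancels the request without counting a failure. A minimal model of that control flow, with the fall-through made explicit in a comment, which the patch itself omits:

    #include <stdio.h>

    enum cmd { P_NEG_RS_DREPLY, P_RS_CANCEL };

    static void handle(enum cmd c)
    {
        switch (c) {
        case P_NEG_RS_DREPLY:
            printf("record failed resync I/O\n");
            /* fall through: failure and cancel share the completion path */
        case P_RS_CANCEL:
            printf("complete resync request, put_ldev()\n");
            break;
        }
    }

    int main(void)
    {
        handle(P_NEG_RS_DREPLY);
        handle(P_RS_CANCEL);
        return 0;
    }
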
4304 | static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h) | 4448 | static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h) |
@@ -4307,7 +4451,14 @@ static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h) | |||
4307 | 4451 | ||
4308 | tl_release(mdev, p->barrier, be32_to_cpu(p->set_size)); | 4452 | tl_release(mdev, p->barrier, be32_to_cpu(p->set_size)); |
4309 | 4453 | ||
4310 | return TRUE; | 4454 | if (mdev->state.conn == C_AHEAD && |
4455 | atomic_read(&mdev->ap_in_flight) == 0 && | ||
4456 | !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->current_epoch->flags)) { | ||
4457 | mdev->start_resync_timer.expires = jiffies + HZ; | ||
4458 | add_timer(&mdev->start_resync_timer); | ||
4459 | } | ||
4460 | |||
4461 | return true; | ||
4311 | } | 4462 | } |
4312 | 4463 | ||
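
In got_BarrierAck(), a barrier ack on an Ahead connection with no application I/O left in flight is the trigger for switching back toward SyncSource: a one-shot flag (test_and_set_bit on the current epoch) guards the timer so it is armed only once, one second out. A small model of the guard logic; the flag and timer are simplified stand-ins:

    #include <stdio.h>
    #include <stdbool.h>

    static bool ahead_to_sync_source;   /* models the per-epoch flag */

    /* Sketch of the guard in got_BarrierAck(); test_and_set_bit() becomes
     * a plain flag here, and "arm timer" stands for add_timer(+HZ). */
    static void on_barrier_ack(bool conn_is_ahead, int ap_in_flight)
    {
        if (conn_is_ahead && ap_in_flight == 0 && !ahead_to_sync_source) {
            ahead_to_sync_source = true;
            printf("start_resync_timer armed, fires in 1s\n");
        }
    }

    int main(void)
    {
        on_barrier_ack(true, 0);    /* arms the timer */
        on_barrier_ack(true, 0);    /* flag already set: no-op */
        return 0;
    }
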
4313 | static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h) | 4464 | static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h) |
@@ -4328,12 +4479,18 @@ static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h) | |||
4328 | ov_oos_print(mdev); | 4479 | ov_oos_print(mdev); |
4329 | 4480 | ||
4330 | if (!get_ldev(mdev)) | 4481 | if (!get_ldev(mdev)) |
4331 | return TRUE; | 4482 | return true; |
4332 | 4483 | ||
4333 | drbd_rs_complete_io(mdev, sector); | 4484 | drbd_rs_complete_io(mdev, sector); |
4334 | dec_rs_pending(mdev); | 4485 | dec_rs_pending(mdev); |
4335 | 4486 | ||
4336 | if (--mdev->ov_left == 0) { | 4487 | --mdev->ov_left; |
4488 | |||
4489 | /* let's advance progress step marks only for every other megabyte */ | ||
4490 | if ((mdev->ov_left & 0x200) == 0x200) | ||
4491 | drbd_advance_rs_marks(mdev, mdev->ov_left); | ||
4492 | |||
4493 | if (mdev->ov_left == 0) { | ||
4337 | w = kmalloc(sizeof(*w), GFP_NOIO); | 4494 | w = kmalloc(sizeof(*w), GFP_NOIO); |
4338 | if (w) { | 4495 | if (w) { |
4339 | w->cb = w_ov_finished; | 4496 | w->cb = w_ov_finished; |
@@ -4345,12 +4502,12 @@ static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h) | |||
4345 | } | 4502 | } |
4346 | } | 4503 | } |
4347 | put_ldev(mdev); | 4504 | put_ldev(mdev); |
4348 | return TRUE; | 4505 | return true; |
4349 | } | 4506 | } |
4350 | 4507 | ||
4351 | static int got_skip(struct drbd_conf *mdev, struct p_header80 *h) | 4508 | static int got_skip(struct drbd_conf *mdev, struct p_header80 *h) |
4352 | { | 4509 | { |
4353 | return TRUE; | 4510 | return true; |
4354 | } | 4511 | } |
4355 | 4512 | ||
4356 | struct asender_cmd { | 4513 | struct asender_cmd { |
@@ -4378,6 +4535,7 @@ static struct asender_cmd *get_asender_cmd(int cmd) | |||
4378 | [P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply }, | 4535 | [P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply }, |
4379 | [P_RS_IS_IN_SYNC] = { sizeof(struct p_block_ack), got_IsInSync }, | 4536 | [P_RS_IS_IN_SYNC] = { sizeof(struct p_block_ack), got_IsInSync }, |
4380 | [P_DELAY_PROBE] = { sizeof(struct p_delay_probe93), got_skip }, | 4537 | [P_DELAY_PROBE] = { sizeof(struct p_delay_probe93), got_skip }, |
4538 | [P_RS_CANCEL] = { sizeof(struct p_block_ack), got_NegRSDReply}, | ||
4381 | [P_MAX_CMD] = { 0, NULL }, | 4539 | [P_MAX_CMD] = { 0, NULL }, |
4382 | }; | 4540 | }; |
4383 | if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL) | 4541 | if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL) |