author     Lars Ellenberg <lars.ellenberg@linbit.com>    2013-11-22 06:40:58 -0500
committer  Philipp Reisner <philipp.reisner@linbit.com>  2014-07-10 09:22:22 -0400
commit     44a4d551846b8c61aa430b9432c1fcdf88444708 (patch)
tree       8188189bed59234fdf943702ebb55c9c17106215 /drivers/block/drbd
parent     35b5ed5bbac2432acdfce1d9dec8dbf8fe7d60dd (diff)
drbd: refactor use of first_peer_device()
Reduce the number of calls to first_peer_device(): call it just once per
function and assign the result to a local variable peer_device.
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
Diffstat (limited to 'drivers/block/drbd')
-rw-r--r--   drivers/block/drbd/drbd_nl.c         30
-rw-r--r--   drivers/block/drbd/drbd_receiver.c   18
-rw-r--r--   drivers/block/drbd/drbd_req.c        25
-rw-r--r--   drivers/block/drbd/drbd_state.c      65
-rw-r--r--   drivers/block/drbd/drbd_worker.c     55
5 files changed, 105 insertions, 88 deletions
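
The pattern applied throughout is the same: look up first_peer_device(device) once at function scope, derive the connection from it, and use those two locals everywhere below. A minimal, self-contained sketch of the before/after shape (the stub types and the get_conn_* helpers are illustrative only; the real definitions live in drbd_int.h):

    /* Stub types standing in for the real DRBD structures. */
    struct drbd_connection { int flags; };
    struct drbd_peer_device { struct drbd_connection *connection; };
    struct drbd_device { struct drbd_peer_device *peer_device; };

    /* Stub lookup; the real helper picks the first entry of the
     * device's peer_devices list. */
    static struct drbd_peer_device *first_peer_device(struct drbd_device *device)
    {
        return device->peer_device;
    }

    /* Before: every use re-evaluates the whole lookup chain. */
    static struct drbd_connection *get_conn_before(struct drbd_device *device)
    {
        return first_peer_device(device)->connection;
    }

    /* After: one lookup, NULL-guarded, then plain pointer use. */
    static struct drbd_connection *get_conn_after(struct drbd_device *const device)
    {
        struct drbd_peer_device *const peer_device = first_peer_device(device);

        return peer_device ? peer_device->connection : NULL;
    }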
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 25f4b6f67c21..0bf8a6082bb8 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -552,8 +552,10 @@ void conn_try_outdate_peer_async(struct drbd_connection *connection)
 }
 
 enum drbd_state_rv
-drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force)
+drbd_set_role(struct drbd_device *const device, enum drbd_role new_role, int force)
 {
+	struct drbd_peer_device *const peer_device = first_peer_device(device);
+	struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
 	const int max_tries = 4;
 	enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
 	struct net_conf *nc;
@@ -601,7 +603,7 @@ drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force)
 		    device->state.disk == D_CONSISTENT && mask.pdsk == 0) {
 			D_ASSERT(device, device->state.pdsk == D_UNKNOWN);
 
-			if (conn_try_outdate_peer(first_peer_device(device)->connection)) {
+			if (conn_try_outdate_peer(connection)) {
 				val.disk = D_UP_TO_DATE;
 				mask.disk = D_MASK;
 			}
@@ -611,7 +613,7 @@ drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force)
 		if (rv == SS_NOTHING_TO_DO)
 			goto out;
 		if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
-			if (!conn_try_outdate_peer(first_peer_device(device)->connection) && force) {
+			if (!conn_try_outdate_peer(connection) && force) {
 				drbd_warn(device, "Forced into split brain situation!\n");
 				mask.pdsk = D_MASK;
 				val.pdsk = D_OUTDATED;
@@ -624,7 +626,7 @@ drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force)
 			   retry at most once more in this case. */
 			int timeo;
 			rcu_read_lock();
-			nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
+			nc = rcu_dereference(connection->net_conf);
 			timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
 			rcu_read_unlock();
 			schedule_timeout_interruptible(timeo);
@@ -661,7 +663,7 @@ drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force)
 	} else {
 		/* Called from drbd_adm_set_role only.
 		 * We are still holding the conf_update mutex. */
-		nc = first_peer_device(device)->connection->net_conf;
+		nc = connection->net_conf;
 		if (nc)
 			nc->discard_my_data = 0; /* without copy; single bit op is atomic */
 
@@ -683,8 +685,8 @@ drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force)
 	if (device->state.conn >= C_WF_REPORT_PARAMS) {
 		/* if this was forced, we should consider sync */
 		if (forced)
-			drbd_send_uuids(first_peer_device(device));
-		drbd_send_current_state(first_peer_device(device));
+			drbd_send_uuids(peer_device);
+		drbd_send_current_state(peer_device);
 	}
 
 	drbd_md_sync(device);
@@ -1433,6 +1435,8 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
 {
 	struct drbd_config_context adm_ctx;
 	struct drbd_device *device;
+	struct drbd_peer_device *peer_device;
+	struct drbd_connection *connection;
 	int err;
 	enum drbd_ret_code retcode;
 	enum determine_dev_size dd;
@@ -1455,7 +1459,9 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
 
 	device = adm_ctx.device;
 	mutex_lock(&adm_ctx.resource->adm_mutex);
-	conn_reconfig_start(first_peer_device(device)->connection);
+	peer_device = first_peer_device(device);
+	connection = peer_device ? peer_device->connection : NULL;
+	conn_reconfig_start(connection);
 
 	/* if you want to reconfigure, please tear down first */
 	if (device->state.disk > D_DISKLESS) {
@@ -1522,7 +1528,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
 		goto fail;
 
 	rcu_read_lock();
-	nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
+	nc = rcu_dereference(connection->net_conf);
 	if (nc) {
 		if (new_disk_conf->fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) {
 			rcu_read_unlock();
@@ -1642,7 +1648,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
 	 */
 	wait_event(device->misc_wait, !atomic_read(&device->ap_pending_cnt) || drbd_suspended(device));
 	/* and for any other previously queued work */
-	drbd_flush_workqueue(&first_peer_device(device)->connection->sender_work);
+	drbd_flush_workqueue(&connection->sender_work);
 
 	rv = _drbd_request_state(device, NS(disk, D_ATTACHING), CS_VERBOSE);
 	retcode = rv; /* FIXME: Type mismatch. */
@@ -1838,7 +1844,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
 
 	kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE);
 	put_ldev(device);
-	conn_reconfig_done(first_peer_device(device)->connection);
+	conn_reconfig_done(connection);
 	mutex_unlock(&adm_ctx.resource->adm_mutex);
 	drbd_adm_finish(&adm_ctx, info, retcode);
 	return 0;
@@ -1849,7 +1855,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
 	drbd_force_state(device, NS(disk, D_DISKLESS));
 	drbd_md_sync(device);
 fail:
-	conn_reconfig_done(first_peer_device(device)->connection);
+	conn_reconfig_done(connection);
 	if (nbc) {
 		if (nbc->backing_bdev)
 			blkdev_put(nbc->backing_bdev,
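
Note that the conversion leaves the RCU discipline around net_conf untouched: only the pointer expression inside rcu_dereference() changes. A sketch of the read-side pattern these drbd_nl.c hunks preserve (the helper name is hypothetical; the body mirrors the drbd_set_role() hunk above):

    /* Read the ping timeout under rcu_read_lock(); nc may be NULL
     * while the configuration is being torn down, hence the fallback. */
    static int ping_timeout_jiffies(struct drbd_connection *connection)
    {
        struct net_conf *nc;
        int timeo;

        rcu_read_lock();
        nc = rcu_dereference(connection->net_conf);
        timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
        rcu_read_unlock();

        return timeo;
    }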
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index be0c3761cdc6..bb1434dfec8a 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -2857,8 +2857,10 @@ static void drbd_uuid_dump(struct drbd_device *device, char *text, u64 *uuid,
 	-1091   requires proto 91
 	-1096   requires proto 96
  */
-static int drbd_uuid_compare(struct drbd_device *device, int *rule_nr) __must_hold(local)
+static int drbd_uuid_compare(struct drbd_device *const device, int *rule_nr) __must_hold(local)
 {
+	struct drbd_peer_device *const peer_device = first_peer_device(device);
+	struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
 	u64 self, peer;
 	int i, j;
 
@@ -2884,7 +2886,7 @@ static int drbd_uuid_compare(struct drbd_device *device, int *rule_nr) __must_hold(local)
 
 	if (device->p_uuid[UI_BITMAP] == (u64)0 && device->ldev->md.uuid[UI_BITMAP] != (u64)0) {
 
-		if (first_peer_device(device)->connection->agreed_pro_version < 91)
+		if (connection->agreed_pro_version < 91)
 			return -1091;
 
 		if ((device->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
@@ -2907,7 +2909,7 @@ static int drbd_uuid_compare(struct drbd_device *device, int *rule_nr) __must_hold(local)
 
 	if (device->ldev->md.uuid[UI_BITMAP] == (u64)0 && device->p_uuid[UI_BITMAP] != (u64)0) {
 
-		if (first_peer_device(device)->connection->agreed_pro_version < 91)
+		if (connection->agreed_pro_version < 91)
 			return -1091;
 
 		if ((device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (device->p_uuid[UI_BITMAP] & ~((u64)1)) &&
@@ -2940,7 +2942,7 @@ static int drbd_uuid_compare(struct drbd_device *device, int *rule_nr) __must_hold(local)
 			case 1: /*  self_pri && !peer_pri */ return 1;
 			case 2: /* !self_pri &&  peer_pri */ return -1;
 			case 3: /*  self_pri &&  peer_pri */
-				dc = test_bit(RESOLVE_CONFLICTS, &first_peer_device(device)->connection->flags);
+				dc = test_bit(RESOLVE_CONFLICTS, &connection->flags);
 				return dc ? -1 : 1;
 			}
 		}
@@ -2953,14 +2955,14 @@ static int drbd_uuid_compare(struct drbd_device *device, int *rule_nr) __must_hold(local)
 	*rule_nr = 51;
 	peer = device->p_uuid[UI_HISTORY_START] & ~((u64)1);
 	if (self == peer) {
-		if (first_peer_device(device)->connection->agreed_pro_version < 96 ?
+		if (connection->agreed_pro_version < 96 ?
 		    (device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
 		    (device->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
 		    peer + UUID_NEW_BM_OFFSET == (device->p_uuid[UI_BITMAP] & ~((u64)1))) {
 			/* The last P_SYNC_UUID did not get though. Undo the last start of
 			   resync as sync source modifications of the peer's UUIDs. */
 
-			if (first_peer_device(device)->connection->agreed_pro_version < 91)
+			if (connection->agreed_pro_version < 91)
 				return -1091;
 
 			device->p_uuid[UI_BITMAP] = device->p_uuid[UI_HISTORY_START];
@@ -2990,14 +2992,14 @@ static int drbd_uuid_compare(struct drbd_device *device, int *rule_nr) __must_hold(local)
 	*rule_nr = 71;
 	self = device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
 	if (self == peer) {
-		if (first_peer_device(device)->connection->agreed_pro_version < 96 ?
+		if (connection->agreed_pro_version < 96 ?
 		    (device->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
 		    (device->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
 		    self + UUID_NEW_BM_OFFSET == (device->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
 			/* The last P_SYNC_UUID did not get though. Undo the last start of
 			   resync as sync source modifications of our UUIDs. */
 
-			if (first_peer_device(device)->connection->agreed_pro_version < 91)
+			if (connection->agreed_pro_version < 91)
 				return -1091;
 
 			__drbd_uuid_set(device, UI_BITMAP, device->ldev->md.uuid[UI_HISTORY_START]);
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 4c7fee1a5a85..042bbc689f5e 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -454,7 +454,9 @@ static void drbd_report_io_error(struct drbd_device *device, struct drbd_request *req)
 int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		struct bio_and_error *m)
 {
-	struct drbd_device *device = req->device;
+	struct drbd_device *const device = req->device;
+	struct drbd_peer_device *const peer_device = first_peer_device(device);
+	struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
 	struct net_conf *nc;
 	int p, rv = 0;
 
@@ -477,7 +479,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		 * and from w_read_retry_remote */
 		D_ASSERT(device, !(req->rq_state & RQ_NET_MASK));
 		rcu_read_lock();
-		nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
+		nc = rcu_dereference(connection->net_conf);
 		p = nc->wire_protocol;
 		rcu_read_unlock();
 		req->rq_state |=
@@ -549,7 +551,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		D_ASSERT(device, (req->rq_state & RQ_LOCAL_MASK) == 0);
 		mod_rq_state(req, m, 0, RQ_NET_QUEUED);
 		req->w.cb = w_send_read_req;
-		drbd_queue_work(&first_peer_device(device)->connection->sender_work,
+		drbd_queue_work(&connection->sender_work,
 				&req->w);
 		break;
 
@@ -585,23 +587,23 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
 		mod_rq_state(req, m, 0, RQ_NET_QUEUED|RQ_EXP_BARR_ACK);
 		req->w.cb = w_send_dblock;
-		drbd_queue_work(&first_peer_device(device)->connection->sender_work,
+		drbd_queue_work(&connection->sender_work,
 				&req->w);
 
 		/* close the epoch, in case it outgrew the limit */
 		rcu_read_lock();
-		nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
+		nc = rcu_dereference(connection->net_conf);
 		p = nc->max_epoch_size;
 		rcu_read_unlock();
-		if (first_peer_device(device)->connection->current_tle_writes >= p)
-			start_new_tl_epoch(first_peer_device(device)->connection);
+		if (connection->current_tle_writes >= p)
+			start_new_tl_epoch(connection);
 
 		break;
 
 	case QUEUE_FOR_SEND_OOS:
 		mod_rq_state(req, m, 0, RQ_NET_QUEUED);
 		req->w.cb = w_send_out_of_sync;
-		drbd_queue_work(&first_peer_device(device)->connection->sender_work,
+		drbd_queue_work(&connection->sender_work,
 				&req->w);
 		break;
 
@@ -714,7 +716,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 
 		get_ldev(device); /* always succeeds in this call path */
 		req->w.cb = w_restart_disk_io;
-		drbd_queue_work(&first_peer_device(device)->connection->sender_work,
+		drbd_queue_work(&connection->sender_work,
 				&req->w);
 		break;
 
@@ -736,7 +738,8 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 
 		mod_rq_state(req, m, RQ_COMPLETION_SUSP, RQ_NET_QUEUED|RQ_NET_PENDING);
 		if (req->w.cb) {
-			drbd_queue_work(&first_peer_device(device)->connection->sender_work,
+			/* w.cb expected to be w_send_dblock, or w_send_read_req */
+			drbd_queue_work(&connection->sender_work,
 					&req->w);
 			rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ;
 		} /* else: FIXME can this happen? */
@@ -769,7 +772,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		break;
 
 	case QUEUE_AS_DRBD_BARRIER:
-		start_new_tl_epoch(first_peer_device(device)->connection);
+		start_new_tl_epoch(connection);
 		mod_rq_state(req, m, 0, RQ_NET_OK|RQ_NET_DONE);
 		break;
 	};
diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c
index a5d8aae00e04..19da7c7590cd 100644
--- a/drivers/block/drbd/drbd_state.c
+++ b/drivers/block/drbd/drbd_state.c
@@ -952,6 +952,8 @@ enum drbd_state_rv
 __drbd_set_state(struct drbd_device *device, union drbd_state ns,
 	         enum chg_state_flags flags, struct completion *done)
 {
+	struct drbd_peer_device *peer_device = first_peer_device(device);
+	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
 	union drbd_state os;
 	enum drbd_state_rv rv = SS_SUCCESS;
 	enum sanitize_state_warnings ssw;
@@ -978,9 +980,9 @@ __drbd_set_state(struct drbd_device *device, union drbd_state ns,
 			   this happen...*/
 
 			if (is_valid_state(device, os) == rv)
-				rv = is_valid_soft_transition(os, ns, first_peer_device(device)->connection);
+				rv = is_valid_soft_transition(os, ns, connection);
 		} else
-			rv = is_valid_soft_transition(os, ns, first_peer_device(device)->connection);
+			rv = is_valid_soft_transition(os, ns, connection);
 	}
 
 	if (rv < SS_SUCCESS) {
@@ -997,7 +999,7 @@ __drbd_set_state(struct drbd_device *device, union drbd_state ns,
 	   sanitize_state(). Only display it here if we where not called from
 	   _conn_request_state() */
 	if (!(flags & CS_DC_SUSP))
-		conn_pr_state_change(first_peer_device(device)->connection, os, ns,
+		conn_pr_state_change(connection, os, ns,
 				     (flags & ~CS_DC_MASK) | CS_DC_SUSP);
 
 	/* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
@@ -1017,19 +1019,19 @@ __drbd_set_state(struct drbd_device *device, union drbd_state ns,
 
 	/* put replicated vs not-replicated requests in seperate epochs */
 	if (did_remote != should_do_remote)
-		start_new_tl_epoch(first_peer_device(device)->connection);
+		start_new_tl_epoch(connection);
 
 	if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING)
 		drbd_print_uuids(device, "attached to UUIDs");
 
 	/* Wake up role changes, that were delayed because of connection establishing */
 	if (os.conn == C_WF_REPORT_PARAMS && ns.conn != C_WF_REPORT_PARAMS &&
-	    no_peer_wf_report_params(first_peer_device(device)->connection))
-		clear_bit(STATE_SENT, &first_peer_device(device)->connection->flags);
+	    no_peer_wf_report_params(connection))
+		clear_bit(STATE_SENT, &connection->flags);
 
 	wake_up(&device->misc_wait);
 	wake_up(&device->state_wait);
-	wake_up(&first_peer_device(device)->connection->ping_wait);
+	wake_up(&connection->ping_wait);
 
 	/* Aborted verify run, or we reached the stop sector.
 	 * Log the last position, unless end-of-device. */
@@ -1118,21 +1120,21 @@ __drbd_set_state(struct drbd_device *device, union drbd_state ns,
 
 	/* Receiver should clean up itself */
 	if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING)
-		drbd_thread_stop_nowait(&first_peer_device(device)->connection->receiver);
+		drbd_thread_stop_nowait(&connection->receiver);
 
 	/* Now the receiver finished cleaning up itself, it should die */
 	if (os.conn != C_STANDALONE && ns.conn == C_STANDALONE)
-		drbd_thread_stop_nowait(&first_peer_device(device)->connection->receiver);
+		drbd_thread_stop_nowait(&connection->receiver);
 
 	/* Upon network failure, we need to restart the receiver. */
 	if (os.conn > C_WF_CONNECTION &&
 	    ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT)
-		drbd_thread_restart_nowait(&first_peer_device(device)->connection->receiver);
+		drbd_thread_restart_nowait(&connection->receiver);
 
 	/* Resume AL writing if we get a connection */
 	if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
 		drbd_resume_al(device);
-		first_peer_device(device)->connection->connect_cnt++;
+		connection->connect_cnt++;
 	}
 
 	/* remember last attach time so request_timer_fn() won't
@@ -1150,7 +1152,7 @@ __drbd_set_state(struct drbd_device *device, union drbd_state ns,
 		ascw->w.cb = w_after_state_ch;
 		ascw->device = device;
 		ascw->done = done;
-		drbd_queue_work(&first_peer_device(device)->connection->sender_work,
+		drbd_queue_work(&connection->sender_work,
 				&ascw->w);
 	} else {
 		drbd_err(device, "Could not kmalloc an ascw\n");
@@ -1222,6 +1224,8 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
 			   union drbd_state ns, enum chg_state_flags flags)
 {
 	struct drbd_resource *resource = device->resource;
+	struct drbd_peer_device *peer_device = first_peer_device(device);
+	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
 	struct sib_info sib;
 
 	sib.sib_reason = SIB_STATE_CHANGE;
@@ -1245,7 +1249,6 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
 	   state change. This function might sleep */
 
 	if (ns.susp_nod) {
-		struct drbd_connection *connection = first_peer_device(device)->connection;
 		enum drbd_req_event what = NOTHING;
 
 		spin_lock_irq(&device->resource->req_lock);
@@ -1267,8 +1270,6 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
 	}
 
 	if (ns.susp_fen) {
-		struct drbd_connection *connection = first_peer_device(device)->connection;
-
 		spin_lock_irq(&device->resource->req_lock);
 		if (resource->susp_fen && conn_lowest_conn(connection) >= C_CONNECTED) {
 			/* case2: The connection was established again: */
@@ -1294,8 +1295,8 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
 	 * which is unexpected. */
 	if ((os.conn != C_SYNC_SOURCE && os.conn != C_PAUSED_SYNC_S) &&
 	    (ns.conn == C_SYNC_SOURCE || ns.conn == C_PAUSED_SYNC_S) &&
-	    first_peer_device(device)->connection->agreed_pro_version >= 96 && get_ldev(device)) {
-		drbd_gen_and_send_sync_uuid(first_peer_device(device));
+	    connection->agreed_pro_version >= 96 && get_ldev(device)) {
+		drbd_gen_and_send_sync_uuid(peer_device);
 		put_ldev(device);
 	}
 
@@ -1309,8 +1310,8 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
 			atomic_set(&device->rs_pending_cnt, 0);
 			drbd_rs_cancel_all(device);
 
-			drbd_send_uuids(first_peer_device(device));
-			drbd_send_state(first_peer_device(device), ns);
+			drbd_send_uuids(peer_device);
+			drbd_send_state(peer_device, ns);
 		}
 		/* No point in queuing send_bitmap if we don't have a connection
 		 * anymore, so check also the _current_ state, not only the new state
@@ -1335,7 +1336,7 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
 				set_bit(NEW_CUR_UUID, &device->flags);
 			} else {
 				drbd_uuid_new_current(device);
-				drbd_send_uuids(first_peer_device(device));
+				drbd_send_uuids(peer_device);
 			}
 		}
 		put_ldev(device);
@@ -1346,7 +1347,7 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
 		if (os.peer == R_SECONDARY && ns.peer == R_PRIMARY &&
 		    device->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
 			drbd_uuid_new_current(device);
-			drbd_send_uuids(first_peer_device(device));
+			drbd_send_uuids(peer_device);
 		}
 		/* D_DISKLESS Peer becomes secondary */
 		if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
@@ -1373,16 +1374,16 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
 	/* Last part of the attaching process ... */
 	if (ns.conn >= C_CONNECTED &&
 	    os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
-		drbd_send_sizes(first_peer_device(device), 0, 0);  /* to start sync... */
-		drbd_send_uuids(first_peer_device(device));
-		drbd_send_state(first_peer_device(device), ns);
+		drbd_send_sizes(peer_device, 0, 0);  /* to start sync... */
+		drbd_send_uuids(peer_device);
+		drbd_send_state(peer_device, ns);
 	}
 
 	/* We want to pause/continue resync, tell peer. */
 	if (ns.conn >= C_CONNECTED &&
 	     ((os.aftr_isp != ns.aftr_isp) ||
 	      (os.user_isp != ns.user_isp)))
-		drbd_send_state(first_peer_device(device), ns);
+		drbd_send_state(peer_device, ns);
 
 	/* In case one of the isp bits got set, suspend other devices. */
 	if ((!os.aftr_isp && !os.peer_isp && !os.user_isp) &&
@@ -1392,10 +1393,10 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
 	/* Make sure the peer gets informed about eventual state
 	   changes (ISP bits) while we were in WFReportParams. */
 	if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED)
-		drbd_send_state(first_peer_device(device), ns);
+		drbd_send_state(peer_device, ns);
 
 	if (os.conn != C_AHEAD && ns.conn == C_AHEAD)
-		drbd_send_state(first_peer_device(device), ns);
+		drbd_send_state(peer_device, ns);
 
 	/* We are in the progress to start a full sync... */
 	if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
@@ -1449,7 +1450,7 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
 			 drbd_disk_str(device->state.disk));
 
 		if (ns.conn >= C_CONNECTED)
-			drbd_send_state(first_peer_device(device), ns);
+			drbd_send_state(peer_device, ns);
 
 		drbd_rs_cancel_all(device);
 
@@ -1473,7 +1474,7 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
 			 drbd_disk_str(device->state.disk));
 
 		if (ns.conn >= C_CONNECTED)
-			drbd_send_state(first_peer_device(device), ns);
+			drbd_send_state(peer_device, ns);
 		/* corresponding get_ldev in __drbd_set_state
 		 * this may finally trigger drbd_ldev_destroy. */
 		put_ldev(device);
@@ -1481,7 +1482,7 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
 
 	/* Notify peer that I had a local IO error, and did not detached.. */
 	if (os.disk == D_UP_TO_DATE && ns.disk == D_INCONSISTENT && ns.conn >= C_CONNECTED)
-		drbd_send_state(first_peer_device(device), ns);
+		drbd_send_state(peer_device, ns);
 
 	/* Disks got bigger while they were detached */
 	if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
@@ -1499,14 +1500,14 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
 	/* sync target done with resync. Explicitly notify peer, even though
 	 * it should (at least for non-empty resyncs) already know itself. */
 	if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED)
-		drbd_send_state(first_peer_device(device), ns);
+		drbd_send_state(peer_device, ns);
 
 	/* Verify finished, or reached stop sector. Peer did not know about
 	 * the stop sector, and we may even have changed the stop sector during
 	 * verify to interrupt/stop early. Send the new state. */
 	if (os.conn == C_VERIFY_S && ns.conn == C_CONNECTED
 	    && verify_can_do_stop_sector(device))
-		drbd_send_state(first_peer_device(device), ns);
+		drbd_send_state(peer_device, ns);
 
 	/* This triggers bitmap writeout of potentially still unwritten pages
 	 * if the resync finished cleanly, or aborted because of peer disk
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index d8f57b6305cd..595ab57aea96 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -583,8 +583,10 @@ static int drbd_rs_number_requests(struct drbd_device *device)
 	return number;
 }
 
-static int make_resync_request(struct drbd_device *device, int cancel)
+static int make_resync_request(struct drbd_device *const device, int cancel)
 {
+	struct drbd_peer_device *const peer_device = first_peer_device(device);
+	struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
 	unsigned long bit;
 	sector_t sector;
 	const sector_t capacity = drbd_get_capacity(device->this_bdev);
@@ -618,15 +620,15 @@ static int make_resync_request(struct drbd_device *device, int cancel)
 
 	for (i = 0; i < number; i++) {
 		/* Stop generating RS requests, when half of the send buffer is filled */
-		mutex_lock(&first_peer_device(device)->connection->data.mutex);
-		if (first_peer_device(device)->connection->data.socket) {
-			queued = first_peer_device(device)->connection->data.socket->sk->sk_wmem_queued;
-			sndbuf = first_peer_device(device)->connection->data.socket->sk->sk_sndbuf;
+		mutex_lock(&connection->data.mutex);
+		if (connection->data.socket) {
+			queued = connection->data.socket->sk->sk_wmem_queued;
+			sndbuf = connection->data.socket->sk->sk_sndbuf;
 		} else {
 			queued = 1;
 			sndbuf = 0;
 		}
-		mutex_unlock(&first_peer_device(device)->connection->data.mutex);
+		mutex_unlock(&connection->data.mutex);
 		if (queued > sndbuf / 2)
 			goto requeue;
 
@@ -696,9 +698,9 @@ next_sector:
 		/* adjust very last sectors, in case we are oddly sized */
 		if (sector + (size>>9) > capacity)
 			size = (capacity-sector)<<9;
-		if (first_peer_device(device)->connection->agreed_pro_version >= 89 &&
-		    first_peer_device(device)->connection->csums_tfm) {
-			switch (read_for_csum(first_peer_device(device), sector, size)) {
+		if (connection->agreed_pro_version >= 89 &&
+		    connection->csums_tfm) {
+			switch (read_for_csum(peer_device, sector, size)) {
 			case -EIO: /* Disk failure */
 				put_ldev(device);
 				return -EIO;
@@ -717,7 +719,7 @@ next_sector:
 			int err;
 
 			inc_rs_pending(device);
-			err = drbd_send_drequest(first_peer_device(device), P_RS_DATA_REQUEST,
+			err = drbd_send_drequest(peer_device, P_RS_DATA_REQUEST,
 						 sector, size, ID_SYNCER);
 			if (err) {
 				drbd_err(device, "drbd_send_drequest() failed, aborting...\n");
@@ -1351,7 +1353,8 @@ int w_send_out_of_sync(struct drbd_work *w, int cancel)
 {
 	struct drbd_request *req = container_of(w, struct drbd_request, w);
 	struct drbd_device *device = req->device;
-	struct drbd_connection *connection = first_peer_device(device)->connection;
+	struct drbd_peer_device *const peer_device = first_peer_device(device);
+	struct drbd_connection *const connection = peer_device->connection;
 	int err;
 
 	if (unlikely(cancel)) {
@@ -1365,7 +1368,7 @@ int w_send_out_of_sync(struct drbd_work *w, int cancel)
 	 * No more barriers will be sent, until we leave AHEAD mode again. */
 	maybe_send_barrier(connection, req->epoch);
 
-	err = drbd_send_out_of_sync(first_peer_device(device), req);
+	err = drbd_send_out_of_sync(peer_device, req);
 	req_mod(req, OOS_HANDED_TO_NETWORK);
 
 	return err;
@@ -1380,7 +1383,8 @@ int w_send_dblock(struct drbd_work *w, int cancel)
 {
 	struct drbd_request *req = container_of(w, struct drbd_request, w);
 	struct drbd_device *device = req->device;
-	struct drbd_connection *connection = first_peer_device(device)->connection;
+	struct drbd_peer_device *const peer_device = first_peer_device(device);
+	struct drbd_connection *connection = peer_device->connection;
 	int err;
 
 	if (unlikely(cancel)) {
@@ -1392,7 +1396,7 @@ int w_send_dblock(struct drbd_work *w, int cancel)
 	maybe_send_barrier(connection, req->epoch);
 	connection->send.current_epoch_writes++;
 
-	err = drbd_send_dblock(first_peer_device(device), req);
+	err = drbd_send_dblock(peer_device, req);
 	req_mod(req, err ? SEND_FAILED : HANDED_OVER_TO_NETWORK);
 
 	return err;
@@ -1407,7 +1411,8 @@ int w_send_read_req(struct drbd_work *w, int cancel)
 {
 	struct drbd_request *req = container_of(w, struct drbd_request, w);
 	struct drbd_device *device = req->device;
-	struct drbd_connection *connection = first_peer_device(device)->connection;
+	struct drbd_peer_device *const peer_device = first_peer_device(device);
+	struct drbd_connection *connection = peer_device->connection;
 	int err;
 
 	if (unlikely(cancel)) {
@@ -1419,7 +1424,7 @@ int w_send_read_req(struct drbd_work *w, int cancel)
 	 * if there was any yet. */
 	maybe_send_barrier(connection, req->epoch);
 
-	err = drbd_send_drequest(first_peer_device(device), P_DATA_REQUEST, req->i.sector, req->i.size,
+	err = drbd_send_drequest(peer_device, P_DATA_REQUEST, req->i.sector, req->i.size,
 				 (unsigned long)req);
 
 	req_mod(req, err ? SEND_FAILED : HANDED_OVER_TO_NETWORK);
@@ -1633,6 +1638,8 @@ int w_start_resync(struct drbd_work *w, int cancel)
  */
 void drbd_start_resync(struct drbd_device *device, enum drbd_conns side)
 {
+	struct drbd_peer_device *peer_device = first_peer_device(device);
+	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
 	union drbd_state ns;
 	int r;
 
@@ -1651,7 +1658,7 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side)
 			if (r > 0) {
 				drbd_info(device, "before-resync-target handler returned %d, "
 					 "dropping connection.\n", r);
-				conn_request_state(first_peer_device(device)->connection, NS(conn, C_DISCONNECTING), CS_HARD);
+				conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
 				return;
 			}
 		} else /* C_SYNC_SOURCE */ {
@@ -1664,7 +1671,7 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side)
 			} else {
 				drbd_info(device, "before-resync-source handler returned %d, "
 					 "dropping connection.\n", r);
-				conn_request_state(first_peer_device(device)->connection,
+				conn_request_state(connection,
 						   NS(conn, C_DISCONNECTING), CS_HARD);
 				return;
 			}
@@ -1672,7 +1679,7 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side)
 		}
 	}
 
-	if (current == first_peer_device(device)->connection->worker.task) {
+	if (current == connection->worker.task) {
 		/* The worker should not sleep waiting for state_mutex,
 		   that can take long */
 		if (!mutex_trylock(device->state_mutex)) {
@@ -1756,12 +1763,10 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side)
 	 * drbd_resync_finished from here in that case.
 	 * We drbd_gen_and_send_sync_uuid here for protocol < 96,
	 * and from after_state_ch otherwise. */
-	if (side == C_SYNC_SOURCE &&
-	    first_peer_device(device)->connection->agreed_pro_version < 96)
-		drbd_gen_and_send_sync_uuid(first_peer_device(device));
+	if (side == C_SYNC_SOURCE && connection->agreed_pro_version < 96)
+		drbd_gen_and_send_sync_uuid(peer_device);
 
-	if (first_peer_device(device)->connection->agreed_pro_version < 95 &&
-	    device->rs_total == 0) {
+	if (connection->agreed_pro_version < 95 && device->rs_total == 0) {
 		/* This still has a race (about when exactly the peers
 		 * detect connection loss) that can lead to a full sync
 		 * on next handshake. In 8.3.9 we fixed this with explicit
@@ -1777,7 +1782,7 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side)
 			int timeo;
 
 			rcu_read_lock();
-			nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
+			nc = rcu_dereference(connection->net_conf);
 			timeo = nc->ping_int * HZ + nc->ping_timeo * HZ / 9;
 			rcu_read_unlock();
 			schedule_timeout_interruptible(timeo);
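
One detail worth noting: the new function-scope initializers come in two flavors. Admin and state-change paths guard against a device that has no peer device, while the sender-work callbacks (w_send_dblock() and friends) dereference unconditionally, presumably because a request queued on connection->sender_work implies the connection exists. The two forms, as they appear in the hunks above:

    /* Guarded form, e.g. drbd_set_role(), __drbd_set_state(): */
    struct drbd_peer_device *const peer_device = first_peer_device(device);
    struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;

    /* Unguarded form, e.g. w_send_dblock(): */
    struct drbd_peer_device *const peer_device = first_peer_device(device);
    struct drbd_connection *connection = peer_device->connection;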