author		Andreas Gruenbacher <agruen@linbit.com>	2011-05-31 08:33:49 -0400
committer	Philipp Reisner <philipp.reisner@linbit.com>	2014-02-17 10:44:51 -0500
commit		a6b32bc3cebd3fb6848c526763733b9dbc389c02 (patch)
tree		91435ad006bf2eaada891ae26e01bb51fcc790d9
parent		bde89a9e151b482765ed40e04307a6190236b387 (diff)
drbd: Introduce "peer_device" object between "device" and "connection"
In a setup where a device (aka volume) can replicate to multiple peers and
one connection can be shared between multiple devices, we need separate
objects to represent devices on peer nodes and network connections.

As a first step to introduce multiple connections per device, give each
drbd_device object a single drbd_peer_device object which connects it to a
drbd_connection object.

Signed-off-by: Andreas Gruenbacher <agruen@linbit.com>
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
-rw-r--r--	drivers/block/drbd/drbd_actlog.c	8
-rw-r--r--	drivers/block/drbd/drbd_bitmap.c	8
-rw-r--r--	drivers/block/drbd/drbd_int.h		58
-rw-r--r--	drivers/block/drbd/drbd_main.c		156
-rw-r--r--	drivers/block/drbd/drbd_nl.c		90
-rw-r--r--	drivers/block/drbd/drbd_proc.c		6
-rw-r--r--	drivers/block/drbd/drbd_receiver.c	246
-rw-r--r--	drivers/block/drbd/drbd_req.c		48
-rw-r--r--	drivers/block/drbd/drbd_req.h		4
-rw-r--r--	drivers/block/drbd/drbd_state.c		73
-rw-r--r--	drivers/block/drbd/drbd_worker.c	96
11 files changed, 416 insertions(+), 377 deletions(-)
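To illustrate the object graph this patch sets up before reading the per-file changes: each drbd_device now carries a list of peer-device objects, each of which points at one drbd_connection, and first_peer_device() returns the head of that list, so former device->connection accesses become first_peer_device(device)->connection. The following is a minimal userspace sketch with simplified stand-in types, not the kernel structures themselves (the real code links peer_devices with list_head):

#include <stdio.h>

/* Simplified stand-ins for drbd_connection and drbd_peer_device. */
struct connection { const char *name; };

struct peer_device {
	struct peer_device *next;	/* stands in for the list_head linkage */
	struct connection *connection;
};

struct device {
	struct peer_device *peer_devices;	/* head of the per-device list */
};

/* Analogue of first_peer_device(): with exactly one peer per device,
 * as in this patch, the first list entry is the only one. */
static struct peer_device *first_peer_device(struct device *device)
{
	return device->peer_devices;
}

int main(void)
{
	struct connection conn = { "peer-node" };
	struct peer_device pd = { NULL, &conn };
	struct device dev = { &pd };

	/* Former device->connection accesses now go through the peer_device. */
	printf("%s\n", first_peer_device(&dev)->connection->name);
	return 0;
}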
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 8b507455f71e..4af4dc166373 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -315,7 +315,7 @@ void drbd_al_begin_io_commit(struct drbd_device *device, bool delegate)
 {
 	bool locked = false;

-	BUG_ON(delegate && current == device->connection->worker.task);
+	BUG_ON(delegate && current == first_peer_device(device)->connection->worker.task);

 	/* Serialize multiple transactions.
 	 * This uses test_and_set_bit, memory barrier is implicit.
@@ -354,7 +354,7 @@ void drbd_al_begin_io_commit(struct drbd_device *device, bool delegate)
  */
 void drbd_al_begin_io(struct drbd_device *device, struct drbd_interval *i, bool delegate)
 {
-	BUG_ON(delegate && current == device->connection->worker.task);
+	BUG_ON(delegate && current == first_peer_device(device)->connection->worker.task);

 	if (drbd_al_begin_io_prepare(device, i))
 		drbd_al_begin_io_commit(device, delegate);
@@ -614,7 +614,7 @@ static int al_write_transaction(struct drbd_device *device, bool delegate)
 		init_completion(&al_work.event);
 		al_work.w.cb = w_al_write_transaction;
 		al_work.w.device = device;
-		drbd_queue_work_front(&device->connection->sender_work, &al_work.w);
+		drbd_queue_work_front(&first_peer_device(device)->connection->sender_work, &al_work.w);
 		wait_for_completion(&al_work.event);
 		return al_work.err;
 	} else
@@ -796,7 +796,7 @@ static void drbd_try_clear_on_disk_bm(struct drbd_device *device, sector_t secto
 			udw->enr = ext->lce.lc_number;
 			udw->w.cb = w_update_odbm;
 			udw->w.device = device;
-			drbd_queue_work_front(&device->connection->sender_work, &udw->w);
+			drbd_queue_work_front(&first_peer_device(device)->connection->sender_work, &udw->w);
 		} else {
 			dev_warn(DEV, "Could not kmalloc an udw\n");
 		}
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index cd3e0dea7a5d..cb8e64978b8e 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -119,9 +119,9 @@ static void __bm_print_lock_info(struct drbd_device *device, const char *func)
 	if (!__ratelimit(&drbd_ratelimit_state))
 		return;
 	dev_err(DEV, "FIXME %s in %s, bitmap locked for '%s' by %s\n",
-		drbd_task_to_thread_name(device->connection, current),
+		drbd_task_to_thread_name(first_peer_device(device)->connection, current),
 		func, b->bm_why ?: "?",
-		drbd_task_to_thread_name(device->connection, b->bm_task));
+		drbd_task_to_thread_name(first_peer_device(device)->connection, b->bm_task));
 }

 void drbd_bm_lock(struct drbd_device *device, char *why, enum bm_flag flags)
@@ -138,9 +138,9 @@ void drbd_bm_lock(struct drbd_device *device, char *why, enum bm_flag flags)

 	if (trylock_failed) {
 		dev_warn(DEV, "%s going to '%s' but bitmap already locked for '%s' by %s\n",
-			 drbd_task_to_thread_name(device->connection, current),
+			 drbd_task_to_thread_name(first_peer_device(device)->connection, current),
 			 why, b->bm_why ?: "?",
-			 drbd_task_to_thread_name(device->connection, b->bm_task));
+			 drbd_task_to_thread_name(first_peer_device(device)->connection, b->bm_task));
 		mutex_lock(&b->bm_change);
 	}
 	if (BM_LOCKED_MASK & b->bm_flags)
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 32517a0cbc62..85e2f4b56a06 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -483,7 +483,7 @@ struct drbd_backing_dev {
 	struct block_device *backing_bdev;
 	struct block_device *md_bdev;
 	struct drbd_md md;
-	struct disk_conf *disk_conf; /* RCU, for updates: device->connection->conf_update */
+	struct disk_conf *disk_conf; /* RCU, for updates: first_peer_device(device)->connection->conf_update */
 	sector_t known_size; /* last known size of that backing device */
 };

@@ -617,8 +617,14 @@ struct submit_worker {
 	struct list_head writes;
 };

-struct drbd_device {
+struct drbd_peer_device {
+	struct list_head peer_devices;
+	struct drbd_device *device;
 	struct drbd_connection *connection;
+};
+
+struct drbd_device {
+	struct list_head peer_devices;
 	int vnr;			/* volume number within the connection */
 	struct kref kref;

@@ -744,7 +750,7 @@ struct drbd_device {
 	struct bm_io_work bm_io_work;
 	u64 ed_uuid; /* UUID of the exposed data */
 	struct mutex own_state_mutex;
-	struct mutex *state_mutex; /* either own_state_mutex or device->connection->cstate_mutex */
+	struct mutex *state_mutex; /* either own_state_mutex or first_peer_device(device)->connection->cstate_mutex */
 	char congestion_reason;  /* Why we where congested... */
 	atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
 	atomic_t rs_sect_ev; /* for submitted resync data rate, both */
@@ -768,6 +774,20 @@ static inline struct drbd_device *minor_to_device(unsigned int minor)
 	return (struct drbd_device *)idr_find(&minors, minor);
 }

+static inline struct drbd_peer_device *first_peer_device(struct drbd_device *device)
+{
+	return list_first_entry(&device->peer_devices, struct drbd_peer_device, peer_devices);
+}
+
+#define for_each_peer_device(peer_device, device) \
+	list_for_each_entry(peer_device, &device->peer_devices, peer_devices)
+
+#define for_each_peer_device_rcu(peer_device, device) \
+	list_for_each_entry_rcu(peer_device, &device->peer_devices, peer_devices)
+
+#define for_each_peer_device_safe(peer_device, tmp, device) \
+	list_for_each_entry_safe(peer_device, tmp, &device->peer_devices, peer_devices)
+
 static inline unsigned int device_to_minor(struct drbd_device *device)
 {
 	return device->minor;
@@ -1154,7 +1174,7 @@ extern struct bio *bio_alloc_drbd(gfp_t gfp_mask);
 extern rwlock_t global_state_lock;

 extern int conn_lowest_minor(struct drbd_connection *connection);
-enum drbd_ret_code conn_new_minor(struct drbd_connection *connection, unsigned int minor, int vnr);
+enum drbd_ret_code drbd_create_minor(struct drbd_connection *connection, unsigned int minor, int vnr);
 extern void drbd_minor_destroy(struct kref *kref);

 extern int set_resource_options(struct drbd_connection *connection, struct res_opts *res_opts);
@@ -1275,7 +1295,7 @@ extern void conn_flush_workqueue(struct drbd_connection *connection);
 extern int drbd_connected(struct drbd_device *device);
 static inline void drbd_flush_workqueue(struct drbd_device *device)
 {
-	conn_flush_workqueue(device->connection);
+	conn_flush_workqueue(first_peer_device(device)->connection);
 }

 /* Yes, there is kernel_setsockopt, but only since 2.6.18.
@@ -1421,9 +1441,9 @@ static inline union drbd_state drbd_read_state(struct drbd_device *device)
 	union drbd_state rv;

 	rv.i = device->state.i;
-	rv.susp = device->connection->susp;
-	rv.susp_nod = device->connection->susp_nod;
-	rv.susp_fen = device->connection->susp_fen;
+	rv.susp = first_peer_device(device)->connection->susp;
+	rv.susp_nod = first_peer_device(device)->connection->susp_nod;
+	rv.susp_fen = first_peer_device(device)->connection->susp_fen;

 	return rv;
 }
@@ -1505,9 +1525,9 @@ static inline void drbd_chk_io_error_(struct drbd_device *device,
 {
 	if (error) {
 		unsigned long flags;
-		spin_lock_irqsave(&device->connection->req_lock, flags);
+		spin_lock_irqsave(&first_peer_device(device)->connection->req_lock, flags);
 		__drbd_chk_io_error_(device, forcedetach, where);
-		spin_unlock_irqrestore(&device->connection->req_lock, flags);
+		spin_unlock_irqrestore(&first_peer_device(device)->connection->req_lock, flags);
 	}
 }

@@ -1783,7 +1803,7 @@ static inline void put_ldev(struct drbd_device *device)
 		if (device->state.disk == D_FAILED) {
 			/* all application IO references gone. */
 			if (!test_and_set_bit(GO_DISKLESS, &device->flags))
-				drbd_queue_work(&device->connection->sender_work, &device->go_diskless);
+				drbd_queue_work(&first_peer_device(device)->connection->sender_work, &device->go_diskless);
 		}
 		wake_up(&device->misc_wait);
 	}
@@ -1865,7 +1885,7 @@ static inline int drbd_get_max_buffers(struct drbd_device *device)
 	int mxb;

 	rcu_read_lock();
-	nc = rcu_dereference(device->connection->net_conf);
+	nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
 	mxb = nc ? nc->max_buffers : 1000000;  /* arbitrary limit on open requests */
 	rcu_read_unlock();

@@ -1908,7 +1928,7 @@ static inline int drbd_state_is_stable(struct drbd_device *device)

 	/* Allow IO in BM exchange states with new protocols */
 	case C_WF_BITMAP_S:
-		if (device->connection->agreed_pro_version < 96)
+		if (first_peer_device(device)->connection->agreed_pro_version < 96)
 			return 0;
 		break;

@@ -1944,7 +1964,7 @@ static inline int drbd_state_is_stable(struct drbd_device *device)

 static inline int drbd_suspended(struct drbd_device *device)
 {
-	struct drbd_connection *connection = device->connection;
+	struct drbd_connection *connection = first_peer_device(device)->connection;

 	return connection->susp || connection->susp_fen || connection->susp_nod;
 }
@@ -1979,11 +1999,11 @@ static inline bool inc_ap_bio_cond(struct drbd_device *device)
 {
 	bool rv = false;

-	spin_lock_irq(&device->connection->req_lock);
+	spin_lock_irq(&first_peer_device(device)->connection->req_lock);
 	rv = may_inc_ap_bio(device);
 	if (rv)
 		atomic_inc(&device->ap_bio_cnt);
-	spin_unlock_irq(&device->connection->req_lock);
+	spin_unlock_irq(&first_peer_device(device)->connection->req_lock);

 	return rv;
 }
@@ -2010,7 +2030,7 @@ static inline void dec_ap_bio(struct drbd_device *device)

 	if (ap_bio == 0 && test_bit(BITMAP_IO, &device->flags)) {
 		if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
-			drbd_queue_work(&device->connection->sender_work, &device->bm_io_work.w);
+			drbd_queue_work(&first_peer_device(device)->connection->sender_work, &device->bm_io_work.w);
 	}

 	/* this currently does wake_up for every dec_ap_bio!
@@ -2022,8 +2042,8 @@ static inline void dec_ap_bio(struct drbd_device *device)

 static inline bool verify_can_do_stop_sector(struct drbd_device *device)
 {
-	return device->connection->agreed_pro_version >= 97 &&
-		device->connection->agreed_pro_version != 100;
+	return first_peer_device(device)->connection->agreed_pro_version >= 97 &&
+		first_peer_device(device)->connection->agreed_pro_version != 100;
 }

 static inline int drbd_set_ed_uuid(struct drbd_device *device, u64 val)
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index e4fd1806dc25..b7c858f51fa6 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -308,7 +308,7 @@ void tl_clear(struct drbd_connection *connection)
  */
 void tl_abort_disk_io(struct drbd_device *device)
 {
-	struct drbd_connection *connection = device->connection;
+	struct drbd_connection *connection = first_peer_device(device)->connection;
 	struct drbd_request *req, *r;

 	spin_lock_irq(&connection->req_lock);
@@ -633,7 +633,7 @@ void *conn_prepare_command(struct drbd_connection *connection, struct drbd_socke

 void *drbd_prepare_command(struct drbd_device *device, struct drbd_socket *sock)
 {
-	return conn_prepare_command(device->connection, sock);
+	return conn_prepare_command(first_peer_device(device)->connection, sock);
 }

 static int __send_command(struct drbd_connection *connection, int vnr,
@@ -686,7 +686,7 @@ int drbd_send_command(struct drbd_device *device, struct drbd_socket *sock,
 {
 	int err;

-	err = __send_command(device->connection, device->vnr, sock, cmd, header_size,
+	err = __send_command(first_peer_device(device)->connection, device->vnr, sock, cmd, header_size,
 			     data, size);
 	mutex_unlock(&sock->mutex);
 	return err;
@@ -717,18 +717,18 @@ int drbd_send_sync_param(struct drbd_device *device)
 	struct drbd_socket *sock;
 	struct p_rs_param_95 *p;
 	int size;
-	const int apv = device->connection->agreed_pro_version;
+	const int apv = first_peer_device(device)->connection->agreed_pro_version;
 	enum drbd_packet cmd;
 	struct net_conf *nc;
 	struct disk_conf *dc;

-	sock = &device->connection->data;
+	sock = &first_peer_device(device)->connection->data;
 	p = drbd_prepare_command(device, sock);
 	if (!p)
 		return -EIO;

 	rcu_read_lock();
-	nc = rcu_dereference(device->connection->net_conf);
+	nc = rcu_dereference(first_peer_device(device)->connection->net_conf);

 	size = apv <= 87 ? sizeof(struct p_rs_param)
 	     : apv == 88 ? sizeof(struct p_rs_param)
@@ -831,7 +831,7 @@ static int _drbd_send_uuids(struct drbd_device *device, u64 uuid_flags)
 	if (!get_ldev_if_state(device, D_NEGOTIATING))
 		return 0;

-	sock = &device->connection->data;
+	sock = &first_peer_device(device)->connection->data;
 	p = drbd_prepare_command(device, sock);
 	if (!p) {
 		put_ldev(device);
@@ -845,7 +845,7 @@ static int _drbd_send_uuids(struct drbd_device *device, u64 uuid_flags)
 	device->comm_bm_set = drbd_bm_total_weight(device);
 	p->uuid[UI_SIZE] = cpu_to_be64(device->comm_bm_set);
 	rcu_read_lock();
-	uuid_flags |= rcu_dereference(device->connection->net_conf)->discard_my_data ? 1 : 0;
+	uuid_flags |= rcu_dereference(first_peer_device(device)->connection->net_conf)->discard_my_data ? 1 : 0;
 	rcu_read_unlock();
 	uuid_flags |= test_bit(CRASHED_PRIMARY, &device->flags) ? 2 : 0;
 	uuid_flags |= device->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
@@ -900,7 +900,7 @@ void drbd_gen_and_send_sync_uuid(struct drbd_device *device)
 	drbd_print_uuids(device, "updated sync UUID");
 	drbd_md_sync(device);

-	sock = &device->connection->data;
+	sock = &first_peer_device(device)->connection->data;
 	p = drbd_prepare_command(device, sock);
 	if (p) {
 		p->uuid = cpu_to_be64(uuid);
@@ -933,14 +933,14 @@ int drbd_send_sizes(struct drbd_device *device, int trigger_reply, enum dds_flag
 		max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */
 	}

-	sock = &device->connection->data;
+	sock = &first_peer_device(device)->connection->data;
 	p = drbd_prepare_command(device, sock);
 	if (!p)
 		return -EIO;

-	if (device->connection->agreed_pro_version <= 94)
+	if (first_peer_device(device)->connection->agreed_pro_version <= 94)
 		max_bio_size = min(max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
-	else if (device->connection->agreed_pro_version < 100)
+	else if (first_peer_device(device)->connection->agreed_pro_version < 100)
 		max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE_P95);

 	p->d_size = cpu_to_be64(d_size);
@@ -961,7 +961,7 @@ int drbd_send_current_state(struct drbd_device *device)
 	struct drbd_socket *sock;
 	struct p_state *p;

-	sock = &device->connection->data;
+	sock = &first_peer_device(device)->connection->data;
 	p = drbd_prepare_command(device, sock);
 	if (!p)
 		return -EIO;
@@ -984,7 +984,7 @@ int drbd_send_state(struct drbd_device *device, union drbd_state state)
 	struct drbd_socket *sock;
 	struct p_state *p;

-	sock = &device->connection->data;
+	sock = &first_peer_device(device)->connection->data;
 	p = drbd_prepare_command(device, sock);
 	if (!p)
 		return -EIO;
@@ -997,7 +997,7 @@ int drbd_send_state_req(struct drbd_device *device, union drbd_state mask, union
 	struct drbd_socket *sock;
 	struct p_req_state *p;

-	sock = &device->connection->data;
+	sock = &first_peer_device(device)->connection->data;
 	p = drbd_prepare_command(device, sock);
 	if (!p)
 		return -EIO;
@@ -1027,7 +1027,7 @@ void drbd_send_sr_reply(struct drbd_device *device, enum drbd_state_rv retcode)
 	struct drbd_socket *sock;
 	struct p_req_state_reply *p;

-	sock = &device->connection->meta;
+	sock = &first_peer_device(device)->connection->meta;
 	p = drbd_prepare_command(device, sock);
 	if (p) {
 		p->retcode = cpu_to_be32(retcode);
@@ -1081,9 +1081,9 @@ static int fill_bitmap_rle_bits(struct drbd_device *device,

 	/* may we use this feature? */
 	rcu_read_lock();
-	use_rle = rcu_dereference(device->connection->net_conf)->use_rle;
+	use_rle = rcu_dereference(first_peer_device(device)->connection->net_conf)->use_rle;
 	rcu_read_unlock();
-	if (!use_rle || device->connection->agreed_pro_version < 90)
+	if (!use_rle || first_peer_device(device)->connection->agreed_pro_version < 90)
 		return 0;

 	if (c->bit_offset >= c->bm_bits)
@@ -1172,8 +1172,8 @@ static int fill_bitmap_rle_bits(struct drbd_device *device,
 static int
 send_bitmap_rle_or_plain(struct drbd_device *device, struct bm_xfer_ctx *c)
 {
-	struct drbd_socket *sock = &device->connection->data;
-	unsigned int header_size = drbd_header_size(device->connection);
+	struct drbd_socket *sock = &first_peer_device(device)->connection->data;
+	unsigned int header_size = drbd_header_size(first_peer_device(device)->connection);
 	struct p_compressed_bm *p = sock->sbuf + header_size;
 	int len, err;

@@ -1184,7 +1184,7 @@ send_bitmap_rle_or_plain(struct drbd_device *device, struct bm_xfer_ctx *c)

 	if (len) {
 		dcbp_set_code(p, RLE_VLI_Bits);
-		err = __send_command(device->connection, device->vnr, sock,
+		err = __send_command(first_peer_device(device)->connection, device->vnr, sock,
 				     P_COMPRESSED_BITMAP, sizeof(*p) + len,
 				     NULL, 0);
 		c->packets[0]++;
@@ -1205,7 +1205,7 @@ send_bitmap_rle_or_plain(struct drbd_device *device, struct bm_xfer_ctx *c)
 		len = num_words * sizeof(*p);
 		if (len)
 			drbd_bm_get_lel(device, c->word_offset, num_words, p);
-		err = __send_command(device->connection, device->vnr, sock, P_BITMAP, len, NULL, 0);
+		err = __send_command(first_peer_device(device)->connection, device->vnr, sock, P_BITMAP, len, NULL, 0);
 		c->word_offset += num_words;
 		c->bit_offset = c->word_offset * BITS_PER_LONG;

@@ -1265,7 +1265,7 @@ static int _drbd_send_bitmap(struct drbd_device *device)

 int drbd_send_bitmap(struct drbd_device *device)
 {
-	struct drbd_socket *sock = &device->connection->data;
+	struct drbd_socket *sock = &first_peer_device(device)->connection->data;
 	int err = -1;

 	mutex_lock(&sock->mutex);
@@ -1309,7 +1309,7 @@ static int _drbd_send_ack(struct drbd_device *device, enum drbd_packet cmd,
 	if (device->state.conn < C_CONNECTED)
 		return -EIO;

-	sock = &device->connection->meta;
+	sock = &first_peer_device(device)->connection->meta;
 	p = drbd_prepare_command(device, sock);
 	if (!p)
 		return -EIO;
@@ -1326,8 +1326,8 @@ static int _drbd_send_ack(struct drbd_device *device, enum drbd_packet cmd,
 void drbd_send_ack_dp(struct drbd_device *device, enum drbd_packet cmd,
 		      struct p_data *dp, int data_size)
 {
-	if (device->connection->peer_integrity_tfm)
-		data_size -= crypto_hash_digestsize(device->connection->peer_integrity_tfm);
+	if (first_peer_device(device)->connection->peer_integrity_tfm)
+		data_size -= crypto_hash_digestsize(first_peer_device(device)->connection->peer_integrity_tfm);
 	_drbd_send_ack(device, cmd, dp->sector, cpu_to_be32(data_size),
 		       dp->block_id);
 }
@@ -1370,7 +1370,7 @@ int drbd_send_drequest(struct drbd_device *device, int cmd,
 	struct drbd_socket *sock;
 	struct p_block_req *p;

-	sock = &device->connection->data;
+	sock = &first_peer_device(device)->connection->data;
 	p = drbd_prepare_command(device, sock);
 	if (!p)
 		return -EIO;
@@ -1388,7 +1388,7 @@ int drbd_send_drequest_csum(struct drbd_device *device, sector_t sector, int siz

 	/* FIXME: Put the digest into the preallocated socket buffer. */

-	sock = &device->connection->data;
+	sock = &first_peer_device(device)->connection->data;
 	p = drbd_prepare_command(device, sock);
 	if (!p)
 		return -EIO;
@@ -1404,7 +1404,7 @@ int drbd_send_ov_request(struct drbd_device *device, sector_t sector, int size)
 	struct drbd_socket *sock;
 	struct p_block_req *p;

-	sock = &device->connection->data;
+	sock = &first_peer_device(device)->connection->data;
 	p = drbd_prepare_command(device, sock);
 	if (!p)
 		return -EIO;
@@ -1476,9 +1476,9 @@ static int _drbd_no_send_page(struct drbd_device *device, struct page *page,
 	void *addr;
 	int err;

-	socket = device->connection->data.socket;
+	socket = first_peer_device(device)->connection->data.socket;
 	addr = kmap(page) + offset;
-	err = drbd_send_all(device->connection, socket, addr, size, msg_flags);
+	err = drbd_send_all(first_peer_device(device)->connection, socket, addr, size, msg_flags);
 	kunmap(page);
 	if (!err)
 		device->send_cnt += size >> 9;
@@ -1488,7 +1488,7 @@ static int _drbd_no_send_page(struct drbd_device *device, struct page *page,
 static int _drbd_send_page(struct drbd_device *device, struct page *page,
 			   int offset, size_t size, unsigned msg_flags)
 {
-	struct socket *socket = device->connection->data.socket;
+	struct socket *socket = first_peer_device(device)->connection->data.socket;
 	mm_segment_t oldfs = get_fs();
 	int len = size;
 	int err = -EIO;
@@ -1503,7 +1503,7 @@ static int _drbd_send_page(struct drbd_device *device, struct page *page,
 		return _drbd_no_send_page(device, page, offset, size, msg_flags);

 	msg_flags |= MSG_NOSIGNAL;
-	drbd_update_congested(device->connection);
+	drbd_update_congested(first_peer_device(device)->connection);
 	set_fs(KERNEL_DS);
 	do {
 		int sent;
@@ -1511,7 +1511,7 @@ static int _drbd_send_page(struct drbd_device *device, struct page *page,
 		sent = socket->ops->sendpage(socket, page, offset, len, msg_flags);
 		if (sent <= 0) {
 			if (sent == -EAGAIN) {
-				if (we_should_drop_the_connection(device->connection, socket))
+				if (we_should_drop_the_connection(first_peer_device(device)->connection, socket))
 					break;
 				continue;
 			}
@@ -1525,7 +1525,7 @@ static int _drbd_send_page(struct drbd_device *device, struct page *page,
 		offset += sent;
 	} while (len > 0 /* THINK && device->cstate >= C_CONNECTED*/);
 	set_fs(oldfs);
-	clear_bit(NET_CONGESTED, &device->connection->flags);
+	clear_bit(NET_CONGESTED, &first_peer_device(device)->connection->flags);

 	if (len == 0) {
 		err = 0;
@@ -1593,7 +1593,7 @@ static int _drbd_send_zc_ee(struct drbd_device *device,

 static u32 bio_flags_to_wire(struct drbd_device *device, unsigned long bi_rw)
 {
-	if (device->connection->agreed_pro_version >= 95)
+	if (first_peer_device(device)->connection->agreed_pro_version >= 95)
 		return  (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
 			(bi_rw & REQ_FUA ? DP_FUA : 0) |
 			(bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
@@ -1613,9 +1613,10 @@ int drbd_send_dblock(struct drbd_device *device, struct drbd_request *req)
 	int dgs;
 	int err;

-	sock = &device->connection->data;
+	sock = &first_peer_device(device)->connection->data;
 	p = drbd_prepare_command(device, sock);
-	dgs = device->connection->integrity_tfm ? crypto_hash_digestsize(device->connection->integrity_tfm) : 0;
+	dgs = first_peer_device(device)->connection->integrity_tfm ?
+	      crypto_hash_digestsize(first_peer_device(device)->connection->integrity_tfm) : 0;

 	if (!p)
 		return -EIO;
@@ -1626,7 +1627,7 @@ int drbd_send_dblock(struct drbd_device *device, struct drbd_request *req)
 	if (device->state.conn >= C_SYNC_SOURCE &&
 	    device->state.conn <= C_PAUSED_SYNC_T)
 		dp_flags |= DP_MAY_SET_IN_SYNC;
-	if (device->connection->agreed_pro_version >= 100) {
+	if (first_peer_device(device)->connection->agreed_pro_version >= 100) {
 		if (req->rq_state & RQ_EXP_RECEIVE_ACK)
 			dp_flags |= DP_SEND_RECEIVE_ACK;
 		if (req->rq_state & RQ_EXP_WRITE_ACK)
@@ -1634,8 +1635,8 @@ int drbd_send_dblock(struct drbd_device *device, struct drbd_request *req)
 	}
 	p->dp_flags = cpu_to_be32(dp_flags);
 	if (dgs)
-		drbd_csum_bio(device, device->connection->integrity_tfm, req->master_bio, p + 1);
-	err = __send_command(device->connection, device->vnr, sock, P_DATA, sizeof(*p) + dgs, NULL, req->i.size);
+		drbd_csum_bio(device, first_peer_device(device)->connection->integrity_tfm, req->master_bio, p + 1);
+	err = __send_command(first_peer_device(device)->connection, device->vnr, sock, P_DATA, sizeof(*p) + dgs, NULL, req->i.size);
 	if (!err) {
 		/* For protocol A, we have to memcpy the payload into
 		 * socket buffers, as we may complete right away
@@ -1658,7 +1659,7 @@ int drbd_send_dblock(struct drbd_device *device, struct drbd_request *req)
 		/* 64 byte, 512 bit, is the largest digest size
 		 * currently supported in kernel crypto. */
 		unsigned char digest[64];
-		drbd_csum_bio(device, device->connection->integrity_tfm, req->master_bio, digest);
+		drbd_csum_bio(device, first_peer_device(device)->connection->integrity_tfm, req->master_bio, digest);
 		if (memcmp(p + 1, digest, dgs)) {
 			dev_warn(DEV,
 				"Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
@@ -1685,10 +1686,11 @@ int drbd_send_block(struct drbd_device *device, enum drbd_packet cmd,
 	int err;
 	int dgs;

-	sock = &device->connection->data;
+	sock = &first_peer_device(device)->connection->data;
 	p = drbd_prepare_command(device, sock);

-	dgs = device->connection->integrity_tfm ? crypto_hash_digestsize(device->connection->integrity_tfm) : 0;
+	dgs = first_peer_device(device)->connection->integrity_tfm ?
+	      crypto_hash_digestsize(first_peer_device(device)->connection->integrity_tfm) : 0;

 	if (!p)
 		return -EIO;
@@ -1697,8 +1699,8 @@ int drbd_send_block(struct drbd_device *device, enum drbd_packet cmd,
 	p->seq_num = 0;  /* unused */
 	p->dp_flags = 0;
 	if (dgs)
-		drbd_csum_ee(device, device->connection->integrity_tfm, peer_req, p + 1);
-	err = __send_command(device->connection, device->vnr, sock, cmd, sizeof(*p) + dgs, NULL, peer_req->i.size);
+		drbd_csum_ee(device, first_peer_device(device)->connection->integrity_tfm, peer_req, p + 1);
+	err = __send_command(first_peer_device(device)->connection, device->vnr, sock, cmd, sizeof(*p) + dgs, NULL, peer_req->i.size);
 	if (!err)
 		err = _drbd_send_zc_ee(device, peer_req);
 	mutex_unlock(&sock->mutex);  /* locked by drbd_prepare_command() */
@@ -1711,7 +1713,7 @@ int drbd_send_out_of_sync(struct drbd_device *device, struct drbd_request *req)
 	struct drbd_socket *sock;
 	struct p_block_desc *p;

-	sock = &device->connection->data;
+	sock = &first_peer_device(device)->connection->data;
 	p = drbd_prepare_command(device, sock);
 	if (!p)
 		return -EIO;
@@ -1832,7 +1834,7 @@ static int drbd_open(struct block_device *bdev, fmode_t mode)
 	int rv = 0;

 	mutex_lock(&drbd_main_mutex);
-	spin_lock_irqsave(&device->connection->req_lock, flags);
+	spin_lock_irqsave(&first_peer_device(device)->connection->req_lock, flags);
 	/* to have a stable device->state.role
 	 * and no race with updating open_cnt */

@@ -1845,7 +1847,7 @@ static int drbd_open(struct block_device *bdev, fmode_t mode)

 	if (!rv)
 		device->open_cnt++;
-	spin_unlock_irqrestore(&device->connection->req_lock, flags);
+	spin_unlock_irqrestore(&first_peer_device(device)->connection->req_lock, flags);
 	mutex_unlock(&drbd_main_mutex);

 	return rv;
@@ -1950,9 +1952,9 @@ void drbd_init_set_defaults(struct drbd_device *device)
 void drbd_device_cleanup(struct drbd_device *device)
 {
 	int i;
-	if (device->connection->receiver.t_state != NONE)
+	if (first_peer_device(device)->connection->receiver.t_state != NONE)
 		dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
-				device->connection->receiver.t_state);
+				first_peer_device(device)->connection->receiver.t_state);

 	device->al_writ_cnt  =
 	device->bm_writ_cnt  =
@@ -1970,7 +1972,7 @@ void drbd_device_cleanup(struct drbd_device *device)
 		device->rs_mark_left[i] = 0;
 		device->rs_mark_time[i] = 0;
 	}
-	D_ASSERT(device->connection->net_conf == NULL);
+	D_ASSERT(first_peer_device(device)->connection->net_conf == NULL);

 	drbd_set_my_capacity(device, 0);
 	if (device->bitmap) {
@@ -1990,7 +1992,7 @@ void drbd_device_cleanup(struct drbd_device *device)
 	D_ASSERT(list_empty(&device->read_ee));
 	D_ASSERT(list_empty(&device->net_ee));
 	D_ASSERT(list_empty(&device->resync_reads));
-	D_ASSERT(list_empty(&device->connection->sender_work.q));
+	D_ASSERT(list_empty(&first_peer_device(device)->connection->sender_work.q));
 	D_ASSERT(list_empty(&device->resync_work.list));
 	D_ASSERT(list_empty(&device->unplug_work.list));
 	D_ASSERT(list_empty(&device->go_diskless.list));
@@ -2159,7 +2161,7 @@ static void drbd_release_all_peer_reqs(struct drbd_device *device)
 void drbd_minor_destroy(struct kref *kref)
 {
 	struct drbd_device *device = container_of(kref, struct drbd_device, kref);
-	struct drbd_connection *connection = device->connection;
+	struct drbd_connection *connection = first_peer_device(device)->connection;

 	del_timer_sync(&device->request_timer);

@@ -2190,6 +2192,7 @@ void drbd_minor_destroy(struct kref *kref)
 	put_disk(device->vdisk);
 	blk_cleanup_queue(device->rq_queue);
 	kfree(device->rs_plan_s);
+	kfree(first_peer_device(device));
 	kfree(device);

 	kref_put(&connection->kref, &conn_destroy);
@@ -2300,7 +2303,7 @@ static void drbd_cleanup(void)

 	idr_for_each_entry(&minors, device, i) {
 		idr_remove(&minors, device_to_minor(device));
-		idr_remove(&device->connection->volumes, device->vnr);
+		idr_remove(&first_peer_device(device)->connection->volumes, device->vnr);
 		destroy_workqueue(device->submit.wq);
 		del_gendisk(device->vdisk);
 		/* synchronize_rcu(); No other threads running at this point */
@@ -2343,7 +2346,7 @@ static int drbd_congested(void *congested_data, int bdi_bits)
 		goto out;
 	}

-	if (test_bit(CALLBACK_PENDING, &device->connection->flags)) {
+	if (test_bit(CALLBACK_PENDING, &first_peer_device(device)->connection->flags)) {
 		r |= (1 << BDI_async_congested);
 		/* Without good local data, we would need to read from remote,
 		 * and that would need the worker thread as well, which is
@@ -2367,7 +2370,8 @@ static int drbd_congested(void *congested_data, int bdi_bits)
 		reason = 'b';
 	}

-	if (bdi_bits & (1 << BDI_async_congested) && test_bit(NET_CONGESTED, &device->connection->flags)) {
+	if (bdi_bits & (1 << BDI_async_congested) &&
+	    test_bit(NET_CONGESTED, &first_peer_device(device)->connection->flags)) {
 		r |= (1 << BDI_async_congested);
 		reason = reason == 'b' ? 'a' : 'n';
 	}
@@ -2606,9 +2610,10 @@ static int init_submitter(struct drbd_device *device)
 	return 0;
 }

-enum drbd_ret_code conn_new_minor(struct drbd_connection *connection, unsigned int minor, int vnr)
+enum drbd_ret_code drbd_create_minor(struct drbd_connection *connection, unsigned int minor, int vnr)
 {
 	struct drbd_device *device;
+	struct drbd_peer_device *peer_device;
 	struct gendisk *disk;
 	struct request_queue *q;
 	int vnr_got = vnr;
@@ -2623,9 +2628,15 @@ enum drbd_ret_code conn_new_minor(struct drbd_connection *connection, unsigned i
 	device = kzalloc(sizeof(struct drbd_device), GFP_KERNEL);
 	if (!device)
 		return ERR_NOMEM;
+	peer_device = kzalloc(sizeof(struct drbd_peer_device), GFP_KERNEL);
+	if (!peer_device)
+		goto out_no_peer_device;

+	INIT_LIST_HEAD(&device->peer_devices);
+	list_add(&peer_device->peer_devices, &device->peer_devices);
 	kref_get(&connection->kref);
-	device->connection = connection;
+	peer_device->connection = connection;
+	peer_device->device = device;

 	device->minor = minor;
 	device->vnr = vnr;
@@ -2666,7 +2677,7 @@ enum drbd_ret_code conn_new_minor(struct drbd_connection *connection, unsigned i
 	blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
 	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
 	blk_queue_merge_bvec(q, drbd_merge_bvec);
-	q->queue_lock = &device->connection->req_lock; /* needed since we use */
+	q->queue_lock = &first_peer_device(device)->connection->req_lock; /* needed since we use */

 	device->md_io_page = alloc_page(GFP_KERNEL);
 	if (!device->md_io_page)
@@ -2725,8 +2736,9 @@ out_no_io_page:
 out_no_disk:
 	blk_cleanup_queue(q);
 out_no_q:
-	kfree(device);
 	kref_put(&connection->kref, &conn_destroy);
+out_no_peer_device:
+	kfree(device);
 	return err;
 }

@@ -3172,14 +3184,14 @@ int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev)

 	rv = NO_ERROR;

-	spin_lock_irq(&device->connection->req_lock);
+	spin_lock_irq(&first_peer_device(device)->connection->req_lock);
 	if (device->state.conn < C_CONNECTED) {
 		unsigned int peer;
 		peer = be32_to_cpu(buffer->la_peer_max_bio_size);
 		peer = max(peer, DRBD_MAX_BIO_SIZE_SAFE);
 		device->peer_max_bio_size = peer;
 	}
-	spin_unlock_irq(&device->connection->req_lock);
+	spin_unlock_irq(&first_peer_device(device)->connection->req_lock);

  err:
 	drbd_md_put_buffer(device);
@@ -3454,7 +3466,7 @@ void drbd_queue_bitmap_io(struct drbd_device *device,
 			  void (*done)(struct drbd_device *, int),
 			  char *why, enum bm_flag flags)
 {
-	D_ASSERT(current == device->connection->worker.task);
+	D_ASSERT(current == first_peer_device(device)->connection->worker.task);

 	D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &device->flags));
 	D_ASSERT(!test_bit(BITMAP_IO, &device->flags));
@@ -3468,13 +3480,13 @@ void drbd_queue_bitmap_io(struct drbd_device *device,
 	device->bm_io_work.why = why;
 	device->bm_io_work.flags = flags;

-	spin_lock_irq(&device->connection->req_lock);
+	spin_lock_irq(&first_peer_device(device)->connection->req_lock);
 	set_bit(BITMAP_IO, &device->flags);
 	if (atomic_read(&device->ap_bio_cnt) == 0) {
 		if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
-			drbd_queue_work(&device->connection->sender_work, &device->bm_io_work.w);
+			drbd_queue_work(&first_peer_device(device)->connection->sender_work, &device->bm_io_work.w);
 	}
-	spin_unlock_irq(&device->connection->req_lock);
+	spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
 }

 /**
@@ -3491,7 +3503,7 @@ int drbd_bitmap_io(struct drbd_device *device, int (*io_fn)(struct drbd_device *
 {
 	int rv;

-	D_ASSERT(current != device->connection->worker.task);
+	D_ASSERT(current != first_peer_device(device)->connection->worker.task);

 	if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
 		drbd_suspend_io(device);
@@ -3532,7 +3544,7 @@ static void md_sync_timer_fn(unsigned long data)

 	/* must not double-queue! */
 	if (list_empty(&device->md_sync_work.list))
-		drbd_queue_work_front(&device->connection->sender_work, &device->md_sync_work);
+		drbd_queue_work_front(&first_peer_device(device)->connection->sender_work, &device->md_sync_work);
 }

 static int w_md_sync(struct drbd_work *w, int unused)
@@ -3631,7 +3643,7 @@ int drbd_wait_misc(struct drbd_device *device, struct drbd_interval *i)
 	long timeout;

 	rcu_read_lock();
-	nc = rcu_dereference(device->connection->net_conf);
+	nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
 	if (!nc) {
 		rcu_read_unlock();
 		return -ETIMEDOUT;
@@ -3642,10 +3654,10 @@ int drbd_wait_misc(struct drbd_device *device, struct drbd_interval *i)
 	/* Indicate to wake up device->misc_wait on progress.  */
 	i->waiting = true;
 	prepare_to_wait(&device->misc_wait, &wait, TASK_INTERRUPTIBLE);
-	spin_unlock_irq(&device->connection->req_lock);
+	spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
 	timeout = schedule_timeout(timeout);
 	finish_wait(&device->misc_wait, &wait);
-	spin_lock_irq(&device->connection->req_lock);
+	spin_lock_irq(&first_peer_device(device)->connection->req_lock);
 	if (!timeout || device->state.conn < C_CONNECTED)
 		return -ETIMEDOUT;
 	if (signal_pending(current))
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 1b5b7ea7f7cc..a8c9c86e29f5 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -246,10 +246,10 @@ static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,

 	/* some more paranoia, if the request was over-determined */
 	if (adm_ctx.device && adm_ctx.connection &&
-	    adm_ctx.device->connection != adm_ctx.connection) {
+	    first_peer_device(adm_ctx.device)->connection != adm_ctx.connection) {
 		pr_warning("request: minor=%u, resource=%s; but that minor belongs to connection %s\n",
 			   adm_ctx.minor, adm_ctx.resource_name,
-			   adm_ctx.device->connection->name);
+			   first_peer_device(adm_ctx.device)->connection->name);
 		drbd_msg_put_info("minor exists in different resource");
 		return ERR_INVALID_REQUEST;
 	}
@@ -258,7 +258,7 @@ static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
 	    adm_ctx.volume != adm_ctx.device->vnr) {
 		pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
 			   adm_ctx.minor, adm_ctx.volume,
-			   adm_ctx.device->vnr, adm_ctx.device->connection->name);
+			   adm_ctx.device->vnr, first_peer_device(adm_ctx.device)->connection->name);
 		drbd_msg_put_info("minor exists as different volume");
 		return ERR_INVALID_REQUEST;
 	}
@@ -323,7 +323,7 @@ int drbd_khelper(struct drbd_device *device, char *cmd)
 			NULL };
 	char mb[12];
 	char *argv[] = {usermode_helper, cmd, mb, NULL };
-	struct drbd_connection *connection = device->connection;
+	struct drbd_connection *connection = first_peer_device(device)->connection;
 	struct sib_info sib;
 	int ret;

@@ -544,7 +544,7 @@ drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force)
 	union drbd_state mask, val;

 	if (new_role == R_PRIMARY)
-		request_ping(device->connection); /* Detect a dead peer ASAP */
+		request_ping(first_peer_device(device)->connection); /* Detect a dead peer ASAP */

 	mutex_lock(device->state_mutex);

@@ -575,7 +575,7 @@ drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force)
 		    device->state.disk == D_CONSISTENT && mask.pdsk == 0) {
 			D_ASSERT(device->state.pdsk == D_UNKNOWN);

-			if (conn_try_outdate_peer(device->connection)) {
+			if (conn_try_outdate_peer(first_peer_device(device)->connection)) {
 				val.disk = D_UP_TO_DATE;
 				mask.disk = D_MASK;
 			}
@@ -585,7 +585,7 @@ drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force)
 		if (rv == SS_NOTHING_TO_DO)
 			goto out;
 		if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
-			if (!conn_try_outdate_peer(device->connection) && force) {
+			if (!conn_try_outdate_peer(first_peer_device(device)->connection) && force) {
 				dev_warn(DEV, "Forced into split brain situation!\n");
 				mask.pdsk = D_MASK;
 				val.pdsk = D_OUTDATED;
@@ -598,7 +598,7 @@ drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force)
 			   retry at most once more in this case. */
 			int timeo;
 			rcu_read_lock();
-			nc = rcu_dereference(device->connection->net_conf);
+			nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
 			timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
 			rcu_read_unlock();
 			schedule_timeout_interruptible(timeo);
@@ -633,11 +633,11 @@ drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force)
633 put_ldev(device); 633 put_ldev(device);
634 } 634 }
635 } else { 635 } else {
636 mutex_lock(&device->connection->conf_update); 636 mutex_lock(&first_peer_device(device)->connection->conf_update);
637 nc = device->connection->net_conf; 637 nc = first_peer_device(device)->connection->net_conf;
638 if (nc) 638 if (nc)
639 nc->discard_my_data = 0; /* without copy; single bit op is atomic */ 639 nc->discard_my_data = 0; /* without copy; single bit op is atomic */
640 mutex_unlock(&device->connection->conf_update); 640 mutex_unlock(&first_peer_device(device)->connection->conf_update);
641 641
642 set_disk_ro(device->vdisk, false); 642 set_disk_ro(device->vdisk, false);
643 if (get_ldev(device)) { 643 if (get_ldev(device)) {
@@ -1134,12 +1134,12 @@ void drbd_reconsider_max_bio_size(struct drbd_device *device)
1134 Because from 8.3.8 onwards the peer can use multiple 1134 Because from 8.3.8 onwards the peer can use multiple
1135 BIOs for a single peer_request */ 1135 BIOs for a single peer_request */
1136 if (device->state.conn >= C_WF_REPORT_PARAMS) { 1136 if (device->state.conn >= C_WF_REPORT_PARAMS) {
1137 if (device->connection->agreed_pro_version < 94) 1137 if (first_peer_device(device)->connection->agreed_pro_version < 94)
1138 peer = min(device->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET); 1138 peer = min(device->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
1139 /* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */ 1139 /* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
1140 else if (device->connection->agreed_pro_version == 94) 1140 else if (first_peer_device(device)->connection->agreed_pro_version == 94)
1141 peer = DRBD_MAX_SIZE_H80_PACKET; 1141 peer = DRBD_MAX_SIZE_H80_PACKET;
1142 else if (device->connection->agreed_pro_version < 100) 1142 else if (first_peer_device(device)->connection->agreed_pro_version < 100)
1143 peer = DRBD_MAX_BIO_SIZE_P95; /* drbd 8.3.8 onwards, before 8.4.0 */ 1143 peer = DRBD_MAX_BIO_SIZE_P95; /* drbd 8.3.8 onwards, before 8.4.0 */
1144 else 1144 else
1145 peer = DRBD_MAX_BIO_SIZE; 1145 peer = DRBD_MAX_BIO_SIZE;
@@ -1190,10 +1190,10 @@ static void drbd_suspend_al(struct drbd_device *device)
1190 } 1190 }
1191 1191
1192 drbd_al_shrink(device); 1192 drbd_al_shrink(device);
1193 spin_lock_irq(&device->connection->req_lock); 1193 spin_lock_irq(&first_peer_device(device)->connection->req_lock);
1194 if (device->state.conn < C_CONNECTED) 1194 if (device->state.conn < C_CONNECTED)
1195 s = !test_and_set_bit(AL_SUSPENDED, &device->flags); 1195 s = !test_and_set_bit(AL_SUSPENDED, &device->flags);
1196 spin_unlock_irq(&device->connection->req_lock); 1196 spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
1197 lc_unlock(device->act_log); 1197 lc_unlock(device->act_log);
1198 1198
1199 if (s) 1199 if (s)
@@ -1264,7 +1264,7 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
1264 goto fail; 1264 goto fail;
1265 } 1265 }
1266 1266
1267 mutex_lock(&device->connection->conf_update); 1267 mutex_lock(&first_peer_device(device)->connection->conf_update);
1268 old_disk_conf = device->ldev->disk_conf; 1268 old_disk_conf = device->ldev->disk_conf;
1269 *new_disk_conf = *old_disk_conf; 1269 *new_disk_conf = *old_disk_conf;
1270 if (should_set_defaults(info)) 1270 if (should_set_defaults(info))
@@ -1327,7 +1327,7 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
1327 rcu_assign_pointer(device->rs_plan_s, new_plan); 1327 rcu_assign_pointer(device->rs_plan_s, new_plan);
1328 } 1328 }
1329 1329
1330 mutex_unlock(&device->connection->conf_update); 1330 mutex_unlock(&first_peer_device(device)->connection->conf_update);
1331 1331
1332 if (new_disk_conf->al_updates) 1332 if (new_disk_conf->al_updates)
1333 device->ldev->md.flags &= ~MDF_AL_DISABLED; 1333 device->ldev->md.flags &= ~MDF_AL_DISABLED;
@@ -1339,7 +1339,7 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
1339 else 1339 else
1340 set_bit(MD_NO_FUA, &device->flags); 1340 set_bit(MD_NO_FUA, &device->flags);
1341 1341
1342 drbd_bump_write_ordering(device->connection, WO_bdev_flush); 1342 drbd_bump_write_ordering(first_peer_device(device)->connection, WO_bdev_flush);
1343 1343
1344 drbd_md_sync(device); 1344 drbd_md_sync(device);
1345 1345
@@ -1353,7 +1353,7 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
1353 goto success; 1353 goto success;
1354 1354
1355fail_unlock: 1355fail_unlock:
1356 mutex_unlock(&device->connection->conf_update); 1356 mutex_unlock(&first_peer_device(device)->connection->conf_update);
1357 fail: 1357 fail:
1358 kfree(new_disk_conf); 1358 kfree(new_disk_conf);
1359 kfree(new_plan); 1359 kfree(new_plan);
@@ -1388,7 +1388,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
1388 goto finish; 1388 goto finish;
1389 1389
1390 device = adm_ctx.device; 1390 device = adm_ctx.device;
1391 conn_reconfig_start(device->connection); 1391 conn_reconfig_start(first_peer_device(device)->connection);
1392 1392
1393 /* if you want to reconfigure, please tear down first */ 1393 /* if you want to reconfigure, please tear down first */
1394 if (device->state.disk > D_DISKLESS) { 1394 if (device->state.disk > D_DISKLESS) {
@@ -1455,7 +1455,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
1455 goto fail; 1455 goto fail;
1456 1456
1457 rcu_read_lock(); 1457 rcu_read_lock();
1458 nc = rcu_dereference(device->connection->net_conf); 1458 nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
1459 if (nc) { 1459 if (nc) {
1460 if (new_disk_conf->fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) { 1460 if (new_disk_conf->fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) {
1461 rcu_read_unlock(); 1461 rcu_read_unlock();
@@ -1636,7 +1636,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
1636 new_disk_conf = NULL; 1636 new_disk_conf = NULL;
1637 new_plan = NULL; 1637 new_plan = NULL;
1638 1638
1639 drbd_bump_write_ordering(device->connection, WO_bdev_flush); 1639 drbd_bump_write_ordering(first_peer_device(device)->connection, WO_bdev_flush);
1640 1640
1641 if (drbd_md_test_flag(device->ldev, MDF_CRASHED_PRIMARY)) 1641 if (drbd_md_test_flag(device->ldev, MDF_CRASHED_PRIMARY))
1642 set_bit(CRASHED_PRIMARY, &device->flags); 1642 set_bit(CRASHED_PRIMARY, &device->flags);
@@ -1644,7 +1644,8 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
1644 clear_bit(CRASHED_PRIMARY, &device->flags); 1644 clear_bit(CRASHED_PRIMARY, &device->flags);
1645 1645
1646 if (drbd_md_test_flag(device->ldev, MDF_PRIMARY_IND) && 1646 if (drbd_md_test_flag(device->ldev, MDF_PRIMARY_IND) &&
1647 !(device->state.role == R_PRIMARY && device->connection->susp_nod)) 1647 !(device->state.role == R_PRIMARY &&
1648 first_peer_device(device)->connection->susp_nod))
1648 set_bit(CRASHED_PRIMARY, &device->flags); 1649 set_bit(CRASHED_PRIMARY, &device->flags);
1649 1650
1650 device->send_cnt = 0; 1651 device->send_cnt = 0;
@@ -1702,7 +1703,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
1702 if (_drbd_bm_total_weight(device) == drbd_bm_bits(device)) 1703 if (_drbd_bm_total_weight(device) == drbd_bm_bits(device))
1703 drbd_suspend_al(device); /* IO is still suspended here... */ 1704 drbd_suspend_al(device); /* IO is still suspended here... */
1704 1705
1705 spin_lock_irq(&device->connection->req_lock); 1706 spin_lock_irq(&first_peer_device(device)->connection->req_lock);
1706 os = drbd_read_state(device); 1707 os = drbd_read_state(device);
1707 ns = os; 1708 ns = os;
1708 /* If MDF_CONSISTENT is not set go into inconsistent state, 1709 /* If MDF_CONSISTENT is not set go into inconsistent state,
@@ -1754,7 +1755,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
1754 } 1755 }
1755 1756
1756 rv = _drbd_set_state(device, ns, CS_VERBOSE, NULL); 1757 rv = _drbd_set_state(device, ns, CS_VERBOSE, NULL);
1757 spin_unlock_irq(&device->connection->req_lock); 1758 spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
1758 1759
1759 if (rv < SS_SUCCESS) 1760 if (rv < SS_SUCCESS)
1760 goto force_diskless_dec; 1761 goto force_diskless_dec;
@@ -1771,7 +1772,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
1771 1772
1772 kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE); 1773 kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE);
1773 put_ldev(device); 1774 put_ldev(device);
1774 conn_reconfig_done(device->connection); 1775 conn_reconfig_done(first_peer_device(device)->connection);
1775 drbd_adm_finish(info, retcode); 1776 drbd_adm_finish(info, retcode);
1776 return 0; 1777 return 0;
1777 1778
@@ -1781,7 +1782,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
1781 drbd_force_state(device, NS(disk, D_DISKLESS)); 1782 drbd_force_state(device, NS(disk, D_DISKLESS));
1782 drbd_md_sync(device); 1783 drbd_md_sync(device);
1783 fail: 1784 fail:
1784 conn_reconfig_done(device->connection); 1785 conn_reconfig_done(first_peer_device(device)->connection);
1785 if (nbc) { 1786 if (nbc) {
1786 if (nbc->backing_bdev) 1787 if (nbc->backing_bdev)
1787 blkdev_put(nbc->backing_bdev, 1788 blkdev_put(nbc->backing_bdev,
@@ -2357,7 +2358,7 @@ void resync_after_online_grow(struct drbd_device *device)
2357 if (device->state.role != device->state.peer) 2358 if (device->state.role != device->state.peer)
2358 iass = (device->state.role == R_PRIMARY); 2359 iass = (device->state.role == R_PRIMARY);
2359 else 2360 else
2360 iass = test_bit(RESOLVE_CONFLICTS, &device->connection->flags); 2361 iass = test_bit(RESOLVE_CONFLICTS, &first_peer_device(device)->connection->flags);
2361 2362
2362 if (iass) 2363 if (iass)
2363 drbd_start_resync(device, C_SYNC_SOURCE); 2364 drbd_start_resync(device, C_SYNC_SOURCE);
@@ -2412,7 +2413,7 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
2412 goto fail_ldev; 2413 goto fail_ldev;
2413 } 2414 }
2414 2415
2415 if (rs.no_resync && device->connection->agreed_pro_version < 93) { 2416 if (rs.no_resync && first_peer_device(device)->connection->agreed_pro_version < 93) {
2416 retcode = ERR_NEED_APV_93; 2417 retcode = ERR_NEED_APV_93;
2417 goto fail_ldev; 2418 goto fail_ldev;
2418 } 2419 }
@@ -2454,12 +2455,12 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
2454 device->ldev->known_size = drbd_get_capacity(device->ldev->backing_bdev); 2455 device->ldev->known_size = drbd_get_capacity(device->ldev->backing_bdev);
2455 2456
2456 if (new_disk_conf) { 2457 if (new_disk_conf) {
2457 mutex_lock(&device->connection->conf_update); 2458 mutex_lock(&first_peer_device(device)->connection->conf_update);
2458 old_disk_conf = device->ldev->disk_conf; 2459 old_disk_conf = device->ldev->disk_conf;
2459 *new_disk_conf = *old_disk_conf; 2460 *new_disk_conf = *old_disk_conf;
2460 new_disk_conf->disk_size = (sector_t)rs.resize_size; 2461 new_disk_conf->disk_size = (sector_t)rs.resize_size;
2461 rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf); 2462 rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
2462 mutex_unlock(&device->connection->conf_update); 2463 mutex_unlock(&first_peer_device(device)->connection->conf_update);
2463 synchronize_rcu(); 2464 synchronize_rcu();
2464 kfree(old_disk_conf); 2465 kfree(old_disk_conf);
2465 } 2466 }
@@ -2710,9 +2711,9 @@ int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
2710 retcode = drbd_request_state(device, NS3(susp, 0, susp_nod, 0, susp_fen, 0)); 2711 retcode = drbd_request_state(device, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
2711 if (retcode == SS_SUCCESS) { 2712 if (retcode == SS_SUCCESS) {
2712 if (device->state.conn < C_CONNECTED) 2713 if (device->state.conn < C_CONNECTED)
2713 tl_clear(device->connection); 2714 tl_clear(first_peer_device(device)->connection);
2714 if (device->state.disk == D_DISKLESS || device->state.disk == D_FAILED) 2715 if (device->state.disk == D_DISKLESS || device->state.disk == D_FAILED)
2715 tl_restart(device->connection, FAIL_FROZEN_DISK_IO); 2716 tl_restart(first_peer_device(device)->connection, FAIL_FROZEN_DISK_IO);
2716 } 2717 }
2717 drbd_resume_io(device); 2718 drbd_resume_io(device);
2718 2719
@@ -2778,10 +2779,10 @@ static int nla_put_status_info(struct sk_buff *skb, struct drbd_device *device,
2778 2779
2779 /* We need to add connection name and volume number information still. 2780 /* We need to add connection name and volume number information still.
2780 * Minor number is in drbd_genlmsghdr. */ 2781 * Minor number is in drbd_genlmsghdr. */
2781 if (nla_put_drbd_cfg_context(skb, device->connection, device->vnr)) 2782 if (nla_put_drbd_cfg_context(skb, first_peer_device(device)->connection, device->vnr))
2782 goto nla_put_failure; 2783 goto nla_put_failure;
2783 2784
2784 if (res_opts_to_skb(skb, &device->connection->res_opts, exclude_sensitive)) 2785 if (res_opts_to_skb(skb, &first_peer_device(device)->connection->res_opts, exclude_sensitive))
2785 goto nla_put_failure; 2786 goto nla_put_failure;
2786 2787
2787 rcu_read_lock(); 2788 rcu_read_lock();
@@ -2794,7 +2795,7 @@ static int nla_put_status_info(struct sk_buff *skb, struct drbd_device *device,
2794 if (!err) { 2795 if (!err) {
2795 struct net_conf *nc; 2796 struct net_conf *nc;
2796 2797
2797 nc = rcu_dereference(device->connection->net_conf); 2798 nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
2798 if (nc) 2799 if (nc)
2799 err = net_conf_to_skb(skb, nc, exclude_sensitive); 2800 err = net_conf_to_skb(skb, nc, exclude_sensitive);
2800 } 2801 }
@@ -2981,7 +2982,7 @@ next_connection:
2981 } 2982 }
2982 2983
2983 D_ASSERT(device->vnr == volume); 2984 D_ASSERT(device->vnr == volume);
2984 D_ASSERT(device->connection == connection); 2985 D_ASSERT(first_peer_device(device)->connection == connection);
2985 2986
2986 dh->minor = device_to_minor(device); 2987 dh->minor = device_to_minor(device);
2987 dh->ret_code = NO_ERROR; 2988 dh->ret_code = NO_ERROR;
@@ -3168,7 +3169,8 @@ int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
3168 } 3169 }
3169 3170
3170 /* this is "skip initial sync", assume to be clean */ 3171 /* this is "skip initial sync", assume to be clean */
3171 if (device->state.conn == C_CONNECTED && device->connection->agreed_pro_version >= 90 && 3172 if (device->state.conn == C_CONNECTED &&
3173 first_peer_device(device)->connection->agreed_pro_version >= 90 &&
3172 device->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) { 3174 device->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
3173 dev_info(DEV, "Preparing to skip initial sync\n"); 3175 dev_info(DEV, "Preparing to skip initial sync\n");
3174 skip_initial_sync = 1; 3176 skip_initial_sync = 1;
@@ -3191,10 +3193,10 @@ int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
3191 drbd_send_uuids_skip_initial_sync(device); 3193 drbd_send_uuids_skip_initial_sync(device);
3192 _drbd_uuid_set(device, UI_BITMAP, 0); 3194 _drbd_uuid_set(device, UI_BITMAP, 0);
3193 drbd_print_uuids(device, "cleared bitmap UUID"); 3195 drbd_print_uuids(device, "cleared bitmap UUID");
3194 spin_lock_irq(&device->connection->req_lock); 3196 spin_lock_irq(&first_peer_device(device)->connection->req_lock);
3195 _drbd_set_state(_NS2(device, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE), 3197 _drbd_set_state(_NS2(device, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
3196 CS_VERBOSE, NULL); 3198 CS_VERBOSE, NULL);
3197 spin_unlock_irq(&device->connection->req_lock); 3199 spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
3198 } 3200 }
3199 } 3201 }
3200 3202
@@ -3287,7 +3289,7 @@ int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info)
3287 } 3289 }
3288 3290
3289 /* drbd_adm_prepare made sure already 3291 /* drbd_adm_prepare made sure already
3290 * that device->connection and device->vnr match the request. */ 3292 * that first_peer_device(device)->connection and device->vnr match the request. */
3291 if (adm_ctx.device) { 3293 if (adm_ctx.device) {
3292 if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) 3294 if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
3293 retcode = ERR_MINOR_EXISTS; 3295 retcode = ERR_MINOR_EXISTS;
@@ -3295,7 +3297,7 @@ int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info)
3295 goto out; 3297 goto out;
3296 } 3298 }
3297 3299
3298 retcode = conn_new_minor(adm_ctx.connection, dh->minor, adm_ctx.volume); 3300 retcode = drbd_create_minor(adm_ctx.connection, dh->minor, adm_ctx.volume);
3299out: 3301out:
3300 drbd_adm_finish(info, retcode); 3302 drbd_adm_finish(info, retcode);
3301 return 0; 3303 return 0;
@@ -3310,7 +3312,7 @@ static enum drbd_ret_code adm_delete_minor(struct drbd_device *device)
3310 device->state.role == R_SECONDARY) { 3312 device->state.role == R_SECONDARY) {
3311 _drbd_request_state(device, NS(conn, C_WF_REPORT_PARAMS), 3313 _drbd_request_state(device, NS(conn, C_WF_REPORT_PARAMS),
3312 CS_VERBOSE + CS_WAIT_COMPLETE); 3314 CS_VERBOSE + CS_WAIT_COMPLETE);
3313 idr_remove(&device->connection->volumes, device->vnr); 3315 idr_remove(&first_peer_device(device)->connection->volumes, device->vnr);
3314 idr_remove(&minors, device_to_minor(device)); 3316 idr_remove(&minors, device_to_minor(device));
3315 destroy_workqueue(device->submit.wq); 3317 destroy_workqueue(device->submit.wq);
3316 del_gendisk(device->vdisk); 3318 del_gendisk(device->vdisk);
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c
index 9c4d413655e3..f1c81c101fad 100644
--- a/drivers/block/drbd/drbd_proc.c
+++ b/drivers/block/drbd/drbd_proc.c
@@ -251,7 +251,7 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
251 /* reset device->congestion_reason */ 251 /* reset device->congestion_reason */
252 bdi_rw_congested(&device->rq_queue->backing_dev_info); 252 bdi_rw_congested(&device->rq_queue->backing_dev_info);
253 253
254 nc = rcu_dereference(device->connection->net_conf); 254 nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
255 wp = nc ? nc->wire_protocol - DRBD_PROT_A + 'A' : ' '; 255 wp = nc ? nc->wire_protocol - DRBD_PROT_A + 'A' : ' ';
256 seq_printf(seq, 256 seq_printf(seq,
257 "%2d: cs:%s ro:%s/%s ds:%s/%s %c %c%c%c%c%c%c\n" 257 "%2d: cs:%s ro:%s/%s ds:%s/%s %c %c%c%c%c%c%c\n"
@@ -280,8 +280,8 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
280 atomic_read(&device->rs_pending_cnt), 280 atomic_read(&device->rs_pending_cnt),
281 atomic_read(&device->unacked_cnt), 281 atomic_read(&device->unacked_cnt),
282 atomic_read(&device->ap_bio_cnt), 282 atomic_read(&device->ap_bio_cnt),
283 device->connection->epochs, 283 first_peer_device(device)->connection->epochs,
284 write_ordering_chars[device->connection->write_ordering] 284 write_ordering_chars[first_peer_device(device)->connection->write_ordering]
285 ); 285 );
286 seq_printf(seq, " oos:%llu\n", 286 seq_printf(seq, " oos:%llu\n",
287 Bit2KB((unsigned long long) 287 Bit2KB((unsigned long long)
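The /proc status output now takes the same indirection: epoch counts and write ordering live on the connection, reached via the peer device. Conceptually, the intermediate object introduced by this commit looks roughly like the sketch below; the real definition in drbd_int.h carries more state, so treat this field set as an assumption:

/* Sketch of the new three-way object graph:
 * a device (volume) owns a list of peer_devices, and each
 * peer_device binds that volume to one drbd_connection. */
struct drbd_peer_device {
	struct list_head peer_devices;      /* entry in device->peer_devices */
	struct drbd_device *device;         /* the local volume */
	struct drbd_connection *connection; /* network link to one peer node */
};

With a single peer_device per device, first_peer_device(device)->connection is exactly the old device->connection; the indirection only pays off once a volume can replicate to several peers.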
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 42dbf5d86a43..e08e99f756a5 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -221,9 +221,9 @@ static void drbd_kick_lo_and_reclaim_net(struct drbd_device *device)
221 LIST_HEAD(reclaimed); 221 LIST_HEAD(reclaimed);
222 struct drbd_peer_request *peer_req, *t; 222 struct drbd_peer_request *peer_req, *t;
223 223
224 spin_lock_irq(&device->connection->req_lock); 224 spin_lock_irq(&first_peer_device(device)->connection->req_lock);
225 reclaim_finished_net_peer_reqs(device, &reclaimed); 225 reclaim_finished_net_peer_reqs(device, &reclaimed);
226 spin_unlock_irq(&device->connection->req_lock); 226 spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
227 227
228 list_for_each_entry_safe(peer_req, t, &reclaimed, w.list) 228 list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
229 drbd_free_net_peer_req(device, peer_req); 229 drbd_free_net_peer_req(device, peer_req);
@@ -252,7 +252,7 @@ struct page *drbd_alloc_pages(struct drbd_device *device, unsigned int number,
252 /* Yes, we may run up to @number over max_buffers. If we 252 /* Yes, we may run up to @number over max_buffers. If we
253 * follow it strictly, the admin will get it wrong anyways. */ 253 * follow it strictly, the admin will get it wrong anyways. */
254 rcu_read_lock(); 254 rcu_read_lock();
255 nc = rcu_dereference(device->connection->net_conf); 255 nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
256 mxb = nc ? nc->max_buffers : 1000000; 256 mxb = nc ? nc->max_buffers : 1000000;
257 rcu_read_unlock(); 257 rcu_read_unlock();
258 258
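One idiom recurs throughout the receiver changes: net_conf is RCU-protected, so readers briefly take rcu_read_lock(), copy the single field they need out of rcu_dereference(...->net_conf), and unlock again before acting on it. A minimal sketch of the post-patch pattern (the fallback value mirrors the hunk above; the helper name is made up for illustration):

/* Sketch: read max_buffers from the RCU-protected net_conf. */
static unsigned int drbd_get_max_buffers(struct drbd_device *device)
{
	struct net_conf *nc;
	unsigned int mxb;

	rcu_read_lock();
	nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
	mxb = nc ? nc->max_buffers : 1000000;	/* not configured yet */
	rcu_read_unlock();

	return mxb;
}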
@@ -288,7 +288,7 @@ struct page *drbd_alloc_pages(struct drbd_device *device, unsigned int number,
288} 288}
289 289
290/* Must not be used from irq, as that may deadlock: see drbd_alloc_pages. 290/* Must not be used from irq, as that may deadlock: see drbd_alloc_pages.
291 * Is also used from inside another spin_lock_irq(&device->connection->req_lock); 291 * Is also used from inside another spin_lock_irq(&first_peer_device(device)->connection->req_lock);
292 * Either links the page chain back to the global pool, 292 * Either links the page chain back to the global pool,
293 * or returns all pages to the system. */ 293 * or returns all pages to the system. */
294static void drbd_free_pages(struct drbd_device *device, struct page *page, int is_net) 294static void drbd_free_pages(struct drbd_device *device, struct page *page, int is_net)
@@ -396,9 +396,9 @@ int drbd_free_peer_reqs(struct drbd_device *device, struct list_head *list)
396 int count = 0; 396 int count = 0;
397 int is_net = list == &device->net_ee; 397 int is_net = list == &device->net_ee;
398 398
399 spin_lock_irq(&device->connection->req_lock); 399 spin_lock_irq(&first_peer_device(device)->connection->req_lock);
400 list_splice_init(list, &work_list); 400 list_splice_init(list, &work_list);
401 spin_unlock_irq(&device->connection->req_lock); 401 spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
402 402
403 list_for_each_entry_safe(peer_req, t, &work_list, w.list) { 403 list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
404 __drbd_free_peer_req(device, peer_req, is_net); 404 __drbd_free_peer_req(device, peer_req, is_net);
@@ -417,10 +417,10 @@ static int drbd_finish_peer_reqs(struct drbd_device *device)
417 struct drbd_peer_request *peer_req, *t; 417 struct drbd_peer_request *peer_req, *t;
418 int err = 0; 418 int err = 0;
419 419
420 spin_lock_irq(&device->connection->req_lock); 420 spin_lock_irq(&first_peer_device(device)->connection->req_lock);
421 reclaim_finished_net_peer_reqs(device, &reclaimed); 421 reclaim_finished_net_peer_reqs(device, &reclaimed);
422 list_splice_init(&device->done_ee, &work_list); 422 list_splice_init(&device->done_ee, &work_list);
423 spin_unlock_irq(&device->connection->req_lock); 423 spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
424 424
425 list_for_each_entry_safe(peer_req, t, &reclaimed, w.list) 425 list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
426 drbd_free_net_peer_req(device, peer_req); 426 drbd_free_net_peer_req(device, peer_req);
@@ -452,19 +452,19 @@ static void _drbd_wait_ee_list_empty(struct drbd_device *device,
452 * and calling prepare_to_wait in the fast path */ 452 * and calling prepare_to_wait in the fast path */
453 while (!list_empty(head)) { 453 while (!list_empty(head)) {
454 prepare_to_wait(&device->ee_wait, &wait, TASK_UNINTERRUPTIBLE); 454 prepare_to_wait(&device->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
455 spin_unlock_irq(&device->connection->req_lock); 455 spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
456 io_schedule(); 456 io_schedule();
457 finish_wait(&device->ee_wait, &wait); 457 finish_wait(&device->ee_wait, &wait);
458 spin_lock_irq(&device->connection->req_lock); 458 spin_lock_irq(&first_peer_device(device)->connection->req_lock);
459 } 459 }
460} 460}
461 461
462static void drbd_wait_ee_list_empty(struct drbd_device *device, 462static void drbd_wait_ee_list_empty(struct drbd_device *device,
463 struct list_head *head) 463 struct list_head *head)
464{ 464{
465 spin_lock_irq(&device->connection->req_lock); 465 spin_lock_irq(&first_peer_device(device)->connection->req_lock);
466 _drbd_wait_ee_list_empty(device, head); 466 _drbd_wait_ee_list_empty(device, head);
467 spin_unlock_irq(&device->connection->req_lock); 467 spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
468} 468}
469 469
470static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flags) 470static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flags)
@@ -838,8 +838,8 @@ int drbd_connected(struct drbd_device *device)
838 atomic_set(&device->packet_seq, 0); 838 atomic_set(&device->packet_seq, 0);
839 device->peer_seq = 0; 839 device->peer_seq = 0;
840 840
841 device->state_mutex = device->connection->agreed_pro_version < 100 ? 841 device->state_mutex = first_peer_device(device)->connection->agreed_pro_version < 100 ?
842 &device->connection->cstate_mutex : 842 &first_peer_device(device)->connection->cstate_mutex :
843 &device->own_state_mutex; 843 &device->own_state_mutex;
844 844
845 err = drbd_send_sync_param(device); 845 err = drbd_send_sync_param(device);
@@ -1492,18 +1492,18 @@ read_in_block(struct drbd_device *device, u64 id, sector_t sector,
1492 struct drbd_peer_request *peer_req; 1492 struct drbd_peer_request *peer_req;
1493 struct page *page; 1493 struct page *page;
1494 int dgs, ds, err; 1494 int dgs, ds, err;
1495 void *dig_in = device->connection->int_dig_in; 1495 void *dig_in = first_peer_device(device)->connection->int_dig_in;
1496 void *dig_vv = device->connection->int_dig_vv; 1496 void *dig_vv = first_peer_device(device)->connection->int_dig_vv;
1497 unsigned long *data; 1497 unsigned long *data;
1498 1498
1499 dgs = 0; 1499 dgs = 0;
1500 if (device->connection->peer_integrity_tfm) { 1500 if (first_peer_device(device)->connection->peer_integrity_tfm) {
1501 dgs = crypto_hash_digestsize(device->connection->peer_integrity_tfm); 1501 dgs = crypto_hash_digestsize(first_peer_device(device)->connection->peer_integrity_tfm);
1502 /* 1502 /*
1503 * FIXME: Receive the incoming digest into the receive buffer 1503 * FIXME: Receive the incoming digest into the receive buffer
1504 * here, together with its struct p_data? 1504 * here, together with its struct p_data?
1505 */ 1505 */
1506 err = drbd_recv_all_warn(device->connection, dig_in, dgs); 1506 err = drbd_recv_all_warn(first_peer_device(device)->connection, dig_in, dgs);
1507 if (err) 1507 if (err)
1508 return NULL; 1508 return NULL;
1509 data_size -= dgs; 1509 data_size -= dgs;
@@ -1539,7 +1539,7 @@ read_in_block(struct drbd_device *device, u64 id, sector_t sector,
1539 page_chain_for_each(page) { 1539 page_chain_for_each(page) {
1540 unsigned len = min_t(int, ds, PAGE_SIZE); 1540 unsigned len = min_t(int, ds, PAGE_SIZE);
1541 data = kmap(page); 1541 data = kmap(page);
1542 err = drbd_recv_all_warn(device->connection, data, len); 1542 err = drbd_recv_all_warn(first_peer_device(device)->connection, data, len);
1543 if (drbd_insert_fault(device, DRBD_FAULT_RECEIVE)) { 1543 if (drbd_insert_fault(device, DRBD_FAULT_RECEIVE)) {
1544 dev_err(DEV, "Fault injection: Corrupting data on receive\n"); 1544 dev_err(DEV, "Fault injection: Corrupting data on receive\n");
1545 data[0] = data[0] ^ (unsigned long)-1; 1545 data[0] = data[0] ^ (unsigned long)-1;
@@ -1553,7 +1553,7 @@ read_in_block(struct drbd_device *device, u64 id, sector_t sector,
1553 } 1553 }
1554 1554
1555 if (dgs) { 1555 if (dgs) {
1556 drbd_csum_ee(device, device->connection->peer_integrity_tfm, peer_req, dig_vv); 1556 drbd_csum_ee(device, first_peer_device(device)->connection->peer_integrity_tfm, peer_req, dig_vv);
1557 if (memcmp(dig_in, dig_vv, dgs)) { 1557 if (memcmp(dig_in, dig_vv, dgs)) {
1558 dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n", 1558 dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n",
1559 (unsigned long long)sector, data_size); 1559 (unsigned long long)sector, data_size);
@@ -1583,7 +1583,7 @@ static int drbd_drain_block(struct drbd_device *device, int data_size)
1583 while (data_size) { 1583 while (data_size) {
1584 unsigned int len = min_t(int, data_size, PAGE_SIZE); 1584 unsigned int len = min_t(int, data_size, PAGE_SIZE);
1585 1585
1586 err = drbd_recv_all_warn(device->connection, data, len); 1586 err = drbd_recv_all_warn(first_peer_device(device)->connection, data, len);
1587 if (err) 1587 if (err)
1588 break; 1588 break;
1589 data_size -= len; 1589 data_size -= len;
@@ -1600,13 +1600,13 @@ static int recv_dless_read(struct drbd_device *device, struct drbd_request *req,
1600 struct bvec_iter iter; 1600 struct bvec_iter iter;
1601 struct bio *bio; 1601 struct bio *bio;
1602 int dgs, err, expect; 1602 int dgs, err, expect;
1603 void *dig_in = device->connection->int_dig_in; 1603 void *dig_in = first_peer_device(device)->connection->int_dig_in;
1604 void *dig_vv = device->connection->int_dig_vv; 1604 void *dig_vv = first_peer_device(device)->connection->int_dig_vv;
1605 1605
1606 dgs = 0; 1606 dgs = 0;
1607 if (device->connection->peer_integrity_tfm) { 1607 if (first_peer_device(device)->connection->peer_integrity_tfm) {
1608 dgs = crypto_hash_digestsize(device->connection->peer_integrity_tfm); 1608 dgs = crypto_hash_digestsize(first_peer_device(device)->connection->peer_integrity_tfm);
1609 err = drbd_recv_all_warn(device->connection, dig_in, dgs); 1609 err = drbd_recv_all_warn(first_peer_device(device)->connection, dig_in, dgs);
1610 if (err) 1610 if (err)
1611 return err; 1611 return err;
1612 data_size -= dgs; 1612 data_size -= dgs;
@@ -1622,7 +1622,7 @@ static int recv_dless_read(struct drbd_device *device, struct drbd_request *req,
1622 bio_for_each_segment(bvec, bio, iter) { 1622 bio_for_each_segment(bvec, bio, iter) {
1623 void *mapped = kmap(bvec.bv_page) + bvec.bv_offset; 1623 void *mapped = kmap(bvec.bv_page) + bvec.bv_offset;
1624 expect = min_t(int, data_size, bvec.bv_len); 1624 expect = min_t(int, data_size, bvec.bv_len);
1625 err = drbd_recv_all_warn(device->connection, mapped, expect); 1625 err = drbd_recv_all_warn(first_peer_device(device)->connection, mapped, expect);
1626 kunmap(bvec.bv_page); 1626 kunmap(bvec.bv_page);
1627 if (err) 1627 if (err)
1628 return err; 1628 return err;
@@ -1630,7 +1630,7 @@ static int recv_dless_read(struct drbd_device *device, struct drbd_request *req,
1630 } 1630 }
1631 1631
1632 if (dgs) { 1632 if (dgs) {
1633 drbd_csum_bio(device, device->connection->peer_integrity_tfm, bio, dig_vv); 1633 drbd_csum_bio(device, first_peer_device(device)->connection->peer_integrity_tfm, bio, dig_vv);
1634 if (memcmp(dig_in, dig_vv, dgs)) { 1634 if (memcmp(dig_in, dig_vv, dgs)) {
1635 dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n"); 1635 dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
1636 return -EINVAL; 1636 return -EINVAL;
@@ -1685,9 +1685,9 @@ static int recv_resync_read(struct drbd_device *device, sector_t sector, int dat
1685 1685
1686 peer_req->w.cb = e_end_resync_block; 1686 peer_req->w.cb = e_end_resync_block;
1687 1687
1688 spin_lock_irq(&device->connection->req_lock); 1688 spin_lock_irq(&first_peer_device(device)->connection->req_lock);
1689 list_add(&peer_req->w.list, &device->sync_ee); 1689 list_add(&peer_req->w.list, &device->sync_ee);
1690 spin_unlock_irq(&device->connection->req_lock); 1690 spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
1691 1691
1692 atomic_add(data_size >> 9, &device->rs_sect_ev); 1692 atomic_add(data_size >> 9, &device->rs_sect_ev);
1693 if (drbd_submit_peer_request(device, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0) 1693 if (drbd_submit_peer_request(device, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
@@ -1695,9 +1695,9 @@ static int recv_resync_read(struct drbd_device *device, sector_t sector, int dat
1695 1695
1696 /* don't care for the reason here */ 1696 /* don't care for the reason here */
1697 dev_err(DEV, "submit failed, triggering re-connect\n"); 1697 dev_err(DEV, "submit failed, triggering re-connect\n");
1698 spin_lock_irq(&device->connection->req_lock); 1698 spin_lock_irq(&first_peer_device(device)->connection->req_lock);
1699 list_del(&peer_req->w.list); 1699 list_del(&peer_req->w.list);
1700 spin_unlock_irq(&device->connection->req_lock); 1700 spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
1701 1701
1702 drbd_free_peer_req(device, peer_req); 1702 drbd_free_peer_req(device, peer_req);
1703fail: 1703fail:
@@ -1736,9 +1736,9 @@ static int receive_DataReply(struct drbd_connection *connection, struct packet_i
1736 1736
1737 sector = be64_to_cpu(p->sector); 1737 sector = be64_to_cpu(p->sector);
1738 1738
1739 spin_lock_irq(&device->connection->req_lock); 1739 spin_lock_irq(&first_peer_device(device)->connection->req_lock);
1740 req = find_request(device, &device->read_requests, p->block_id, sector, false, __func__); 1740 req = find_request(device, &device->read_requests, p->block_id, sector, false, __func__);
1741 spin_unlock_irq(&device->connection->req_lock); 1741 spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
1742 if (unlikely(!req)) 1742 if (unlikely(!req))
1743 return -EIO; 1743 return -EIO;
1744 1744
@@ -1837,16 +1837,16 @@ static int e_end_block(struct drbd_work *w, int cancel)
1837 /* we delete from the conflict detection hash _after_ we sent out the 1837 /* we delete from the conflict detection hash _after_ we sent out the
1838 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */ 1838 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
1839 if (peer_req->flags & EE_IN_INTERVAL_TREE) { 1839 if (peer_req->flags & EE_IN_INTERVAL_TREE) {
1840 spin_lock_irq(&device->connection->req_lock); 1840 spin_lock_irq(&first_peer_device(device)->connection->req_lock);
1841 D_ASSERT(!drbd_interval_empty(&peer_req->i)); 1841 D_ASSERT(!drbd_interval_empty(&peer_req->i));
1842 drbd_remove_epoch_entry_interval(device, peer_req); 1842 drbd_remove_epoch_entry_interval(device, peer_req);
1843 if (peer_req->flags & EE_RESTART_REQUESTS) 1843 if (peer_req->flags & EE_RESTART_REQUESTS)
1844 restart_conflicting_writes(device, sector, peer_req->i.size); 1844 restart_conflicting_writes(device, sector, peer_req->i.size);
1845 spin_unlock_irq(&device->connection->req_lock); 1845 spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
1846 } else 1846 } else
1847 D_ASSERT(drbd_interval_empty(&peer_req->i)); 1847 D_ASSERT(drbd_interval_empty(&peer_req->i));
1848 1848
1849 drbd_may_finish_epoch(device->connection, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0)); 1849 drbd_may_finish_epoch(first_peer_device(device)->connection, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
1850 1850
1851 return err; 1851 return err;
1852} 1852}
@@ -1871,7 +1871,7 @@ static int e_send_superseded(struct drbd_work *w, int unused)
1871 1871
1872static int e_send_retry_write(struct drbd_work *w, int unused) 1872static int e_send_retry_write(struct drbd_work *w, int unused)
1873{ 1873{
1874 struct drbd_connection *connection = w->device->connection; 1874 struct drbd_connection *connection = first_peer_device(w->device)->connection;
1875 1875
1876 return e_send_ack(w, connection->agreed_pro_version >= 100 ? 1876 return e_send_ack(w, connection->agreed_pro_version >= 100 ?
1877 P_RETRY_WRITE : P_SUPERSEDED); 1877 P_RETRY_WRITE : P_SUPERSEDED);
@@ -1896,7 +1896,7 @@ static void update_peer_seq(struct drbd_device *device, unsigned int peer_seq)
1896{ 1896{
1897 unsigned int newest_peer_seq; 1897 unsigned int newest_peer_seq;
1898 1898
1899 if (test_bit(RESOLVE_CONFLICTS, &device->connection->flags)) { 1899 if (test_bit(RESOLVE_CONFLICTS, &first_peer_device(device)->connection->flags)) {
1900 spin_lock(&device->peer_seq_lock); 1900 spin_lock(&device->peer_seq_lock);
1901 newest_peer_seq = seq_max(device->peer_seq, peer_seq); 1901 newest_peer_seq = seq_max(device->peer_seq, peer_seq);
1902 device->peer_seq = newest_peer_seq; 1902 device->peer_seq = newest_peer_seq;
@@ -1918,7 +1918,7 @@ static bool overlapping_resync_write(struct drbd_device *device, struct drbd_pee
1918 struct drbd_peer_request *rs_req; 1918 struct drbd_peer_request *rs_req;
1919 bool rv = 0; 1919 bool rv = 0;
1920 1920
1921 spin_lock_irq(&device->connection->req_lock); 1921 spin_lock_irq(&first_peer_device(device)->connection->req_lock);
1922 list_for_each_entry(rs_req, &device->sync_ee, w.list) { 1922 list_for_each_entry(rs_req, &device->sync_ee, w.list) {
1923 if (overlaps(peer_req->i.sector, peer_req->i.size, 1923 if (overlaps(peer_req->i.sector, peer_req->i.size,
1924 rs_req->i.sector, rs_req->i.size)) { 1924 rs_req->i.sector, rs_req->i.size)) {
@@ -1926,7 +1926,7 @@ static bool overlapping_resync_write(struct drbd_device *device, struct drbd_pee
1926 break; 1926 break;
1927 } 1927 }
1928 } 1928 }
1929 spin_unlock_irq(&device->connection->req_lock); 1929 spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
1930 1930
1931 return rv; 1931 return rv;
1932} 1932}
@@ -1958,7 +1958,7 @@ static int wait_for_and_update_peer_seq(struct drbd_device *device, const u32 pe
1958 long timeout; 1958 long timeout;
1959 int ret = 0, tp; 1959 int ret = 0, tp;
1960 1960
1961 if (!test_bit(RESOLVE_CONFLICTS, &device->connection->flags)) 1961 if (!test_bit(RESOLVE_CONFLICTS, &first_peer_device(device)->connection->flags))
1962 return 0; 1962 return 0;
1963 1963
1964 spin_lock(&device->peer_seq_lock); 1964 spin_lock(&device->peer_seq_lock);
@@ -1974,7 +1974,7 @@ static int wait_for_and_update_peer_seq(struct drbd_device *device, const u32 pe
1974 } 1974 }
1975 1975
1976 rcu_read_lock(); 1976 rcu_read_lock();
1977 tp = rcu_dereference(device->connection->net_conf)->two_primaries; 1977 tp = rcu_dereference(first_peer_device(device)->connection->net_conf)->two_primaries;
1978 rcu_read_unlock(); 1978 rcu_read_unlock();
1979 1979
1980 if (!tp) 1980 if (!tp)
@@ -1984,7 +1984,7 @@ static int wait_for_and_update_peer_seq(struct drbd_device *device, const u32 pe
1984 prepare_to_wait(&device->seq_wait, &wait, TASK_INTERRUPTIBLE); 1984 prepare_to_wait(&device->seq_wait, &wait, TASK_INTERRUPTIBLE);
1985 spin_unlock(&device->peer_seq_lock); 1985 spin_unlock(&device->peer_seq_lock);
1986 rcu_read_lock(); 1986 rcu_read_lock();
1987 timeout = rcu_dereference(device->connection->net_conf)->ping_timeo*HZ/10; 1987 timeout = rcu_dereference(first_peer_device(device)->connection->net_conf)->ping_timeo*HZ/10;
1988 rcu_read_unlock(); 1988 rcu_read_unlock();
1989 timeout = schedule_timeout(timeout); 1989 timeout = schedule_timeout(timeout);
1990 spin_lock(&device->peer_seq_lock); 1990 spin_lock(&device->peer_seq_lock);
@@ -2027,10 +2027,10 @@ static void fail_postponed_requests(struct drbd_device *device, sector_t sector,
2027 continue; 2027 continue;
2028 req->rq_state &= ~RQ_POSTPONED; 2028 req->rq_state &= ~RQ_POSTPONED;
2029 __req_mod(req, NEG_ACKED, &m); 2029 __req_mod(req, NEG_ACKED, &m);
2030 spin_unlock_irq(&device->connection->req_lock); 2030 spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
2031 if (m.bio) 2031 if (m.bio)
2032 complete_master_bio(device, &m); 2032 complete_master_bio(device, &m);
2033 spin_lock_irq(&device->connection->req_lock); 2033 spin_lock_irq(&first_peer_device(device)->connection->req_lock);
2034 goto repeat; 2034 goto repeat;
2035 } 2035 }
2036} 2036}
@@ -2038,7 +2038,7 @@ static void fail_postponed_requests(struct drbd_device *device, sector_t sector,
2038static int handle_write_conflicts(struct drbd_device *device, 2038static int handle_write_conflicts(struct drbd_device *device,
2039 struct drbd_peer_request *peer_req) 2039 struct drbd_peer_request *peer_req)
2040{ 2040{
2041 struct drbd_connection *connection = device->connection; 2041 struct drbd_connection *connection = first_peer_device(device)->connection;
2042 bool resolve_conflicts = test_bit(RESOLVE_CONFLICTS, &connection->flags); 2042 bool resolve_conflicts = test_bit(RESOLVE_CONFLICTS, &connection->flags);
2043 sector_t sector = peer_req->i.sector; 2043 sector_t sector = peer_req->i.sector;
2044 const unsigned int size = peer_req->i.size; 2044 const unsigned int size = peer_req->i.size;
@@ -2092,7 +2092,7 @@ static int handle_write_conflicts(struct drbd_device *device,
2092 peer_req->w.cb = superseded ? e_send_superseded : 2092 peer_req->w.cb = superseded ? e_send_superseded :
2093 e_send_retry_write; 2093 e_send_retry_write;
2094 list_add_tail(&peer_req->w.list, &device->done_ee); 2094 list_add_tail(&peer_req->w.list, &device->done_ee);
2095 wake_asender(device->connection); 2095 wake_asender(first_peer_device(device)->connection);
2096 2096
2097 err = -ENOENT; 2097 err = -ENOENT;
2098 goto out; 2098 goto out;
@@ -2121,7 +2121,7 @@ static int handle_write_conflicts(struct drbd_device *device,
2121 */ 2121 */
2122 err = drbd_wait_misc(device, &req->i); 2122 err = drbd_wait_misc(device, &req->i);
2123 if (err) { 2123 if (err) {
2124 _conn_request_state(device->connection, 2124 _conn_request_state(first_peer_device(device)->connection,
2125 NS(conn, C_TIMEOUT), 2125 NS(conn, C_TIMEOUT),
2126 CS_HARD); 2126 CS_HARD);
2127 fail_postponed_requests(device, sector, size); 2127 fail_postponed_requests(device, sector, size);
@@ -2204,17 +2204,17 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
2204 spin_unlock(&connection->epoch_lock); 2204 spin_unlock(&connection->epoch_lock);
2205 2205
2206 rcu_read_lock(); 2206 rcu_read_lock();
2207 tp = rcu_dereference(device->connection->net_conf)->two_primaries; 2207 tp = rcu_dereference(first_peer_device(device)->connection->net_conf)->two_primaries;
2208 rcu_read_unlock(); 2208 rcu_read_unlock();
2209 if (tp) { 2209 if (tp) {
2210 peer_req->flags |= EE_IN_INTERVAL_TREE; 2210 peer_req->flags |= EE_IN_INTERVAL_TREE;
2211 err = wait_for_and_update_peer_seq(device, peer_seq); 2211 err = wait_for_and_update_peer_seq(device, peer_seq);
2212 if (err) 2212 if (err)
2213 goto out_interrupted; 2213 goto out_interrupted;
2214 spin_lock_irq(&device->connection->req_lock); 2214 spin_lock_irq(&first_peer_device(device)->connection->req_lock);
2215 err = handle_write_conflicts(device, peer_req); 2215 err = handle_write_conflicts(device, peer_req);
2216 if (err) { 2216 if (err) {
2217 spin_unlock_irq(&device->connection->req_lock); 2217 spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
2218 if (err == -ENOENT) { 2218 if (err == -ENOENT) {
2219 put_ldev(device); 2219 put_ldev(device);
2220 return 0; 2220 return 0;
@@ -2223,17 +2223,17 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
2223 } 2223 }
2224 } else { 2224 } else {
2225 update_peer_seq(device, peer_seq); 2225 update_peer_seq(device, peer_seq);
2226 spin_lock_irq(&device->connection->req_lock); 2226 spin_lock_irq(&first_peer_device(device)->connection->req_lock);
2227 } 2227 }
2228 list_add(&peer_req->w.list, &device->active_ee); 2228 list_add(&peer_req->w.list, &device->active_ee);
2229 spin_unlock_irq(&device->connection->req_lock); 2229 spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
2230 2230
2231 if (device->state.conn == C_SYNC_TARGET) 2231 if (device->state.conn == C_SYNC_TARGET)
2232 wait_event(device->ee_wait, !overlapping_resync_write(device, peer_req)); 2232 wait_event(device->ee_wait, !overlapping_resync_write(device, peer_req));
2233 2233
2234 if (device->connection->agreed_pro_version < 100) { 2234 if (first_peer_device(device)->connection->agreed_pro_version < 100) {
2235 rcu_read_lock(); 2235 rcu_read_lock();
2236 switch (rcu_dereference(device->connection->net_conf)->wire_protocol) { 2236 switch (rcu_dereference(first_peer_device(device)->connection->net_conf)->wire_protocol) {
2237 case DRBD_PROT_C: 2237 case DRBD_PROT_C:
2238 dp_flags |= DP_SEND_WRITE_ACK; 2238 dp_flags |= DP_SEND_WRITE_ACK;
2239 break; 2239 break;
@@ -2271,10 +2271,10 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
2271 2271
2272 /* don't care for the reason here */ 2272 /* don't care for the reason here */
2273 dev_err(DEV, "submit failed, triggering re-connect\n"); 2273 dev_err(DEV, "submit failed, triggering re-connect\n");
2274 spin_lock_irq(&device->connection->req_lock); 2274 spin_lock_irq(&first_peer_device(device)->connection->req_lock);
2275 list_del(&peer_req->w.list); 2275 list_del(&peer_req->w.list);
2276 drbd_remove_epoch_entry_interval(device, peer_req); 2276 drbd_remove_epoch_entry_interval(device, peer_req);
2277 spin_unlock_irq(&device->connection->req_lock); 2277 spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
2278 if (peer_req->flags & EE_CALL_AL_COMPLETE_IO) 2278 if (peer_req->flags & EE_CALL_AL_COMPLETE_IO)
2279 drbd_al_complete_io(device, &peer_req->i); 2279 drbd_al_complete_io(device, &peer_req->i);
2280 2280
@@ -2450,11 +2450,11 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
2450 peer_req->digest = di; 2450 peer_req->digest = di;
2451 peer_req->flags |= EE_HAS_DIGEST; 2451 peer_req->flags |= EE_HAS_DIGEST;
2452 2452
2453 if (drbd_recv_all(device->connection, di->digest, pi->size)) 2453 if (drbd_recv_all(first_peer_device(device)->connection, di->digest, pi->size))
2454 goto out_free_e; 2454 goto out_free_e;
2455 2455
2456 if (pi->cmd == P_CSUM_RS_REQUEST) { 2456 if (pi->cmd == P_CSUM_RS_REQUEST) {
2457 D_ASSERT(device->connection->agreed_pro_version >= 89); 2457 D_ASSERT(first_peer_device(device)->connection->agreed_pro_version >= 89);
2458 peer_req->w.cb = w_e_end_csum_rs_req; 2458 peer_req->w.cb = w_e_end_csum_rs_req;
2459 /* used in the sector offset progress display */ 2459 /* used in the sector offset progress display */
2460 device->bm_resync_fo = BM_SECT_TO_BIT(sector); 2460 device->bm_resync_fo = BM_SECT_TO_BIT(sector);
@@ -2471,7 +2471,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
2471 2471
2472 case P_OV_REQUEST: 2472 case P_OV_REQUEST:
2473 if (device->ov_start_sector == ~(sector_t)0 && 2473 if (device->ov_start_sector == ~(sector_t)0 &&
2474 device->connection->agreed_pro_version >= 90) { 2474 first_peer_device(device)->connection->agreed_pro_version >= 90) {
2475 unsigned long now = jiffies; 2475 unsigned long now = jiffies;
2476 int i; 2476 int i;
2477 device->ov_start_sector = sector; 2477 device->ov_start_sector = sector;
@@ -2525,18 +2525,18 @@ submit_for_resync:
2525 2525
2526submit: 2526submit:
2527 inc_unacked(device); 2527 inc_unacked(device);
2528 spin_lock_irq(&device->connection->req_lock); 2528 spin_lock_irq(&first_peer_device(device)->connection->req_lock);
2529 list_add_tail(&peer_req->w.list, &device->read_ee); 2529 list_add_tail(&peer_req->w.list, &device->read_ee);
2530 spin_unlock_irq(&device->connection->req_lock); 2530 spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
2531 2531
2532 if (drbd_submit_peer_request(device, peer_req, READ, fault_type) == 0) 2532 if (drbd_submit_peer_request(device, peer_req, READ, fault_type) == 0)
2533 return 0; 2533 return 0;
2534 2534
2535 /* don't care for the reason here */ 2535 /* don't care for the reason here */
2536 dev_err(DEV, "submit failed, triggering re-connect\n"); 2536 dev_err(DEV, "submit failed, triggering re-connect\n");
2537 spin_lock_irq(&device->connection->req_lock); 2537 spin_lock_irq(&first_peer_device(device)->connection->req_lock);
2538 list_del(&peer_req->w.list); 2538 list_del(&peer_req->w.list);
2539 spin_unlock_irq(&device->connection->req_lock); 2539 spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
2540 /* no drbd_rs_complete_io(), we are dropping the connection anyways */ 2540 /* no drbd_rs_complete_io(), we are dropping the connection anyways */
2541 2541
2542out_free_e: 2542out_free_e:
@@ -2558,7 +2558,7 @@ static int drbd_asb_recover_0p(struct drbd_device *device) __must_hold(local)
2558 ch_self = device->comm_bm_set; 2558 ch_self = device->comm_bm_set;
2559 2559
2560 rcu_read_lock(); 2560 rcu_read_lock();
2561 after_sb_0p = rcu_dereference(device->connection->net_conf)->after_sb_0p; 2561 after_sb_0p = rcu_dereference(first_peer_device(device)->connection->net_conf)->after_sb_0p;
2562 rcu_read_unlock(); 2562 rcu_read_unlock();
2563 switch (after_sb_0p) { 2563 switch (after_sb_0p) {
2564 case ASB_CONSENSUS: 2564 case ASB_CONSENSUS:
@@ -2593,7 +2593,7 @@ static int drbd_asb_recover_0p(struct drbd_device *device) __must_hold(local)
2593 "Using discard-least-changes instead\n"); 2593 "Using discard-least-changes instead\n");
2594 case ASB_DISCARD_ZERO_CHG: 2594 case ASB_DISCARD_ZERO_CHG:
2595 if (ch_peer == 0 && ch_self == 0) { 2595 if (ch_peer == 0 && ch_self == 0) {
2596 rv = test_bit(RESOLVE_CONFLICTS, &device->connection->flags) 2596 rv = test_bit(RESOLVE_CONFLICTS, &first_peer_device(device)->connection->flags)
2597 ? -1 : 1; 2597 ? -1 : 1;
2598 break; 2598 break;
2599 } else { 2599 } else {
@@ -2609,7 +2609,7 @@ static int drbd_asb_recover_0p(struct drbd_device *device) __must_hold(local)
2609 rv = 1; 2609 rv = 1;
2610 else /* ( ch_self == ch_peer ) */ 2610 else /* ( ch_self == ch_peer ) */
2611 /* Well, then use something else. */ 2611 /* Well, then use something else. */
2612 rv = test_bit(RESOLVE_CONFLICTS, &device->connection->flags) 2612 rv = test_bit(RESOLVE_CONFLICTS, &first_peer_device(device)->connection->flags)
2613 ? -1 : 1; 2613 ? -1 : 1;
2614 break; 2614 break;
2615 case ASB_DISCARD_LOCAL: 2615 case ASB_DISCARD_LOCAL:
@@ -2628,7 +2628,7 @@ static int drbd_asb_recover_1p(struct drbd_device *device) __must_hold(local)
2628 enum drbd_after_sb_p after_sb_1p; 2628 enum drbd_after_sb_p after_sb_1p;
2629 2629
2630 rcu_read_lock(); 2630 rcu_read_lock();
2631 after_sb_1p = rcu_dereference(device->connection->net_conf)->after_sb_1p; 2631 after_sb_1p = rcu_dereference(first_peer_device(device)->connection->net_conf)->after_sb_1p;
2632 rcu_read_unlock(); 2632 rcu_read_unlock();
2633 switch (after_sb_1p) { 2633 switch (after_sb_1p) {
2634 case ASB_DISCARD_YOUNGER_PRI: 2634 case ASB_DISCARD_YOUNGER_PRI:
@@ -2681,7 +2681,7 @@ static int drbd_asb_recover_2p(struct drbd_device *device) __must_hold(local)
2681 enum drbd_after_sb_p after_sb_2p; 2681 enum drbd_after_sb_p after_sb_2p;
2682 2682
2683 rcu_read_lock(); 2683 rcu_read_lock();
2684 after_sb_2p = rcu_dereference(device->connection->net_conf)->after_sb_2p; 2684 after_sb_2p = rcu_dereference(first_peer_device(device)->connection->net_conf)->after_sb_2p;
2685 rcu_read_unlock(); 2685 rcu_read_unlock();
2686 switch (after_sb_2p) { 2686 switch (after_sb_2p) {
2687 case ASB_DISCARD_YOUNGER_PRI: 2687 case ASB_DISCARD_YOUNGER_PRI:
@@ -2777,7 +2777,7 @@ static int drbd_uuid_compare(struct drbd_device *device, int *rule_nr) __must_ho
2777 2777
2778 if (device->p_uuid[UI_BITMAP] == (u64)0 && device->ldev->md.uuid[UI_BITMAP] != (u64)0) { 2778 if (device->p_uuid[UI_BITMAP] == (u64)0 && device->ldev->md.uuid[UI_BITMAP] != (u64)0) {
2779 2779
2780 if (device->connection->agreed_pro_version < 91) 2780 if (first_peer_device(device)->connection->agreed_pro_version < 91)
2781 return -1091; 2781 return -1091;
2782 2782
2783 if ((device->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START] & ~((u64)1)) && 2783 if ((device->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
@@ -2800,7 +2800,7 @@ static int drbd_uuid_compare(struct drbd_device *device, int *rule_nr) __must_ho
2800 2800
2801 if (device->ldev->md.uuid[UI_BITMAP] == (u64)0 && device->p_uuid[UI_BITMAP] != (u64)0) { 2801 if (device->ldev->md.uuid[UI_BITMAP] == (u64)0 && device->p_uuid[UI_BITMAP] != (u64)0) {
2802 2802
2803 if (device->connection->agreed_pro_version < 91) 2803 if (first_peer_device(device)->connection->agreed_pro_version < 91)
2804 return -1091; 2804 return -1091;
2805 2805
2806 if ((device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (device->p_uuid[UI_BITMAP] & ~((u64)1)) && 2806 if ((device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (device->p_uuid[UI_BITMAP] & ~((u64)1)) &&
@@ -2833,7 +2833,7 @@ static int drbd_uuid_compare(struct drbd_device *device, int *rule_nr) __must_ho
2833 case 1: /* self_pri && !peer_pri */ return 1; 2833 case 1: /* self_pri && !peer_pri */ return 1;
2834 case 2: /* !self_pri && peer_pri */ return -1; 2834 case 2: /* !self_pri && peer_pri */ return -1;
2835 case 3: /* self_pri && peer_pri */ 2835 case 3: /* self_pri && peer_pri */
2836 dc = test_bit(RESOLVE_CONFLICTS, &device->connection->flags); 2836 dc = test_bit(RESOLVE_CONFLICTS, &first_peer_device(device)->connection->flags);
2837 return dc ? -1 : 1; 2837 return dc ? -1 : 1;
2838 } 2838 }
2839 } 2839 }
@@ -2846,14 +2846,14 @@ static int drbd_uuid_compare(struct drbd_device *device, int *rule_nr) __must_ho
2846 *rule_nr = 51; 2846 *rule_nr = 51;
2847 peer = device->p_uuid[UI_HISTORY_START] & ~((u64)1); 2847 peer = device->p_uuid[UI_HISTORY_START] & ~((u64)1);
2848 if (self == peer) { 2848 if (self == peer) {
2849 if (device->connection->agreed_pro_version < 96 ? 2849 if (first_peer_device(device)->connection->agreed_pro_version < 96 ?
2850 (device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == 2850 (device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
2851 (device->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) : 2851 (device->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
2852 peer + UUID_NEW_BM_OFFSET == (device->p_uuid[UI_BITMAP] & ~((u64)1))) { 2852 peer + UUID_NEW_BM_OFFSET == (device->p_uuid[UI_BITMAP] & ~((u64)1))) {
2853 /* The last P_SYNC_UUID did not get through. Undo the last start of 2853 /* The last P_SYNC_UUID did not get through. Undo the last start of
2854 resync as sync source modifications of the peer's UUIDs. */ 2854 resync as sync source modifications of the peer's UUIDs. */
2855 2855
2856 if (device->connection->agreed_pro_version < 91) 2856 if (first_peer_device(device)->connection->agreed_pro_version < 91)
2857 return -1091; 2857 return -1091;
2858 2858
2859 device->p_uuid[UI_BITMAP] = device->p_uuid[UI_HISTORY_START]; 2859 device->p_uuid[UI_BITMAP] = device->p_uuid[UI_HISTORY_START];
@@ -2883,14 +2883,14 @@ static int drbd_uuid_compare(struct drbd_device *device, int *rule_nr) __must_ho
2883 *rule_nr = 71; 2883 *rule_nr = 71;
2884 self = device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1); 2884 self = device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2885 if (self == peer) { 2885 if (self == peer) {
2886 if (device->connection->agreed_pro_version < 96 ? 2886 if (first_peer_device(device)->connection->agreed_pro_version < 96 ?
2887 (device->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == 2887 (device->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
2888 (device->p_uuid[UI_HISTORY_START] & ~((u64)1)) : 2888 (device->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
2889 self + UUID_NEW_BM_OFFSET == (device->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) { 2889 self + UUID_NEW_BM_OFFSET == (device->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
2890 /* The last P_SYNC_UUID did not get through. Undo the last start of 2890 /* The last P_SYNC_UUID did not get through. Undo the last start of
2891 resync as sync source modifications of our UUIDs. */ 2891 resync as sync source modifications of our UUIDs. */
2892 2892
2893 if (device->connection->agreed_pro_version < 91) 2893 if (first_peer_device(device)->connection->agreed_pro_version < 91)
2894 return -1091; 2894 return -1091;
2895 2895
2896 __drbd_uuid_set(device, UI_BITMAP, device->ldev->md.uuid[UI_HISTORY_START]); 2896 __drbd_uuid_set(device, UI_BITMAP, device->ldev->md.uuid[UI_HISTORY_START]);
@@ -2982,7 +2982,7 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_device *device, enum drbd
2982 drbd_khelper(device, "initial-split-brain"); 2982 drbd_khelper(device, "initial-split-brain");
2983 2983
2984 rcu_read_lock(); 2984 rcu_read_lock();
2985 nc = rcu_dereference(device->connection->net_conf); 2985 nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
2986 2986
2987 if (hg == 100 || (hg == -100 && nc->always_asbp)) { 2987 if (hg == 100 || (hg == -100 && nc->always_asbp)) {
2988 int pcount = (device->state.role == R_PRIMARY) 2988 int pcount = (device->state.role == R_PRIMARY)
@@ -3057,7 +3057,7 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_device *device, enum drbd
3057 } 3057 }
3058 } 3058 }
3059 3059
3060 if (tentative || test_bit(CONN_DRY_RUN, &device->connection->flags)) { 3060 if (tentative || test_bit(CONN_DRY_RUN, &first_peer_device(device)->connection->flags)) {
3061 if (hg == 0) 3061 if (hg == 0)
3062 dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n"); 3062 dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
3063 else 3063 else
@@ -3361,17 +3361,17 @@ static int receive_SyncParam(struct drbd_connection *connection, struct packet_i
3361 p = pi->data; 3361 p = pi->data;
3362 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX); 3362 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
3363 3363
3364 err = drbd_recv_all(device->connection, p, header_size); 3364 err = drbd_recv_all(first_peer_device(device)->connection, p, header_size);
3365 if (err) 3365 if (err)
3366 return err; 3366 return err;
3367 3367
3368 mutex_lock(&device->connection->conf_update); 3368 mutex_lock(&first_peer_device(device)->connection->conf_update);
3369 old_net_conf = device->connection->net_conf; 3369 old_net_conf = first_peer_device(device)->connection->net_conf;
3370 if (get_ldev(device)) { 3370 if (get_ldev(device)) {
3371 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL); 3371 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
3372 if (!new_disk_conf) { 3372 if (!new_disk_conf) {
3373 put_ldev(device); 3373 put_ldev(device);
3374 mutex_unlock(&device->connection->conf_update); 3374 mutex_unlock(&first_peer_device(device)->connection->conf_update);
3375 dev_err(DEV, "Allocation of new disk_conf failed\n"); 3375 dev_err(DEV, "Allocation of new disk_conf failed\n");
3376 return -ENOMEM; 3376 return -ENOMEM;
3377 } 3377 }
@@ -3392,7 +3392,7 @@ static int receive_SyncParam(struct drbd_connection *connection, struct packet_i
3392 goto reconnect; 3392 goto reconnect;
3393 } 3393 }
3394 3394
3395 err = drbd_recv_all(device->connection, p->verify_alg, data_size); 3395 err = drbd_recv_all(first_peer_device(device)->connection, p->verify_alg, data_size);
3396 if (err) 3396 if (err)
3397 goto reconnect; 3397 goto reconnect;
3398 /* we expect NUL terminated string */ 3398 /* we expect NUL terminated string */
@@ -3466,15 +3466,15 @@ static int receive_SyncParam(struct drbd_connection *connection, struct packet_i
3466 if (verify_tfm) { 3466 if (verify_tfm) {
3467 strcpy(new_net_conf->verify_alg, p->verify_alg); 3467 strcpy(new_net_conf->verify_alg, p->verify_alg);
3468 new_net_conf->verify_alg_len = strlen(p->verify_alg) + 1; 3468 new_net_conf->verify_alg_len = strlen(p->verify_alg) + 1;
3469 crypto_free_hash(device->connection->verify_tfm); 3469 crypto_free_hash(first_peer_device(device)->connection->verify_tfm);
3470 device->connection->verify_tfm = verify_tfm; 3470 first_peer_device(device)->connection->verify_tfm = verify_tfm;
3471 dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg); 3471 dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
3472 } 3472 }
3473 if (csums_tfm) { 3473 if (csums_tfm) {
3474 strcpy(new_net_conf->csums_alg, p->csums_alg); 3474 strcpy(new_net_conf->csums_alg, p->csums_alg);
3475 new_net_conf->csums_alg_len = strlen(p->csums_alg) + 1; 3475 new_net_conf->csums_alg_len = strlen(p->csums_alg) + 1;
3476 crypto_free_hash(device->connection->csums_tfm); 3476 crypto_free_hash(first_peer_device(device)->connection->csums_tfm);
3477 device->connection->csums_tfm = csums_tfm; 3477 first_peer_device(device)->connection->csums_tfm = csums_tfm;
3478 dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg); 3478 dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
3479 } 3479 }
3480 rcu_assign_pointer(connection->net_conf, new_net_conf); 3480 rcu_assign_pointer(connection->net_conf, new_net_conf);
@@ -3491,7 +3491,7 @@ static int receive_SyncParam(struct drbd_connection *connection, struct packet_i
3491 rcu_assign_pointer(device->rs_plan_s, new_plan); 3491 rcu_assign_pointer(device->rs_plan_s, new_plan);
3492 } 3492 }
3493 3493
3494 mutex_unlock(&device->connection->conf_update); 3494 mutex_unlock(&first_peer_device(device)->connection->conf_update);
3495 synchronize_rcu(); 3495 synchronize_rcu();
3496 if (new_net_conf) 3496 if (new_net_conf)
3497 kfree(old_net_conf); 3497 kfree(old_net_conf);
@@ -3505,7 +3505,7 @@ reconnect:
3505 put_ldev(device); 3505 put_ldev(device);
3506 kfree(new_disk_conf); 3506 kfree(new_disk_conf);
3507 } 3507 }
3508 mutex_unlock(&device->connection->conf_update); 3508 mutex_unlock(&first_peer_device(device)->connection->conf_update);
3509 return -EIO; 3509 return -EIO;
3510 3510
3511disconnect: 3511disconnect:
@@ -3514,13 +3514,13 @@ disconnect:
3514 put_ldev(device); 3514 put_ldev(device);
3515 kfree(new_disk_conf); 3515 kfree(new_disk_conf);
3516 } 3516 }
3517 mutex_unlock(&device->connection->conf_update); 3517 mutex_unlock(&first_peer_device(device)->connection->conf_update);
3518 /* just for completeness: actually not needed, 3518 /* just for completeness: actually not needed,
3519 * as this is not reached if csums_tfm was ok. */ 3519 * as this is not reached if csums_tfm was ok. */
3520 crypto_free_hash(csums_tfm); 3520 crypto_free_hash(csums_tfm);
3521 /* but free the verify_tfm again, if csums_tfm did not work out */ 3521 /* but free the verify_tfm again, if csums_tfm did not work out */
3522 crypto_free_hash(verify_tfm); 3522 crypto_free_hash(verify_tfm);
3523 conn_request_state(device->connection, NS(conn, C_DISCONNECTING), CS_HARD); 3523 conn_request_state(first_peer_device(device)->connection, NS(conn, C_DISCONNECTING), CS_HARD);
3524 return -EIO; 3524 return -EIO;
3525} 3525}
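
For reference, the net_conf update in receive_SyncParam() above is DRBD's usual publish-then-reclaim pattern: writers serialize on conf_update, publish the new copy with rcu_assign_pointer(), and free the old copy only after synchronize_rcu() has flushed out every rcu_dereference() reader. Reduced to its skeleton (error paths elided, names as in the hunk):

	mutex_lock(&connection->conf_update);		/* serialize configuration writers */
	old_net_conf = connection->net_conf;
	*new_net_conf = *old_net_conf;			/* start from the current settings */
	/* ... fold in the received verify_alg / csums_alg ... */
	rcu_assign_pointer(connection->net_conf, new_net_conf);	/* publish */
	mutex_unlock(&connection->conf_update);
	synchronize_rcu();				/* wait out in-flight readers */
	kfree(old_net_conf);				/* no reader can still see it */
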
3526 3526
@@ -3579,7 +3579,7 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info
3579 device->state.disk >= D_OUTDATED && 3579 device->state.disk >= D_OUTDATED &&
3580 device->state.conn < C_CONNECTED) { 3580 device->state.conn < C_CONNECTED) {
3581 dev_err(DEV, "The peer's disk size is too small!\n"); 3581 dev_err(DEV, "The peer's disk size is too small!\n");
3582 conn_request_state(device->connection, NS(conn, C_DISCONNECTING), CS_HARD); 3582 conn_request_state(first_peer_device(device)->connection, NS(conn, C_DISCONNECTING), CS_HARD);
3583 put_ldev(device); 3583 put_ldev(device);
3584 return -EIO; 3584 return -EIO;
3585 } 3585 }
@@ -3594,13 +3594,13 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info
3594 return -ENOMEM; 3594 return -ENOMEM;
3595 } 3595 }
3596 3596
3597 mutex_lock(&device->connection->conf_update); 3597 mutex_lock(&first_peer_device(device)->connection->conf_update);
3598 old_disk_conf = device->ldev->disk_conf; 3598 old_disk_conf = device->ldev->disk_conf;
3599 *new_disk_conf = *old_disk_conf; 3599 *new_disk_conf = *old_disk_conf;
3600 new_disk_conf->disk_size = p_usize; 3600 new_disk_conf->disk_size = p_usize;
3601 3601
3602 rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf); 3602 rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
3603 mutex_unlock(&device->connection->conf_update); 3603 mutex_unlock(&first_peer_device(device)->connection->conf_update);
3604 synchronize_rcu(); 3604 synchronize_rcu();
3605 kfree(old_disk_conf); 3605 kfree(old_disk_conf);
3606 3606
@@ -3687,14 +3687,14 @@ static int receive_uuids(struct drbd_connection *connection, struct packet_info
3687 (device->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) { 3687 (device->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
3688 dev_err(DEV, "Can only connect to data with current UUID=%016llX\n", 3688 dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
3689 (unsigned long long)device->ed_uuid); 3689 (unsigned long long)device->ed_uuid);
3690 conn_request_state(device->connection, NS(conn, C_DISCONNECTING), CS_HARD); 3690 conn_request_state(first_peer_device(device)->connection, NS(conn, C_DISCONNECTING), CS_HARD);
3691 return -EIO; 3691 return -EIO;
3692 } 3692 }
3693 3693
3694 if (get_ldev(device)) { 3694 if (get_ldev(device)) {
3695 int skip_initial_sync = 3695 int skip_initial_sync =
3696 device->state.conn == C_CONNECTED && 3696 device->state.conn == C_CONNECTED &&
3697 device->connection->agreed_pro_version >= 90 && 3697 first_peer_device(device)->connection->agreed_pro_version >= 90 &&
3698 device->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && 3698 device->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
3699 (p_uuid[UI_FLAGS] & 8); 3699 (p_uuid[UI_FLAGS] & 8);
3700 if (skip_initial_sync) { 3700 if (skip_initial_sync) {
@@ -3777,7 +3777,7 @@ static int receive_req_state(struct drbd_connection *connection, struct packet_i
3777 mask.i = be32_to_cpu(p->mask); 3777 mask.i = be32_to_cpu(p->mask);
3778 val.i = be32_to_cpu(p->val); 3778 val.i = be32_to_cpu(p->val);
3779 3779
3780 if (test_bit(RESOLVE_CONFLICTS, &device->connection->flags) && 3780 if (test_bit(RESOLVE_CONFLICTS, &first_peer_device(device)->connection->flags) &&
3781 mutex_is_locked(device->state_mutex)) { 3781 mutex_is_locked(device->state_mutex)) {
3782 drbd_send_sr_reply(device, SS_CONCURRENT_ST_CHG); 3782 drbd_send_sr_reply(device, SS_CONCURRENT_ST_CHG);
3783 return 0; 3783 return 0;
@@ -3839,10 +3839,10 @@ static int receive_state(struct drbd_connection *connection, struct packet_info
3839 dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk)); 3839 dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
3840 } 3840 }
3841 3841
3842 spin_lock_irq(&device->connection->req_lock); 3842 spin_lock_irq(&first_peer_device(device)->connection->req_lock);
3843 retry: 3843 retry:
3844 os = ns = drbd_read_state(device); 3844 os = ns = drbd_read_state(device);
3845 spin_unlock_irq(&device->connection->req_lock); 3845 spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
3846 3846
3847 /* If some other part of the code (asender thread, timeout) 3847 /* If some other part of the code (asender thread, timeout)
3848 * already decided to close the connection again, 3848 * already decided to close the connection again,
@@ -3936,16 +3936,16 @@ static int receive_state(struct drbd_connection *connection, struct packet_info
3936 peer_state.disk = D_DISKLESS; 3936 peer_state.disk = D_DISKLESS;
3937 real_peer_disk = D_DISKLESS; 3937 real_peer_disk = D_DISKLESS;
3938 } else { 3938 } else {
3939 if (test_and_clear_bit(CONN_DRY_RUN, &device->connection->flags)) 3939 if (test_and_clear_bit(CONN_DRY_RUN, &first_peer_device(device)->connection->flags))
3940 return -EIO; 3940 return -EIO;
3941 D_ASSERT(os.conn == C_WF_REPORT_PARAMS); 3941 D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
3942 conn_request_state(device->connection, NS(conn, C_DISCONNECTING), CS_HARD); 3942 conn_request_state(first_peer_device(device)->connection, NS(conn, C_DISCONNECTING), CS_HARD);
3943 return -EIO; 3943 return -EIO;
3944 } 3944 }
3945 } 3945 }
3946 } 3946 }
3947 3947
3948 spin_lock_irq(&device->connection->req_lock); 3948 spin_lock_irq(&first_peer_device(device)->connection->req_lock);
3949 if (os.i != drbd_read_state(device).i) 3949 if (os.i != drbd_read_state(device).i)
3950 goto retry; 3950 goto retry;
3951 clear_bit(CONSIDER_RESYNC, &device->flags); 3951 clear_bit(CONSIDER_RESYNC, &device->flags);
@@ -3959,20 +3959,20 @@ static int receive_state(struct drbd_connection *connection, struct packet_info
3959 test_bit(NEW_CUR_UUID, &device->flags)) { 3959 test_bit(NEW_CUR_UUID, &device->flags)) {
3960 /* Do not allow tl_restart(RESEND) for a rebooted peer. We can only allow this 3960 /* Do not allow tl_restart(RESEND) for a rebooted peer. We can only allow this
3961 for temporary network outages! */ 3961 for temporary network outages! */
3962 spin_unlock_irq(&device->connection->req_lock); 3962 spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
3963 dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n"); 3963 dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
3964 tl_clear(device->connection); 3964 tl_clear(first_peer_device(device)->connection);
3965 drbd_uuid_new_current(device); 3965 drbd_uuid_new_current(device);
3966 clear_bit(NEW_CUR_UUID, &device->flags); 3966 clear_bit(NEW_CUR_UUID, &device->flags);
3967 conn_request_state(device->connection, NS2(conn, C_PROTOCOL_ERROR, susp, 0), CS_HARD); 3967 conn_request_state(first_peer_device(device)->connection, NS2(conn, C_PROTOCOL_ERROR, susp, 0), CS_HARD);
3968 return -EIO; 3968 return -EIO;
3969 } 3969 }
3970 rv = _drbd_set_state(device, ns, cs_flags, NULL); 3970 rv = _drbd_set_state(device, ns, cs_flags, NULL);
3971 ns = drbd_read_state(device); 3971 ns = drbd_read_state(device);
3972 spin_unlock_irq(&device->connection->req_lock); 3972 spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
3973 3973
3974 if (rv < SS_SUCCESS) { 3974 if (rv < SS_SUCCESS) {
3975 conn_request_state(device->connection, NS(conn, C_DISCONNECTING), CS_HARD); 3975 conn_request_state(first_peer_device(device)->connection, NS(conn, C_DISCONNECTING), CS_HARD);
3976 return -EIO; 3976 return -EIO;
3977 } 3977 }
3978 3978
@@ -4038,7 +4038,7 @@ receive_bitmap_plain(struct drbd_device *device, unsigned int size,
4038 unsigned long *p, struct bm_xfer_ctx *c) 4038 unsigned long *p, struct bm_xfer_ctx *c)
4039{ 4039{
4040 unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE - 4040 unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE -
4041 drbd_header_size(device->connection); 4041 drbd_header_size(first_peer_device(device)->connection);
4042 unsigned int num_words = min_t(size_t, data_size / sizeof(*p), 4042 unsigned int num_words = min_t(size_t, data_size / sizeof(*p),
4043 c->bm_words - c->word_offset); 4043 c->bm_words - c->word_offset);
4044 unsigned int want = num_words * sizeof(*p); 4044 unsigned int want = num_words * sizeof(*p);
@@ -4050,7 +4050,7 @@ receive_bitmap_plain(struct drbd_device *device, unsigned int size,
4050 } 4050 }
4051 if (want == 0) 4051 if (want == 0)
4052 return 0; 4052 return 0;
4053 err = drbd_recv_all(device->connection, p, want); 4053 err = drbd_recv_all(first_peer_device(device)->connection, p, want);
4054 if (err) 4054 if (err)
4055 return err; 4055 return err;
4056 4056
@@ -4168,7 +4168,7 @@ decode_bitmap_c(struct drbd_device *device,
4168 * during all our tests. */ 4168 * during all our tests. */
4169 4169
4170 dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding); 4170 dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
4171 conn_request_state(device->connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD); 4171 conn_request_state(first_peer_device(device)->connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
4172 return -EIO; 4172 return -EIO;
4173} 4173}
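
The sizing arithmetic in receive_bitmap_plain() above deserves a concrete number. Assuming, purely for illustration, DRBD_SOCKET_BUFFER_SIZE = 4096 and a 16-byte packet header:

	data_size = 4096 - 16;				/* 4080 bytes of payload */
	num_words = 4080 / sizeof(unsigned long);	/* 510 words on a 64-bit host */

so one plain bitmap packet carries at most 510 64-bit words, i.e. 32640 bits; at DRBD's 4 KiB-per-bit bitmap granularity that is roughly 127.5 MiB of device space described per packet.
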
4174 4174
@@ -4176,7 +4176,7 @@ void INFO_bm_xfer_stats(struct drbd_device *device,
4176 const char *direction, struct bm_xfer_ctx *c) 4176 const char *direction, struct bm_xfer_ctx *c)
4177{ 4177{
4178 /* what would it take to transfer it "plaintext" */ 4178 /* what would it take to transfer it "plaintext" */
4179 unsigned int header_size = drbd_header_size(device->connection); 4179 unsigned int header_size = drbd_header_size(first_peer_device(device)->connection);
4180 unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE - header_size; 4180 unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
4181 unsigned int plain = 4181 unsigned int plain =
4182 header_size * (DIV_ROUND_UP(c->bm_words, data_size) + 1) + 4182 header_size * (DIV_ROUND_UP(c->bm_words, data_size) + 1) +
@@ -4253,7 +4253,7 @@ static int receive_bitmap(struct drbd_connection *connection, struct packet_info
4253 err = -EIO; 4253 err = -EIO;
4254 goto out; 4254 goto out;
4255 } 4255 }
4256 err = drbd_recv_all(device->connection, p, pi->size); 4256 err = drbd_recv_all(first_peer_device(device)->connection, p, pi->size);
4257 if (err) 4257 if (err)
4258 goto out; 4258 goto out;
4259 err = decode_bitmap_c(device, p, &c, pi->size); 4259 err = decode_bitmap_c(device, p, &c, pi->size);
@@ -4271,7 +4271,7 @@ static int receive_bitmap(struct drbd_connection *connection, struct packet_info
4271 goto out; 4271 goto out;
4272 break; 4272 break;
4273 } 4273 }
4274 err = drbd_recv_header(device->connection, pi); 4274 err = drbd_recv_header(first_peer_device(device)->connection, pi);
4275 if (err) 4275 if (err)
4276 goto out; 4276 goto out;
4277 } 4277 }
@@ -4491,11 +4491,11 @@ static int drbd_disconnected(struct drbd_device *device)
4491 unsigned int i; 4491 unsigned int i;
4492 4492
4493 /* wait for current activity to cease. */ 4493 /* wait for current activity to cease. */
4494 spin_lock_irq(&device->connection->req_lock); 4494 spin_lock_irq(&first_peer_device(device)->connection->req_lock);
4495 _drbd_wait_ee_list_empty(device, &device->active_ee); 4495 _drbd_wait_ee_list_empty(device, &device->active_ee);
4496 _drbd_wait_ee_list_empty(device, &device->sync_ee); 4496 _drbd_wait_ee_list_empty(device, &device->sync_ee);
4497 _drbd_wait_ee_list_empty(device, &device->read_ee); 4497 _drbd_wait_ee_list_empty(device, &device->read_ee);
4498 spin_unlock_irq(&device->connection->req_lock); 4498 spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
4499 4499
4500 /* We do not have data structures that would allow us to 4500 /* We do not have data structures that would allow us to
4501 * get the rs_pending_cnt down to 0 again. 4501 * get the rs_pending_cnt down to 0 again.
@@ -4536,7 +4536,7 @@ static int drbd_disconnected(struct drbd_device *device)
4536 device->p_uuid = NULL; 4536 device->p_uuid = NULL;
4537 4537
4538 if (!drbd_suspended(device)) 4538 if (!drbd_suspended(device))
4539 tl_clear(device->connection); 4539 tl_clear(first_peer_device(device)->connection);
4540 4540
4541 drbd_md_sync(device); 4541 drbd_md_sync(device);
4542 4542
@@ -4937,7 +4937,7 @@ static int got_IsInSync(struct drbd_connection *connection, struct packet_info *
4937 if (!device) 4937 if (!device)
4938 return -EIO; 4938 return -EIO;
4939 4939
4940 D_ASSERT(device->connection->agreed_pro_version >= 89); 4940 D_ASSERT(first_peer_device(device)->connection->agreed_pro_version >= 89);
4941 4941
4942 update_peer_seq(device, be32_to_cpu(p->seq_num)); 4942 update_peer_seq(device, be32_to_cpu(p->seq_num));
4943 4943
@@ -4962,14 +4962,14 @@ validate_req_change_req_state(struct drbd_device *device, u64 id, sector_t secto
4962 struct drbd_request *req; 4962 struct drbd_request *req;
4963 struct bio_and_error m; 4963 struct bio_and_error m;
4964 4964
4965 spin_lock_irq(&device->connection->req_lock); 4965 spin_lock_irq(&first_peer_device(device)->connection->req_lock);
4966 req = find_request(device, root, id, sector, missing_ok, func); 4966 req = find_request(device, root, id, sector, missing_ok, func);
4967 if (unlikely(!req)) { 4967 if (unlikely(!req)) {
4968 spin_unlock_irq(&device->connection->req_lock); 4968 spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
4969 return -EIO; 4969 return -EIO;
4970 } 4970 }
4971 __req_mod(req, what, &m); 4971 __req_mod(req, what, &m);
4972 spin_unlock_irq(&device->connection->req_lock); 4972 spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
4973 4973
4974 if (m.bio) 4974 if (m.bio)
4975 complete_master_bio(device, &m); 4975 complete_master_bio(device, &m);
@@ -5169,7 +5169,7 @@ static int got_OVResult(struct drbd_connection *connection, struct packet_info *
5169 if (w) { 5169 if (w) {
5170 w->cb = w_ov_finished; 5170 w->cb = w_ov_finished;
5171 w->device = device; 5171 w->device = device;
5172 drbd_queue_work(&device->connection->sender_work, w); 5172 drbd_queue_work(&first_peer_device(device)->connection->sender_work, w);
5173 } else { 5173 } else {
5174 dev_err(DEV, "kmalloc(w) failed."); 5174 dev_err(DEV, "kmalloc(w) failed.");
5175 ov_out_of_sync_print(device); 5175 ov_out_of_sync_print(device);
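
All of the receiver changes above are the same mechanical substitution: device->connection becomes first_peer_device(device)->connection. The helper itself is introduced in drbd_int.h, whose hunk is not shown here; a plausible sketch of its shape, assuming the peer devices hang off the device in a list (the field and list names are assumptions, only the accessor name and its device/connection endpoints are visible in this diff):

	struct drbd_peer_device {
		struct list_head peer_devices;	/* entry in device->peer_devices */
		struct drbd_device *device;
		struct drbd_connection *connection;
	};

	static inline struct drbd_peer_device *
	first_peer_device(struct drbd_device *device)
	{
		/* With a single peer per device -- the only case that
		 * exists so far -- "first" is also "only". */
		return list_first_entry(&device->peer_devices,
					struct drbd_peer_device, peer_devices);
	}
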
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index a33a35e4655d..dd1033472763 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -274,8 +274,8 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
274 * and reset the transfer log epoch write_cnt. 274 * and reset the transfer log epoch write_cnt.
275 */ 275 */
276 if (rw == WRITE && 276 if (rw == WRITE &&
277 req->epoch == atomic_read(&device->connection->current_tle_nr)) 277 req->epoch == atomic_read(&first_peer_device(device)->connection->current_tle_nr))
278 start_new_tl_epoch(device->connection); 278 start_new_tl_epoch(first_peer_device(device)->connection);
279 279
280 /* Update disk stats */ 280 /* Update disk stats */
281 _drbd_end_io_acct(device, req); 281 _drbd_end_io_acct(device, req);
@@ -477,7 +477,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
477 * and from w_read_retry_remote */ 477 * and from w_read_retry_remote */
478 D_ASSERT(!(req->rq_state & RQ_NET_MASK)); 478 D_ASSERT(!(req->rq_state & RQ_NET_MASK));
479 rcu_read_lock(); 479 rcu_read_lock();
480 nc = rcu_dereference(device->connection->net_conf); 480 nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
481 p = nc->wire_protocol; 481 p = nc->wire_protocol;
482 rcu_read_unlock(); 482 rcu_read_unlock();
483 req->rq_state |= 483 req->rq_state |=
@@ -542,7 +542,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
542 D_ASSERT((req->rq_state & RQ_LOCAL_MASK) == 0); 542 D_ASSERT((req->rq_state & RQ_LOCAL_MASK) == 0);
543 mod_rq_state(req, m, 0, RQ_NET_QUEUED); 543 mod_rq_state(req, m, 0, RQ_NET_QUEUED);
544 req->w.cb = w_send_read_req; 544 req->w.cb = w_send_read_req;
545 drbd_queue_work(&device->connection->sender_work, &req->w); 545 drbd_queue_work(&first_peer_device(device)->connection->sender_work, &req->w);
546 break; 546 break;
547 547
548 case QUEUE_FOR_NET_WRITE: 548 case QUEUE_FOR_NET_WRITE:
@@ -577,22 +577,22 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
577 D_ASSERT(req->rq_state & RQ_NET_PENDING); 577 D_ASSERT(req->rq_state & RQ_NET_PENDING);
578 mod_rq_state(req, m, 0, RQ_NET_QUEUED|RQ_EXP_BARR_ACK); 578 mod_rq_state(req, m, 0, RQ_NET_QUEUED|RQ_EXP_BARR_ACK);
579 req->w.cb = w_send_dblock; 579 req->w.cb = w_send_dblock;
580 drbd_queue_work(&device->connection->sender_work, &req->w); 580 drbd_queue_work(&first_peer_device(device)->connection->sender_work, &req->w);
581 581
582 /* close the epoch, in case it outgrew the limit */ 582 /* close the epoch, in case it outgrew the limit */
583 rcu_read_lock(); 583 rcu_read_lock();
584 nc = rcu_dereference(device->connection->net_conf); 584 nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
585 p = nc->max_epoch_size; 585 p = nc->max_epoch_size;
586 rcu_read_unlock(); 586 rcu_read_unlock();
587 if (device->connection->current_tle_writes >= p) 587 if (first_peer_device(device)->connection->current_tle_writes >= p)
588 start_new_tl_epoch(device->connection); 588 start_new_tl_epoch(first_peer_device(device)->connection);
589 589
590 break; 590 break;
591 591
592 case QUEUE_FOR_SEND_OOS: 592 case QUEUE_FOR_SEND_OOS:
593 mod_rq_state(req, m, 0, RQ_NET_QUEUED); 593 mod_rq_state(req, m, 0, RQ_NET_QUEUED);
594 req->w.cb = w_send_out_of_sync; 594 req->w.cb = w_send_out_of_sync;
595 drbd_queue_work(&device->connection->sender_work, &req->w); 595 drbd_queue_work(&first_peer_device(device)->connection->sender_work, &req->w);
596 break; 596 break;
597 597
598 case READ_RETRY_REMOTE_CANCELED: 598 case READ_RETRY_REMOTE_CANCELED:
@@ -704,7 +704,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
704 704
705 get_ldev(device); /* always succeeds in this call path */ 705 get_ldev(device); /* always succeeds in this call path */
706 req->w.cb = w_restart_disk_io; 706 req->w.cb = w_restart_disk_io;
707 drbd_queue_work(&device->connection->sender_work, &req->w); 707 drbd_queue_work(&first_peer_device(device)->connection->sender_work, &req->w);
708 break; 708 break;
709 709
710 case RESEND: 710 case RESEND:
@@ -725,7 +725,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
725 725
726 mod_rq_state(req, m, RQ_COMPLETION_SUSP, RQ_NET_QUEUED|RQ_NET_PENDING); 726 mod_rq_state(req, m, RQ_COMPLETION_SUSP, RQ_NET_QUEUED|RQ_NET_PENDING);
727 if (req->w.cb) { 727 if (req->w.cb) {
728 drbd_queue_work(&device->connection->sender_work, &req->w); 728 drbd_queue_work(&first_peer_device(device)->connection->sender_work, &req->w);
729 rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ; 729 rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ;
730 } /* else: FIXME can this happen? */ 730 } /* else: FIXME can this happen? */
731 break; 731 break;
@@ -757,7 +757,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
757 break; 757 break;
758 758
759 case QUEUE_AS_DRBD_BARRIER: 759 case QUEUE_AS_DRBD_BARRIER:
760 start_new_tl_epoch(device->connection); 760 start_new_tl_epoch(first_peer_device(device)->connection);
761 mod_rq_state(req, m, 0, RQ_NET_OK|RQ_NET_DONE); 761 mod_rq_state(req, m, 0, RQ_NET_OK|RQ_NET_DONE);
762 break; 762 break;
763 }; 763 };
@@ -851,9 +851,9 @@ static void complete_conflicting_writes(struct drbd_request *req)
851 break; 851 break;
852 /* Indicate to wake up device->misc_wait on progress. */ 852 /* Indicate to wake up device->misc_wait on progress. */
853 i->waiting = true; 853 i->waiting = true;
854 spin_unlock_irq(&device->connection->req_lock); 854 spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
855 schedule(); 855 schedule();
856 spin_lock_irq(&device->connection->req_lock); 856 spin_lock_irq(&first_peer_device(device)->connection->req_lock);
857 } 857 }
858 finish_wait(&device->misc_wait, &wait); 858 finish_wait(&device->misc_wait, &wait);
859} 859}
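
complete_conflicting_writes() above is the classic drop-lock-and-sleep idiom: flag the conflicting interval so whoever completes it knows to wake device->misc_wait, give up req_lock so that completion can actually run, schedule(), then retake the lock and re-scan. As a standalone sketch (find_conflict() is a hypothetical stand-in for the interval-tree lookup):

	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&device->misc_wait, &wait, TASK_UNINTERRUPTIBLE);
		i = find_conflict(device, req);		/* hypothetical re-scan */
		if (!i)
			break;
		i->waiting = true;			/* completer will wake misc_wait */
		spin_unlock_irq(&connection->req_lock);
		schedule();				/* sleep with the lock dropped */
		spin_lock_irq(&connection->req_lock);
	}
	finish_wait(&device->misc_wait, &wait);
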
@@ -861,7 +861,7 @@ static void complete_conflicting_writes(struct drbd_request *req)
861/* called within req_lock and rcu_read_lock() */ 861/* called within req_lock and rcu_read_lock() */
862static void maybe_pull_ahead(struct drbd_device *device) 862static void maybe_pull_ahead(struct drbd_device *device)
863{ 863{
864 struct drbd_connection *connection = device->connection; 864 struct drbd_connection *connection = first_peer_device(device)->connection;
865 struct net_conf *nc; 865 struct net_conf *nc;
866 bool congested = false; 866 bool congested = false;
867 enum drbd_on_congestion on_congestion; 867 enum drbd_on_congestion on_congestion;
@@ -894,7 +894,7 @@ static void maybe_pull_ahead(struct drbd_device *device)
894 894
895 if (congested) { 895 if (congested) {
896 /* start a new epoch for non-mirrored writes */ 896 /* start a new epoch for non-mirrored writes */
897 start_new_tl_epoch(device->connection); 897 start_new_tl_epoch(first_peer_device(device)->connection);
898 898
899 if (on_congestion == OC_PULL_AHEAD) 899 if (on_congestion == OC_PULL_AHEAD)
900 _drbd_set_state(_NS(device, conn, C_AHEAD), 0, NULL); 900 _drbd_set_state(_NS(device, conn, C_AHEAD), 0, NULL);
@@ -1078,7 +1078,7 @@ static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request
1078 struct bio_and_error m = { NULL, }; 1078 struct bio_and_error m = { NULL, };
1079 bool no_remote = false; 1079 bool no_remote = false;
1080 1080
1081 spin_lock_irq(&device->connection->req_lock); 1081 spin_lock_irq(&first_peer_device(device)->connection->req_lock);
1082 if (rw == WRITE) { 1082 if (rw == WRITE) {
1083 /* This may temporarily give up the req_lock, 1083 /* This may temporarily give up the req_lock,
1084 * but will re-acquire it before it returns here. 1084 * but will re-acquire it before it returns here.
@@ -1112,15 +1112,15 @@ static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request
1112 } 1112 }
1113 1113
1114 /* which transfer log epoch does this belong to? */ 1114 /* which transfer log epoch does this belong to? */
1115 req->epoch = atomic_read(&device->connection->current_tle_nr); 1115 req->epoch = atomic_read(&first_peer_device(device)->connection->current_tle_nr);
1116 1116
1117 /* no point in adding empty flushes to the transfer log, 1117 /* no point in adding empty flushes to the transfer log,
1118 * they are mapped to drbd barriers already. */ 1118 * they are mapped to drbd barriers already. */
1119 if (likely(req->i.size!=0)) { 1119 if (likely(req->i.size!=0)) {
1120 if (rw == WRITE) 1120 if (rw == WRITE)
1121 device->connection->current_tle_writes++; 1121 first_peer_device(device)->connection->current_tle_writes++;
1122 1122
1123 list_add_tail(&req->tl_requests, &device->connection->transfer_log); 1123 list_add_tail(&req->tl_requests, &first_peer_device(device)->connection->transfer_log);
1124 } 1124 }
1125 1125
1126 if (rw == WRITE) { 1126 if (rw == WRITE) {
@@ -1140,9 +1140,9 @@ static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request
1140 /* needs to be marked within the same spinlock */ 1140 /* needs to be marked within the same spinlock */
1141 _req_mod(req, TO_BE_SUBMITTED); 1141 _req_mod(req, TO_BE_SUBMITTED);
1142 /* but we need to give up the spinlock to submit */ 1142 /* but we need to give up the spinlock to submit */
1143 spin_unlock_irq(&device->connection->req_lock); 1143 spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
1144 drbd_submit_req_private_bio(req); 1144 drbd_submit_req_private_bio(req);
1145 spin_lock_irq(&device->connection->req_lock); 1145 spin_lock_irq(&first_peer_device(device)->connection->req_lock);
1146 } else if (no_remote) { 1146 } else if (no_remote) {
1147nodata: 1147nodata:
1148 if (__ratelimit(&drbd_ratelimit_state)) 1148 if (__ratelimit(&drbd_ratelimit_state))
@@ -1155,7 +1155,7 @@ nodata:
1155out: 1155out:
1156 if (drbd_req_put_completion_ref(req, &m, 1)) 1156 if (drbd_req_put_completion_ref(req, &m, 1))
1157 kref_put(&req->kref, drbd_req_destroy); 1157 kref_put(&req->kref, drbd_req_destroy);
1158 spin_unlock_irq(&device->connection->req_lock); 1158 spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
1159 1159
1160 if (m.bio) 1160 if (m.bio)
1161 complete_master_bio(device, &m); 1161 complete_master_bio(device, &m);
@@ -1336,7 +1336,7 @@ static struct drbd_request *find_oldest_request(struct drbd_connection *connecti
1336void request_timer_fn(unsigned long data) 1336void request_timer_fn(unsigned long data)
1337{ 1337{
1338 struct drbd_device *device = (struct drbd_device *) data; 1338 struct drbd_device *device = (struct drbd_device *) data;
1339 struct drbd_connection *connection = device->connection; 1339 struct drbd_connection *connection = first_peer_device(device)->connection;
1340 struct drbd_request *req; /* oldest request */ 1340 struct drbd_request *req; /* oldest request */
1341 struct net_conf *nc; 1341 struct net_conf *nc;
1342 unsigned long ent = 0, dt = 0, et, nt; /* effective timeout = ko_count * timeout */ 1342 unsigned long ent = 0, dt = 0, et, nt; /* effective timeout = ko_count * timeout */
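
Two styles coexist in drbd_req.c after this change: most call sites spell out first_peer_device(device)->connection inline, while maybe_pull_ahead() and request_timer_fn() hoist it into a local once at the top of the function. Wherever the connection is touched more than once, the hoisted form is both cheaper (one list lookup) and easier to read:

	struct drbd_connection *connection = first_peer_device(device)->connection;

	spin_lock_irq(&connection->req_lock);
	/* ... everything below reuses the cached pointer ... */
	spin_unlock_irq(&connection->req_lock);
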
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
index 407404bb8807..27283e619a07 100644
--- a/drivers/block/drbd/drbd_req.h
+++ b/drivers/block/drbd/drbd_req.h
@@ -318,9 +318,9 @@ static inline int req_mod(struct drbd_request *req,
318 struct bio_and_error m; 318 struct bio_and_error m;
319 int rv; 319 int rv;
320 320
321 spin_lock_irqsave(&device->connection->req_lock, flags); 321 spin_lock_irqsave(&first_peer_device(device)->connection->req_lock, flags);
322 rv = __req_mod(req, what, &m); 322 rv = __req_mod(req, what, &m);
323 spin_unlock_irqrestore(&device->connection->req_lock, flags); 323 spin_unlock_irqrestore(&first_peer_device(device)->connection->req_lock, flags);
324 324
325 if (m.bio) 325 if (m.bio)
326 complete_master_bio(device, &m); 326 complete_master_bio(device, &m);
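
The drbd_req.h hunk above shows the locking contract for request state changes: __req_mod() expects req_lock to be held by the caller, while req_mod() is the wrapper that takes it (with irqsave, since completions may arrive in interrupt context). Illustrative calls under that contract (the two event names are merely examples from enum drbd_req_event):

	struct bio_and_error m;
	unsigned long flags;

	/* lock not held -- use the wrapper: */
	req_mod(req, CONNECTION_LOST_WHILE_PENDING);

	/* lock already held, e.g. on an endio path: */
	spin_lock_irqsave(&first_peer_device(device)->connection->req_lock, flags);
	__req_mod(req, WRITE_COMPLETED_WITH_ERROR, &m);
	spin_unlock_irqrestore(&first_peer_device(device)->connection->req_lock, flags);
	if (m.bio)
		complete_master_bio(device, &m);
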
diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c
index ecc63cf85d85..22c4e7d57a80 100644
--- a/drivers/block/drbd/drbd_state.c
+++ b/drivers/block/drbd/drbd_state.c
@@ -237,10 +237,10 @@ drbd_change_state(struct drbd_device *device, enum chg_state_flags f,
237 union drbd_state ns; 237 union drbd_state ns;
238 enum drbd_state_rv rv; 238 enum drbd_state_rv rv;
239 239
240 spin_lock_irqsave(&device->connection->req_lock, flags); 240 spin_lock_irqsave(&first_peer_device(device)->connection->req_lock, flags);
241 ns = apply_mask_val(drbd_read_state(device), mask, val); 241 ns = apply_mask_val(drbd_read_state(device), mask, val);
242 rv = _drbd_set_state(device, ns, f, NULL); 242 rv = _drbd_set_state(device, ns, f, NULL);
243 spin_unlock_irqrestore(&device->connection->req_lock, flags); 243 spin_unlock_irqrestore(&first_peer_device(device)->connection->req_lock, flags);
244 244
245 return rv; 245 return rv;
246} 246}
@@ -271,7 +271,7 @@ _req_st_cond(struct drbd_device *device, union drbd_state mask,
271 if (test_and_clear_bit(CL_ST_CHG_FAIL, &device->flags)) 271 if (test_and_clear_bit(CL_ST_CHG_FAIL, &device->flags))
272 return SS_CW_FAILED_BY_PEER; 272 return SS_CW_FAILED_BY_PEER;
273 273
274 spin_lock_irqsave(&device->connection->req_lock, flags); 274 spin_lock_irqsave(&first_peer_device(device)->connection->req_lock, flags);
275 os = drbd_read_state(device); 275 os = drbd_read_state(device);
276 ns = sanitize_state(device, apply_mask_val(os, mask, val), NULL); 276 ns = sanitize_state(device, apply_mask_val(os, mask, val), NULL);
277 rv = is_valid_transition(os, ns); 277 rv = is_valid_transition(os, ns);
@@ -283,12 +283,12 @@ _req_st_cond(struct drbd_device *device, union drbd_state mask,
283 if (rv == SS_UNKNOWN_ERROR) { 283 if (rv == SS_UNKNOWN_ERROR) {
284 rv = is_valid_state(device, ns); 284 rv = is_valid_state(device, ns);
285 if (rv >= SS_SUCCESS) { 285 if (rv >= SS_SUCCESS) {
286 rv = is_valid_soft_transition(os, ns, device->connection); 286 rv = is_valid_soft_transition(os, ns, first_peer_device(device)->connection);
287 if (rv >= SS_SUCCESS) 287 if (rv >= SS_SUCCESS)
288 rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */ 288 rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */
289 } 289 }
290 } 290 }
291 spin_unlock_irqrestore(&device->connection->req_lock, flags); 291 spin_unlock_irqrestore(&first_peer_device(device)->connection->req_lock, flags);
292 292
293 return rv; 293 return rv;
294} 294}
@@ -317,20 +317,20 @@ drbd_req_state(struct drbd_device *device, union drbd_state mask,
317 if (f & CS_SERIALIZE) 317 if (f & CS_SERIALIZE)
318 mutex_lock(device->state_mutex); 318 mutex_lock(device->state_mutex);
319 319
320 spin_lock_irqsave(&device->connection->req_lock, flags); 320 spin_lock_irqsave(&first_peer_device(device)->connection->req_lock, flags);
321 os = drbd_read_state(device); 321 os = drbd_read_state(device);
322 ns = sanitize_state(device, apply_mask_val(os, mask, val), NULL); 322 ns = sanitize_state(device, apply_mask_val(os, mask, val), NULL);
323 rv = is_valid_transition(os, ns); 323 rv = is_valid_transition(os, ns);
324 if (rv < SS_SUCCESS) { 324 if (rv < SS_SUCCESS) {
325 spin_unlock_irqrestore(&device->connection->req_lock, flags); 325 spin_unlock_irqrestore(&first_peer_device(device)->connection->req_lock, flags);
326 goto abort; 326 goto abort;
327 } 327 }
328 328
329 if (cl_wide_st_chg(device, os, ns)) { 329 if (cl_wide_st_chg(device, os, ns)) {
330 rv = is_valid_state(device, ns); 330 rv = is_valid_state(device, ns);
331 if (rv == SS_SUCCESS) 331 if (rv == SS_SUCCESS)
332 rv = is_valid_soft_transition(os, ns, device->connection); 332 rv = is_valid_soft_transition(os, ns, first_peer_device(device)->connection);
333 spin_unlock_irqrestore(&device->connection->req_lock, flags); 333 spin_unlock_irqrestore(&first_peer_device(device)->connection->req_lock, flags);
334 334
335 if (rv < SS_SUCCESS) { 335 if (rv < SS_SUCCESS) {
336 if (f & CS_VERBOSE) 336 if (f & CS_VERBOSE)
@@ -353,17 +353,17 @@ drbd_req_state(struct drbd_device *device, union drbd_state mask,
353 print_st_err(device, os, ns, rv); 353 print_st_err(device, os, ns, rv);
354 goto abort; 354 goto abort;
355 } 355 }
356 spin_lock_irqsave(&device->connection->req_lock, flags); 356 spin_lock_irqsave(&first_peer_device(device)->connection->req_lock, flags);
357 ns = apply_mask_val(drbd_read_state(device), mask, val); 357 ns = apply_mask_val(drbd_read_state(device), mask, val);
358 rv = _drbd_set_state(device, ns, f, &done); 358 rv = _drbd_set_state(device, ns, f, &done);
359 } else { 359 } else {
360 rv = _drbd_set_state(device, ns, f, &done); 360 rv = _drbd_set_state(device, ns, f, &done);
361 } 361 }
362 362
363 spin_unlock_irqrestore(&device->connection->req_lock, flags); 363 spin_unlock_irqrestore(&first_peer_device(device)->connection->req_lock, flags);
364 364
365 if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) { 365 if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) {
366 D_ASSERT(current != device->connection->worker.task); 366 D_ASSERT(current != first_peer_device(device)->connection->worker.task);
367 wait_for_completion(&done); 367 wait_for_completion(&done);
368 } 368 }
369 369
@@ -519,12 +519,12 @@ is_valid_state(struct drbd_device *device, union drbd_state ns)
519 put_ldev(device); 519 put_ldev(device);
520 } 520 }
521 521
522 nc = rcu_dereference(device->connection->net_conf); 522 nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
523 if (nc) { 523 if (nc) {
524 if (!nc->two_primaries && ns.role == R_PRIMARY) { 524 if (!nc->two_primaries && ns.role == R_PRIMARY) {
525 if (ns.peer == R_PRIMARY) 525 if (ns.peer == R_PRIMARY)
526 rv = SS_TWO_PRIMARIES; 526 rv = SS_TWO_PRIMARIES;
527 else if (conn_highest_peer(device->connection) == R_PRIMARY) 527 else if (conn_highest_peer(first_peer_device(device)->connection) == R_PRIMARY)
528 rv = SS_O_VOL_PEER_PRI; 528 rv = SS_O_VOL_PEER_PRI;
529 } 529 }
530 } 530 }
@@ -565,7 +565,7 @@ is_valid_state(struct drbd_device *device, union drbd_state ns)
565 rv = SS_NO_VERIFY_ALG; 565 rv = SS_NO_VERIFY_ALG;
566 566
567 else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && 567 else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
568 device->connection->agreed_pro_version < 88) 568 first_peer_device(device)->connection->agreed_pro_version < 88)
569 rv = SS_NOT_SUPPORTED; 569 rv = SS_NOT_SUPPORTED;
570 570
571 else if (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE) 571 else if (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)
@@ -871,7 +871,7 @@ static union drbd_state sanitize_state(struct drbd_device *device, union drbd_st
871 (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED)) 871 (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED))
872 ns.susp_fen = 1; /* Suspend IO while fence-peer handler runs (peer lost) */ 872 ns.susp_fen = 1; /* Suspend IO while fence-peer handler runs (peer lost) */
873 873
874 if (device->connection->res_opts.on_no_data == OND_SUSPEND_IO && 874 if (first_peer_device(device)->connection->res_opts.on_no_data == OND_SUSPEND_IO &&
875 (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)) 875 (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
876 ns.susp_nod = 1; /* Suspend IO while no data available (no accessible data available) */ 876 ns.susp_nod = 1; /* Suspend IO while no data available (no accessible data available) */
877 877
@@ -899,7 +899,7 @@ void drbd_resume_al(struct drbd_device *device)
899/* helper for __drbd_set_state */ 899/* helper for __drbd_set_state */
900static void set_ov_position(struct drbd_device *device, enum drbd_conns cs) 900static void set_ov_position(struct drbd_device *device, enum drbd_conns cs)
901{ 901{
902 if (device->connection->agreed_pro_version < 90) 902 if (first_peer_device(device)->connection->agreed_pro_version < 90)
903 device->ov_start_sector = 0; 903 device->ov_start_sector = 0;
904 device->rs_total = drbd_bm_bits(device); 904 device->rs_total = drbd_bm_bits(device);
905 device->ov_position = 0; 905 device->ov_position = 0;
@@ -962,9 +962,9 @@ __drbd_set_state(struct drbd_device *device, union drbd_state ns,
962 this happen...*/ 962 this happen...*/
963 963
964 if (is_valid_state(device, os) == rv) 964 if (is_valid_state(device, os) == rv)
965 rv = is_valid_soft_transition(os, ns, device->connection); 965 rv = is_valid_soft_transition(os, ns, first_peer_device(device)->connection);
966 } else 966 } else
967 rv = is_valid_soft_transition(os, ns, device->connection); 967 rv = is_valid_soft_transition(os, ns, first_peer_device(device)->connection);
968 } 968 }
969 969
970 if (rv < SS_SUCCESS) { 970 if (rv < SS_SUCCESS) {
@@ -981,7 +981,8 @@ __drbd_set_state(struct drbd_device *device, union drbd_state ns,
981 sanitize_state(). Only display it here if we were not called from 981 sanitize_state(). Only display it here if we were not called from
982 _conn_request_state() */ 982 _conn_request_state() */
983 if (!(flags & CS_DC_SUSP)) 983 if (!(flags & CS_DC_SUSP))
984 conn_pr_state_change(device->connection, os, ns, (flags & ~CS_DC_MASK) | CS_DC_SUSP); 984 conn_pr_state_change(first_peer_device(device)->connection, os, ns,
985 (flags & ~CS_DC_MASK) | CS_DC_SUSP);
985 986
986 /* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference 987 /* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
987 * on the ldev here, to be sure the transition -> D_DISKLESS resp. 988 * on the ldev here, to be sure the transition -> D_DISKLESS resp.
@@ -994,25 +995,25 @@ __drbd_set_state(struct drbd_device *device, union drbd_state ns,
994 did_remote = drbd_should_do_remote(device->state); 995 did_remote = drbd_should_do_remote(device->state);
995 device->state.i = ns.i; 996 device->state.i = ns.i;
996 should_do_remote = drbd_should_do_remote(device->state); 997 should_do_remote = drbd_should_do_remote(device->state);
997 device->connection->susp = ns.susp; 998 first_peer_device(device)->connection->susp = ns.susp;
998 device->connection->susp_nod = ns.susp_nod; 999 first_peer_device(device)->connection->susp_nod = ns.susp_nod;
999 device->connection->susp_fen = ns.susp_fen; 1000 first_peer_device(device)->connection->susp_fen = ns.susp_fen;
1000 1001
1001 /* put replicated vs not-replicated requests in separate epochs */ 1002 /* put replicated vs not-replicated requests in separate epochs */
1002 if (did_remote != should_do_remote) 1003 if (did_remote != should_do_remote)
1003 start_new_tl_epoch(device->connection); 1004 start_new_tl_epoch(first_peer_device(device)->connection);
1004 1005
1005 if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING) 1006 if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING)
1006 drbd_print_uuids(device, "attached to UUIDs"); 1007 drbd_print_uuids(device, "attached to UUIDs");
1007 1008
1008 /* Wake up role changes, that were delayed because of connection establishing */ 1009 /* Wake up role changes, that were delayed because of connection establishing */
1009 if (os.conn == C_WF_REPORT_PARAMS && ns.conn != C_WF_REPORT_PARAMS && 1010 if (os.conn == C_WF_REPORT_PARAMS && ns.conn != C_WF_REPORT_PARAMS &&
1010 no_peer_wf_report_params(device->connection)) 1011 no_peer_wf_report_params(first_peer_device(device)->connection))
1011 clear_bit(STATE_SENT, &device->connection->flags); 1012 clear_bit(STATE_SENT, &first_peer_device(device)->connection->flags);
1012 1013
1013 wake_up(&device->misc_wait); 1014 wake_up(&device->misc_wait);
1014 wake_up(&device->state_wait); 1015 wake_up(&device->state_wait);
1015 wake_up(&device->connection->ping_wait); 1016 wake_up(&first_peer_device(device)->connection->ping_wait);
1016 1017
1017 /* Aborted verify run, or we reached the stop sector. 1018 /* Aborted verify run, or we reached the stop sector.
1018 * Log the last position, unless end-of-device. */ 1019 * Log the last position, unless end-of-device. */
@@ -1101,21 +1102,21 @@ __drbd_set_state(struct drbd_device *device, union drbd_state ns,
1101 1102
1102 /* Receiver should clean up itself */ 1103 /* Receiver should clean up itself */
1103 if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING) 1104 if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING)
1104 drbd_thread_stop_nowait(&device->connection->receiver); 1105 drbd_thread_stop_nowait(&first_peer_device(device)->connection->receiver);
1105 1106
1106 /* Now the receiver finished cleaning up itself, it should die */ 1107 /* Now the receiver finished cleaning up itself, it should die */
1107 if (os.conn != C_STANDALONE && ns.conn == C_STANDALONE) 1108 if (os.conn != C_STANDALONE && ns.conn == C_STANDALONE)
1108 drbd_thread_stop_nowait(&device->connection->receiver); 1109 drbd_thread_stop_nowait(&first_peer_device(device)->connection->receiver);
1109 1110
1110 /* Upon network failure, we need to restart the receiver. */ 1111 /* Upon network failure, we need to restart the receiver. */
1111 if (os.conn > C_WF_CONNECTION && 1112 if (os.conn > C_WF_CONNECTION &&
1112 ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT) 1113 ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT)
1113 drbd_thread_restart_nowait(&device->connection->receiver); 1114 drbd_thread_restart_nowait(&first_peer_device(device)->connection->receiver);
1114 1115
1115 /* Resume AL writing if we get a connection */ 1116 /* Resume AL writing if we get a connection */
1116 if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) { 1117 if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
1117 drbd_resume_al(device); 1118 drbd_resume_al(device);
1118 device->connection->connect_cnt++; 1119 first_peer_device(device)->connection->connect_cnt++;
1119 } 1120 }
1120 1121
1121 /* remember last attach time so request_timer_fn() won't 1122 /* remember last attach time so request_timer_fn() won't
@@ -1133,7 +1134,7 @@ __drbd_set_state(struct drbd_device *device, union drbd_state ns,
1133 ascw->w.cb = w_after_state_ch; 1134 ascw->w.cb = w_after_state_ch;
1134 ascw->w.device = device; 1135 ascw->w.device = device;
1135 ascw->done = done; 1136 ascw->done = done;
1136 drbd_queue_work(&device->connection->sender_work, &ascw->w); 1137 drbd_queue_work(&first_peer_device(device)->connection->sender_work, &ascw->w);
1137 } else { 1138 } else {
1138 dev_err(DEV, "Could not kmalloc an ascw\n"); 1139 dev_err(DEV, "Could not kmalloc an ascw\n");
1139 } 1140 }
@@ -1181,7 +1182,7 @@ int drbd_bitmap_io_from_worker(struct drbd_device *device,
1181{ 1182{
1182 int rv; 1183 int rv;
1183 1184
1184 D_ASSERT(current == device->connection->worker.task); 1185 D_ASSERT(current == first_peer_device(device)->connection->worker.task);
1185 1186
1186 /* open coded non-blocking drbd_suspend_io(device); */ 1187 /* open coded non-blocking drbd_suspend_io(device); */
1187 set_bit(SUSPEND_IO, &device->flags); 1188 set_bit(SUSPEND_IO, &device->flags);
@@ -1228,7 +1229,7 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
1228 state change. This function might sleep */ 1229 state change. This function might sleep */
1229 1230
1230 if (ns.susp_nod) { 1231 if (ns.susp_nod) {
1231 struct drbd_connection *connection = device->connection; 1232 struct drbd_connection *connection = first_peer_device(device)->connection;
1232 enum drbd_req_event what = NOTHING; 1233 enum drbd_req_event what = NOTHING;
1233 1234
1234 spin_lock_irq(&connection->req_lock); 1235 spin_lock_irq(&connection->req_lock);
@@ -1250,7 +1251,7 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
1250 } 1251 }
1251 1252
1252 if (ns.susp_fen) { 1253 if (ns.susp_fen) {
1253 struct drbd_connection *connection = device->connection; 1254 struct drbd_connection *connection = first_peer_device(device)->connection;
1254 1255
1255 spin_lock_irq(&connection->req_lock); 1256 spin_lock_irq(&connection->req_lock);
1256 if (connection->susp_fen && conn_lowest_conn(connection) >= C_CONNECTED) { 1257 if (connection->susp_fen && conn_lowest_conn(connection) >= C_CONNECTED) {
@@ -1277,7 +1278,7 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
1277 * which is unexpected. */ 1278 * which is unexpected. */
1278 if ((os.conn != C_SYNC_SOURCE && os.conn != C_PAUSED_SYNC_S) && 1279 if ((os.conn != C_SYNC_SOURCE && os.conn != C_PAUSED_SYNC_S) &&
1279 (ns.conn == C_SYNC_SOURCE || ns.conn == C_PAUSED_SYNC_S) && 1280 (ns.conn == C_SYNC_SOURCE || ns.conn == C_PAUSED_SYNC_S) &&
1280 device->connection->agreed_pro_version >= 96 && get_ldev(device)) { 1281 first_peer_device(device)->connection->agreed_pro_version >= 96 && get_ldev(device)) {
1281 drbd_gen_and_send_sync_uuid(device); 1282 drbd_gen_and_send_sync_uuid(device);
1282 put_ldev(device); 1283 put_ldev(device);
1283 } 1284 }
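
One detail worth pulling out of the drbd_state.c hunks: __drbd_set_state() keeps mirroring the three suspend bits of the new state onto the connection, and after_state_ch() later reads them back under the connection's req_lock to decide whether frozen IO may resume. The round trip, in skeleton form (connection being first_peer_device(device)->connection, as everywhere else in this patch):

	/* in __drbd_set_state(), under req_lock: */
	device->state.i = ns.i;
	connection->susp     = ns.susp;
	connection->susp_nod = ns.susp_nod;
	connection->susp_fen = ns.susp_fen;

	/* in after_state_ch(), possibly much later: */
	spin_lock_irq(&connection->req_lock);
	if (connection->susp_fen && conn_lowest_conn(connection) >= C_CONNECTED) {
		/* peer reachable again -> resume the IO we froze */
	}
	spin_unlock_irq(&connection->req_lock);
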
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 5b3f12a42230..aa1ad7f39786 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -102,16 +102,16 @@ static void drbd_endio_read_sec_final(struct drbd_peer_request *peer_req) __rele
102 unsigned long flags = 0; 102 unsigned long flags = 0;
103 struct drbd_device *device = peer_req->w.device; 103 struct drbd_device *device = peer_req->w.device;
104 104
105 spin_lock_irqsave(&device->connection->req_lock, flags); 105 spin_lock_irqsave(&first_peer_device(device)->connection->req_lock, flags);
106 device->read_cnt += peer_req->i.size >> 9; 106 device->read_cnt += peer_req->i.size >> 9;
107 list_del(&peer_req->w.list); 107 list_del(&peer_req->w.list);
108 if (list_empty(&device->read_ee)) 108 if (list_empty(&device->read_ee))
109 wake_up(&device->ee_wait); 109 wake_up(&device->ee_wait);
110 if (test_bit(__EE_WAS_ERROR, &peer_req->flags)) 110 if (test_bit(__EE_WAS_ERROR, &peer_req->flags))
111 __drbd_chk_io_error(device, DRBD_READ_ERROR); 111 __drbd_chk_io_error(device, DRBD_READ_ERROR);
112 spin_unlock_irqrestore(&device->connection->req_lock, flags); 112 spin_unlock_irqrestore(&first_peer_device(device)->connection->req_lock, flags);
113 113
114 drbd_queue_work(&device->connection->sender_work, &peer_req->w); 114 drbd_queue_work(&first_peer_device(device)->connection->sender_work, &peer_req->w);
115 put_ldev(device); 115 put_ldev(device);
116} 116}
117 117
@@ -134,7 +134,7 @@ static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __rel
134 do_al_complete_io = peer_req->flags & EE_CALL_AL_COMPLETE_IO; 134 do_al_complete_io = peer_req->flags & EE_CALL_AL_COMPLETE_IO;
135 block_id = peer_req->block_id; 135 block_id = peer_req->block_id;
136 136
137 spin_lock_irqsave(&device->connection->req_lock, flags); 137 spin_lock_irqsave(&first_peer_device(device)->connection->req_lock, flags);
138 device->writ_cnt += peer_req->i.size >> 9; 138 device->writ_cnt += peer_req->i.size >> 9;
139 list_move_tail(&peer_req->w.list, &device->done_ee); 139 list_move_tail(&peer_req->w.list, &device->done_ee);
140 140
@@ -150,7 +150,7 @@ static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __rel
150 150
151 if (test_bit(__EE_WAS_ERROR, &peer_req->flags)) 151 if (test_bit(__EE_WAS_ERROR, &peer_req->flags))
152 __drbd_chk_io_error(device, DRBD_WRITE_ERROR); 152 __drbd_chk_io_error(device, DRBD_WRITE_ERROR);
153 spin_unlock_irqrestore(&device->connection->req_lock, flags); 153 spin_unlock_irqrestore(&first_peer_device(device)->connection->req_lock, flags);
154 154
155 if (block_id == ID_SYNCER) 155 if (block_id == ID_SYNCER)
156 drbd_rs_complete_io(device, i.sector); 156 drbd_rs_complete_io(device, i.sector);
@@ -161,7 +161,7 @@ static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __rel
161 if (do_al_complete_io) 161 if (do_al_complete_io)
162 drbd_al_complete_io(device, &i); 162 drbd_al_complete_io(device, &i);
163 163
164 wake_asender(device->connection); 164 wake_asender(first_peer_device(device)->connection);
165 put_ldev(device); 165 put_ldev(device);
166} 166}
167 167
@@ -273,9 +273,9 @@ void drbd_request_endio(struct bio *bio, int error)
273 req->private_bio = ERR_PTR(error); 273 req->private_bio = ERR_PTR(error);
274 274
275 /* not req_mod(), we need irqsave here! */ 275 /* not req_mod(), we need irqsave here! */
276 spin_lock_irqsave(&device->connection->req_lock, flags); 276 spin_lock_irqsave(&first_peer_device(device)->connection->req_lock, flags);
277 __req_mod(req, what, &m); 277 __req_mod(req, what, &m);
278 spin_unlock_irqrestore(&device->connection->req_lock, flags); 278 spin_unlock_irqrestore(&first_peer_device(device)->connection->req_lock, flags);
279 put_ldev(device); 279 put_ldev(device);
280 280
281 if (m.bio) 281 if (m.bio)
@@ -345,12 +345,12 @@ static int w_e_send_csum(struct drbd_work *w, int cancel)
345 if (unlikely((peer_req->flags & EE_WAS_ERROR) != 0)) 345 if (unlikely((peer_req->flags & EE_WAS_ERROR) != 0))
346 goto out; 346 goto out;
347 347
348 digest_size = crypto_hash_digestsize(device->connection->csums_tfm); 348 digest_size = crypto_hash_digestsize(first_peer_device(device)->connection->csums_tfm);
349 digest = kmalloc(digest_size, GFP_NOIO); 349 digest = kmalloc(digest_size, GFP_NOIO);
350 if (digest) { 350 if (digest) {
351 sector_t sector = peer_req->i.sector; 351 sector_t sector = peer_req->i.sector;
352 unsigned int size = peer_req->i.size; 352 unsigned int size = peer_req->i.size;
353 drbd_csum_ee(device, device->connection->csums_tfm, peer_req, digest); 353 drbd_csum_ee(device, first_peer_device(device)->connection->csums_tfm, peer_req, digest);
354 /* Free peer_req and pages before send. 354 /* Free peer_req and pages before send.
355 * In case we block on congestion, we could otherwise run into 355 * In case we block on congestion, we could otherwise run into
356 * some distributed deadlock, if the other side blocks on 356 * some distributed deadlock, if the other side blocks on
@@ -397,9 +397,9 @@ static int read_for_csum(struct drbd_device *device, sector_t sector, int size)
397 goto defer; 397 goto defer;
398 398
399 peer_req->w.cb = w_e_send_csum; 399 peer_req->w.cb = w_e_send_csum;
400 spin_lock_irq(&device->connection->req_lock); 400 spin_lock_irq(&first_peer_device(device)->connection->req_lock);
401 list_add(&peer_req->w.list, &device->read_ee); 401 list_add(&peer_req->w.list, &device->read_ee);
402 spin_unlock_irq(&device->connection->req_lock); 402 spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
403 403
404 atomic_add(size >> 9, &device->rs_sect_ev); 404 atomic_add(size >> 9, &device->rs_sect_ev);
405 if (drbd_submit_peer_request(device, peer_req, READ, DRBD_FAULT_RS_RD) == 0) 405 if (drbd_submit_peer_request(device, peer_req, READ, DRBD_FAULT_RS_RD) == 0)
@@ -409,9 +409,9 @@ static int read_for_csum(struct drbd_device *device, sector_t sector, int size)
409 * because bio_add_page failed (probably broken lower level driver), 409 * because bio_add_page failed (probably broken lower level driver),
410 * retry may or may not help. 410 * retry may or may not help.
411 * If it does not, you may need to force disconnect. */ 411 * If it does not, you may need to force disconnect. */
412 spin_lock_irq(&device->connection->req_lock); 412 spin_lock_irq(&first_peer_device(device)->connection->req_lock);
413 list_del(&peer_req->w.list); 413 list_del(&peer_req->w.list);
414 spin_unlock_irq(&device->connection->req_lock); 414 spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
415 415
416 drbd_free_peer_req(device, peer_req); 416 drbd_free_peer_req(device, peer_req);
417defer: 417defer:
@@ -439,7 +439,7 @@ void resync_timer_fn(unsigned long data)
439 struct drbd_device *device = (struct drbd_device *) data; 439 struct drbd_device *device = (struct drbd_device *) data;
440 440
441 if (list_empty(&device->resync_work.list)) 441 if (list_empty(&device->resync_work.list))
442 drbd_queue_work(&device->connection->sender_work, &device->resync_work); 442 drbd_queue_work(&first_peer_device(device)->connection->sender_work, &device->resync_work);
443} 443}
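
resync_timer_fn() above is the standard DRBD deferred-work pattern in miniature: work that must not run in the current context (timer, endio, receiver) is wrapped in a struct drbd_work with a callback and queued on the connection's sender_work for the worker thread. The recurring shape, as also used for w_ov_finished and w_resync_finished in these hunks (w_example_cb is a hypothetical callback name):

	w->cb = w_example_cb;	/* int (*cb)(struct drbd_work *, int cancel) */
	w->device = device;
	drbd_queue_work(&first_peer_device(device)->connection->sender_work, w);
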
444 444
445static void fifo_set(struct fifo_buffer *fb, int value) 445static void fifo_set(struct fifo_buffer *fb, int value)
@@ -597,15 +597,15 @@ int w_make_resync_request(struct drbd_work *w, int cancel)
597 597
598 for (i = 0; i < number; i++) { 598 for (i = 0; i < number; i++) {
599 /* Stop generating RS requests, when half of the send buffer is filled */ 599 /* Stop generating RS requests, when half of the send buffer is filled */
600 mutex_lock(&device->connection->data.mutex); 600 mutex_lock(&first_peer_device(device)->connection->data.mutex);
601 if (device->connection->data.socket) { 601 if (first_peer_device(device)->connection->data.socket) {
602 queued = device->connection->data.socket->sk->sk_wmem_queued; 602 queued = first_peer_device(device)->connection->data.socket->sk->sk_wmem_queued;
603 sndbuf = device->connection->data.socket->sk->sk_sndbuf; 603 sndbuf = first_peer_device(device)->connection->data.socket->sk->sk_sndbuf;
604 } else { 604 } else {
605 queued = 1; 605 queued = 1;
606 sndbuf = 0; 606 sndbuf = 0;
607 } 607 }
608 mutex_unlock(&device->connection->data.mutex); 608 mutex_unlock(&first_peer_device(device)->connection->data.mutex);
609 if (queued > sndbuf / 2) 609 if (queued > sndbuf / 2)
610 goto requeue; 610 goto requeue;
611 611
@@ -675,7 +675,8 @@ next_sector:
675 /* adjust very last sectors, in case we are oddly sized */ 675 /* adjust very last sectors, in case we are oddly sized */
676 if (sector + (size>>9) > capacity) 676 if (sector + (size>>9) > capacity)
677 size = (capacity-sector)<<9; 677 size = (capacity-sector)<<9;
678 if (device->connection->agreed_pro_version >= 89 && device->connection->csums_tfm) { 678 if (first_peer_device(device)->connection->agreed_pro_version >= 89 &&
679 first_peer_device(device)->connection->csums_tfm) {
679 switch (read_for_csum(device, sector, size)) { 680 switch (read_for_csum(device, sector, size)) {
680 case -EIO: /* Disk failure */ 681 case -EIO: /* Disk failure */
681 put_ldev(device); 682 put_ldev(device);
@@ -800,7 +801,7 @@ static int w_resync_finished(struct drbd_work *w, int cancel)
800 801
801static void ping_peer(struct drbd_device *device) 802static void ping_peer(struct drbd_device *device)
802{ 803{
803 struct drbd_connection *connection = device->connection; 804 struct drbd_connection *connection = first_peer_device(device)->connection;
804 805
805 clear_bit(GOT_PING_ACK, &connection->flags); 806 clear_bit(GOT_PING_ACK, &connection->flags);
806 request_ping(connection); 807 request_ping(connection);
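[Editor's note] The hunk cuts ping_peer() off after request_ping(). Assuming the body is otherwise unchanged from earlier drbd_worker.c, the function then blocks until the ack flag is set or the connection degrades; a hedged sketch of that tail:

wait_event(connection->ping_wait,
	   test_bit(GOT_PING_ACK, &connection->flags) ||
	   device->state.conn < C_CONNECTED);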
@@ -831,7 +832,7 @@ int drbd_resync_finished(struct drbd_device *device)
831 if (w) { 832 if (w) {
832 w->cb = w_resync_finished; 833 w->cb = w_resync_finished;
833 w->device = device; 834 w->device = device;
834 drbd_queue_work(&device->connection->sender_work, w); 835 drbd_queue_work(&first_peer_device(device)->connection->sender_work, w);
835 return 1; 836 return 1;
836 } 837 }
837 dev_err(DEV, "Warn failed to drbd_rs_del_all() and to kmalloc(w).\n"); 838 dev_err(DEV, "Warn failed to drbd_rs_del_all() and to kmalloc(w).\n");
@@ -854,7 +855,7 @@ int drbd_resync_finished(struct drbd_device *device)
854 855
855 ping_peer(device); 856 ping_peer(device);
856 857
857 spin_lock_irq(&device->connection->req_lock); 858 spin_lock_irq(&first_peer_device(device)->connection->req_lock);
858 os = drbd_read_state(device); 859 os = drbd_read_state(device);
859 860
860 verify_done = (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T); 861 verify_done = (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T);
@@ -885,7 +886,7 @@ int drbd_resync_finished(struct drbd_device *device)
885 if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T) 886 if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T)
886 khelper_cmd = "after-resync-target"; 887 khelper_cmd = "after-resync-target";
887 888
888 if (device->connection->csums_tfm && device->rs_total) { 889 if (first_peer_device(device)->connection->csums_tfm && device->rs_total) {
889 const unsigned long s = device->rs_same_csum; 890 const unsigned long s = device->rs_same_csum;
890 const unsigned long t = device->rs_total; 891 const unsigned long t = device->rs_total;
891 const int ratio = 892 const int ratio =
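[Editor's note] The hunk ends mid-statement at "const int ratio =". Assuming the continuation matches the long-standing code, it computes an overflow-safe integer percentage of blocks whose checksums matched: multiply first when the total is small, divide first when it is large. Sketch with a worked case:

/* s = blocks with equal checksums, t = total resync blocks.
 * t < 100000 guarantees s*100 fits comfortably in unsigned long;
 * otherwise divide t first, e.g. s = 40000, t = 200000:
 *   s / (t/100) = 40000 / 2000 = 20 (%). */
const int ratio =
	(t == 0)     ? 0 :
	(t < 100000) ? (int)((s * 100) / t)
		     : (int)(s / (t / 100));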
@@ -943,7 +944,7 @@ int drbd_resync_finished(struct drbd_device *device)
943 944
944 _drbd_set_state(device, ns, CS_VERBOSE, NULL); 945 _drbd_set_state(device, ns, CS_VERBOSE, NULL);
945out_unlock: 946out_unlock:
946 spin_unlock_irq(&device->connection->req_lock); 947 spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
947 put_ldev(device); 948 put_ldev(device);
948out: 949out:
949 device->rs_total = 0; 950 device->rs_total = 0;
@@ -970,9 +971,9 @@ static void move_to_net_ee_or_free(struct drbd_device *device, struct drbd_peer_
970 int i = (peer_req->i.size + PAGE_SIZE -1) >> PAGE_SHIFT; 971 int i = (peer_req->i.size + PAGE_SIZE -1) >> PAGE_SHIFT;
971 atomic_add(i, &device->pp_in_use_by_net); 972 atomic_add(i, &device->pp_in_use_by_net);
972 atomic_sub(i, &device->pp_in_use); 973 atomic_sub(i, &device->pp_in_use);
973 spin_lock_irq(&device->connection->req_lock); 974 spin_lock_irq(&first_peer_device(device)->connection->req_lock);
974 list_add_tail(&peer_req->w.list, &device->net_ee); 975 list_add_tail(&peer_req->w.list, &device->net_ee);
975 spin_unlock_irq(&device->connection->req_lock); 976 spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
976 wake_up(&drbd_pp_wait); 977 wake_up(&drbd_pp_wait);
977 } else 978 } else
978 drbd_free_peer_req(device, peer_req); 979 drbd_free_peer_req(device, peer_req);
@@ -1096,13 +1097,13 @@ int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
1096 /* quick hack to try to avoid a race against reconfiguration. 1097 /* quick hack to try to avoid a race against reconfiguration.
1097 * a real fix would be much more involved, 1098 * a real fix would be much more involved,
1098 * introducing more locking mechanisms */ 1099 * introducing more locking mechanisms */
1099 if (device->connection->csums_tfm) { 1100 if (first_peer_device(device)->connection->csums_tfm) {
1100 digest_size = crypto_hash_digestsize(device->connection->csums_tfm); 1101 digest_size = crypto_hash_digestsize(first_peer_device(device)->connection->csums_tfm);
1101 D_ASSERT(digest_size == di->digest_size); 1102 D_ASSERT(digest_size == di->digest_size);
1102 digest = kmalloc(digest_size, GFP_NOIO); 1103 digest = kmalloc(digest_size, GFP_NOIO);
1103 } 1104 }
1104 if (digest) { 1105 if (digest) {
1105 drbd_csum_ee(device, device->connection->csums_tfm, peer_req, digest); 1106 drbd_csum_ee(device, first_peer_device(device)->connection->csums_tfm, peer_req, digest);
1106 eq = !memcmp(digest, di->digest, digest_size); 1107 eq = !memcmp(digest, di->digest, digest_size);
1107 kfree(digest); 1108 kfree(digest);
1108 } 1109 }
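[Editor's note] The digest comparison above is what makes checksum-based resync pay off: matching digests mean the peer's block is already identical, so it is acknowledged as in sync and never retransmitted. A simplified sketch of the branch taken on eq (drbd function names, exact error handling elided; treat as an assumption about the surrounding code):

if (eq) {
	/* peer already has identical data: no payload on the wire */
	drbd_set_in_sync(device, peer_req->i.sector, peer_req->i.size);
	device->rs_same_csum++;
	err = drbd_send_ack(device, P_RS_IS_IN_SYNC, peer_req);
} else {
	/* mismatch: send the real block back as resync data */
	err = drbd_send_block(device, P_RS_DATA_REPLY, peer_req);
}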
@@ -1146,7 +1147,7 @@ int w_e_end_ov_req(struct drbd_work *w, int cancel)
1146 if (unlikely(cancel)) 1147 if (unlikely(cancel))
1147 goto out; 1148 goto out;
1148 1149
1149 digest_size = crypto_hash_digestsize(device->connection->verify_tfm); 1150 digest_size = crypto_hash_digestsize(first_peer_device(device)->connection->verify_tfm);
1150 digest = kmalloc(digest_size, GFP_NOIO); 1151 digest = kmalloc(digest_size, GFP_NOIO);
1151 if (!digest) { 1152 if (!digest) {
1152 err = 1; /* terminate the connection in case the allocation failed */ 1153 err = 1; /* terminate the connection in case the allocation failed */
@@ -1154,7 +1155,7 @@ int w_e_end_ov_req(struct drbd_work *w, int cancel)
1154 } 1155 }
1155 1156
1156 if (likely(!(peer_req->flags & EE_WAS_ERROR))) 1157 if (likely(!(peer_req->flags & EE_WAS_ERROR)))
1157 drbd_csum_ee(device, device->connection->verify_tfm, peer_req, digest); 1158 drbd_csum_ee(device, first_peer_device(device)->connection->verify_tfm, peer_req, digest);
1158 else 1159 else
1159 memset(digest, 0, digest_size); 1160 memset(digest, 0, digest_size);
1160 1161
@@ -1217,10 +1218,10 @@ int w_e_end_ov_reply(struct drbd_work *w, int cancel)
1217 di = peer_req->digest; 1218 di = peer_req->digest;
1218 1219
1219 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) { 1220 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
1220 digest_size = crypto_hash_digestsize(device->connection->verify_tfm); 1221 digest_size = crypto_hash_digestsize(first_peer_device(device)->connection->verify_tfm);
1221 digest = kmalloc(digest_size, GFP_NOIO); 1222 digest = kmalloc(digest_size, GFP_NOIO);
1222 if (digest) { 1223 if (digest) {
1223 drbd_csum_ee(device, device->connection->verify_tfm, peer_req, digest); 1224 drbd_csum_ee(device, first_peer_device(device)->connection->verify_tfm, peer_req, digest);
1224 1225
1225 D_ASSERT(digest_size == di->digest_size); 1226 D_ASSERT(digest_size == di->digest_size);
1226 eq = !memcmp(digest, di->digest, digest_size); 1227 eq = !memcmp(digest, di->digest, digest_size);
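[Editor's note] w_e_end_ov_req() sends only the digest; w_e_end_ov_reply() above recomputes it over the local block and compares. Assuming the remainder of the function is unchanged by this patch, a mismatch marks the interval out of sync before the verdict is reported back, roughly:

if (!eq)
	drbd_ov_out_of_sync_found(device, sector, size);
else
	ov_out_of_sync_print(device);

err = drbd_send_ack_ex(device, P_OV_RESULT, sector, size,
		       eq ? ID_IN_SYNC : ID_OUT_OF_SYNC);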
@@ -1297,7 +1298,7 @@ int w_send_write_hint(struct drbd_work *w, int cancel)
1297 1298
1298 if (cancel) 1299 if (cancel)
1299 return 0; 1300 return 0;
1300 sock = &device->connection->data; 1301 sock = &first_peer_device(device)->connection->data;
1301 if (!drbd_prepare_command(device, sock)) 1302 if (!drbd_prepare_command(device, sock))
1302 return -EIO; 1303 return -EIO;
1303 return drbd_send_command(device, sock, P_UNPLUG_REMOTE, 0, NULL, 0); 1304 return drbd_send_command(device, sock, P_UNPLUG_REMOTE, 0, NULL, 0);
@@ -1328,7 +1329,7 @@ int w_send_out_of_sync(struct drbd_work *w, int cancel)
1328{ 1329{
1329 struct drbd_request *req = container_of(w, struct drbd_request, w); 1330 struct drbd_request *req = container_of(w, struct drbd_request, w);
1330 struct drbd_device *device = w->device; 1331 struct drbd_device *device = w->device;
1331 struct drbd_connection *connection = device->connection; 1332 struct drbd_connection *connection = first_peer_device(device)->connection;
1332 int err; 1333 int err;
1333 1334
1334 if (unlikely(cancel)) { 1335 if (unlikely(cancel)) {
@@ -1358,7 +1359,7 @@ int w_send_dblock(struct drbd_work *w, int cancel)
1358{ 1359{
1359 struct drbd_request *req = container_of(w, struct drbd_request, w); 1360 struct drbd_request *req = container_of(w, struct drbd_request, w);
1360 struct drbd_device *device = w->device; 1361 struct drbd_device *device = w->device;
1361 struct drbd_connection *connection = device->connection; 1362 struct drbd_connection *connection = first_peer_device(device)->connection;
1362 int err; 1363 int err;
1363 1364
1364 if (unlikely(cancel)) { 1365 if (unlikely(cancel)) {
@@ -1386,7 +1387,7 @@ int w_send_read_req(struct drbd_work *w, int cancel)
1386{ 1387{
1387 struct drbd_request *req = container_of(w, struct drbd_request, w); 1388 struct drbd_request *req = container_of(w, struct drbd_request, w);
1388 struct drbd_device *device = w->device; 1389 struct drbd_device *device = w->device;
1389 struct drbd_connection *connection = device->connection; 1390 struct drbd_connection *connection = first_peer_device(device)->connection;
1390 int err; 1391 int err;
1391 1392
1392 if (unlikely(cancel)) { 1393 if (unlikely(cancel)) {
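[Editor's note] w_send_out_of_sync(), w_send_dblock() and w_send_read_req() all open the same way: the drbd_work is embedded in struct drbd_request, so container_of() recovers the request from the work pointer without any extra lookup. Generic sketch of the idiom (struct and callback names hypothetical):

#include <linux/kernel.h>	/* container_of() */

struct example_request {
	struct drbd_work w;	/* embedded, handed to the worker */
	/* ... request state ... */
};

static int example_cb(struct drbd_work *w, int cancel)
{
	struct example_request *req =
		container_of(w, struct example_request, w);
	/* ... operate on req ... */
	return 0;
}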
@@ -1581,7 +1582,7 @@ void start_resync_timer_fn(unsigned long data)
1581{ 1582{
1582 struct drbd_device *device = (struct drbd_device *) data; 1583 struct drbd_device *device = (struct drbd_device *) data;
1583 1584
1584 drbd_queue_work(&device->connection->sender_work, &device->start_resync_work); 1585 drbd_queue_work(&first_peer_device(device)->connection->sender_work, &device->start_resync_work);
1585} 1586}
1586 1587
1587int w_start_resync(struct drbd_work *w, int cancel) 1588int w_start_resync(struct drbd_work *w, int cancel)
@@ -1628,7 +1629,7 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side)
1628 if (r > 0) { 1629 if (r > 0) {
1629 dev_info(DEV, "before-resync-target handler returned %d, " 1630 dev_info(DEV, "before-resync-target handler returned %d, "
1630 "dropping connection.\n", r); 1631 "dropping connection.\n", r);
1631 conn_request_state(device->connection, NS(conn, C_DISCONNECTING), CS_HARD); 1632 conn_request_state(first_peer_device(device)->connection, NS(conn, C_DISCONNECTING), CS_HARD);
1632 return; 1633 return;
1633 } 1634 }
1634 } else /* C_SYNC_SOURCE */ { 1635 } else /* C_SYNC_SOURCE */ {
@@ -1641,14 +1642,15 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side)
1641 } else { 1642 } else {
1642 dev_info(DEV, "before-resync-source handler returned %d, " 1643 dev_info(DEV, "before-resync-source handler returned %d, "
1643 "dropping connection.\n", r); 1644 "dropping connection.\n", r);
1644 conn_request_state(device->connection, NS(conn, C_DISCONNECTING), CS_HARD); 1645 conn_request_state(first_peer_device(device)->connection,
1646 NS(conn, C_DISCONNECTING), CS_HARD);
1645 return; 1647 return;
1646 } 1648 }
1647 } 1649 }
1648 } 1650 }
1649 } 1651 }
1650 1652
1651 if (current == device->connection->worker.task) { 1653 if (current == first_peer_device(device)->connection->worker.task) {
1652 /* The worker should not sleep waiting for state_mutex, 1654 /* The worker should not sleep waiting for state_mutex,
1653 that can take long */ 1655 that can take long */
1654 if (!mutex_trylock(device->state_mutex)) { 1656 if (!mutex_trylock(device->state_mutex)) {
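[Editor's note] The "current == ...worker.task" test exists because the worker must never block on state_mutex: it would stall the very work queue that could release it. Assuming the body matches the existing code, the fallback is trylock-or-retry via the resync timer rather than sleeping:

if (!mutex_trylock(device->state_mutex)) {
	/* contended: retry shortly from the timer instead of sleeping */
	set_bit(B_RS_H_DONE, &device->flags);
	device->start_resync_timer.expires = jiffies + HZ/5;
	add_timer(&device->start_resync_timer);
	return;
}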
@@ -1727,10 +1729,12 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side)
1727 * drbd_resync_finished from here in that case. 1729 * drbd_resync_finished from here in that case.
1728 * We drbd_gen_and_send_sync_uuid here for protocol < 96, 1730 * We drbd_gen_and_send_sync_uuid here for protocol < 96,
1729 * and from after_state_ch otherwise. */ 1731 * and from after_state_ch otherwise. */
1730 if (side == C_SYNC_SOURCE && device->connection->agreed_pro_version < 96) 1732 if (side == C_SYNC_SOURCE &&
1733 first_peer_device(device)->connection->agreed_pro_version < 96)
1731 drbd_gen_and_send_sync_uuid(device); 1734 drbd_gen_and_send_sync_uuid(device);
1732 1735
1733 if (device->connection->agreed_pro_version < 95 && device->rs_total == 0) { 1736 if (first_peer_device(device)->connection->agreed_pro_version < 95 &&
1737 device->rs_total == 0) {
1734 /* This still has a race (about when exactly the peers 1738 /* This still has a race (about when exactly the peers
1735 * detect connection loss) that can lead to a full sync 1739 * detect connection loss) that can lead to a full sync
1736 * on next handshake. In 8.3.9 we fixed this with explicit 1740 * on next handshake. In 8.3.9 we fixed this with explicit
@@ -1746,7 +1750,7 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side)
1746 int timeo; 1750 int timeo;
1747 1751
1748 rcu_read_lock(); 1752 rcu_read_lock();
1749 nc = rcu_dereference(device->connection->net_conf); 1753 nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
1750 timeo = nc->ping_int * HZ + nc->ping_timeo * HZ / 9; 1754 timeo = nc->ping_int * HZ + nc->ping_timeo * HZ / 9;
1751 rcu_read_unlock(); 1755 rcu_read_unlock();
1752 schedule_timeout_interruptible(timeo); 1756 schedule_timeout_interruptible(timeo);
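[Editor's note] For scale: ping_int is configured in seconds and ping_timeo in tenths of a second (defaults 10 and 5, if I recall the drbd defaults correctly), so the computed wait is roughly:

/* assuming nc->ping_int = 10 and nc->ping_timeo = 5 (defaults):
 *   timeo = 10*HZ + 5*HZ/9  ~= 10.55 s
 * dividing by 9 instead of 10 pads the 0.1 s units by ~11%. */
timeo = nc->ping_int * HZ + nc->ping_timeo * HZ / 9;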