author     Andreas Gruenbacher <agruen@linbit.com>	2011-05-30 10:32:41 -0400
committer  Philipp Reisner <philipp.reisner@linbit.com>	2014-02-17 10:44:47 -0500
commit     bde89a9e151b482765ed40e04307a6190236b387 (patch)
tree       1154a0261466fa426dede7cce2b9370d48133b61
parent     b30ab7913b0a7b1d3b1091c8cb3abb1a9f1e0824 (diff)

drbd: Rename drbd_tconn -> drbd_connection

sed -i -e 's:all_tconn:connections:g' -e 's:tconn:connection:g'

Signed-off-by: Andreas Gruenbacher <agruen@linbit.com>
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
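The rename was generated mechanically with the sed expressions quoted above.
As a rough illustration (a hypothetical invocation, not recorded in the
commit), limiting the substitution to the tracked DRBD sources might look
like this:

    # Hypothetical: run the recorded sed expressions over the tracked
    # DRBD sources only. Expression order matters: rewriting "tconn"
    # first would turn "all_tconn" into "all_connection" and leave the
    # first expression with nothing to match.
    git ls-files -- drivers/block/drbd |
        xargs sed -i -e 's:all_tconn:connections:g' -e 's:tconn:connection:g'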
-rw-r--r--  drivers/block/drbd/drbd_actlog.c    |    8
-rw-r--r--  drivers/block/drbd/drbd_bitmap.c    |    8
-rw-r--r--  drivers/block/drbd/drbd_int.h       |  134
-rw-r--r--  drivers/block/drbd/drbd_main.c      |  646
-rw-r--r--  drivers/block/drbd/drbd_nl.c        |  519
-rw-r--r--  drivers/block/drbd/drbd_proc.c      |    6
-rw-r--r--  drivers/block/drbd/drbd_receiver.c  | 1020
-rw-r--r--  drivers/block/drbd/drbd_req.c       |   83
-rw-r--r--  drivers/block/drbd/drbd_req.h       |   10
-rw-r--r--  drivers/block/drbd/drbd_state.c     |  274
-rw-r--r--  drivers/block/drbd/drbd_state.h     |   20
-rw-r--r--  drivers/block/drbd/drbd_worker.c    |  166
12 files changed, 1447 insertions(+), 1447 deletions(-)
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index b33836d72f3c..8b507455f71e 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -315,7 +315,7 @@ void drbd_al_begin_io_commit(struct drbd_device *device, bool delegate)
 {
 	bool locked = false;
 
-	BUG_ON(delegate && current == device->tconn->worker.task);
+	BUG_ON(delegate && current == device->connection->worker.task);
 
 	/* Serialize multiple transactions.
 	 * This uses test_and_set_bit, memory barrier is implicit.
@@ -354,7 +354,7 @@ void drbd_al_begin_io_commit(struct drbd_device *device, bool delegate)
  */
 void drbd_al_begin_io(struct drbd_device *device, struct drbd_interval *i, bool delegate)
 {
-	BUG_ON(delegate && current == device->tconn->worker.task);
+	BUG_ON(delegate && current == device->connection->worker.task);
 
 	if (drbd_al_begin_io_prepare(device, i))
 		drbd_al_begin_io_commit(device, delegate);
@@ -614,7 +614,7 @@ static int al_write_transaction(struct drbd_device *device, bool delegate)
 		init_completion(&al_work.event);
 		al_work.w.cb = w_al_write_transaction;
 		al_work.w.device = device;
-		drbd_queue_work_front(&device->tconn->sender_work, &al_work.w);
+		drbd_queue_work_front(&device->connection->sender_work, &al_work.w);
 		wait_for_completion(&al_work.event);
 		return al_work.err;
 	} else
@@ -796,7 +796,7 @@ static void drbd_try_clear_on_disk_bm(struct drbd_device *device, sector_t secto
 		udw->enr = ext->lce.lc_number;
 		udw->w.cb = w_update_odbm;
 		udw->w.device = device;
-		drbd_queue_work_front(&device->tconn->sender_work, &udw->w);
+		drbd_queue_work_front(&device->connection->sender_work, &udw->w);
 	} else {
 		dev_warn(DEV, "Could not kmalloc an udw\n");
 	}
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index 001bf43dfc8f..cd3e0dea7a5d 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -119,9 +119,9 @@ static void __bm_print_lock_info(struct drbd_device *device, const char *func)
 	if (!__ratelimit(&drbd_ratelimit_state))
 		return;
 	dev_err(DEV, "FIXME %s in %s, bitmap locked for '%s' by %s\n",
-		drbd_task_to_thread_name(device->tconn, current),
+		drbd_task_to_thread_name(device->connection, current),
 		func, b->bm_why ?: "?",
-		drbd_task_to_thread_name(device->tconn, b->bm_task));
+		drbd_task_to_thread_name(device->connection, b->bm_task));
 }
 
 void drbd_bm_lock(struct drbd_device *device, char *why, enum bm_flag flags)
@@ -138,9 +138,9 @@ void drbd_bm_lock(struct drbd_device *device, char *why, enum bm_flag flags)
 
 	if (trylock_failed) {
 		dev_warn(DEV, "%s going to '%s' but bitmap already locked for '%s' by %s\n",
-			 drbd_task_to_thread_name(device->tconn, current),
+			 drbd_task_to_thread_name(device->connection, current),
 			 why, b->bm_why ?: "?",
-			 drbd_task_to_thread_name(device->tconn, b->bm_task));
+			 drbd_task_to_thread_name(device->connection, b->bm_task));
 		mutex_lock(&b->bm_change);
 	}
 	if (BM_LOCKED_MASK & b->bm_flags)
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index b871c34f0107..32517a0cbc62 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -98,7 +98,7 @@ extern char usermode_helper[];
 #define UUID_NEW_BM_OFFSET ((u64)0x0001000000000000ULL)
 
 struct drbd_device;
-struct drbd_tconn;
+struct drbd_connection;
 
 
 /* to shorten dev_warn(DEV, "msg"); and relatives statements */
@@ -167,7 +167,7 @@ drbd_insert_fault(struct drbd_device *device, unsigned int type) {
 
 extern struct ratelimit_state drbd_ratelimit_state;
 extern struct idr minors; /* RCU, updates: genl_lock() */
-extern struct list_head drbd_tconns; /* RCU, updates: genl_lock() */
+extern struct list_head drbd_connections; /* RCU, updates: genl_lock() */
 
 extern const char *cmdname(enum drbd_packet cmd);
 
@@ -211,7 +211,7 @@ static inline void bm_xfer_ctx_bit_to_word_offset(struct bm_xfer_ctx *c)
 #endif
 }
 
-extern unsigned int drbd_header_size(struct drbd_tconn *tconn);
+extern unsigned int drbd_header_size(struct drbd_connection *connection);
 
 /**********************************************************************/
 enum drbd_thread_state {
@@ -227,7 +227,7 @@ struct drbd_thread {
 	struct completion stop;
 	enum drbd_thread_state t_state;
 	int (*function) (struct drbd_thread *);
-	struct drbd_tconn *tconn;
+	struct drbd_connection *connection;
 	int reset_cpu_mask;
 	char name[9];
 };
@@ -247,7 +247,7 @@ struct drbd_work {
 	int (*cb)(struct drbd_work *, int cancel);
 	union {
 		struct drbd_device *device;
-		struct drbd_tconn *tconn;
+		struct drbd_connection *connection;
 	};
 };
 
@@ -289,7 +289,7 @@ struct drbd_request {
 };
 
 struct drbd_epoch {
-	struct drbd_tconn *tconn;
+	struct drbd_connection *connection;
 	struct list_head list;
 	unsigned int barrier_nr;
 	atomic_t epoch_size; /* increased on every request added. */
@@ -483,7 +483,7 @@ struct drbd_backing_dev {
 	struct block_device *backing_bdev;
 	struct block_device *md_bdev;
 	struct drbd_md md;
-	struct disk_conf *disk_conf; /* RCU, for updates: device->tconn->conf_update */
+	struct disk_conf *disk_conf; /* RCU, for updates: device->connection->conf_update */
 	sector_t known_size; /* last known size of that backing device */
 };
 
@@ -514,7 +514,7 @@ struct fifo_buffer {
 };
 extern struct fifo_buffer *fifo_alloc(int fifo_size);
 
-/* flag bits per tconn */
+/* flag bits per connection */
 enum {
 	NET_CONGESTED,		/* The data socket is congested */
 	RESOLVE_CONFLICTS,	/* Set on one node, cleared on the peer! */
@@ -536,11 +536,11 @@ enum {
 	DISCONNECT_SENT,
 };
 
-struct drbd_tconn {			/* is a resource from the config file */
+struct drbd_connection {		/* is a resource from the config file */
 	char *name;			/* Resource name */
-	struct list_head all_tconn;	/* linked on global drbd_tconns */
+	struct list_head connections;	/* linked on global drbd_connections */
 	struct kref kref;
-	struct idr volumes;		/* <tconn, vnr> to device mapping */
+	struct idr volumes;		/* <connection, vnr> to device mapping */
 	enum drbd_conns cstate;		/* Only C_STANDALONE to C_WF_REPORT_PARAMS */
 	unsigned susp:1;		/* IO suspended by user */
 	unsigned susp_nod:1;		/* IO suspended because no data */
@@ -570,7 +570,7 @@ struct drbd_tconn { /* is a resource from the config file */
 	struct list_head transfer_log;	/* all requests not yet fully processed */
 
 	struct crypto_hash *cram_hmac_tfm;
-	struct crypto_hash *integrity_tfm;  /* checksums we compute, updates protected by tconn->data->mutex */
+	struct crypto_hash *integrity_tfm;  /* checksums we compute, updates protected by connection->data->mutex */
 	struct crypto_hash *peer_integrity_tfm;  /* checksums we verify, only accessed from receiver thread */
 	struct crypto_hash *csums_tfm;
 	struct crypto_hash *verify_tfm;
@@ -618,7 +618,7 @@ struct submit_worker {
 };
 
 struct drbd_device {
-	struct drbd_tconn *tconn;
+	struct drbd_connection *connection;
 	int vnr;			/* volume number within the connection */
 	struct kref kref;
 
@@ -744,7 +744,7 @@ struct drbd_device {
 	struct bm_io_work bm_io_work;
 	u64 ed_uuid;			/* UUID of the exposed data */
 	struct mutex own_state_mutex;
-	struct mutex *state_mutex;	/* either own_state_mutex or device->tconn->cstate_mutex */
+	struct mutex *state_mutex;	/* either own_state_mutex or device->connection->cstate_mutex */
 	char congestion_reason;		/* Why we where congested... */
 	atomic_t rs_sect_in;		/* for incoming resync data rate, SyncTarget */
 	atomic_t rs_sect_ev;		/* for submitted resync data rate, both */
@@ -752,7 +752,7 @@ struct drbd_device {
 	int rs_last_events;	/* counter of read or write "events" (unit sectors)
 				 * on the lower level device when we last looked. */
 	int c_sync_rate;	/* current resync rate after syncer throttle magic */
-	struct fifo_buffer *rs_plan_s;	/* correction values of resync planer (RCU, tconn->conn_update) */
+	struct fifo_buffer *rs_plan_s;	/* correction values of resync planer (RCU, connection->conn_update) */
 	int rs_in_flight;	/* resync sectors in flight (to proxy, in proxy and from proxy) */
 	atomic_t ap_in_flight;	/* App sectors in flight (waiting for ack) */
 	unsigned int peer_max_bio_size;
@@ -773,9 +773,9 @@ static inline unsigned int device_to_minor(struct drbd_device *device)
 	return device->minor;
 }
 
-static inline struct drbd_device *vnr_to_device(struct drbd_tconn *tconn, int vnr)
+static inline struct drbd_device *vnr_to_device(struct drbd_connection *connection, int vnr)
 {
-	return (struct drbd_device *)idr_find(&tconn->volumes, vnr);
+	return (struct drbd_device *)idr_find(&connection->volumes, vnr);
 }
 
 /*
@@ -792,25 +792,25 @@ enum dds_flags {
 extern void drbd_init_set_defaults(struct drbd_device *device);
 extern int drbd_thread_start(struct drbd_thread *thi);
 extern void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait);
-extern char *drbd_task_to_thread_name(struct drbd_tconn *tconn, struct task_struct *task);
+extern char *drbd_task_to_thread_name(struct drbd_connection *connection, struct task_struct *task);
 #ifdef CONFIG_SMP
 extern void drbd_thread_current_set_cpu(struct drbd_thread *thi);
-extern void drbd_calc_cpu_mask(struct drbd_tconn *tconn);
+extern void drbd_calc_cpu_mask(struct drbd_connection *connection);
 #else
 #define drbd_thread_current_set_cpu(A) ({})
 #define drbd_calc_cpu_mask(A) ({})
 #endif
-extern void tl_release(struct drbd_tconn *, unsigned int barrier_nr,
+extern void tl_release(struct drbd_connection *, unsigned int barrier_nr,
 		       unsigned int set_size);
-extern void tl_clear(struct drbd_tconn *);
-extern void drbd_free_sock(struct drbd_tconn *tconn);
-extern int drbd_send(struct drbd_tconn *tconn, struct socket *sock,
+extern void tl_clear(struct drbd_connection *);
+extern void drbd_free_sock(struct drbd_connection *connection);
+extern int drbd_send(struct drbd_connection *connection, struct socket *sock,
 		     void *buf, size_t size, unsigned msg_flags);
-extern int drbd_send_all(struct drbd_tconn *, struct socket *, void *, size_t,
+extern int drbd_send_all(struct drbd_connection *, struct socket *, void *, size_t,
 			 unsigned);
 
-extern int __drbd_send_protocol(struct drbd_tconn *tconn, enum drbd_packet cmd);
-extern int drbd_send_protocol(struct drbd_tconn *tconn);
+extern int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cmd);
+extern int drbd_send_protocol(struct drbd_connection *connection);
 extern int drbd_send_uuids(struct drbd_device *device);
 extern int drbd_send_uuids_skip_initial_sync(struct drbd_device *device);
 extern void drbd_gen_and_send_sync_uuid(struct drbd_device *device);
@@ -818,7 +818,7 @@ extern int drbd_send_sizes(struct drbd_device *device, int trigger_reply, enum d
 extern int drbd_send_state(struct drbd_device *device, union drbd_state s);
 extern int drbd_send_current_state(struct drbd_device *device);
 extern int drbd_send_sync_param(struct drbd_device *device);
-extern void drbd_send_b_ack(struct drbd_tconn *tconn, u32 barrier_nr,
+extern void drbd_send_b_ack(struct drbd_connection *connection, u32 barrier_nr,
 			    u32 set_size);
 extern int drbd_send_ack(struct drbd_device *, enum drbd_packet,
 			 struct drbd_peer_request *);
@@ -841,12 +841,12 @@ extern int drbd_send_ov_request(struct drbd_device *device, sector_t sector, int
 
 extern int drbd_send_bitmap(struct drbd_device *device);
 extern void drbd_send_sr_reply(struct drbd_device *device, enum drbd_state_rv retcode);
-extern void conn_send_sr_reply(struct drbd_tconn *tconn, enum drbd_state_rv retcode);
+extern void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_state_rv retcode);
 extern void drbd_free_bc(struct drbd_backing_dev *ldev);
 extern void drbd_device_cleanup(struct drbd_device *device);
 void drbd_print_uuids(struct drbd_device *device, const char *text);
 
-extern void conn_md_sync(struct drbd_tconn *tconn);
+extern void conn_md_sync(struct drbd_connection *connection);
 extern void drbd_md_write(struct drbd_device *device, void *buffer);
 extern void drbd_md_sync(struct drbd_device *device);
 extern int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev);
@@ -1153,17 +1153,17 @@ extern struct bio *bio_alloc_drbd(gfp_t gfp_mask);
 
 extern rwlock_t global_state_lock;
 
-extern int conn_lowest_minor(struct drbd_tconn *tconn);
-enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor, int vnr);
+extern int conn_lowest_minor(struct drbd_connection *connection);
+enum drbd_ret_code conn_new_minor(struct drbd_connection *connection, unsigned int minor, int vnr);
 extern void drbd_minor_destroy(struct kref *kref);
 
-extern int set_resource_options(struct drbd_tconn *tconn, struct res_opts *res_opts);
-extern struct drbd_tconn *conn_create(const char *name, struct res_opts *res_opts);
+extern int set_resource_options(struct drbd_connection *connection, struct res_opts *res_opts);
+extern struct drbd_connection *conn_create(const char *name, struct res_opts *res_opts);
 extern void conn_destroy(struct kref *kref);
-struct drbd_tconn *conn_get_by_name(const char *name);
-extern struct drbd_tconn *conn_get_by_addrs(void *my_addr, int my_addr_len,
+struct drbd_connection *conn_get_by_name(const char *name);
+extern struct drbd_connection *conn_get_by_addrs(void *my_addr, int my_addr_len,
 					     void *peer_addr, int peer_addr_len);
-extern void conn_free_crypto(struct drbd_tconn *tconn);
+extern void conn_free_crypto(struct drbd_connection *connection);
 
 extern int proc_details;
 
@@ -1198,8 +1198,8 @@ extern void drbd_reconsider_max_bio_size(struct drbd_device *device);
 extern enum drbd_state_rv drbd_set_role(struct drbd_device *device,
 					enum drbd_role new_role,
 					int force);
-extern bool conn_try_outdate_peer(struct drbd_tconn *tconn);
-extern void conn_try_outdate_peer_async(struct drbd_tconn *tconn);
+extern bool conn_try_outdate_peer(struct drbd_connection *connection);
+extern void conn_try_outdate_peer_async(struct drbd_connection *connection);
 extern int drbd_khelper(struct drbd_device *device, char *cmd);
 
 /* drbd_worker.c */
@@ -1271,11 +1271,11 @@ extern void __drbd_free_peer_req(struct drbd_device *, struct drbd_peer_request
 extern struct page *drbd_alloc_pages(struct drbd_device *, unsigned int, bool);
 extern void drbd_set_recv_tcq(struct drbd_device *device, int tcq_enabled);
 extern void _drbd_clear_done_ee(struct drbd_device *device, struct list_head *to_be_freed);
-extern void conn_flush_workqueue(struct drbd_tconn *tconn);
+extern void conn_flush_workqueue(struct drbd_connection *connection);
 extern int drbd_connected(struct drbd_device *device);
 static inline void drbd_flush_workqueue(struct drbd_device *device)
 {
-	conn_flush_workqueue(device->tconn);
+	conn_flush_workqueue(device->connection);
 }
 
 /* Yes, there is kernel_setsockopt, but only since 2.6.18.
@@ -1327,7 +1327,7 @@ static inline void drbd_tcp_quickack(struct socket *sock)
 			(char*)&val, sizeof(val));
 }
 
-void drbd_bump_write_ordering(struct drbd_tconn *tconn, enum write_ordering_e wo);
+void drbd_bump_write_ordering(struct drbd_connection *connection, enum write_ordering_e wo);
 
 /* drbd_proc.c */
 extern struct proc_dir_entry *drbd_proc;
@@ -1421,9 +1421,9 @@ static inline union drbd_state drbd_read_state(struct drbd_device *device)
 	union drbd_state rv;
 
 	rv.i = device->state.i;
-	rv.susp = device->tconn->susp;
-	rv.susp_nod = device->tconn->susp_nod;
-	rv.susp_fen = device->tconn->susp_fen;
+	rv.susp = device->connection->susp;
+	rv.susp_nod = device->connection->susp_nod;
+	rv.susp_fen = device->connection->susp_fen;
 
 	return rv;
 }
@@ -1505,9 +1505,9 @@ static inline void drbd_chk_io_error_(struct drbd_device *device,
 {
 	if (error) {
 		unsigned long flags;
-		spin_lock_irqsave(&device->tconn->req_lock, flags);
+		spin_lock_irqsave(&device->connection->req_lock, flags);
 		__drbd_chk_io_error_(device, forcedetach, where);
-		spin_unlock_irqrestore(&device->tconn->req_lock, flags);
+		spin_unlock_irqrestore(&device->connection->req_lock, flags);
 	}
 }
 
@@ -1630,31 +1630,31 @@ drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w)
 	wake_up(&q->q_wait);
 }
 
-static inline void wake_asender(struct drbd_tconn *tconn)
+static inline void wake_asender(struct drbd_connection *connection)
 {
-	if (test_bit(SIGNAL_ASENDER, &tconn->flags))
-		force_sig(DRBD_SIG, tconn->asender.task);
+	if (test_bit(SIGNAL_ASENDER, &connection->flags))
+		force_sig(DRBD_SIG, connection->asender.task);
 }
 
-static inline void request_ping(struct drbd_tconn *tconn)
+static inline void request_ping(struct drbd_connection *connection)
 {
-	set_bit(SEND_PING, &tconn->flags);
-	wake_asender(tconn);
+	set_bit(SEND_PING, &connection->flags);
+	wake_asender(connection);
 }
 
-extern void *conn_prepare_command(struct drbd_tconn *, struct drbd_socket *);
+extern void *conn_prepare_command(struct drbd_connection *, struct drbd_socket *);
 extern void *drbd_prepare_command(struct drbd_device *, struct drbd_socket *);
-extern int conn_send_command(struct drbd_tconn *, struct drbd_socket *,
+extern int conn_send_command(struct drbd_connection *, struct drbd_socket *,
 			     enum drbd_packet, unsigned int, void *,
 			     unsigned int);
 extern int drbd_send_command(struct drbd_device *, struct drbd_socket *,
 			     enum drbd_packet, unsigned int, void *,
 			     unsigned int);
 
-extern int drbd_send_ping(struct drbd_tconn *tconn);
-extern int drbd_send_ping_ack(struct drbd_tconn *tconn);
+extern int drbd_send_ping(struct drbd_connection *connection);
+extern int drbd_send_ping_ack(struct drbd_connection *connection);
 extern int drbd_send_state_req(struct drbd_device *, union drbd_state, union drbd_state);
-extern int conn_send_state_req(struct drbd_tconn *, union drbd_state, union drbd_state);
+extern int conn_send_state_req(struct drbd_connection *, union drbd_state, union drbd_state);
 
 static inline void drbd_thread_stop(struct drbd_thread *thi)
 {
@@ -1783,7 +1783,7 @@ static inline void put_ldev(struct drbd_device *device)
 		if (device->state.disk == D_FAILED) {
 			/* all application IO references gone. */
 			if (!test_and_set_bit(GO_DISKLESS, &device->flags))
-				drbd_queue_work(&device->tconn->sender_work, &device->go_diskless);
+				drbd_queue_work(&device->connection->sender_work, &device->go_diskless);
 		}
 		wake_up(&device->misc_wait);
 	}
@@ -1865,7 +1865,7 @@ static inline int drbd_get_max_buffers(struct drbd_device *device)
 	int mxb;
 
 	rcu_read_lock();
-	nc = rcu_dereference(device->tconn->net_conf);
+	nc = rcu_dereference(device->connection->net_conf);
 	mxb = nc ? nc->max_buffers : 1000000;	/* arbitrary limit on open requests */
 	rcu_read_unlock();
 
@@ -1908,7 +1908,7 @@ static inline int drbd_state_is_stable(struct drbd_device *device)
 
 	/* Allow IO in BM exchange states with new protocols */
 	case C_WF_BITMAP_S:
-		if (device->tconn->agreed_pro_version < 96)
+		if (device->connection->agreed_pro_version < 96)
 			return 0;
 		break;
 
@@ -1944,9 +1944,9 @@ static inline int drbd_state_is_stable(struct drbd_device *device)
 
 static inline int drbd_suspended(struct drbd_device *device)
 {
-	struct drbd_tconn *tconn = device->tconn;
+	struct drbd_connection *connection = device->connection;
 
-	return tconn->susp || tconn->susp_fen || tconn->susp_nod;
+	return connection->susp || connection->susp_fen || connection->susp_nod;
 }
 
 static inline bool may_inc_ap_bio(struct drbd_device *device)
@@ -1979,11 +1979,11 @@ static inline bool inc_ap_bio_cond(struct drbd_device *device)
 {
 	bool rv = false;
 
-	spin_lock_irq(&device->tconn->req_lock);
+	spin_lock_irq(&device->connection->req_lock);
 	rv = may_inc_ap_bio(device);
 	if (rv)
 		atomic_inc(&device->ap_bio_cnt);
-	spin_unlock_irq(&device->tconn->req_lock);
+	spin_unlock_irq(&device->connection->req_lock);
 
 	return rv;
 }
@@ -2010,7 +2010,7 @@ static inline void dec_ap_bio(struct drbd_device *device)
 
 	if (ap_bio == 0 && test_bit(BITMAP_IO, &device->flags)) {
 		if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
-			drbd_queue_work(&device->tconn->sender_work, &device->bm_io_work.w);
+			drbd_queue_work(&device->connection->sender_work, &device->bm_io_work.w);
 	}
 
 	/* this currently does wake_up for every dec_ap_bio!
@@ -2022,8 +2022,8 @@ static inline void dec_ap_bio(struct drbd_device *device)
 
 static inline bool verify_can_do_stop_sector(struct drbd_device *device)
 {
-	return device->tconn->agreed_pro_version >= 97 &&
-	       device->tconn->agreed_pro_version != 100;
+	return device->connection->agreed_pro_version >= 97 &&
+	       device->connection->agreed_pro_version != 100;
 }
 
 static inline int drbd_set_ed_uuid(struct drbd_device *device, u64 val)
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index cc3b451d465f..e4fd1806dc25 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -118,7 +118,7 @@ module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0
  * as member "struct gendisk *vdisk;"
  */
 struct idr minors;
-struct list_head drbd_tconns;  /* list of struct drbd_tconn */
+struct list_head drbd_connections;  /* list of struct drbd_connection */
 
 struct kmem_cache *drbd_request_cache;
 struct kmem_cache *drbd_ee_cache;	/* peer requests */
@@ -182,7 +182,7 @@ int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins)
 
 /**
  * tl_release() - mark as BARRIER_ACKED all requests in the corresponding transfer log epoch
- * @tconn:	DRBD connection.
+ * @connection:	DRBD connection.
  * @barrier_nr:	Expected identifier of the DRBD write barrier packet.
  * @set_size:	Expected number of requests before that barrier.
  *
@@ -190,7 +190,7 @@ int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins)
  * epoch of not yet barrier-acked requests, this function will cause a
  * termination of the connection.
  */
-void tl_release(struct drbd_tconn *tconn, unsigned int barrier_nr,
+void tl_release(struct drbd_connection *connection, unsigned int barrier_nr,
 		unsigned int set_size)
 {
 	struct drbd_request *r;
@@ -198,11 +198,11 @@ void tl_release(struct drbd_tconn *tconn, unsigned int barrier_nr,
 	int expect_epoch = 0;
 	int expect_size = 0;
 
-	spin_lock_irq(&tconn->req_lock);
+	spin_lock_irq(&connection->req_lock);
 
 	/* find oldest not yet barrier-acked write request,
 	 * count writes in its epoch. */
-	list_for_each_entry(r, &tconn->transfer_log, tl_requests) {
+	list_for_each_entry(r, &connection->transfer_log, tl_requests) {
 		const unsigned s = r->rq_state;
 		if (!req) {
 			if (!(s & RQ_WRITE))
@@ -227,18 +227,18 @@ void tl_release(struct drbd_tconn *tconn, unsigned int barrier_nr,
 
 	/* first some paranoia code */
 	if (req == NULL) {
-		conn_err(tconn, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
+		conn_err(connection, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
 			 barrier_nr);
 		goto bail;
 	}
 	if (expect_epoch != barrier_nr) {
-		conn_err(tconn, "BAD! BarrierAck #%u received, expected #%u!\n",
+		conn_err(connection, "BAD! BarrierAck #%u received, expected #%u!\n",
 			 barrier_nr, expect_epoch);
 		goto bail;
 	}
 
 	if (expect_size != set_size) {
-		conn_err(tconn, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
+		conn_err(connection, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
 			 barrier_nr, set_size, expect_size);
 		goto bail;
 	}
@@ -247,21 +247,21 @@ void tl_release(struct drbd_tconn *tconn, unsigned int barrier_nr,
 	/* this extra list walk restart is paranoia,
 	 * to catch requests being barrier-acked "unexpectedly".
 	 * It usually should find the same req again, or some READ preceding it. */
-	list_for_each_entry(req, &tconn->transfer_log, tl_requests)
+	list_for_each_entry(req, &connection->transfer_log, tl_requests)
 		if (req->epoch == expect_epoch)
 			break;
-	list_for_each_entry_safe_from(req, r, &tconn->transfer_log, tl_requests) {
+	list_for_each_entry_safe_from(req, r, &connection->transfer_log, tl_requests) {
 		if (req->epoch != expect_epoch)
 			break;
 		_req_mod(req, BARRIER_ACKED);
 	}
-	spin_unlock_irq(&tconn->req_lock);
+	spin_unlock_irq(&connection->req_lock);
 
 	return;
 
 bail:
-	spin_unlock_irq(&tconn->req_lock);
-	conn_request_state(tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
+	spin_unlock_irq(&connection->req_lock);
+	conn_request_state(connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
 }
 
 
@@ -274,19 +274,19 @@ bail:
  * RESTART_FROZEN_DISK_IO.
  */
 /* must hold resource->req_lock */
-void _tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what)
+void _tl_restart(struct drbd_connection *connection, enum drbd_req_event what)
 {
 	struct drbd_request *req, *r;
 
-	list_for_each_entry_safe(req, r, &tconn->transfer_log, tl_requests)
+	list_for_each_entry_safe(req, r, &connection->transfer_log, tl_requests)
 		_req_mod(req, what);
 }
 
-void tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what)
+void tl_restart(struct drbd_connection *connection, enum drbd_req_event what)
 {
-	spin_lock_irq(&tconn->req_lock);
-	_tl_restart(tconn, what);
-	spin_unlock_irq(&tconn->req_lock);
+	spin_lock_irq(&connection->req_lock);
+	_tl_restart(connection, what);
+	spin_unlock_irq(&connection->req_lock);
 }
 
 /**
@@ -297,9 +297,9 @@ void tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what)
  * by the requests on the transfer gets marked as our of sync. Called from the
  * receiver thread and the worker thread.
  */
-void tl_clear(struct drbd_tconn *tconn)
+void tl_clear(struct drbd_connection *connection)
 {
-	tl_restart(tconn, CONNECTION_LOST_WHILE_PENDING);
+	tl_restart(connection, CONNECTION_LOST_WHILE_PENDING);
 }
 
 /**
@@ -308,29 +308,29 @@ void tl_clear(struct drbd_tconn *tconn)
  */
 void tl_abort_disk_io(struct drbd_device *device)
 {
-	struct drbd_tconn *tconn = device->tconn;
+	struct drbd_connection *connection = device->connection;
 	struct drbd_request *req, *r;
 
-	spin_lock_irq(&tconn->req_lock);
-	list_for_each_entry_safe(req, r, &tconn->transfer_log, tl_requests) {
+	spin_lock_irq(&connection->req_lock);
+	list_for_each_entry_safe(req, r, &connection->transfer_log, tl_requests) {
 		if (!(req->rq_state & RQ_LOCAL_PENDING))
 			continue;
 		if (req->w.device != device)
 			continue;
 		_req_mod(req, ABORT_DISK_IO);
 	}
-	spin_unlock_irq(&tconn->req_lock);
+	spin_unlock_irq(&connection->req_lock);
 }
 
 static int drbd_thread_setup(void *arg)
 {
 	struct drbd_thread *thi = (struct drbd_thread *) arg;
-	struct drbd_tconn *tconn = thi->tconn;
+	struct drbd_connection *connection = thi->connection;
 	unsigned long flags;
 	int retval;
 
 	snprintf(current->comm, sizeof(current->comm), "drbd_%c_%s",
-		 thi->name[0], thi->tconn->name);
+		 thi->name[0], thi->connection->name);
 
 restart:
 	retval = thi->function(thi);
@@ -348,7 +348,7 @@ restart:
 	 */
 
 	if (thi->t_state == RESTARTING) {
-		conn_info(tconn, "Restarting %s thread\n", thi->name);
+		conn_info(connection, "Restarting %s thread\n", thi->name);
 		thi->t_state = RUNNING;
 		spin_unlock_irqrestore(&thi->t_lock, flags);
 		goto restart;
@@ -360,29 +360,29 @@ restart:
 	complete_all(&thi->stop);
 	spin_unlock_irqrestore(&thi->t_lock, flags);
 
-	conn_info(tconn, "Terminating %s\n", current->comm);
+	conn_info(connection, "Terminating %s\n", current->comm);
 
 	/* Release mod reference taken when thread was started */
 
-	kref_put(&tconn->kref, &conn_destroy);
+	kref_put(&connection->kref, &conn_destroy);
 	module_put(THIS_MODULE);
 	return retval;
 }
 
-static void drbd_thread_init(struct drbd_tconn *tconn, struct drbd_thread *thi,
+static void drbd_thread_init(struct drbd_connection *connection, struct drbd_thread *thi,
 			     int (*func) (struct drbd_thread *), char *name)
 {
 	spin_lock_init(&thi->t_lock);
 	thi->task = NULL;
 	thi->t_state = NONE;
 	thi->function = func;
-	thi->tconn = tconn;
+	thi->connection = connection;
 	strncpy(thi->name, name, ARRAY_SIZE(thi->name));
 }
 
 int drbd_thread_start(struct drbd_thread *thi)
 {
-	struct drbd_tconn *tconn = thi->tconn;
+	struct drbd_connection *connection = thi->connection;
 	struct task_struct *nt;
 	unsigned long flags;
 
@@ -392,17 +392,17 @@ int drbd_thread_start(struct drbd_thread *thi)
 
 	switch (thi->t_state) {
 	case NONE:
-		conn_info(tconn, "Starting %s thread (from %s [%d])\n",
+		conn_info(connection, "Starting %s thread (from %s [%d])\n",
 			  thi->name, current->comm, current->pid);
 
 		/* Get ref on module for thread - this is released when thread exits */
 		if (!try_module_get(THIS_MODULE)) {
-			conn_err(tconn, "Failed to get module reference in drbd_thread_start\n");
+			conn_err(connection, "Failed to get module reference in drbd_thread_start\n");
 			spin_unlock_irqrestore(&thi->t_lock, flags);
 			return false;
 		}
 
-		kref_get(&thi->tconn->kref);
+		kref_get(&thi->connection->kref);
 
 		init_completion(&thi->stop);
 		thi->reset_cpu_mask = 1;
@@ -411,12 +411,12 @@ int drbd_thread_start(struct drbd_thread *thi)
 		flush_signals(current); /* otherw. may get -ERESTARTNOINTR */
 
 		nt = kthread_create(drbd_thread_setup, (void *) thi,
-				    "drbd_%c_%s", thi->name[0], thi->tconn->name);
+				    "drbd_%c_%s", thi->name[0], thi->connection->name);
 
 		if (IS_ERR(nt)) {
-			conn_err(tconn, "Couldn't start thread\n");
+			conn_err(connection, "Couldn't start thread\n");
 
-			kref_put(&tconn->kref, &conn_destroy);
+			kref_put(&connection->kref, &conn_destroy);
 			module_put(THIS_MODULE);
 			return false;
 		}
@@ -428,7 +428,7 @@ int drbd_thread_start(struct drbd_thread *thi)
 		break;
 	case EXITING:
 		thi->t_state = RESTARTING;
-		conn_info(tconn, "Restarting %s thread (from %s [%d])\n",
+		conn_info(connection, "Restarting %s thread (from %s [%d])\n",
 			  thi->name, current->comm, current->pid);
 		/* fall through */
 	case RUNNING:
@@ -477,29 +477,29 @@ void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
 	wait_for_completion(&thi->stop);
 }
 
-static struct drbd_thread *drbd_task_to_thread(struct drbd_tconn *tconn, struct task_struct *task)
+static struct drbd_thread *drbd_task_to_thread(struct drbd_connection *connection, struct task_struct *task)
 {
 	struct drbd_thread *thi =
-		task == tconn->receiver.task ? &tconn->receiver :
-		task == tconn->asender.task  ? &tconn->asender :
-		task == tconn->worker.task   ? &tconn->worker : NULL;
+		task == connection->receiver.task ? &connection->receiver :
+		task == connection->asender.task  ? &connection->asender :
+		task == connection->worker.task   ? &connection->worker : NULL;
 
 	return thi;
 }
 
-char *drbd_task_to_thread_name(struct drbd_tconn *tconn, struct task_struct *task)
+char *drbd_task_to_thread_name(struct drbd_connection *connection, struct task_struct *task)
 {
-	struct drbd_thread *thi = drbd_task_to_thread(tconn, task);
+	struct drbd_thread *thi = drbd_task_to_thread(connection, task);
 	return thi ? thi->name : task->comm;
 }
 
-int conn_lowest_minor(struct drbd_tconn *tconn)
+int conn_lowest_minor(struct drbd_connection *connection)
 {
 	struct drbd_device *device;
 	int vnr = 0, m;
 
 	rcu_read_lock();
-	device = idr_get_next(&tconn->volumes, &vnr);
+	device = idr_get_next(&connection->volumes, &vnr);
 	m = device ? device_to_minor(device) : -1;
 	rcu_read_unlock();
 
@@ -514,23 +514,23 @@ int conn_lowest_minor(struct drbd_tconn *tconn)
  * Forces all threads of a device onto the same CPU. This is beneficial for
  * DRBD's performance. May be overwritten by user's configuration.
  */
-void drbd_calc_cpu_mask(struct drbd_tconn *tconn)
+void drbd_calc_cpu_mask(struct drbd_connection *connection)
 {
 	int ord, cpu;
 
 	/* user override. */
-	if (cpumask_weight(tconn->cpu_mask))
+	if (cpumask_weight(connection->cpu_mask))
 		return;
 
-	ord = conn_lowest_minor(tconn) % cpumask_weight(cpu_online_mask);
+	ord = conn_lowest_minor(connection) % cpumask_weight(cpu_online_mask);
 	for_each_online_cpu(cpu) {
 		if (ord-- == 0) {
-			cpumask_set_cpu(cpu, tconn->cpu_mask);
+			cpumask_set_cpu(cpu, connection->cpu_mask);
 			return;
 		}
 	}
 	/* should not be reached */
-	cpumask_setall(tconn->cpu_mask);
+	cpumask_setall(connection->cpu_mask);
 }
 
 /**
@@ -548,7 +548,7 @@ void drbd_thread_current_set_cpu(struct drbd_thread *thi)
 	if (!thi->reset_cpu_mask)
 		return;
 	thi->reset_cpu_mask = 0;
-	set_cpus_allowed_ptr(p, thi->tconn->cpu_mask);
+	set_cpus_allowed_ptr(p, thi->connection->cpu_mask);
 }
 #endif
 
@@ -559,9 +559,9 @@ void drbd_thread_current_set_cpu(struct drbd_thread *thi)
  * word aligned on 64-bit architectures. (The bitmap send and receive code
  * relies on this.)
  */
-unsigned int drbd_header_size(struct drbd_tconn *tconn)
+unsigned int drbd_header_size(struct drbd_connection *connection)
 {
-	if (tconn->agreed_pro_version >= 100) {
+	if (connection->agreed_pro_version >= 100) {
 		BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header100), 8));
 		return sizeof(struct p_header100);
 	} else {
@@ -599,32 +599,32 @@ static unsigned int prepare_header100(struct p_header100 *h, enum drbd_packet cm
 	return sizeof(struct p_header100);
 }
 
-static unsigned int prepare_header(struct drbd_tconn *tconn, int vnr,
+static unsigned int prepare_header(struct drbd_connection *connection, int vnr,
 				   void *buffer, enum drbd_packet cmd, int size)
 {
-	if (tconn->agreed_pro_version >= 100)
+	if (connection->agreed_pro_version >= 100)
 		return prepare_header100(buffer, cmd, size, vnr);
-	else if (tconn->agreed_pro_version >= 95 &&
+	else if (connection->agreed_pro_version >= 95 &&
 		 size > DRBD_MAX_SIZE_H80_PACKET)
 		return prepare_header95(buffer, cmd, size);
 	else
 		return prepare_header80(buffer, cmd, size);
 }
 
-static void *__conn_prepare_command(struct drbd_tconn *tconn,
+static void *__conn_prepare_command(struct drbd_connection *connection,
 				    struct drbd_socket *sock)
 {
 	if (!sock->socket)
 		return NULL;
-	return sock->sbuf + drbd_header_size(tconn);
+	return sock->sbuf + drbd_header_size(connection);
 }
 
-void *conn_prepare_command(struct drbd_tconn *tconn, struct drbd_socket *sock)
+void *conn_prepare_command(struct drbd_connection *connection, struct drbd_socket *sock)
 {
 	void *p;
 
 	mutex_lock(&sock->mutex);
-	p = __conn_prepare_command(tconn, sock);
+	p = __conn_prepare_command(connection, sock);
 	if (!p)
 		mutex_unlock(&sock->mutex);
 
@@ -633,10 +633,10 @@ void *conn_prepare_command(struct drbd_tconn *tconn, struct drbd_socket *sock)
 
 void *drbd_prepare_command(struct drbd_device *device, struct drbd_socket *sock)
 {
-	return conn_prepare_command(device->tconn, sock);
+	return conn_prepare_command(device->connection, sock);
 }
 
-static int __send_command(struct drbd_tconn *tconn, int vnr,
+static int __send_command(struct drbd_connection *connection, int vnr,
 			  struct drbd_socket *sock, enum drbd_packet cmd,
 			  unsigned int header_size, void *data,
 			  unsigned int size)
@@ -653,29 +653,29 @@ static int __send_command(struct drbd_tconn *tconn, int vnr,
 	 */
 	msg_flags = data ? MSG_MORE : 0;
 
-	header_size += prepare_header(tconn, vnr, sock->sbuf, cmd,
+	header_size += prepare_header(connection, vnr, sock->sbuf, cmd,
 				      header_size + size);
-	err = drbd_send_all(tconn, sock->socket, sock->sbuf, header_size,
+	err = drbd_send_all(connection, sock->socket, sock->sbuf, header_size,
 			    msg_flags);
 	if (data && !err)
-		err = drbd_send_all(tconn, sock->socket, data, size, 0);
+		err = drbd_send_all(connection, sock->socket, data, size, 0);
 	return err;
 }
 
-static int __conn_send_command(struct drbd_tconn *tconn, struct drbd_socket *sock,
+static int __conn_send_command(struct drbd_connection *connection, struct drbd_socket *sock,
 			       enum drbd_packet cmd, unsigned int header_size,
 			       void *data, unsigned int size)
 {
-	return __send_command(tconn, 0, sock, cmd, header_size, data, size);
+	return __send_command(connection, 0, sock, cmd, header_size, data, size);
 }
 
-int conn_send_command(struct drbd_tconn *tconn, struct drbd_socket *sock,
+int conn_send_command(struct drbd_connection *connection, struct drbd_socket *sock,
 		      enum drbd_packet cmd, unsigned int header_size,
 		      void *data, unsigned int size)
 {
 	int err;
 
-	err = __conn_send_command(tconn, sock, cmd, header_size, data, size);
+	err = __conn_send_command(connection, sock, cmd, header_size, data, size);
 	mutex_unlock(&sock->mutex);
 	return err;
 }
@@ -686,30 +686,30 @@ int drbd_send_command(struct drbd_device *device, struct drbd_socket *sock,
 {
 	int err;
 
-	err = __send_command(device->tconn, device->vnr, sock, cmd, header_size,
+	err = __send_command(device->connection, device->vnr, sock, cmd, header_size,
 			     data, size);
 	mutex_unlock(&sock->mutex);
 	return err;
 }
 
-int drbd_send_ping(struct drbd_tconn *tconn)
+int drbd_send_ping(struct drbd_connection *connection)
 {
 	struct drbd_socket *sock;
 
-	sock = &tconn->meta;
-	if (!conn_prepare_command(tconn, sock))
+	sock = &connection->meta;
+	if (!conn_prepare_command(connection, sock))
 		return -EIO;
-	return conn_send_command(tconn, sock, P_PING, 0, NULL, 0);
+	return conn_send_command(connection, sock, P_PING, 0, NULL, 0);
 }
 
-int drbd_send_ping_ack(struct drbd_tconn *tconn)
+int drbd_send_ping_ack(struct drbd_connection *connection)
 {
 	struct drbd_socket *sock;
 
-	sock = &tconn->meta;
-	if (!conn_prepare_command(tconn, sock))
+	sock = &connection->meta;
+	if (!conn_prepare_command(connection, sock))
 		return -EIO;
-	return conn_send_command(tconn, sock, P_PING_ACK, 0, NULL, 0);
+	return conn_send_command(connection, sock, P_PING_ACK, 0, NULL, 0);
 }
 
 int drbd_send_sync_param(struct drbd_device *device)
@@ -717,18 +717,18 @@ int drbd_send_sync_param(struct drbd_device *device)
 	struct drbd_socket *sock;
 	struct p_rs_param_95 *p;
 	int size;
-	const int apv = device->tconn->agreed_pro_version;
+	const int apv = device->connection->agreed_pro_version;
 	enum drbd_packet cmd;
 	struct net_conf *nc;
 	struct disk_conf *dc;
 
-	sock = &device->tconn->data;
+	sock = &device->connection->data;
 	p = drbd_prepare_command(device, sock);
 	if (!p)
 		return -EIO;
 
 	rcu_read_lock();
-	nc = rcu_dereference(device->tconn->net_conf);
+	nc = rcu_dereference(device->connection->net_conf);
 
 	size = apv <= 87 ? sizeof(struct p_rs_param)
 	     : apv == 88 ? sizeof(struct p_rs_param)
@@ -766,30 +766,30 @@ int drbd_send_sync_param(struct drbd_device *device)
 	return drbd_send_command(device, sock, cmd, size, NULL, 0);
 }
 
-int __drbd_send_protocol(struct drbd_tconn *tconn, enum drbd_packet cmd)
+int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cmd)
 {
 	struct drbd_socket *sock;
 	struct p_protocol *p;
 	struct net_conf *nc;
 	int size, cf;
 
-	sock = &tconn->data;
-	p = __conn_prepare_command(tconn, sock);
+	sock = &connection->data;
+	p = __conn_prepare_command(connection, sock);
 	if (!p)
 		return -EIO;
 
 	rcu_read_lock();
-	nc = rcu_dereference(tconn->net_conf);
+	nc = rcu_dereference(connection->net_conf);
 
-	if (nc->tentative && tconn->agreed_pro_version < 92) {
+	if (nc->tentative && connection->agreed_pro_version < 92) {
 		rcu_read_unlock();
 		mutex_unlock(&sock->mutex);
-		conn_err(tconn, "--dry-run is not supported by peer");
+		conn_err(connection, "--dry-run is not supported by peer");
 		return -EOPNOTSUPP;
 	}
 
 	size = sizeof(*p);
-	if (tconn->agreed_pro_version >= 87)
+	if (connection->agreed_pro_version >= 87)
 		size += strlen(nc->integrity_alg) + 1;
 
 	p->protocol = cpu_to_be32(nc->wire_protocol);
@@ -804,20 +804,20 @@ int __drbd_send_protocol(struct drbd_tconn *tconn, enum drbd_packet cmd)
804 cf |= CF_DRY_RUN; 804 cf |= CF_DRY_RUN;
805 p->conn_flags = cpu_to_be32(cf); 805 p->conn_flags = cpu_to_be32(cf);
806 806
807 if (tconn->agreed_pro_version >= 87) 807 if (connection->agreed_pro_version >= 87)
808 strcpy(p->integrity_alg, nc->integrity_alg); 808 strcpy(p->integrity_alg, nc->integrity_alg);
809 rcu_read_unlock(); 809 rcu_read_unlock();
810 810
811 return __conn_send_command(tconn, sock, cmd, size, NULL, 0); 811 return __conn_send_command(connection, sock, cmd, size, NULL, 0);
812} 812}
813 813
814int drbd_send_protocol(struct drbd_tconn *tconn) 814int drbd_send_protocol(struct drbd_connection *connection)
815{ 815{
816 int err; 816 int err;
817 817
818 mutex_lock(&tconn->data.mutex); 818 mutex_lock(&connection->data.mutex);
819 err = __drbd_send_protocol(tconn, P_PROTOCOL); 819 err = __drbd_send_protocol(connection, P_PROTOCOL);
820 mutex_unlock(&tconn->data.mutex); 820 mutex_unlock(&connection->data.mutex);
821 821
822 return err; 822 return err;
823} 823}
@@ -831,7 +831,7 @@ static int _drbd_send_uuids(struct drbd_device *device, u64 uuid_flags)
831 if (!get_ldev_if_state(device, D_NEGOTIATING)) 831 if (!get_ldev_if_state(device, D_NEGOTIATING))
832 return 0; 832 return 0;
833 833
834 sock = &device->tconn->data; 834 sock = &device->connection->data;
835 p = drbd_prepare_command(device, sock); 835 p = drbd_prepare_command(device, sock);
836 if (!p) { 836 if (!p) {
837 put_ldev(device); 837 put_ldev(device);
@@ -845,7 +845,7 @@ static int _drbd_send_uuids(struct drbd_device *device, u64 uuid_flags)
845 device->comm_bm_set = drbd_bm_total_weight(device); 845 device->comm_bm_set = drbd_bm_total_weight(device);
846 p->uuid[UI_SIZE] = cpu_to_be64(device->comm_bm_set); 846 p->uuid[UI_SIZE] = cpu_to_be64(device->comm_bm_set);
847 rcu_read_lock(); 847 rcu_read_lock();
848 uuid_flags |= rcu_dereference(device->tconn->net_conf)->discard_my_data ? 1 : 0; 848 uuid_flags |= rcu_dereference(device->connection->net_conf)->discard_my_data ? 1 : 0;
849 rcu_read_unlock(); 849 rcu_read_unlock();
850 uuid_flags |= test_bit(CRASHED_PRIMARY, &device->flags) ? 2 : 0; 850 uuid_flags |= test_bit(CRASHED_PRIMARY, &device->flags) ? 2 : 0;
851 uuid_flags |= device->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0; 851 uuid_flags |= device->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
@@ -900,7 +900,7 @@ void drbd_gen_and_send_sync_uuid(struct drbd_device *device)
900 drbd_print_uuids(device, "updated sync UUID"); 900 drbd_print_uuids(device, "updated sync UUID");
901 drbd_md_sync(device); 901 drbd_md_sync(device);
902 902
903 sock = &device->tconn->data; 903 sock = &device->connection->data;
904 p = drbd_prepare_command(device, sock); 904 p = drbd_prepare_command(device, sock);
905 if (p) { 905 if (p) {
906 p->uuid = cpu_to_be64(uuid); 906 p->uuid = cpu_to_be64(uuid);
@@ -933,14 +933,14 @@ int drbd_send_sizes(struct drbd_device *device, int trigger_reply, enum dds_flag
933 max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */ 933 max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */
934 } 934 }
935 935
936 sock = &device->tconn->data; 936 sock = &device->connection->data;
937 p = drbd_prepare_command(device, sock); 937 p = drbd_prepare_command(device, sock);
938 if (!p) 938 if (!p)
939 return -EIO; 939 return -EIO;
940 940
941 if (device->tconn->agreed_pro_version <= 94) 941 if (device->connection->agreed_pro_version <= 94)
942 max_bio_size = min(max_bio_size, DRBD_MAX_SIZE_H80_PACKET); 942 max_bio_size = min(max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
943 else if (device->tconn->agreed_pro_version < 100) 943 else if (device->connection->agreed_pro_version < 100)
944 max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE_P95); 944 max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE_P95);
945 945
946 p->d_size = cpu_to_be64(d_size); 946 p->d_size = cpu_to_be64(d_size);
@@ -961,7 +961,7 @@ int drbd_send_current_state(struct drbd_device *device)
961 struct drbd_socket *sock; 961 struct drbd_socket *sock;
962 struct p_state *p; 962 struct p_state *p;
963 963
964 sock = &device->tconn->data; 964 sock = &device->connection->data;
965 p = drbd_prepare_command(device, sock); 965 p = drbd_prepare_command(device, sock);
966 if (!p) 966 if (!p)
967 return -EIO; 967 return -EIO;
@@ -984,7 +984,7 @@ int drbd_send_state(struct drbd_device *device, union drbd_state state)
984 struct drbd_socket *sock; 984 struct drbd_socket *sock;
985 struct p_state *p; 985 struct p_state *p;
986 986
987 sock = &device->tconn->data; 987 sock = &device->connection->data;
988 p = drbd_prepare_command(device, sock); 988 p = drbd_prepare_command(device, sock);
989 if (!p) 989 if (!p)
990 return -EIO; 990 return -EIO;
@@ -997,7 +997,7 @@ int drbd_send_state_req(struct drbd_device *device, union drbd_state mask, union
997 struct drbd_socket *sock; 997 struct drbd_socket *sock;
998 struct p_req_state *p; 998 struct p_req_state *p;
999 999
1000 sock = &device->tconn->data; 1000 sock = &device->connection->data;
1001 p = drbd_prepare_command(device, sock); 1001 p = drbd_prepare_command(device, sock);
1002 if (!p) 1002 if (!p)
1003 return -EIO; 1003 return -EIO;
@@ -1006,20 +1006,20 @@ int drbd_send_state_req(struct drbd_device *device, union drbd_state mask, union
1006 return drbd_send_command(device, sock, P_STATE_CHG_REQ, sizeof(*p), NULL, 0); 1006 return drbd_send_command(device, sock, P_STATE_CHG_REQ, sizeof(*p), NULL, 0);
1007} 1007}
1008 1008
1009int conn_send_state_req(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val) 1009int conn_send_state_req(struct drbd_connection *connection, union drbd_state mask, union drbd_state val)
1010{ 1010{
1011 enum drbd_packet cmd; 1011 enum drbd_packet cmd;
1012 struct drbd_socket *sock; 1012 struct drbd_socket *sock;
1013 struct p_req_state *p; 1013 struct p_req_state *p;
1014 1014
1015 cmd = tconn->agreed_pro_version < 100 ? P_STATE_CHG_REQ : P_CONN_ST_CHG_REQ; 1015 cmd = connection->agreed_pro_version < 100 ? P_STATE_CHG_REQ : P_CONN_ST_CHG_REQ;
1016 sock = &tconn->data; 1016 sock = &connection->data;
1017 p = conn_prepare_command(tconn, sock); 1017 p = conn_prepare_command(connection, sock);
1018 if (!p) 1018 if (!p)
1019 return -EIO; 1019 return -EIO;
1020 p->mask = cpu_to_be32(mask.i); 1020 p->mask = cpu_to_be32(mask.i);
1021 p->val = cpu_to_be32(val.i); 1021 p->val = cpu_to_be32(val.i);
1022 return conn_send_command(tconn, sock, cmd, sizeof(*p), NULL, 0); 1022 return conn_send_command(connection, sock, cmd, sizeof(*p), NULL, 0);
1023} 1023}
1024 1024
1025void drbd_send_sr_reply(struct drbd_device *device, enum drbd_state_rv retcode) 1025void drbd_send_sr_reply(struct drbd_device *device, enum drbd_state_rv retcode)
@@ -1027,7 +1027,7 @@ void drbd_send_sr_reply(struct drbd_device *device, enum drbd_state_rv retcode)
1027 struct drbd_socket *sock; 1027 struct drbd_socket *sock;
1028 struct p_req_state_reply *p; 1028 struct p_req_state_reply *p;
1029 1029
1030 sock = &device->tconn->meta; 1030 sock = &device->connection->meta;
1031 p = drbd_prepare_command(device, sock); 1031 p = drbd_prepare_command(device, sock);
1032 if (p) { 1032 if (p) {
1033 p->retcode = cpu_to_be32(retcode); 1033 p->retcode = cpu_to_be32(retcode);
@@ -1035,17 +1035,17 @@ void drbd_send_sr_reply(struct drbd_device *device, enum drbd_state_rv retcode)
1035 } 1035 }
1036} 1036}
1037 1037
1038void conn_send_sr_reply(struct drbd_tconn *tconn, enum drbd_state_rv retcode) 1038void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_state_rv retcode)
1039{ 1039{
1040 struct drbd_socket *sock; 1040 struct drbd_socket *sock;
1041 struct p_req_state_reply *p; 1041 struct p_req_state_reply *p;
1042 enum drbd_packet cmd = tconn->agreed_pro_version < 100 ? P_STATE_CHG_REPLY : P_CONN_ST_CHG_REPLY; 1042 enum drbd_packet cmd = connection->agreed_pro_version < 100 ? P_STATE_CHG_REPLY : P_CONN_ST_CHG_REPLY;
1043 1043
1044 sock = &tconn->meta; 1044 sock = &connection->meta;
1045 p = conn_prepare_command(tconn, sock); 1045 p = conn_prepare_command(connection, sock);
1046 if (p) { 1046 if (p) {
1047 p->retcode = cpu_to_be32(retcode); 1047 p->retcode = cpu_to_be32(retcode);
1048 conn_send_command(tconn, sock, cmd, sizeof(*p), NULL, 0); 1048 conn_send_command(connection, sock, cmd, sizeof(*p), NULL, 0);
1049 } 1049 }
1050} 1050}
1051 1051
@@ -1081,9 +1081,9 @@ static int fill_bitmap_rle_bits(struct drbd_device *device,
1081 1081
1082 /* may we use this feature? */ 1082 /* may we use this feature? */
1083 rcu_read_lock(); 1083 rcu_read_lock();
1084 use_rle = rcu_dereference(device->tconn->net_conf)->use_rle; 1084 use_rle = rcu_dereference(device->connection->net_conf)->use_rle;
1085 rcu_read_unlock(); 1085 rcu_read_unlock();
1086 if (!use_rle || device->tconn->agreed_pro_version < 90) 1086 if (!use_rle || device->connection->agreed_pro_version < 90)
1087 return 0; 1087 return 0;
1088 1088
1089 if (c->bit_offset >= c->bm_bits) 1089 if (c->bit_offset >= c->bm_bits)
@@ -1172,8 +1172,8 @@ static int fill_bitmap_rle_bits(struct drbd_device *device,
1172static int 1172static int
1173send_bitmap_rle_or_plain(struct drbd_device *device, struct bm_xfer_ctx *c) 1173send_bitmap_rle_or_plain(struct drbd_device *device, struct bm_xfer_ctx *c)
1174{ 1174{
1175 struct drbd_socket *sock = &device->tconn->data; 1175 struct drbd_socket *sock = &device->connection->data;
1176 unsigned int header_size = drbd_header_size(device->tconn); 1176 unsigned int header_size = drbd_header_size(device->connection);
1177 struct p_compressed_bm *p = sock->sbuf + header_size; 1177 struct p_compressed_bm *p = sock->sbuf + header_size;
1178 int len, err; 1178 int len, err;
1179 1179
@@ -1184,7 +1184,7 @@ send_bitmap_rle_or_plain(struct drbd_device *device, struct bm_xfer_ctx *c)
1184 1184
1185 if (len) { 1185 if (len) {
1186 dcbp_set_code(p, RLE_VLI_Bits); 1186 dcbp_set_code(p, RLE_VLI_Bits);
1187 err = __send_command(device->tconn, device->vnr, sock, 1187 err = __send_command(device->connection, device->vnr, sock,
1188 P_COMPRESSED_BITMAP, sizeof(*p) + len, 1188 P_COMPRESSED_BITMAP, sizeof(*p) + len,
1189 NULL, 0); 1189 NULL, 0);
1190 c->packets[0]++; 1190 c->packets[0]++;
@@ -1205,7 +1205,7 @@ send_bitmap_rle_or_plain(struct drbd_device *device, struct bm_xfer_ctx *c)
1205 len = num_words * sizeof(*p); 1205 len = num_words * sizeof(*p);
1206 if (len) 1206 if (len)
1207 drbd_bm_get_lel(device, c->word_offset, num_words, p); 1207 drbd_bm_get_lel(device, c->word_offset, num_words, p);
1208 err = __send_command(device->tconn, device->vnr, sock, P_BITMAP, len, NULL, 0); 1208 err = __send_command(device->connection, device->vnr, sock, P_BITMAP, len, NULL, 0);
1209 c->word_offset += num_words; 1209 c->word_offset += num_words;
1210 c->bit_offset = c->word_offset * BITS_PER_LONG; 1210 c->bit_offset = c->word_offset * BITS_PER_LONG;
1211 1211
@@ -1265,7 +1265,7 @@ static int _drbd_send_bitmap(struct drbd_device *device)
1265 1265
1266int drbd_send_bitmap(struct drbd_device *device) 1266int drbd_send_bitmap(struct drbd_device *device)
1267{ 1267{
1268 struct drbd_socket *sock = &device->tconn->data; 1268 struct drbd_socket *sock = &device->connection->data;
1269 int err = -1; 1269 int err = -1;
1270 1270
1271 mutex_lock(&sock->mutex); 1271 mutex_lock(&sock->mutex);
@@ -1275,21 +1275,21 @@ int drbd_send_bitmap(struct drbd_device *device)
1275 return err; 1275 return err;
1276} 1276}
1277 1277
1278void drbd_send_b_ack(struct drbd_tconn *tconn, u32 barrier_nr, u32 set_size) 1278void drbd_send_b_ack(struct drbd_connection *connection, u32 barrier_nr, u32 set_size)
1279{ 1279{
1280 struct drbd_socket *sock; 1280 struct drbd_socket *sock;
1281 struct p_barrier_ack *p; 1281 struct p_barrier_ack *p;
1282 1282
1283 if (tconn->cstate < C_WF_REPORT_PARAMS) 1283 if (connection->cstate < C_WF_REPORT_PARAMS)
1284 return; 1284 return;
1285 1285
1286 sock = &tconn->meta; 1286 sock = &connection->meta;
1287 p = conn_prepare_command(tconn, sock); 1287 p = conn_prepare_command(connection, sock);
1288 if (!p) 1288 if (!p)
1289 return; 1289 return;
1290 p->barrier = barrier_nr; 1290 p->barrier = barrier_nr;
1291 p->set_size = cpu_to_be32(set_size); 1291 p->set_size = cpu_to_be32(set_size);
1292 conn_send_command(tconn, sock, P_BARRIER_ACK, sizeof(*p), NULL, 0); 1292 conn_send_command(connection, sock, P_BARRIER_ACK, sizeof(*p), NULL, 0);
1293} 1293}
1294 1294
1295/** 1295/**
@@ -1309,7 +1309,7 @@ static int _drbd_send_ack(struct drbd_device *device, enum drbd_packet cmd,
1309 if (device->state.conn < C_CONNECTED) 1309 if (device->state.conn < C_CONNECTED)
1310 return -EIO; 1310 return -EIO;
1311 1311
1312 sock = &device->tconn->meta; 1312 sock = &device->connection->meta;
1313 p = drbd_prepare_command(device, sock); 1313 p = drbd_prepare_command(device, sock);
1314 if (!p) 1314 if (!p)
1315 return -EIO; 1315 return -EIO;
@@ -1326,8 +1326,8 @@ static int _drbd_send_ack(struct drbd_device *device, enum drbd_packet cmd,
1326void drbd_send_ack_dp(struct drbd_device *device, enum drbd_packet cmd, 1326void drbd_send_ack_dp(struct drbd_device *device, enum drbd_packet cmd,
1327 struct p_data *dp, int data_size) 1327 struct p_data *dp, int data_size)
1328{ 1328{
1329 if (device->tconn->peer_integrity_tfm) 1329 if (device->connection->peer_integrity_tfm)
1330 data_size -= crypto_hash_digestsize(device->tconn->peer_integrity_tfm); 1330 data_size -= crypto_hash_digestsize(device->connection->peer_integrity_tfm);
1331 _drbd_send_ack(device, cmd, dp->sector, cpu_to_be32(data_size), 1331 _drbd_send_ack(device, cmd, dp->sector, cpu_to_be32(data_size),
1332 dp->block_id); 1332 dp->block_id);
1333} 1333}
@@ -1370,7 +1370,7 @@ int drbd_send_drequest(struct drbd_device *device, int cmd,
1370 struct drbd_socket *sock; 1370 struct drbd_socket *sock;
1371 struct p_block_req *p; 1371 struct p_block_req *p;
1372 1372
1373 sock = &device->tconn->data; 1373 sock = &device->connection->data;
1374 p = drbd_prepare_command(device, sock); 1374 p = drbd_prepare_command(device, sock);
1375 if (!p) 1375 if (!p)
1376 return -EIO; 1376 return -EIO;
@@ -1388,7 +1388,7 @@ int drbd_send_drequest_csum(struct drbd_device *device, sector_t sector, int siz
1388 1388
1389 /* FIXME: Put the digest into the preallocated socket buffer. */ 1389 /* FIXME: Put the digest into the preallocated socket buffer. */
1390 1390
1391 sock = &device->tconn->data; 1391 sock = &device->connection->data;
1392 p = drbd_prepare_command(device, sock); 1392 p = drbd_prepare_command(device, sock);
1393 if (!p) 1393 if (!p)
1394 return -EIO; 1394 return -EIO;
@@ -1404,7 +1404,7 @@ int drbd_send_ov_request(struct drbd_device *device, sector_t sector, int size)
1404 struct drbd_socket *sock; 1404 struct drbd_socket *sock;
1405 struct p_block_req *p; 1405 struct p_block_req *p;
1406 1406
1407 sock = &device->tconn->data; 1407 sock = &device->connection->data;
1408 p = drbd_prepare_command(device, sock); 1408 p = drbd_prepare_command(device, sock);
1409 if (!p) 1409 if (!p)
1410 return -EIO; 1410 return -EIO;
@@ -1418,34 +1418,34 @@ int drbd_send_ov_request(struct drbd_device *device, sector_t sector, int size)
1418 * returns false if we should retry, 1418 * returns false if we should retry,
1419 * true if we think connection is dead 1419 * true if we think connection is dead
1420 */ 1420 */
1421static int we_should_drop_the_connection(struct drbd_tconn *tconn, struct socket *sock) 1421static int we_should_drop_the_connection(struct drbd_connection *connection, struct socket *sock)
1422{ 1422{
1423 int drop_it; 1423 int drop_it;
1424 /* long elapsed = (long)(jiffies - device->last_received); */ 1424 /* long elapsed = (long)(jiffies - device->last_received); */
1425 1425
1426 drop_it = tconn->meta.socket == sock 1426 drop_it = connection->meta.socket == sock
1427 || !tconn->asender.task 1427 || !connection->asender.task
1428 || get_t_state(&tconn->asender) != RUNNING 1428 || get_t_state(&connection->asender) != RUNNING
1429 || tconn->cstate < C_WF_REPORT_PARAMS; 1429 || connection->cstate < C_WF_REPORT_PARAMS;
1430 1430
1431 if (drop_it) 1431 if (drop_it)
1432 return true; 1432 return true;
1433 1433
1434 drop_it = !--tconn->ko_count; 1434 drop_it = !--connection->ko_count;
1435 if (!drop_it) { 1435 if (!drop_it) {
1436 conn_err(tconn, "[%s/%d] sock_sendmsg time expired, ko = %u\n", 1436 conn_err(connection, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
1437 current->comm, current->pid, tconn->ko_count); 1437 current->comm, current->pid, connection->ko_count);
1438 request_ping(tconn); 1438 request_ping(connection);
1439 } 1439 }
1440 1440
1441 return drop_it; /* && (device->state == R_PRIMARY) */; 1441 return drop_it; /* && (device->state == R_PRIMARY) */;
1442} 1442}
1443 1443
1444static void drbd_update_congested(struct drbd_tconn *tconn) 1444static void drbd_update_congested(struct drbd_connection *connection)
1445{ 1445{
1446 struct sock *sk = tconn->data.socket->sk; 1446 struct sock *sk = connection->data.socket->sk;
1447 if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5) 1447 if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5)
1448 set_bit(NET_CONGESTED, &tconn->flags); 1448 set_bit(NET_CONGESTED, &connection->flags);
1449} 1449}
1450 1450
1451/* The idea of sendpage seems to be to put some kind of reference 1451/* The idea of sendpage seems to be to put some kind of reference
@@ -1476,9 +1476,9 @@ static int _drbd_no_send_page(struct drbd_device *device, struct page *page,
1476 void *addr; 1476 void *addr;
1477 int err; 1477 int err;
1478 1478
1479 socket = device->tconn->data.socket; 1479 socket = device->connection->data.socket;
1480 addr = kmap(page) + offset; 1480 addr = kmap(page) + offset;
1481 err = drbd_send_all(device->tconn, socket, addr, size, msg_flags); 1481 err = drbd_send_all(device->connection, socket, addr, size, msg_flags);
1482 kunmap(page); 1482 kunmap(page);
1483 if (!err) 1483 if (!err)
1484 device->send_cnt += size >> 9; 1484 device->send_cnt += size >> 9;
@@ -1488,7 +1488,7 @@ static int _drbd_no_send_page(struct drbd_device *device, struct page *page,
1488static int _drbd_send_page(struct drbd_device *device, struct page *page, 1488static int _drbd_send_page(struct drbd_device *device, struct page *page,
1489 int offset, size_t size, unsigned msg_flags) 1489 int offset, size_t size, unsigned msg_flags)
1490{ 1490{
1491 struct socket *socket = device->tconn->data.socket; 1491 struct socket *socket = device->connection->data.socket;
1492 mm_segment_t oldfs = get_fs(); 1492 mm_segment_t oldfs = get_fs();
1493 int len = size; 1493 int len = size;
1494 int err = -EIO; 1494 int err = -EIO;
@@ -1503,7 +1503,7 @@ static int _drbd_send_page(struct drbd_device *device, struct page *page,
1503 return _drbd_no_send_page(device, page, offset, size, msg_flags); 1503 return _drbd_no_send_page(device, page, offset, size, msg_flags);
1504 1504
1505 msg_flags |= MSG_NOSIGNAL; 1505 msg_flags |= MSG_NOSIGNAL;
1506 drbd_update_congested(device->tconn); 1506 drbd_update_congested(device->connection);
1507 set_fs(KERNEL_DS); 1507 set_fs(KERNEL_DS);
1508 do { 1508 do {
1509 int sent; 1509 int sent;
@@ -1511,7 +1511,7 @@ static int _drbd_send_page(struct drbd_device *device, struct page *page,
1511 sent = socket->ops->sendpage(socket, page, offset, len, msg_flags); 1511 sent = socket->ops->sendpage(socket, page, offset, len, msg_flags);
1512 if (sent <= 0) { 1512 if (sent <= 0) {
1513 if (sent == -EAGAIN) { 1513 if (sent == -EAGAIN) {
1514 if (we_should_drop_the_connection(device->tconn, socket)) 1514 if (we_should_drop_the_connection(device->connection, socket))
1515 break; 1515 break;
1516 continue; 1516 continue;
1517 } 1517 }
@@ -1525,7 +1525,7 @@ static int _drbd_send_page(struct drbd_device *device, struct page *page,
1525 offset += sent; 1525 offset += sent;
1526 } while (len > 0 /* THINK && device->cstate >= C_CONNECTED*/); 1526 } while (len > 0 /* THINK && device->cstate >= C_CONNECTED*/);
1527 set_fs(oldfs); 1527 set_fs(oldfs);
1528 clear_bit(NET_CONGESTED, &device->tconn->flags); 1528 clear_bit(NET_CONGESTED, &device->connection->flags);
1529 1529
1530 if (len == 0) { 1530 if (len == 0) {
1531 err = 0; 1531 err = 0;
@@ -1593,7 +1593,7 @@ static int _drbd_send_zc_ee(struct drbd_device *device,
1593 1593
1594static u32 bio_flags_to_wire(struct drbd_device *device, unsigned long bi_rw) 1594static u32 bio_flags_to_wire(struct drbd_device *device, unsigned long bi_rw)
1595{ 1595{
1596 if (device->tconn->agreed_pro_version >= 95) 1596 if (device->connection->agreed_pro_version >= 95)
1597 return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) | 1597 return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
1598 (bi_rw & REQ_FUA ? DP_FUA : 0) | 1598 (bi_rw & REQ_FUA ? DP_FUA : 0) |
1599 (bi_rw & REQ_FLUSH ? DP_FLUSH : 0) | 1599 (bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
@@ -1613,9 +1613,9 @@ int drbd_send_dblock(struct drbd_device *device, struct drbd_request *req)
1613 int dgs; 1613 int dgs;
1614 int err; 1614 int err;
1615 1615
1616 sock = &device->tconn->data; 1616 sock = &device->connection->data;
1617 p = drbd_prepare_command(device, sock); 1617 p = drbd_prepare_command(device, sock);
1618 dgs = device->tconn->integrity_tfm ? crypto_hash_digestsize(device->tconn->integrity_tfm) : 0; 1618 dgs = device->connection->integrity_tfm ? crypto_hash_digestsize(device->connection->integrity_tfm) : 0;
1619 1619
1620 if (!p) 1620 if (!p)
1621 return -EIO; 1621 return -EIO;
@@ -1626,7 +1626,7 @@ int drbd_send_dblock(struct drbd_device *device, struct drbd_request *req)
1626 if (device->state.conn >= C_SYNC_SOURCE && 1626 if (device->state.conn >= C_SYNC_SOURCE &&
1627 device->state.conn <= C_PAUSED_SYNC_T) 1627 device->state.conn <= C_PAUSED_SYNC_T)
1628 dp_flags |= DP_MAY_SET_IN_SYNC; 1628 dp_flags |= DP_MAY_SET_IN_SYNC;
1629 if (device->tconn->agreed_pro_version >= 100) { 1629 if (device->connection->agreed_pro_version >= 100) {
1630 if (req->rq_state & RQ_EXP_RECEIVE_ACK) 1630 if (req->rq_state & RQ_EXP_RECEIVE_ACK)
1631 dp_flags |= DP_SEND_RECEIVE_ACK; 1631 dp_flags |= DP_SEND_RECEIVE_ACK;
1632 if (req->rq_state & RQ_EXP_WRITE_ACK) 1632 if (req->rq_state & RQ_EXP_WRITE_ACK)
@@ -1634,8 +1634,8 @@ int drbd_send_dblock(struct drbd_device *device, struct drbd_request *req)
1634 } 1634 }
1635 p->dp_flags = cpu_to_be32(dp_flags); 1635 p->dp_flags = cpu_to_be32(dp_flags);
1636 if (dgs) 1636 if (dgs)
1637 drbd_csum_bio(device, device->tconn->integrity_tfm, req->master_bio, p + 1); 1637 drbd_csum_bio(device, device->connection->integrity_tfm, req->master_bio, p + 1);
1638 err = __send_command(device->tconn, device->vnr, sock, P_DATA, sizeof(*p) + dgs, NULL, req->i.size); 1638 err = __send_command(device->connection, device->vnr, sock, P_DATA, sizeof(*p) + dgs, NULL, req->i.size);
1639 if (!err) { 1639 if (!err) {
1640 /* For protocol A, we have to memcpy the payload into 1640 /* For protocol A, we have to memcpy the payload into
1641 * socket buffers, as we may complete right away 1641 * socket buffers, as we may complete right away
@@ -1658,7 +1658,7 @@ int drbd_send_dblock(struct drbd_device *device, struct drbd_request *req)
1658 /* 64 byte, 512 bit, is the largest digest size 1658 /* 64 byte, 512 bit, is the largest digest size
1659 * currently supported in kernel crypto. */ 1659 * currently supported in kernel crypto. */
1660 unsigned char digest[64]; 1660 unsigned char digest[64];
1661 drbd_csum_bio(device, device->tconn->integrity_tfm, req->master_bio, digest); 1661 drbd_csum_bio(device, device->connection->integrity_tfm, req->master_bio, digest);
1662 if (memcmp(p + 1, digest, dgs)) { 1662 if (memcmp(p + 1, digest, dgs)) {
1663 dev_warn(DEV, 1663 dev_warn(DEV,
1664 "Digest mismatch, buffer modified by upper layers during write: %llus +%u\n", 1664 "Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
@@ -1685,10 +1685,10 @@ int drbd_send_block(struct drbd_device *device, enum drbd_packet cmd,
1685 int err; 1685 int err;
1686 int dgs; 1686 int dgs;
1687 1687
1688 sock = &device->tconn->data; 1688 sock = &device->connection->data;
1689 p = drbd_prepare_command(device, sock); 1689 p = drbd_prepare_command(device, sock);
1690 1690
1691 dgs = device->tconn->integrity_tfm ? crypto_hash_digestsize(device->tconn->integrity_tfm) : 0; 1691 dgs = device->connection->integrity_tfm ? crypto_hash_digestsize(device->connection->integrity_tfm) : 0;
1692 1692
1693 if (!p) 1693 if (!p)
1694 return -EIO; 1694 return -EIO;
@@ -1697,8 +1697,8 @@ int drbd_send_block(struct drbd_device *device, enum drbd_packet cmd,
1697 p->seq_num = 0; /* unused */ 1697 p->seq_num = 0; /* unused */
1698 p->dp_flags = 0; 1698 p->dp_flags = 0;
1699 if (dgs) 1699 if (dgs)
1700 drbd_csum_ee(device, device->tconn->integrity_tfm, peer_req, p + 1); 1700 drbd_csum_ee(device, device->connection->integrity_tfm, peer_req, p + 1);
1701 err = __send_command(device->tconn, device->vnr, sock, cmd, sizeof(*p) + dgs, NULL, peer_req->i.size); 1701 err = __send_command(device->connection, device->vnr, sock, cmd, sizeof(*p) + dgs, NULL, peer_req->i.size);
1702 if (!err) 1702 if (!err)
1703 err = _drbd_send_zc_ee(device, peer_req); 1703 err = _drbd_send_zc_ee(device, peer_req);
1704 mutex_unlock(&sock->mutex); /* locked by drbd_prepare_command() */ 1704 mutex_unlock(&sock->mutex); /* locked by drbd_prepare_command() */
@@ -1711,7 +1711,7 @@ int drbd_send_out_of_sync(struct drbd_device *device, struct drbd_request *req)
1711 struct drbd_socket *sock; 1711 struct drbd_socket *sock;
1712 struct p_block_desc *p; 1712 struct p_block_desc *p;
1713 1713
1714 sock = &device->tconn->data; 1714 sock = &device->connection->data;
1715 p = drbd_prepare_command(device, sock); 1715 p = drbd_prepare_command(device, sock);
1716 if (!p) 1716 if (!p)
1717 return -EIO; 1717 return -EIO;
@@ -1736,7 +1736,7 @@ int drbd_send_out_of_sync(struct drbd_device *device, struct drbd_request *req)
1736/* 1736/*
1737 * you must have down()ed the appropriate [m]sock_mutex elsewhere! 1737 * you must have down()ed the appropriate [m]sock_mutex elsewhere!
1738 */ 1738 */
1739int drbd_send(struct drbd_tconn *tconn, struct socket *sock, 1739int drbd_send(struct drbd_connection *connection, struct socket *sock,
1740 void *buf, size_t size, unsigned msg_flags) 1740 void *buf, size_t size, unsigned msg_flags)
1741{ 1741{
1742 struct kvec iov; 1742 struct kvec iov;
@@ -1757,11 +1757,11 @@ int drbd_send(struct drbd_tconn *tconn, struct socket *sock,
1757 msg.msg_controllen = 0; 1757 msg.msg_controllen = 0;
1758 msg.msg_flags = msg_flags | MSG_NOSIGNAL; 1758 msg.msg_flags = msg_flags | MSG_NOSIGNAL;
1759 1759
1760 if (sock == tconn->data.socket) { 1760 if (sock == connection->data.socket) {
1761 rcu_read_lock(); 1761 rcu_read_lock();
1762 tconn->ko_count = rcu_dereference(tconn->net_conf)->ko_count; 1762 connection->ko_count = rcu_dereference(connection->net_conf)->ko_count;
1763 rcu_read_unlock(); 1763 rcu_read_unlock();
1764 drbd_update_congested(tconn); 1764 drbd_update_congested(connection);
1765 } 1765 }
1766 do { 1766 do {
1767 /* STRANGE 1767 /* STRANGE
@@ -1775,7 +1775,7 @@ int drbd_send(struct drbd_tconn *tconn, struct socket *sock,
1775 */ 1775 */
1776 rv = kernel_sendmsg(sock, &msg, &iov, 1, size); 1776 rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
1777 if (rv == -EAGAIN) { 1777 if (rv == -EAGAIN) {
1778 if (we_should_drop_the_connection(tconn, sock)) 1778 if (we_should_drop_the_connection(connection, sock))
1779 break; 1779 break;
1780 else 1780 else
1781 continue; 1781 continue;
@@ -1791,17 +1791,17 @@ int drbd_send(struct drbd_tconn *tconn, struct socket *sock,
1791 iov.iov_len -= rv; 1791 iov.iov_len -= rv;
1792 } while (sent < size); 1792 } while (sent < size);
1793 1793
1794 if (sock == tconn->data.socket) 1794 if (sock == connection->data.socket)
1795 clear_bit(NET_CONGESTED, &tconn->flags); 1795 clear_bit(NET_CONGESTED, &connection->flags);
1796 1796
1797 if (rv <= 0) { 1797 if (rv <= 0) {
1798 if (rv != -EAGAIN) { 1798 if (rv != -EAGAIN) {
1799 conn_err(tconn, "%s_sendmsg returned %d\n", 1799 conn_err(connection, "%s_sendmsg returned %d\n",
1800 sock == tconn->meta.socket ? "msock" : "sock", 1800 sock == connection->meta.socket ? "msock" : "sock",
1801 rv); 1801 rv);
1802 conn_request_state(tconn, NS(conn, C_BROKEN_PIPE), CS_HARD); 1802 conn_request_state(connection, NS(conn, C_BROKEN_PIPE), CS_HARD);
1803 } else 1803 } else
1804 conn_request_state(tconn, NS(conn, C_TIMEOUT), CS_HARD); 1804 conn_request_state(connection, NS(conn, C_TIMEOUT), CS_HARD);
1805 } 1805 }
1806 1806
1807 return sent; 1807 return sent;
@@ -1812,12 +1812,12 @@ int drbd_send(struct drbd_tconn *tconn, struct socket *sock,
1812 * 1812 *
1813 * Returns 0 upon success and a negative error value otherwise. 1813 * Returns 0 upon success and a negative error value otherwise.
1814 */ 1814 */
1815int drbd_send_all(struct drbd_tconn *tconn, struct socket *sock, void *buffer, 1815int drbd_send_all(struct drbd_connection *connection, struct socket *sock, void *buffer,
1816 size_t size, unsigned msg_flags) 1816 size_t size, unsigned msg_flags)
1817{ 1817{
1818 int err; 1818 int err;
1819 1819
1820 err = drbd_send(tconn, sock, buffer, size, msg_flags); 1820 err = drbd_send(connection, sock, buffer, size, msg_flags);
1821 if (err < 0) 1821 if (err < 0)
1822 return err; 1822 return err;
1823 if (err != size) 1823 if (err != size)
@@ -1832,7 +1832,7 @@ static int drbd_open(struct block_device *bdev, fmode_t mode)
1832 int rv = 0; 1832 int rv = 0;
1833 1833
1834 mutex_lock(&drbd_main_mutex); 1834 mutex_lock(&drbd_main_mutex);
1835 spin_lock_irqsave(&device->tconn->req_lock, flags); 1835 spin_lock_irqsave(&device->connection->req_lock, flags);
1836 /* to have a stable device->state.role 1836 /* to have a stable device->state.role
1837 * and no race with updating open_cnt */ 1837 * and no race with updating open_cnt */
1838 1838
@@ -1845,7 +1845,7 @@ static int drbd_open(struct block_device *bdev, fmode_t mode)
1845 1845
1846 if (!rv) 1846 if (!rv)
1847 device->open_cnt++; 1847 device->open_cnt++;
1848 spin_unlock_irqrestore(&device->tconn->req_lock, flags); 1848 spin_unlock_irqrestore(&device->connection->req_lock, flags);
1849 mutex_unlock(&drbd_main_mutex); 1849 mutex_unlock(&drbd_main_mutex);
1850 1850
1851 return rv; 1851 return rv;
@@ -1950,9 +1950,9 @@ void drbd_init_set_defaults(struct drbd_device *device)
1950void drbd_device_cleanup(struct drbd_device *device) 1950void drbd_device_cleanup(struct drbd_device *device)
1951{ 1951{
1952 int i; 1952 int i;
1953 if (device->tconn->receiver.t_state != NONE) 1953 if (device->connection->receiver.t_state != NONE)
1954 dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n", 1954 dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
1955 device->tconn->receiver.t_state); 1955 device->connection->receiver.t_state);
1956 1956
1957 device->al_writ_cnt = 1957 device->al_writ_cnt =
1958 device->bm_writ_cnt = 1958 device->bm_writ_cnt =
@@ -1970,7 +1970,7 @@ void drbd_device_cleanup(struct drbd_device *device)
1970 device->rs_mark_left[i] = 0; 1970 device->rs_mark_left[i] = 0;
1971 device->rs_mark_time[i] = 0; 1971 device->rs_mark_time[i] = 0;
1972 } 1972 }
1973 D_ASSERT(device->tconn->net_conf == NULL); 1973 D_ASSERT(device->connection->net_conf == NULL);
1974 1974
1975 drbd_set_my_capacity(device, 0); 1975 drbd_set_my_capacity(device, 0);
1976 if (device->bitmap) { 1976 if (device->bitmap) {
@@ -1990,7 +1990,7 @@ void drbd_device_cleanup(struct drbd_device *device)
1990 D_ASSERT(list_empty(&device->read_ee)); 1990 D_ASSERT(list_empty(&device->read_ee));
1991 D_ASSERT(list_empty(&device->net_ee)); 1991 D_ASSERT(list_empty(&device->net_ee));
1992 D_ASSERT(list_empty(&device->resync_reads)); 1992 D_ASSERT(list_empty(&device->resync_reads));
1993 D_ASSERT(list_empty(&device->tconn->sender_work.q)); 1993 D_ASSERT(list_empty(&device->connection->sender_work.q));
1994 D_ASSERT(list_empty(&device->resync_work.list)); 1994 D_ASSERT(list_empty(&device->resync_work.list));
1995 D_ASSERT(list_empty(&device->unplug_work.list)); 1995 D_ASSERT(list_empty(&device->unplug_work.list));
1996 D_ASSERT(list_empty(&device->go_diskless.list)); 1996 D_ASSERT(list_empty(&device->go_diskless.list));
@@ -2159,7 +2159,7 @@ static void drbd_release_all_peer_reqs(struct drbd_device *device)
2159void drbd_minor_destroy(struct kref *kref) 2159void drbd_minor_destroy(struct kref *kref)
2160{ 2160{
2161 struct drbd_device *device = container_of(kref, struct drbd_device, kref); 2161 struct drbd_device *device = container_of(kref, struct drbd_device, kref);
2162 struct drbd_tconn *tconn = device->tconn; 2162 struct drbd_connection *connection = device->connection;
2163 2163
2164 del_timer_sync(&device->request_timer); 2164 del_timer_sync(&device->request_timer);
2165 2165
@@ -2192,7 +2192,7 @@ void drbd_minor_destroy(struct kref *kref)
2192 kfree(device->rs_plan_s); 2192 kfree(device->rs_plan_s);
2193 kfree(device); 2193 kfree(device);
2194 2194
2195 kref_put(&tconn->kref, &conn_destroy); 2195 kref_put(&connection->kref, &conn_destroy);
2196} 2196}
2197 2197
2198/* One global retry thread, if we need to push back some bio and have it 2198/* One global retry thread, if we need to push back some bio and have it
@@ -2278,7 +2278,7 @@ static void drbd_cleanup(void)
2278{ 2278{
2279 unsigned int i; 2279 unsigned int i;
2280 struct drbd_device *device; 2280 struct drbd_device *device;
2281 struct drbd_tconn *tconn, *tmp; 2281 struct drbd_connection *connection, *tmp;
2282 2282
2283 unregister_reboot_notifier(&drbd_notifier); 2283 unregister_reboot_notifier(&drbd_notifier);
2284 2284
@@ -2300,7 +2300,7 @@ static void drbd_cleanup(void)
2300 2300
2301 idr_for_each_entry(&minors, device, i) { 2301 idr_for_each_entry(&minors, device, i) {
2302 idr_remove(&minors, device_to_minor(device)); 2302 idr_remove(&minors, device_to_minor(device));
2303 idr_remove(&device->tconn->volumes, device->vnr); 2303 idr_remove(&device->connection->volumes, device->vnr);
2304 destroy_workqueue(device->submit.wq); 2304 destroy_workqueue(device->submit.wq);
2305 del_gendisk(device->vdisk); 2305 del_gendisk(device->vdisk);
2306 /* synchronize_rcu(); No other threads running at this point */ 2306 /* synchronize_rcu(); No other threads running at this point */
@@ -2308,10 +2308,10 @@ static void drbd_cleanup(void)
2308 } 2308 }
2309 2309
2310 /* not _rcu since, no other updater anymore. Genl already unregistered */ 2310 /* not _rcu since, no other updater anymore. Genl already unregistered */
2311 list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) { 2311 list_for_each_entry_safe(connection, tmp, &drbd_connections, connections) {
2312 list_del(&tconn->all_tconn); /* not _rcu no proc, not other threads */ 2312 list_del(&connection->connections); /* not _rcu no proc, not other threads */
2313 /* synchronize_rcu(); */ 2313 /* synchronize_rcu(); */
2314 kref_put(&tconn->kref, &conn_destroy); 2314 kref_put(&connection->kref, &conn_destroy);
2315 } 2315 }
2316 2316
2317 drbd_destroy_mempools(); 2317 drbd_destroy_mempools();
@@ -2343,7 +2343,7 @@ static int drbd_congested(void *congested_data, int bdi_bits)
2343 goto out; 2343 goto out;
2344 } 2344 }
2345 2345
2346 if (test_bit(CALLBACK_PENDING, &device->tconn->flags)) { 2346 if (test_bit(CALLBACK_PENDING, &device->connection->flags)) {
2347 r |= (1 << BDI_async_congested); 2347 r |= (1 << BDI_async_congested);
2348 /* Without good local data, we would need to read from remote, 2348 /* Without good local data, we would need to read from remote,
2349 * and that would need the worker thread as well, which is 2349 * and that would need the worker thread as well, which is
@@ -2367,7 +2367,7 @@ static int drbd_congested(void *congested_data, int bdi_bits)
2367 reason = 'b'; 2367 reason = 'b';
2368 } 2368 }
2369 2369
2370 if (bdi_bits & (1 << BDI_async_congested) && test_bit(NET_CONGESTED, &device->tconn->flags)) { 2370 if (bdi_bits & (1 << BDI_async_congested) && test_bit(NET_CONGESTED, &device->connection->flags)) {
2371 r |= (1 << BDI_async_congested); 2371 r |= (1 << BDI_async_congested);
2372 reason = reason == 'b' ? 'a' : 'n'; 2372 reason = reason == 'b' ? 'a' : 'n';
2373 } 2373 }
@@ -2384,45 +2384,45 @@ static void drbd_init_workqueue(struct drbd_work_queue* wq)
2384 init_waitqueue_head(&wq->q_wait); 2384 init_waitqueue_head(&wq->q_wait);
2385} 2385}
2386 2386
2387struct drbd_tconn *conn_get_by_name(const char *name) 2387struct drbd_connection *conn_get_by_name(const char *name)
2388{ 2388{
2389 struct drbd_tconn *tconn; 2389 struct drbd_connection *connection;
2390 2390
2391 if (!name || !name[0]) 2391 if (!name || !name[0])
2392 return NULL; 2392 return NULL;
2393 2393
2394 rcu_read_lock(); 2394 rcu_read_lock();
2395 list_for_each_entry_rcu(tconn, &drbd_tconns, all_tconn) { 2395 list_for_each_entry_rcu(connection, &drbd_connections, connections) {
2396 if (!strcmp(tconn->name, name)) { 2396 if (!strcmp(connection->name, name)) {
2397 kref_get(&tconn->kref); 2397 kref_get(&connection->kref);
2398 goto found; 2398 goto found;
2399 } 2399 }
2400 } 2400 }
2401 tconn = NULL; 2401 connection = NULL;
2402found: 2402found:
2403 rcu_read_unlock(); 2403 rcu_read_unlock();
2404 return tconn; 2404 return connection;
2405} 2405}
2406 2406
2407struct drbd_tconn *conn_get_by_addrs(void *my_addr, int my_addr_len, 2407struct drbd_connection *conn_get_by_addrs(void *my_addr, int my_addr_len,
2408 void *peer_addr, int peer_addr_len) 2408 void *peer_addr, int peer_addr_len)
2409{ 2409{
2410 struct drbd_tconn *tconn; 2410 struct drbd_connection *connection;
2411 2411
2412 rcu_read_lock(); 2412 rcu_read_lock();
2413 list_for_each_entry_rcu(tconn, &drbd_tconns, all_tconn) { 2413 list_for_each_entry_rcu(connection, &drbd_connections, connections) {
2414 if (tconn->my_addr_len == my_addr_len && 2414 if (connection->my_addr_len == my_addr_len &&
2415 tconn->peer_addr_len == peer_addr_len && 2415 connection->peer_addr_len == peer_addr_len &&
2416 !memcmp(&tconn->my_addr, my_addr, my_addr_len) && 2416 !memcmp(&connection->my_addr, my_addr, my_addr_len) &&
2417 !memcmp(&tconn->peer_addr, peer_addr, peer_addr_len)) { 2417 !memcmp(&connection->peer_addr, peer_addr, peer_addr_len)) {
2418 kref_get(&tconn->kref); 2418 kref_get(&connection->kref);
2419 goto found; 2419 goto found;
2420 } 2420 }
2421 } 2421 }
2422 tconn = NULL; 2422 connection = NULL;
2423found: 2423found:
2424 rcu_read_unlock(); 2424 rcu_read_unlock();
2425 return tconn; 2425 return connection;
2426} 2426}
2427 2427
2428static int drbd_alloc_socket(struct drbd_socket *socket) 2428static int drbd_alloc_socket(struct drbd_socket *socket)
@@ -2442,28 +2442,28 @@ static void drbd_free_socket(struct drbd_socket *socket)
2442 free_page((unsigned long) socket->rbuf); 2442 free_page((unsigned long) socket->rbuf);
2443} 2443}
2444 2444
2445void conn_free_crypto(struct drbd_tconn *tconn) 2445void conn_free_crypto(struct drbd_connection *connection)
2446{ 2446{
2447 drbd_free_sock(tconn); 2447 drbd_free_sock(connection);
2448 2448
2449 crypto_free_hash(tconn->csums_tfm); 2449 crypto_free_hash(connection->csums_tfm);
2450 crypto_free_hash(tconn->verify_tfm); 2450 crypto_free_hash(connection->verify_tfm);
2451 crypto_free_hash(tconn->cram_hmac_tfm); 2451 crypto_free_hash(connection->cram_hmac_tfm);
2452 crypto_free_hash(tconn->integrity_tfm); 2452 crypto_free_hash(connection->integrity_tfm);
2453 crypto_free_hash(tconn->peer_integrity_tfm); 2453 crypto_free_hash(connection->peer_integrity_tfm);
2454 kfree(tconn->int_dig_in); 2454 kfree(connection->int_dig_in);
2455 kfree(tconn->int_dig_vv); 2455 kfree(connection->int_dig_vv);
2456 2456
2457 tconn->csums_tfm = NULL; 2457 connection->csums_tfm = NULL;
2458 tconn->verify_tfm = NULL; 2458 connection->verify_tfm = NULL;
2459 tconn->cram_hmac_tfm = NULL; 2459 connection->cram_hmac_tfm = NULL;
2460 tconn->integrity_tfm = NULL; 2460 connection->integrity_tfm = NULL;
2461 tconn->peer_integrity_tfm = NULL; 2461 connection->peer_integrity_tfm = NULL;
2462 tconn->int_dig_in = NULL; 2462 connection->int_dig_in = NULL;
2463 tconn->int_dig_vv = NULL; 2463 connection->int_dig_vv = NULL;
2464} 2464}
2465 2465
2466int set_resource_options(struct drbd_tconn *tconn, struct res_opts *res_opts) 2466int set_resource_options(struct drbd_connection *connection, struct res_opts *res_opts)
2467{ 2467{
2468 cpumask_var_t new_cpu_mask; 2468 cpumask_var_t new_cpu_mask;
2469 int err; 2469 int err;
@@ -2481,18 +2481,18 @@ int set_resource_options(struct drbd_tconn *tconn, struct res_opts *res_opts)
2481 err = bitmap_parse(res_opts->cpu_mask, 32, 2481 err = bitmap_parse(res_opts->cpu_mask, 32,
2482 cpumask_bits(new_cpu_mask), nr_cpu_ids); 2482 cpumask_bits(new_cpu_mask), nr_cpu_ids);
2483 if (err) { 2483 if (err) {
2484 conn_warn(tconn, "bitmap_parse() failed with %d\n", err); 2484 conn_warn(connection, "bitmap_parse() failed with %d\n", err);
2485 /* retcode = ERR_CPU_MASK_PARSE; */ 2485 /* retcode = ERR_CPU_MASK_PARSE; */
2486 goto fail; 2486 goto fail;
2487 } 2487 }
2488 } 2488 }
2489 tconn->res_opts = *res_opts; 2489 connection->res_opts = *res_opts;
2490 if (!cpumask_equal(tconn->cpu_mask, new_cpu_mask)) { 2490 if (!cpumask_equal(connection->cpu_mask, new_cpu_mask)) {
2491 cpumask_copy(tconn->cpu_mask, new_cpu_mask); 2491 cpumask_copy(connection->cpu_mask, new_cpu_mask);
2492 drbd_calc_cpu_mask(tconn); 2492 drbd_calc_cpu_mask(connection);
2493 tconn->receiver.reset_cpu_mask = 1; 2493 connection->receiver.reset_cpu_mask = 1;
2494 tconn->asender.reset_cpu_mask = 1; 2494 connection->asender.reset_cpu_mask = 1;
2495 tconn->worker.reset_cpu_mask = 1; 2495 connection->worker.reset_cpu_mask = 1;
2496 } 2496 }
2497 err = 0; 2497 err = 0;
2498 2498
@@ -2503,92 +2503,92 @@ fail:
2503} 2503}
2504 2504
2505/* caller must be under genl_lock() */ 2505/* caller must be under genl_lock() */
2506struct drbd_tconn *conn_create(const char *name, struct res_opts *res_opts) 2506struct drbd_connection *conn_create(const char *name, struct res_opts *res_opts)
2507{ 2507{
2508 struct drbd_tconn *tconn; 2508 struct drbd_connection *connection;
2509 2509
2510 tconn = kzalloc(sizeof(struct drbd_tconn), GFP_KERNEL); 2510 connection = kzalloc(sizeof(struct drbd_connection), GFP_KERNEL);
2511 if (!tconn) 2511 if (!connection)
2512 return NULL; 2512 return NULL;
2513 2513
2514 tconn->name = kstrdup(name, GFP_KERNEL); 2514 connection->name = kstrdup(name, GFP_KERNEL);
2515 if (!tconn->name) 2515 if (!connection->name)
2516 goto fail; 2516 goto fail;
2517 2517
2518 if (drbd_alloc_socket(&tconn->data)) 2518 if (drbd_alloc_socket(&connection->data))
2519 goto fail; 2519 goto fail;
2520 if (drbd_alloc_socket(&tconn->meta)) 2520 if (drbd_alloc_socket(&connection->meta))
2521 goto fail; 2521 goto fail;
2522 2522
2523 if (!zalloc_cpumask_var(&tconn->cpu_mask, GFP_KERNEL)) 2523 if (!zalloc_cpumask_var(&connection->cpu_mask, GFP_KERNEL))
2524 goto fail; 2524 goto fail;
2525 2525
2526 if (set_resource_options(tconn, res_opts)) 2526 if (set_resource_options(connection, res_opts))
2527 goto fail; 2527 goto fail;
2528 2528
2529 tconn->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL); 2529 connection->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
2530 if (!tconn->current_epoch) 2530 if (!connection->current_epoch)
2531 goto fail; 2531 goto fail;
2532 2532
2533 INIT_LIST_HEAD(&tconn->transfer_log); 2533 INIT_LIST_HEAD(&connection->transfer_log);
2534 2534
2535 INIT_LIST_HEAD(&tconn->current_epoch->list); 2535 INIT_LIST_HEAD(&connection->current_epoch->list);
2536 tconn->epochs = 1; 2536 connection->epochs = 1;
2537 spin_lock_init(&tconn->epoch_lock); 2537 spin_lock_init(&connection->epoch_lock);
2538 tconn->write_ordering = WO_bdev_flush; 2538 connection->write_ordering = WO_bdev_flush;
2539 2539
2540 tconn->send.seen_any_write_yet = false; 2540 connection->send.seen_any_write_yet = false;
2541 tconn->send.current_epoch_nr = 0; 2541 connection->send.current_epoch_nr = 0;
2542 tconn->send.current_epoch_writes = 0; 2542 connection->send.current_epoch_writes = 0;
2543 2543
2544 tconn->cstate = C_STANDALONE; 2544 connection->cstate = C_STANDALONE;
2545 mutex_init(&tconn->cstate_mutex); 2545 mutex_init(&connection->cstate_mutex);
2546 spin_lock_init(&tconn->req_lock); 2546 spin_lock_init(&connection->req_lock);
2547 mutex_init(&tconn->conf_update); 2547 mutex_init(&connection->conf_update);
2548 init_waitqueue_head(&tconn->ping_wait); 2548 init_waitqueue_head(&connection->ping_wait);
2549 idr_init(&tconn->volumes); 2549 idr_init(&connection->volumes);
2550 2550
2551 drbd_init_workqueue(&tconn->sender_work); 2551 drbd_init_workqueue(&connection->sender_work);
2552 mutex_init(&tconn->data.mutex); 2552 mutex_init(&connection->data.mutex);
2553 mutex_init(&tconn->meta.mutex); 2553 mutex_init(&connection->meta.mutex);
2554 2554
2555 drbd_thread_init(tconn, &tconn->receiver, drbdd_init, "receiver"); 2555 drbd_thread_init(connection, &connection->receiver, drbdd_init, "receiver");
2556 drbd_thread_init(tconn, &tconn->worker, drbd_worker, "worker"); 2556 drbd_thread_init(connection, &connection->worker, drbd_worker, "worker");
2557 drbd_thread_init(tconn, &tconn->asender, drbd_asender, "asender"); 2557 drbd_thread_init(connection, &connection->asender, drbd_asender, "asender");
2558 2558
2559 kref_init(&tconn->kref); 2559 kref_init(&connection->kref);
2560 list_add_tail_rcu(&tconn->all_tconn, &drbd_tconns); 2560 list_add_tail_rcu(&connection->connections, &drbd_connections);
2561 2561
2562 return tconn; 2562 return connection;
2563 2563
2564fail: 2564fail:
2565 kfree(tconn->current_epoch); 2565 kfree(connection->current_epoch);
2566 free_cpumask_var(tconn->cpu_mask); 2566 free_cpumask_var(connection->cpu_mask);
2567 drbd_free_socket(&tconn->meta); 2567 drbd_free_socket(&connection->meta);
2568 drbd_free_socket(&tconn->data); 2568 drbd_free_socket(&connection->data);
2569 kfree(tconn->name); 2569 kfree(connection->name);
2570 kfree(tconn); 2570 kfree(connection);
2571 2571
2572 return NULL; 2572 return NULL;
2573} 2573}
2574 2574
2575void conn_destroy(struct kref *kref) 2575void conn_destroy(struct kref *kref)
2576{ 2576{
2577 struct drbd_tconn *tconn = container_of(kref, struct drbd_tconn, kref); 2577 struct drbd_connection *connection = container_of(kref, struct drbd_connection, kref);
2578 2578
2579 if (atomic_read(&tconn->current_epoch->epoch_size) != 0) 2579 if (atomic_read(&connection->current_epoch->epoch_size) != 0)
2580 conn_err(tconn, "epoch_size:%d\n", atomic_read(&tconn->current_epoch->epoch_size)); 2580 conn_err(connection, "epoch_size:%d\n", atomic_read(&connection->current_epoch->epoch_size));
2581 kfree(tconn->current_epoch); 2581 kfree(connection->current_epoch);
2582 2582
2583 idr_destroy(&tconn->volumes); 2583 idr_destroy(&connection->volumes);
2584 2584
2585 free_cpumask_var(tconn->cpu_mask); 2585 free_cpumask_var(connection->cpu_mask);
2586 drbd_free_socket(&tconn->meta); 2586 drbd_free_socket(&connection->meta);
2587 drbd_free_socket(&tconn->data); 2587 drbd_free_socket(&connection->data);
2588 kfree(tconn->name); 2588 kfree(connection->name);
2589 kfree(tconn->int_dig_in); 2589 kfree(connection->int_dig_in);
2590 kfree(tconn->int_dig_vv); 2590 kfree(connection->int_dig_vv);
2591 kfree(tconn); 2591 kfree(connection);
2592} 2592}
2593 2593
2594static int init_submitter(struct drbd_device *device) 2594static int init_submitter(struct drbd_device *device)
@@ -2606,7 +2606,7 @@ static int init_submitter(struct drbd_device *device)
2606 return 0; 2606 return 0;
2607} 2607}
2608 2608
2609enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor, int vnr) 2609enum drbd_ret_code conn_new_minor(struct drbd_connection *connection, unsigned int minor, int vnr)
2610{ 2610{
2611 struct drbd_device *device; 2611 struct drbd_device *device;
2612 struct gendisk *disk; 2612 struct gendisk *disk;
@@ -2624,8 +2624,8 @@ enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor,
2624 if (!device) 2624 if (!device)
2625 return ERR_NOMEM; 2625 return ERR_NOMEM;
2626 2626
2627 kref_get(&tconn->kref); 2627 kref_get(&connection->kref);
2628 device->tconn = tconn; 2628 device->connection = connection;
2629 2629
2630 device->minor = minor; 2630 device->minor = minor;
2631 device->vnr = vnr; 2631 device->vnr = vnr;
@@ -2666,7 +2666,7 @@ enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor,
2666 blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8); 2666 blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
2667 blk_queue_bounce_limit(q, BLK_BOUNCE_ANY); 2667 blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
2668 blk_queue_merge_bvec(q, drbd_merge_bvec); 2668 blk_queue_merge_bvec(q, drbd_merge_bvec);
2669 q->queue_lock = &device->tconn->req_lock; /* needed since we use */ 2669 q->queue_lock = &device->connection->req_lock; /* needed since we use */
2670 2670
2671 device->md_io_page = alloc_page(GFP_KERNEL); 2671 device->md_io_page = alloc_page(GFP_KERNEL);
2672 if (!device->md_io_page) 2672 if (!device->md_io_page)
@@ -2686,7 +2686,7 @@ enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor,
2686 goto out_no_minor_idr; 2686 goto out_no_minor_idr;
2687 } 2687 }
2688 2688
2689 vnr_got = idr_alloc(&tconn->volumes, device, vnr, vnr + 1, GFP_KERNEL); 2689 vnr_got = idr_alloc(&connection->volumes, device, vnr, vnr + 1, GFP_KERNEL);
2690 if (vnr_got < 0) { 2690 if (vnr_got < 0) {
2691 if (vnr_got == -ENOSPC) { 2691 if (vnr_got == -ENOSPC) {
2692 err = ERR_INVALID_REQUEST; 2692 err = ERR_INVALID_REQUEST;
@@ -2705,14 +2705,14 @@ enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor,
2705 kref_init(&device->kref); /* one ref for both idrs and the the add_disk */ 2705 kref_init(&device->kref); /* one ref for both idrs and the the add_disk */
2706 2706
2707 /* inherit the connection state */ 2707 /* inherit the connection state */
2708 device->state.conn = tconn->cstate; 2708 device->state.conn = connection->cstate;
2709 if (device->state.conn == C_WF_REPORT_PARAMS) 2709 if (device->state.conn == C_WF_REPORT_PARAMS)
2710 drbd_connected(device); 2710 drbd_connected(device);
2711 2711
2712 return NO_ERROR; 2712 return NO_ERROR;
2713 2713
2714out_idr_remove_vol: 2714out_idr_remove_vol:
2715 idr_remove(&tconn->volumes, vnr_got); 2715 idr_remove(&connection->volumes, vnr_got);
2716out_idr_remove_minor: 2716out_idr_remove_minor:
2717 idr_remove(&minors, minor_got); 2717 idr_remove(&minors, minor_got);
2718 synchronize_rcu(); 2718 synchronize_rcu();
@@ -2726,7 +2726,7 @@ out_no_disk:
2726 blk_cleanup_queue(q); 2726 blk_cleanup_queue(q);
2727out_no_q: 2727out_no_q:
2728 kfree(device); 2728 kfree(device);
2729 kref_put(&tconn->kref, &conn_destroy); 2729 kref_put(&connection->kref, &conn_destroy);
2730 return err; 2730 return err;
2731} 2731}
2732 2732
@@ -2763,7 +2763,7 @@ int __init drbd_init(void)
2763 idr_init(&minors); 2763 idr_init(&minors);
2764 2764
2765 rwlock_init(&global_state_lock); 2765 rwlock_init(&global_state_lock);
2766 INIT_LIST_HEAD(&drbd_tconns); 2766 INIT_LIST_HEAD(&drbd_connections);
2767 2767
2768 err = drbd_genl_register(); 2768 err = drbd_genl_register();
2769 if (err) { 2769 if (err) {
@@ -2821,33 +2821,33 @@ void drbd_free_bc(struct drbd_backing_dev *ldev)
2821 kfree(ldev); 2821 kfree(ldev);
2822} 2822}
2823 2823
2824void drbd_free_sock(struct drbd_tconn *tconn) 2824void drbd_free_sock(struct drbd_connection *connection)
2825{ 2825{
2826 if (tconn->data.socket) { 2826 if (connection->data.socket) {
2827 mutex_lock(&tconn->data.mutex); 2827 mutex_lock(&connection->data.mutex);
2828 kernel_sock_shutdown(tconn->data.socket, SHUT_RDWR); 2828 kernel_sock_shutdown(connection->data.socket, SHUT_RDWR);
2829 sock_release(tconn->data.socket); 2829 sock_release(connection->data.socket);
2830 tconn->data.socket = NULL; 2830 connection->data.socket = NULL;
2831 mutex_unlock(&tconn->data.mutex); 2831 mutex_unlock(&connection->data.mutex);
2832 } 2832 }
2833 if (tconn->meta.socket) { 2833 if (connection->meta.socket) {
2834 mutex_lock(&tconn->meta.mutex); 2834 mutex_lock(&connection->meta.mutex);
2835 kernel_sock_shutdown(tconn->meta.socket, SHUT_RDWR); 2835 kernel_sock_shutdown(connection->meta.socket, SHUT_RDWR);
2836 sock_release(tconn->meta.socket); 2836 sock_release(connection->meta.socket);
2837 tconn->meta.socket = NULL; 2837 connection->meta.socket = NULL;
2838 mutex_unlock(&tconn->meta.mutex); 2838 mutex_unlock(&connection->meta.mutex);
2839 } 2839 }
2840} 2840}
2841 2841
2842/* meta data management */ 2842/* meta data management */
2843 2843
2844void conn_md_sync(struct drbd_tconn *tconn) 2844void conn_md_sync(struct drbd_connection *connection)
2845{ 2845{
2846 struct drbd_device *device; 2846 struct drbd_device *device;
2847 int vnr; 2847 int vnr;
2848 2848
2849 rcu_read_lock(); 2849 rcu_read_lock();
2850 idr_for_each_entry(&tconn->volumes, device, vnr) { 2850 idr_for_each_entry(&connection->volumes, device, vnr) {
2851 kref_get(&device->kref); 2851 kref_get(&device->kref);
2852 rcu_read_unlock(); 2852 rcu_read_unlock();
2853 drbd_md_sync(device); 2853 drbd_md_sync(device);
@@ -3172,14 +3172,14 @@ int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev)
3172 3172
3173 rv = NO_ERROR; 3173 rv = NO_ERROR;
3174 3174
3175 spin_lock_irq(&device->tconn->req_lock); 3175 spin_lock_irq(&device->connection->req_lock);
3176 if (device->state.conn < C_CONNECTED) { 3176 if (device->state.conn < C_CONNECTED) {
3177 unsigned int peer; 3177 unsigned int peer;
3178 peer = be32_to_cpu(buffer->la_peer_max_bio_size); 3178 peer = be32_to_cpu(buffer->la_peer_max_bio_size);
3179 peer = max(peer, DRBD_MAX_BIO_SIZE_SAFE); 3179 peer = max(peer, DRBD_MAX_BIO_SIZE_SAFE);
3180 device->peer_max_bio_size = peer; 3180 device->peer_max_bio_size = peer;
3181 } 3181 }
3182 spin_unlock_irq(&device->tconn->req_lock); 3182 spin_unlock_irq(&device->connection->req_lock);
3183 3183
3184 err: 3184 err:
3185 drbd_md_put_buffer(device); 3185 drbd_md_put_buffer(device);
@@ -3454,7 +3454,7 @@ void drbd_queue_bitmap_io(struct drbd_device *device,
3454 void (*done)(struct drbd_device *, int), 3454 void (*done)(struct drbd_device *, int),
3455 char *why, enum bm_flag flags) 3455 char *why, enum bm_flag flags)
3456{ 3456{
3457 D_ASSERT(current == device->tconn->worker.task); 3457 D_ASSERT(current == device->connection->worker.task);
3458 3458
3459 D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &device->flags)); 3459 D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &device->flags));
3460 D_ASSERT(!test_bit(BITMAP_IO, &device->flags)); 3460 D_ASSERT(!test_bit(BITMAP_IO, &device->flags));
@@ -3468,13 +3468,13 @@ void drbd_queue_bitmap_io(struct drbd_device *device,
3468 device->bm_io_work.why = why; 3468 device->bm_io_work.why = why;
3469 device->bm_io_work.flags = flags; 3469 device->bm_io_work.flags = flags;
3470 3470
3471 spin_lock_irq(&device->tconn->req_lock); 3471 spin_lock_irq(&device->connection->req_lock);
3472 set_bit(BITMAP_IO, &device->flags); 3472 set_bit(BITMAP_IO, &device->flags);
3473 if (atomic_read(&device->ap_bio_cnt) == 0) { 3473 if (atomic_read(&device->ap_bio_cnt) == 0) {
3474 if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags)) 3474 if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
3475 drbd_queue_work(&device->tconn->sender_work, &device->bm_io_work.w); 3475 drbd_queue_work(&device->connection->sender_work, &device->bm_io_work.w);
3476 } 3476 }
3477 spin_unlock_irq(&device->tconn->req_lock); 3477 spin_unlock_irq(&device->connection->req_lock);
3478} 3478}
3479 3479
3480/** 3480/**
@@ -3491,7 +3491,7 @@ int drbd_bitmap_io(struct drbd_device *device, int (*io_fn)(struct drbd_device *
3491{ 3491{
3492 int rv; 3492 int rv;
3493 3493
3494 D_ASSERT(current != device->tconn->worker.task); 3494 D_ASSERT(current != device->connection->worker.task);
3495 3495
3496 if ((flags & BM_LOCKED_SET_ALLOWED) == 0) 3496 if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
3497 drbd_suspend_io(device); 3497 drbd_suspend_io(device);
@@ -3532,7 +3532,7 @@ static void md_sync_timer_fn(unsigned long data)
3532 3532
3533 /* must not double-queue! */ 3533 /* must not double-queue! */
3534 if (list_empty(&device->md_sync_work.list)) 3534 if (list_empty(&device->md_sync_work.list))
3535 drbd_queue_work_front(&device->tconn->sender_work, &device->md_sync_work); 3535 drbd_queue_work_front(&device->connection->sender_work, &device->md_sync_work);
3536} 3536}
3537 3537
3538static int w_md_sync(struct drbd_work *w, int unused) 3538static int w_md_sync(struct drbd_work *w, int unused)
@@ -3631,7 +3631,7 @@ int drbd_wait_misc(struct drbd_device *device, struct drbd_interval *i)
3631 long timeout; 3631 long timeout;
3632 3632
3633 rcu_read_lock(); 3633 rcu_read_lock();
3634 nc = rcu_dereference(device->tconn->net_conf); 3634 nc = rcu_dereference(device->connection->net_conf);
3635 if (!nc) { 3635 if (!nc) {
3636 rcu_read_unlock(); 3636 rcu_read_unlock();
3637 return -ETIMEDOUT; 3637 return -ETIMEDOUT;
@@ -3642,10 +3642,10 @@ int drbd_wait_misc(struct drbd_device *device, struct drbd_interval *i)
3642 /* Indicate to wake up device->misc_wait on progress. */ 3642 /* Indicate to wake up device->misc_wait on progress. */
3643 i->waiting = true; 3643 i->waiting = true;
3644 prepare_to_wait(&device->misc_wait, &wait, TASK_INTERRUPTIBLE); 3644 prepare_to_wait(&device->misc_wait, &wait, TASK_INTERRUPTIBLE);
3645 spin_unlock_irq(&device->tconn->req_lock); 3645 spin_unlock_irq(&device->connection->req_lock);
3646 timeout = schedule_timeout(timeout); 3646 timeout = schedule_timeout(timeout);
3647 finish_wait(&device->misc_wait, &wait); 3647 finish_wait(&device->misc_wait, &wait);
3648 spin_lock_irq(&device->tconn->req_lock); 3648 spin_lock_irq(&device->connection->req_lock);
3649 if (!timeout || device->state.conn < C_CONNECTED) 3649 if (!timeout || device->state.conn < C_CONNECTED)
3650 return -ETIMEDOUT; 3650 return -ETIMEDOUT;
3651 if (signal_pending(current)) 3651 if (signal_pending(current))
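drbd_wait_misc() sleeps with req_lock temporarily dropped, relying on the prepare_to_wait()/finish_wait() pair so that a wake-up arriving between the unlock and schedule_timeout() is not lost. The idiom, reduced to a self-contained sketch (the done() predicate and names are illustrative; the real function also gives up when the connection state drops below C_CONNECTED):

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

/* Wait for done(arg), dropping and re-taking the caller's lock around
 * the sleep.  Enter and leave with the lock held.  Returns 0 on
 * progress, -ETIMEDOUT or -ERESTARTSYS otherwise. */
static int wait_for_progress(wait_queue_head_t *wq, spinlock_t *lock,
			     bool (*done)(void *), void *arg, long timeout)
{
	DEFINE_WAIT(wait);

	while (!done(arg)) {
		prepare_to_wait(wq, &wait, TASK_INTERRUPTIBLE);
		spin_unlock_irq(lock);
		timeout = schedule_timeout(timeout);
		finish_wait(wq, &wait);
		spin_lock_irq(lock);
		if (!timeout)
			return -ETIMEDOUT;
		if (signal_pending(current))
			return -ERESTARTSYS;
	}
	return 0;
}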
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index bc160ae80798..1b5b7ea7f7cc 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -104,7 +104,7 @@ static struct drbd_config_context {
104 struct drbd_genlmsghdr *reply_dh; 104 struct drbd_genlmsghdr *reply_dh;
105 /* resolved from attributes, if possible */ 105 /* resolved from attributes, if possible */
106 struct drbd_device *device; 106 struct drbd_device *device;
107 struct drbd_tconn *tconn; 107 struct drbd_connection *connection;
108} adm_ctx; 108} adm_ctx;
109 109
110static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info) 110static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
@@ -203,9 +203,9 @@ static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
203 adm_ctx.my_addr = nested_attr_tb[__nla_type(T_ctx_my_addr)]; 203 adm_ctx.my_addr = nested_attr_tb[__nla_type(T_ctx_my_addr)];
204 adm_ctx.peer_addr = nested_attr_tb[__nla_type(T_ctx_peer_addr)]; 204 adm_ctx.peer_addr = nested_attr_tb[__nla_type(T_ctx_peer_addr)];
205 if ((adm_ctx.my_addr && 205 if ((adm_ctx.my_addr &&
206 nla_len(adm_ctx.my_addr) > sizeof(adm_ctx.tconn->my_addr)) || 206 nla_len(adm_ctx.my_addr) > sizeof(adm_ctx.connection->my_addr)) ||
207 (adm_ctx.peer_addr && 207 (adm_ctx.peer_addr &&
208 nla_len(adm_ctx.peer_addr) > sizeof(adm_ctx.tconn->peer_addr))) { 208 nla_len(adm_ctx.peer_addr) > sizeof(adm_ctx.connection->peer_addr))) {
209 err = -EINVAL; 209 err = -EINVAL;
210 goto fail; 210 goto fail;
211 } 211 }
@@ -213,19 +213,19 @@ static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
213 213
214 adm_ctx.minor = d_in->minor; 214 adm_ctx.minor = d_in->minor;
215 adm_ctx.device = minor_to_device(d_in->minor); 215 adm_ctx.device = minor_to_device(d_in->minor);
216 adm_ctx.tconn = conn_get_by_name(adm_ctx.resource_name); 216 adm_ctx.connection = conn_get_by_name(adm_ctx.resource_name);
217 217
218 if (!adm_ctx.device && (flags & DRBD_ADM_NEED_MINOR)) { 218 if (!adm_ctx.device && (flags & DRBD_ADM_NEED_MINOR)) {
219 drbd_msg_put_info("unknown minor"); 219 drbd_msg_put_info("unknown minor");
220 return ERR_MINOR_INVALID; 220 return ERR_MINOR_INVALID;
221 } 221 }
222 if (!adm_ctx.tconn && (flags & DRBD_ADM_NEED_RESOURCE)) { 222 if (!adm_ctx.connection && (flags & DRBD_ADM_NEED_RESOURCE)) {
223 drbd_msg_put_info("unknown resource"); 223 drbd_msg_put_info("unknown resource");
224 return ERR_INVALID_REQUEST; 224 return ERR_INVALID_REQUEST;
225 } 225 }
226 226
227 if (flags & DRBD_ADM_NEED_CONNECTION) { 227 if (flags & DRBD_ADM_NEED_CONNECTION) {
228 if (adm_ctx.tconn && !(flags & DRBD_ADM_NEED_RESOURCE)) { 228 if (adm_ctx.connection && !(flags & DRBD_ADM_NEED_RESOURCE)) {
229 drbd_msg_put_info("no resource name expected"); 229 drbd_msg_put_info("no resource name expected");
230 return ERR_INVALID_REQUEST; 230 return ERR_INVALID_REQUEST;
231 } 231 }
@@ -234,22 +234,22 @@ static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
234 return ERR_INVALID_REQUEST; 234 return ERR_INVALID_REQUEST;
235 } 235 }
236 if (adm_ctx.my_addr && adm_ctx.peer_addr) 236 if (adm_ctx.my_addr && adm_ctx.peer_addr)
237 adm_ctx.tconn = conn_get_by_addrs(nla_data(adm_ctx.my_addr), 237 adm_ctx.connection = conn_get_by_addrs(nla_data(adm_ctx.my_addr),
238 nla_len(adm_ctx.my_addr), 238 nla_len(adm_ctx.my_addr),
239 nla_data(adm_ctx.peer_addr), 239 nla_data(adm_ctx.peer_addr),
240 nla_len(adm_ctx.peer_addr)); 240 nla_len(adm_ctx.peer_addr));
241 if (!adm_ctx.tconn) { 241 if (!adm_ctx.connection) {
242 drbd_msg_put_info("unknown connection"); 242 drbd_msg_put_info("unknown connection");
243 return ERR_INVALID_REQUEST; 243 return ERR_INVALID_REQUEST;
244 } 244 }
245 } 245 }
246 246
247 /* some more paranoia, if the request was over-determined */ 247 /* some more paranoia, if the request was over-determined */
248 if (adm_ctx.device && adm_ctx.tconn && 248 if (adm_ctx.device && adm_ctx.connection &&
249 adm_ctx.device->tconn != adm_ctx.tconn) { 249 adm_ctx.device->connection != adm_ctx.connection) {
250 pr_warning("request: minor=%u, resource=%s; but that minor belongs to connection %s\n", 250 pr_warning("request: minor=%u, resource=%s; but that minor belongs to connection %s\n",
251 adm_ctx.minor, adm_ctx.resource_name, 251 adm_ctx.minor, adm_ctx.resource_name,
252 adm_ctx.device->tconn->name); 252 adm_ctx.device->connection->name);
253 drbd_msg_put_info("minor exists in different resource"); 253 drbd_msg_put_info("minor exists in different resource");
254 return ERR_INVALID_REQUEST; 254 return ERR_INVALID_REQUEST;
255 } 255 }
@@ -258,7 +258,7 @@ static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
258 adm_ctx.volume != adm_ctx.device->vnr) { 258 adm_ctx.volume != adm_ctx.device->vnr) {
259 pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n", 259 pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
260 adm_ctx.minor, adm_ctx.volume, 260 adm_ctx.minor, adm_ctx.volume,
261 adm_ctx.device->vnr, adm_ctx.device->tconn->name); 261 adm_ctx.device->vnr, adm_ctx.device->connection->name);
262 drbd_msg_put_info("minor exists as different volume"); 262 drbd_msg_put_info("minor exists as different volume");
263 return ERR_INVALID_REQUEST; 263 return ERR_INVALID_REQUEST;
264 } 264 }
@@ -273,9 +273,9 @@ fail:
273 273
274static int drbd_adm_finish(struct genl_info *info, int retcode) 274static int drbd_adm_finish(struct genl_info *info, int retcode)
275{ 275{
276 if (adm_ctx.tconn) { 276 if (adm_ctx.connection) {
277 kref_put(&adm_ctx.tconn->kref, &conn_destroy); 277 kref_put(&adm_ctx.connection->kref, &conn_destroy);
278 adm_ctx.tconn = NULL; 278 adm_ctx.connection = NULL;
279 } 279 }
280 280
281 if (!adm_ctx.reply_skb) 281 if (!adm_ctx.reply_skb)
@@ -286,29 +286,29 @@ static int drbd_adm_finish(struct genl_info *info, int retcode)
286 return 0; 286 return 0;
287} 287}
288 288
289static void setup_khelper_env(struct drbd_tconn *tconn, char **envp) 289static void setup_khelper_env(struct drbd_connection *connection, char **envp)
290{ 290{
291 char *afs; 291 char *afs;
292 292
293 /* FIXME: A future version will not allow this case. */ 293 /* FIXME: A future version will not allow this case. */
294 if (tconn->my_addr_len == 0 || tconn->peer_addr_len == 0) 294 if (connection->my_addr_len == 0 || connection->peer_addr_len == 0)
295 return; 295 return;
296 296
297 switch (((struct sockaddr *)&tconn->peer_addr)->sa_family) { 297 switch (((struct sockaddr *)&connection->peer_addr)->sa_family) {
298 case AF_INET6: 298 case AF_INET6:
299 afs = "ipv6"; 299 afs = "ipv6";
300 snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6", 300 snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
301 &((struct sockaddr_in6 *)&tconn->peer_addr)->sin6_addr); 301 &((struct sockaddr_in6 *)&connection->peer_addr)->sin6_addr);
302 break; 302 break;
303 case AF_INET: 303 case AF_INET:
304 afs = "ipv4"; 304 afs = "ipv4";
305 snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4", 305 snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
306 &((struct sockaddr_in *)&tconn->peer_addr)->sin_addr); 306 &((struct sockaddr_in *)&connection->peer_addr)->sin_addr);
307 break; 307 break;
308 default: 308 default:
309 afs = "ssocks"; 309 afs = "ssocks";
310 snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4", 310 snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
311 &((struct sockaddr_in *)&tconn->peer_addr)->sin_addr); 311 &((struct sockaddr_in *)&connection->peer_addr)->sin_addr);
312 } 312 }
313 snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs); 313 snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
314} 314}
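setup_khelper_env() dispatches on the stored sockaddr's sa_family and leans on the kernel's %pI4/%pI6 printk extensions to render the peer address into the helper environment; note that the ssocks default falls back to an IPv4 rendering. The same dispatch as a reduced sketch, with the slot sizes taken from the diff:

#include <linux/in.h>
#include <linux/in6.h>
#include <linux/kernel.h>
#include <linux/socket.h>

/* Render the peer address into "KEY=value" env slots for the helper. */
static void fill_addr_env(char *af_slot, char *addr_slot,
			  struct sockaddr_storage *peer)
{
	const char *afs;

	switch (((struct sockaddr *)peer)->sa_family) {
	case AF_INET6:
		afs = "ipv6";
		snprintf(addr_slot, 60, "DRBD_PEER_ADDRESS=%pI6",
			 &((struct sockaddr_in6 *)peer)->sin6_addr);
		break;
	case AF_INET:
		afs = "ipv4";
		snprintf(addr_slot, 60, "DRBD_PEER_ADDRESS=%pI4",
			 &((struct sockaddr_in *)peer)->sin_addr);
		break;
	default:	/* ssocks: no native rendering, reuse %pI4 */
		afs = "ssocks";
		snprintf(addr_slot, 60, "DRBD_PEER_ADDRESS=%pI4",
			 &((struct sockaddr_in *)peer)->sin_addr);
	}
	snprintf(af_slot, 20, "DRBD_PEER_AF=%s", afs);
}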
@@ -323,15 +323,15 @@ int drbd_khelper(struct drbd_device *device, char *cmd)
323 NULL }; 323 NULL };
324 char mb[12]; 324 char mb[12];
325 char *argv[] = {usermode_helper, cmd, mb, NULL }; 325 char *argv[] = {usermode_helper, cmd, mb, NULL };
326 struct drbd_tconn *tconn = device->tconn; 326 struct drbd_connection *connection = device->connection;
327 struct sib_info sib; 327 struct sib_info sib;
328 int ret; 328 int ret;
329 329
330 if (current == tconn->worker.task) 330 if (current == connection->worker.task)
331 set_bit(CALLBACK_PENDING, &tconn->flags); 331 set_bit(CALLBACK_PENDING, &connection->flags);
332 332
333 snprintf(mb, 12, "minor-%d", device_to_minor(device)); 333 snprintf(mb, 12, "minor-%d", device_to_minor(device));
334 setup_khelper_env(tconn, envp); 334 setup_khelper_env(connection, envp);
335 335
336 /* The helper may take some time. 336 /* The helper may take some time.
337 * write out any unsynced meta data changes now */ 337 * write out any unsynced meta data changes now */
@@ -354,8 +354,8 @@ int drbd_khelper(struct drbd_device *device, char *cmd)
354 sib.helper_exit_code = ret; 354 sib.helper_exit_code = ret;
355 drbd_bcast_event(device, &sib); 355 drbd_bcast_event(device, &sib);
356 356
357 if (current == tconn->worker.task) 357 if (current == connection->worker.task)
358 clear_bit(CALLBACK_PENDING, &tconn->flags); 358 clear_bit(CALLBACK_PENDING, &connection->flags);
359 359
360 if (ret < 0) /* Ignore any ERRNOs we got. */ 360 if (ret < 0) /* Ignore any ERRNOs we got. */
361 ret = 0; 361 ret = 0;
@@ -363,7 +363,7 @@ int drbd_khelper(struct drbd_device *device, char *cmd)
363 return ret; 363 return ret;
364} 364}
365 365
366static int conn_khelper(struct drbd_tconn *tconn, char *cmd) 366static int conn_khelper(struct drbd_connection *connection, char *cmd)
367{ 367{
368 char *envp[] = { "HOME=/", 368 char *envp[] = { "HOME=/",
369 "TERM=linux", 369 "TERM=linux",
@@ -371,23 +371,23 @@ static int conn_khelper(struct drbd_tconn *tconn, char *cmd)
371 (char[20]) { }, /* address family */ 371 (char[20]) { }, /* address family */
372 (char[60]) { }, /* address */ 372 (char[60]) { }, /* address */
373 NULL }; 373 NULL };
374 char *argv[] = {usermode_helper, cmd, tconn->name, NULL }; 374 char *argv[] = {usermode_helper, cmd, connection->name, NULL };
375 int ret; 375 int ret;
376 376
377 setup_khelper_env(tconn, envp); 377 setup_khelper_env(connection, envp);
378 conn_md_sync(tconn); 378 conn_md_sync(connection);
379 379
380 conn_info(tconn, "helper command: %s %s %s\n", usermode_helper, cmd, tconn->name); 380 conn_info(connection, "helper command: %s %s %s\n", usermode_helper, cmd, connection->name);
381 /* TODO: conn_bcast_event() ?? */ 381 /* TODO: conn_bcast_event() ?? */
382 382
383 ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC); 383 ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC);
384 if (ret) 384 if (ret)
385 conn_warn(tconn, "helper command: %s %s %s exit code %u (0x%x)\n", 385 conn_warn(connection, "helper command: %s %s %s exit code %u (0x%x)\n",
386 usermode_helper, cmd, tconn->name, 386 usermode_helper, cmd, connection->name,
387 (ret >> 8) & 0xff, ret); 387 (ret >> 8) & 0xff, ret);
388 else 388 else
389 conn_info(tconn, "helper command: %s %s %s exit code %u (0x%x)\n", 389 conn_info(connection, "helper command: %s %s %s exit code %u (0x%x)\n",
390 usermode_helper, cmd, tconn->name, 390 usermode_helper, cmd, connection->name,
391 (ret >> 8) & 0xff, ret); 391 (ret >> 8) & 0xff, ret);
392 /* TODO: conn_bcast_event() ?? */ 392 /* TODO: conn_bcast_event() ?? */
393 393
@@ -397,14 +397,14 @@ static int conn_khelper(struct drbd_tconn *tconn, char *cmd)
397 return ret; 397 return ret;
398} 398}
399 399
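conn_khelper() runs the helper synchronously (UMH_WAIT_PROC), so call_usermodehelper() returns a wait status and the callers mask with (ret >> 8) & 0xff to recover the helper's exit code. A sketch of that call shape; the helper path is a placeholder, and returning the negative errno early is one plausible policy rather than what the diff itself does:

#include <linux/kmod.h>

/* Run a user-space helper synchronously; "/sbin/my-helper" is a
 * placeholder path.  Returns the helper's exit status, or a negative
 * errno if it could not be started at all. */
static int run_helper(const char *cmd, const char *res_name, char **envp)
{
	char *argv[] = { "/sbin/my-helper",
			 (char *)cmd, (char *)res_name, NULL };
	int ret;

	ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
	if (ret < 0)
		return ret;
	return (ret >> 8) & 0xff;	/* exit status lives in the upper byte */
}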
400static enum drbd_fencing_p highest_fencing_policy(struct drbd_tconn *tconn) 400static enum drbd_fencing_p highest_fencing_policy(struct drbd_connection *connection)
401{ 401{
402 enum drbd_fencing_p fp = FP_NOT_AVAIL; 402 enum drbd_fencing_p fp = FP_NOT_AVAIL;
403 struct drbd_device *device; 403 struct drbd_device *device;
404 int vnr; 404 int vnr;
405 405
406 rcu_read_lock(); 406 rcu_read_lock();
407 idr_for_each_entry(&tconn->volumes, device, vnr) { 407 idr_for_each_entry(&connection->volumes, device, vnr) {
408 if (get_ldev_if_state(device, D_CONSISTENT)) { 408 if (get_ldev_if_state(device, D_CONSISTENT)) {
409 fp = max_t(enum drbd_fencing_p, fp, 409 fp = max_t(enum drbd_fencing_p, fp,
410 rcu_dereference(device->ldev->disk_conf)->fencing); 410 rcu_dereference(device->ldev->disk_conf)->fencing);
@@ -416,7 +416,7 @@ static enum drbd_fencing_p highest_fencing_policy(struct drbd_tconn *tconn)
416 return fp; 416 return fp;
417} 417}
418 418
419bool conn_try_outdate_peer(struct drbd_tconn *tconn) 419bool conn_try_outdate_peer(struct drbd_connection *connection)
420{ 420{
421 unsigned int connect_cnt; 421 unsigned int connect_cnt;
422 union drbd_state mask = { }; 422 union drbd_state mask = { };
@@ -425,26 +425,26 @@ bool conn_try_outdate_peer(struct drbd_tconn *tconn)
425 char *ex_to_string; 425 char *ex_to_string;
426 int r; 426 int r;
427 427
428 if (tconn->cstate >= C_WF_REPORT_PARAMS) { 428 if (connection->cstate >= C_WF_REPORT_PARAMS) {
429 conn_err(tconn, "Expected cstate < C_WF_REPORT_PARAMS\n"); 429 conn_err(connection, "Expected cstate < C_WF_REPORT_PARAMS\n");
430 return false; 430 return false;
431 } 431 }
432 432
433 spin_lock_irq(&tconn->req_lock); 433 spin_lock_irq(&connection->req_lock);
434 connect_cnt = tconn->connect_cnt; 434 connect_cnt = connection->connect_cnt;
435 spin_unlock_irq(&tconn->req_lock); 435 spin_unlock_irq(&connection->req_lock);
436 436
437 fp = highest_fencing_policy(tconn); 437 fp = highest_fencing_policy(connection);
438 switch (fp) { 438 switch (fp) {
439 case FP_NOT_AVAIL: 439 case FP_NOT_AVAIL:
440 conn_warn(tconn, "Not fencing peer, I'm not even Consistent myself.\n"); 440 conn_warn(connection, "Not fencing peer, I'm not even Consistent myself.\n");
441 goto out; 441 goto out;
442 case FP_DONT_CARE: 442 case FP_DONT_CARE:
443 return true; 443 return true;
444 default: ; 444 default: ;
445 } 445 }
446 446
447 r = conn_khelper(tconn, "fence-peer"); 447 r = conn_khelper(connection, "fence-peer");
448 448
449 switch ((r>>8) & 0xff) { 449 switch ((r>>8) & 0xff) {
450 case 3: /* peer is inconsistent */ 450 case 3: /* peer is inconsistent */
@@ -458,7 +458,7 @@ bool conn_try_outdate_peer(struct drbd_tconn *tconn)
458 val.pdsk = D_OUTDATED; 458 val.pdsk = D_OUTDATED;
459 break; 459 break;
460 case 5: /* peer was down */ 460 case 5: /* peer was down */
461 if (conn_highest_disk(tconn) == D_UP_TO_DATE) { 461 if (conn_highest_disk(connection) == D_UP_TO_DATE) {
462 /* we will(have) create(d) a new UUID anyways... */ 462 /* we will(have) create(d) a new UUID anyways... */
463 ex_to_string = "peer is unreachable, assumed to be dead"; 463 ex_to_string = "peer is unreachable, assumed to be dead";
464 mask.pdsk = D_MASK; 464 mask.pdsk = D_MASK;
@@ -471,65 +471,65 @@ bool conn_try_outdate_peer(struct drbd_tconn *tconn)
471 * This is useful when an unconnected R_SECONDARY is asked to 471 * This is useful when an unconnected R_SECONDARY is asked to
472 * become R_PRIMARY, but finds the other peer being active. */ 472 * become R_PRIMARY, but finds the other peer being active. */
473 ex_to_string = "peer is active"; 473 ex_to_string = "peer is active";
474 conn_warn(tconn, "Peer is primary, outdating myself.\n"); 474 conn_warn(connection, "Peer is primary, outdating myself.\n");
475 mask.disk = D_MASK; 475 mask.disk = D_MASK;
476 val.disk = D_OUTDATED; 476 val.disk = D_OUTDATED;
477 break; 477 break;
478 case 7: 478 case 7:
479 if (fp != FP_STONITH) 479 if (fp != FP_STONITH)
480 conn_err(tconn, "fence-peer() = 7 && fencing != Stonith !!!\n"); 480 conn_err(connection, "fence-peer() = 7 && fencing != Stonith !!!\n");
481 ex_to_string = "peer was stonithed"; 481 ex_to_string = "peer was stonithed";
482 mask.pdsk = D_MASK; 482 mask.pdsk = D_MASK;
483 val.pdsk = D_OUTDATED; 483 val.pdsk = D_OUTDATED;
484 break; 484 break;
485 default: 485 default:
486 /* The script is broken ... */ 486 /* The script is broken ... */
487 conn_err(tconn, "fence-peer helper broken, returned %d\n", (r>>8)&0xff); 487 conn_err(connection, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
488 return false; /* Eventually leave IO frozen */ 488 return false; /* Eventually leave IO frozen */
489 } 489 }
490 490
491 conn_info(tconn, "fence-peer helper returned %d (%s)\n", 491 conn_info(connection, "fence-peer helper returned %d (%s)\n",
492 (r>>8) & 0xff, ex_to_string); 492 (r>>8) & 0xff, ex_to_string);
493 493
494 out: 494 out:
495 495
496 /* Not using 496 /* Not using
497 conn_request_state(tconn, mask, val, CS_VERBOSE); 497 conn_request_state(connection, mask, val, CS_VERBOSE);
498 here, because we might have been able to re-establish the connection in the 498 here, because we might have been able to re-establish the connection in the
499 meantime. */ 499 meantime. */
500 spin_lock_irq(&tconn->req_lock); 500 spin_lock_irq(&connection->req_lock);
501 if (tconn->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &tconn->flags)) { 501 if (connection->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &connection->flags)) {
502 if (tconn->connect_cnt != connect_cnt) 502 if (connection->connect_cnt != connect_cnt)
503 /* In case the connection was established and dropped 503 /* In case the connection was established and dropped
504 while the fence-peer handler was running, ignore it */ 504 while the fence-peer handler was running, ignore it */
505 conn_info(tconn, "Ignoring fence-peer exit code\n"); 505 conn_info(connection, "Ignoring fence-peer exit code\n");
506 else 506 else
507 _conn_request_state(tconn, mask, val, CS_VERBOSE); 507 _conn_request_state(connection, mask, val, CS_VERBOSE);
508 } 508 }
509 spin_unlock_irq(&tconn->req_lock); 509 spin_unlock_irq(&connection->req_lock);
510 510
511 return conn_highest_pdsk(tconn) <= D_OUTDATED; 511 return conn_highest_pdsk(connection) <= D_OUTDATED;
512} 512}
513 513
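The subtle part of conn_try_outdate_peer() is the connect_cnt snapshot: the counter is sampled under req_lock before the fence-peer helper runs, and the resulting state change is committed only if the counter did not move, i.e. no connection was established and dropped while the helper was busy. That generation-counter check as a sketch with illustrative names:

#include <linux/spinlock.h>
#include <linux/types.h>

/* Sample a generation counter under the lock, do slow work, and only
 * commit if the counter did not move in the meantime. */
static bool commit_unless_reconnected(spinlock_t *lock,
				      unsigned int *connect_cnt,
				      void (*slow_work)(void *),
				      void (*commit)(void *), void *arg)
{
	unsigned int snap;
	bool committed = false;

	spin_lock_irq(lock);
	snap = *connect_cnt;
	spin_unlock_irq(lock);

	slow_work(arg);			/* e.g. the fence-peer helper */

	spin_lock_irq(lock);
	if (*connect_cnt == snap) {	/* nothing (re)connected meanwhile */
		commit(arg);
		committed = true;
	}
	spin_unlock_irq(lock);
	return committed;
}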
514static int _try_outdate_peer_async(void *data) 514static int _try_outdate_peer_async(void *data)
515{ 515{
516 struct drbd_tconn *tconn = (struct drbd_tconn *)data; 516 struct drbd_connection *connection = (struct drbd_connection *)data;
517 517
518 conn_try_outdate_peer(tconn); 518 conn_try_outdate_peer(connection);
519 519
520 kref_put(&tconn->kref, &conn_destroy); 520 kref_put(&connection->kref, &conn_destroy);
521 return 0; 521 return 0;
522} 522}
523 523
524void conn_try_outdate_peer_async(struct drbd_tconn *tconn) 524void conn_try_outdate_peer_async(struct drbd_connection *connection)
525{ 525{
526 struct task_struct *opa; 526 struct task_struct *opa;
527 527
528 kref_get(&tconn->kref); 528 kref_get(&connection->kref);
529 opa = kthread_run(_try_outdate_peer_async, tconn, "drbd_async_h"); 529 opa = kthread_run(_try_outdate_peer_async, connection, "drbd_async_h");
530 if (IS_ERR(opa)) { 530 if (IS_ERR(opa)) {
531 conn_err(tconn, "out of mem, failed to invoke fence-peer helper\n"); 531 conn_err(connection, "out of mem, failed to invoke fence-peer helper\n");
532 kref_put(&tconn->kref, &conn_destroy); 532 kref_put(&connection->kref, &conn_destroy);
533 } 533 }
534} 534}
535 535
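conn_try_outdate_peer_async() shows the reference dance for a fire-and-forget kthread: kref_get() before kthread_run() gives the thread its own reference, the thread drops it when done, and the kthread_run() failure path must drop it too or the object leaks. A sketch, reusing the hypothetical my_obj/my_release stand-ins from the conn_md_sync sketch above:

#include <linux/err.h>
#include <linux/kthread.h>

static int my_async_fn(void *data)
{
	struct my_obj *obj = data;

	/* ... the actual work runs here ... */
	kref_put(&obj->kref, my_release);	/* the thread owns one ref */
	return 0;
}

static void start_async(struct my_obj *obj)
{
	struct task_struct *t;

	kref_get(&obj->kref);			/* ref handed to the thread */
	t = kthread_run(my_async_fn, obj, "my_async");
	if (IS_ERR(t))
		kref_put(&obj->kref, my_release); /* thread never ran */
}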
@@ -544,7 +544,7 @@ drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force)
544 union drbd_state mask, val; 544 union drbd_state mask, val;
545 545
546 if (new_role == R_PRIMARY) 546 if (new_role == R_PRIMARY)
547 request_ping(device->tconn); /* Detect a dead peer ASAP */ 547 request_ping(device->connection); /* Detect a dead peer ASAP */
548 548
549 mutex_lock(device->state_mutex); 549 mutex_lock(device->state_mutex);
550 550
@@ -575,7 +575,7 @@ drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force)
575 device->state.disk == D_CONSISTENT && mask.pdsk == 0) { 575 device->state.disk == D_CONSISTENT && mask.pdsk == 0) {
576 D_ASSERT(device->state.pdsk == D_UNKNOWN); 576 D_ASSERT(device->state.pdsk == D_UNKNOWN);
577 577
578 if (conn_try_outdate_peer(device->tconn)) { 578 if (conn_try_outdate_peer(device->connection)) {
579 val.disk = D_UP_TO_DATE; 579 val.disk = D_UP_TO_DATE;
580 mask.disk = D_MASK; 580 mask.disk = D_MASK;
581 } 581 }
@@ -585,7 +585,7 @@ drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force)
585 if (rv == SS_NOTHING_TO_DO) 585 if (rv == SS_NOTHING_TO_DO)
586 goto out; 586 goto out;
587 if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) { 587 if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
588 if (!conn_try_outdate_peer(device->tconn) && force) { 588 if (!conn_try_outdate_peer(device->connection) && force) {
589 dev_warn(DEV, "Forced into split brain situation!\n"); 589 dev_warn(DEV, "Forced into split brain situation!\n");
590 mask.pdsk = D_MASK; 590 mask.pdsk = D_MASK;
591 val.pdsk = D_OUTDATED; 591 val.pdsk = D_OUTDATED;
@@ -598,7 +598,7 @@ drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force)
598 retry at most once more in this case. */ 598 retry at most once more in this case. */
599 int timeo; 599 int timeo;
600 rcu_read_lock(); 600 rcu_read_lock();
601 nc = rcu_dereference(device->tconn->net_conf); 601 nc = rcu_dereference(device->connection->net_conf);
602 timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1; 602 timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
603 rcu_read_unlock(); 603 rcu_read_unlock();
604 schedule_timeout_interruptible(timeo); 604 schedule_timeout_interruptible(timeo);
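The timeout calculation in drbd_set_role() is a plain RCU read: take rcu_read_lock(), rcu_dereference() net_conf, copy the value out, unlock. The dereferenced pointer must not be used after the unlock, which is why the jiffies value is computed inside the critical section. As a self-contained sketch, with a hypothetical my_conf standing in for DRBD's net_conf:

#include <linux/jiffies.h>
#include <linux/rcupdate.h>

struct my_conf {
	unsigned int ping_timeo;	/* tenths of a second, as in DRBD */
};

static int ping_timeout_jiffies(struct my_conf __rcu **confp)
{
	struct my_conf *nc;
	int timeo = 1;			/* fallback when unconfigured */

	rcu_read_lock();
	nc = rcu_dereference(*confp);
	if (nc)
		timeo = (nc->ping_timeo + 1) * HZ / 10;
	rcu_read_unlock();		/* nc must not be used past here */
	return timeo;
}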
@@ -633,11 +633,11 @@ drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force)
633 put_ldev(device); 633 put_ldev(device);
634 } 634 }
635 } else { 635 } else {
636 mutex_lock(&device->tconn->conf_update); 636 mutex_lock(&device->connection->conf_update);
637 nc = device->tconn->net_conf; 637 nc = device->connection->net_conf;
638 if (nc) 638 if (nc)
639 nc->discard_my_data = 0; /* without copy; single bit op is atomic */ 639 nc->discard_my_data = 0; /* without copy; single bit op is atomic */
640 mutex_unlock(&device->tconn->conf_update); 640 mutex_unlock(&device->connection->conf_update);
641 641
642 set_disk_ro(device->vdisk, false); 642 set_disk_ro(device->vdisk, false);
643 if (get_ldev(device)) { 643 if (get_ldev(device)) {
@@ -1134,12 +1134,12 @@ void drbd_reconsider_max_bio_size(struct drbd_device *device)
1134 Because, from 8.3.8 onwards, the peer can use multiple 1134 Because, from 8.3.8 onwards, the peer can use multiple
1135 BIOs for a single peer_request */ 1135 BIOs for a single peer_request */
1136 if (device->state.conn >= C_WF_REPORT_PARAMS) { 1136 if (device->state.conn >= C_WF_REPORT_PARAMS) {
1137 if (device->tconn->agreed_pro_version < 94) 1137 if (device->connection->agreed_pro_version < 94)
1138 peer = min(device->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET); 1138 peer = min(device->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
1139 /* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */ 1139 /* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
1140 else if (device->tconn->agreed_pro_version == 94) 1140 else if (device->connection->agreed_pro_version == 94)
1141 peer = DRBD_MAX_SIZE_H80_PACKET; 1141 peer = DRBD_MAX_SIZE_H80_PACKET;
1142 else if (device->tconn->agreed_pro_version < 100) 1142 else if (device->connection->agreed_pro_version < 100)
1143 peer = DRBD_MAX_BIO_SIZE_P95; /* drbd 8.3.8 onwards, before 8.4.0 */ 1143 peer = DRBD_MAX_BIO_SIZE_P95; /* drbd 8.3.8 onwards, before 8.4.0 */
1144 else 1144 else
1145 peer = DRBD_MAX_BIO_SIZE; 1145 peer = DRBD_MAX_BIO_SIZE;
@@ -1157,25 +1157,25 @@ void drbd_reconsider_max_bio_size(struct drbd_device *device)
1157} 1157}
1158 1158
1159/* Starts the worker thread */ 1159/* Starts the worker thread */
1160static void conn_reconfig_start(struct drbd_tconn *tconn) 1160static void conn_reconfig_start(struct drbd_connection *connection)
1161{ 1161{
1162 drbd_thread_start(&tconn->worker); 1162 drbd_thread_start(&connection->worker);
1163 conn_flush_workqueue(tconn); 1163 conn_flush_workqueue(connection);
1164} 1164}
1165 1165
1166/* if still unconfigured, stops worker again. */ 1166/* if still unconfigured, stops worker again. */
1167static void conn_reconfig_done(struct drbd_tconn *tconn) 1167static void conn_reconfig_done(struct drbd_connection *connection)
1168{ 1168{
1169 bool stop_threads; 1169 bool stop_threads;
1170 spin_lock_irq(&tconn->req_lock); 1170 spin_lock_irq(&connection->req_lock);
1171 stop_threads = conn_all_vols_unconf(tconn) && 1171 stop_threads = conn_all_vols_unconf(connection) &&
1172 tconn->cstate == C_STANDALONE; 1172 connection->cstate == C_STANDALONE;
1173 spin_unlock_irq(&tconn->req_lock); 1173 spin_unlock_irq(&connection->req_lock);
1174 if (stop_threads) { 1174 if (stop_threads) {
1175 /* asender is implicitly stopped by receiver 1175 /* asender is implicitly stopped by receiver
1176 * in conn_disconnect() */ 1176 * in conn_disconnect() */
1177 drbd_thread_stop(&tconn->receiver); 1177 drbd_thread_stop(&connection->receiver);
1178 drbd_thread_stop(&tconn->worker); 1178 drbd_thread_stop(&connection->worker);
1179 } 1179 }
1180} 1180}
1181 1181
@@ -1190,10 +1190,10 @@ static void drbd_suspend_al(struct drbd_device *device)
1190 } 1190 }
1191 1191
1192 drbd_al_shrink(device); 1192 drbd_al_shrink(device);
1193 spin_lock_irq(&device->tconn->req_lock); 1193 spin_lock_irq(&device->connection->req_lock);
1194 if (device->state.conn < C_CONNECTED) 1194 if (device->state.conn < C_CONNECTED)
1195 s = !test_and_set_bit(AL_SUSPENDED, &device->flags); 1195 s = !test_and_set_bit(AL_SUSPENDED, &device->flags);
1196 spin_unlock_irq(&device->tconn->req_lock); 1196 spin_unlock_irq(&device->connection->req_lock);
1197 lc_unlock(device->act_log); 1197 lc_unlock(device->act_log);
1198 1198
1199 if (s) 1199 if (s)
@@ -1264,7 +1264,7 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
1264 goto fail; 1264 goto fail;
1265 } 1265 }
1266 1266
1267 mutex_lock(&device->tconn->conf_update); 1267 mutex_lock(&device->connection->conf_update);
1268 old_disk_conf = device->ldev->disk_conf; 1268 old_disk_conf = device->ldev->disk_conf;
1269 *new_disk_conf = *old_disk_conf; 1269 *new_disk_conf = *old_disk_conf;
1270 if (should_set_defaults(info)) 1270 if (should_set_defaults(info))
@@ -1327,7 +1327,7 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
1327 rcu_assign_pointer(device->rs_plan_s, new_plan); 1327 rcu_assign_pointer(device->rs_plan_s, new_plan);
1328 } 1328 }
1329 1329
1330 mutex_unlock(&device->tconn->conf_update); 1330 mutex_unlock(&device->connection->conf_update);
1331 1331
1332 if (new_disk_conf->al_updates) 1332 if (new_disk_conf->al_updates)
1333 device->ldev->md.flags &= ~MDF_AL_DISABLED; 1333 device->ldev->md.flags &= ~MDF_AL_DISABLED;
@@ -1339,7 +1339,7 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
1339 else 1339 else
1340 set_bit(MD_NO_FUA, &device->flags); 1340 set_bit(MD_NO_FUA, &device->flags);
1341 1341
1342 drbd_bump_write_ordering(device->tconn, WO_bdev_flush); 1342 drbd_bump_write_ordering(device->connection, WO_bdev_flush);
1343 1343
1344 drbd_md_sync(device); 1344 drbd_md_sync(device);
1345 1345
@@ -1353,7 +1353,7 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
1353 goto success; 1353 goto success;
1354 1354
1355fail_unlock: 1355fail_unlock:
1356 mutex_unlock(&device->tconn->conf_update); 1356 mutex_unlock(&device->connection->conf_update);
1357 fail: 1357 fail:
1358 kfree(new_disk_conf); 1358 kfree(new_disk_conf);
1359 kfree(new_plan); 1359 kfree(new_plan);
@@ -1388,7 +1388,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
1388 goto finish; 1388 goto finish;
1389 1389
1390 device = adm_ctx.device; 1390 device = adm_ctx.device;
1391 conn_reconfig_start(device->tconn); 1391 conn_reconfig_start(device->connection);
1392 1392
1393 /* if you want to reconfigure, please tear down first */ 1393 /* if you want to reconfigure, please tear down first */
1394 if (device->state.disk > D_DISKLESS) { 1394 if (device->state.disk > D_DISKLESS) {
@@ -1455,7 +1455,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
1455 goto fail; 1455 goto fail;
1456 1456
1457 rcu_read_lock(); 1457 rcu_read_lock();
1458 nc = rcu_dereference(device->tconn->net_conf); 1458 nc = rcu_dereference(device->connection->net_conf);
1459 if (nc) { 1459 if (nc) {
1460 if (new_disk_conf->fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) { 1460 if (new_disk_conf->fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) {
1461 rcu_read_unlock(); 1461 rcu_read_unlock();
@@ -1636,7 +1636,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
1636 new_disk_conf = NULL; 1636 new_disk_conf = NULL;
1637 new_plan = NULL; 1637 new_plan = NULL;
1638 1638
1639 drbd_bump_write_ordering(device->tconn, WO_bdev_flush); 1639 drbd_bump_write_ordering(device->connection, WO_bdev_flush);
1640 1640
1641 if (drbd_md_test_flag(device->ldev, MDF_CRASHED_PRIMARY)) 1641 if (drbd_md_test_flag(device->ldev, MDF_CRASHED_PRIMARY))
1642 set_bit(CRASHED_PRIMARY, &device->flags); 1642 set_bit(CRASHED_PRIMARY, &device->flags);
@@ -1644,7 +1644,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
1644 clear_bit(CRASHED_PRIMARY, &device->flags); 1644 clear_bit(CRASHED_PRIMARY, &device->flags);
1645 1645
1646 if (drbd_md_test_flag(device->ldev, MDF_PRIMARY_IND) && 1646 if (drbd_md_test_flag(device->ldev, MDF_PRIMARY_IND) &&
1647 !(device->state.role == R_PRIMARY && device->tconn->susp_nod)) 1647 !(device->state.role == R_PRIMARY && device->connection->susp_nod))
1648 set_bit(CRASHED_PRIMARY, &device->flags); 1648 set_bit(CRASHED_PRIMARY, &device->flags);
1649 1649
1650 device->send_cnt = 0; 1650 device->send_cnt = 0;
@@ -1702,7 +1702,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
1702 if (_drbd_bm_total_weight(device) == drbd_bm_bits(device)) 1702 if (_drbd_bm_total_weight(device) == drbd_bm_bits(device))
1703 drbd_suspend_al(device); /* IO is still suspended here... */ 1703 drbd_suspend_al(device); /* IO is still suspended here... */
1704 1704
1705 spin_lock_irq(&device->tconn->req_lock); 1705 spin_lock_irq(&device->connection->req_lock);
1706 os = drbd_read_state(device); 1706 os = drbd_read_state(device);
1707 ns = os; 1707 ns = os;
1708 /* If MDF_CONSISTENT is not set go into inconsistent state, 1708 /* If MDF_CONSISTENT is not set go into inconsistent state,
@@ -1754,7 +1754,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
1754 } 1754 }
1755 1755
1756 rv = _drbd_set_state(device, ns, CS_VERBOSE, NULL); 1756 rv = _drbd_set_state(device, ns, CS_VERBOSE, NULL);
1757 spin_unlock_irq(&device->tconn->req_lock); 1757 spin_unlock_irq(&device->connection->req_lock);
1758 1758
1759 if (rv < SS_SUCCESS) 1759 if (rv < SS_SUCCESS)
1760 goto force_diskless_dec; 1760 goto force_diskless_dec;
@@ -1771,7 +1771,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
1771 1771
1772 kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE); 1772 kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE);
1773 put_ldev(device); 1773 put_ldev(device);
1774 conn_reconfig_done(device->tconn); 1774 conn_reconfig_done(device->connection);
1775 drbd_adm_finish(info, retcode); 1775 drbd_adm_finish(info, retcode);
1776 return 0; 1776 return 0;
1777 1777
@@ -1781,7 +1781,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
1781 drbd_force_state(device, NS(disk, D_DISKLESS)); 1781 drbd_force_state(device, NS(disk, D_DISKLESS));
1782 drbd_md_sync(device); 1782 drbd_md_sync(device);
1783 fail: 1783 fail:
1784 conn_reconfig_done(device->tconn); 1784 conn_reconfig_done(device->connection);
1785 if (nbc) { 1785 if (nbc) {
1786 if (nbc->backing_bdev) 1786 if (nbc->backing_bdev)
1787 blkdev_put(nbc->backing_bdev, 1787 blkdev_put(nbc->backing_bdev,
@@ -1860,14 +1860,14 @@ out:
1860 return 0; 1860 return 0;
1861} 1861}
1862 1862
1863static bool conn_resync_running(struct drbd_tconn *tconn) 1863static bool conn_resync_running(struct drbd_connection *connection)
1864{ 1864{
1865 struct drbd_device *device; 1865 struct drbd_device *device;
1866 bool rv = false; 1866 bool rv = false;
1867 int vnr; 1867 int vnr;
1868 1868
1869 rcu_read_lock(); 1869 rcu_read_lock();
1870 idr_for_each_entry(&tconn->volumes, device, vnr) { 1870 idr_for_each_entry(&connection->volumes, device, vnr) {
1871 if (device->state.conn == C_SYNC_SOURCE || 1871 if (device->state.conn == C_SYNC_SOURCE ||
1872 device->state.conn == C_SYNC_TARGET || 1872 device->state.conn == C_SYNC_TARGET ||
1873 device->state.conn == C_PAUSED_SYNC_S || 1873 device->state.conn == C_PAUSED_SYNC_S ||
@@ -1881,14 +1881,14 @@ static bool conn_resync_running(struct drbd_tconn *tconn)
1881 return rv; 1881 return rv;
1882} 1882}
1883 1883
1884static bool conn_ov_running(struct drbd_tconn *tconn) 1884static bool conn_ov_running(struct drbd_connection *connection)
1885{ 1885{
1886 struct drbd_device *device; 1886 struct drbd_device *device;
1887 bool rv = false; 1887 bool rv = false;
1888 int vnr; 1888 int vnr;
1889 1889
1890 rcu_read_lock(); 1890 rcu_read_lock();
1891 idr_for_each_entry(&tconn->volumes, device, vnr) { 1891 idr_for_each_entry(&connection->volumes, device, vnr) {
1892 if (device->state.conn == C_VERIFY_S || 1892 if (device->state.conn == C_VERIFY_S ||
1893 device->state.conn == C_VERIFY_T) { 1893 device->state.conn == C_VERIFY_T) {
1894 rv = true; 1894 rv = true;
@@ -1901,12 +1901,12 @@ static bool conn_ov_running(struct drbd_tconn *tconn)
1901} 1901}
1902 1902
1903static enum drbd_ret_code 1903static enum drbd_ret_code
1904_check_net_options(struct drbd_tconn *tconn, struct net_conf *old_conf, struct net_conf *new_conf) 1904_check_net_options(struct drbd_connection *connection, struct net_conf *old_conf, struct net_conf *new_conf)
1905{ 1905{
1906 struct drbd_device *device; 1906 struct drbd_device *device;
1907 int i; 1907 int i;
1908 1908
1909 if (old_conf && tconn->cstate == C_WF_REPORT_PARAMS && tconn->agreed_pro_version < 100) { 1909 if (old_conf && connection->cstate == C_WF_REPORT_PARAMS && connection->agreed_pro_version < 100) {
1910 if (new_conf->wire_protocol != old_conf->wire_protocol) 1910 if (new_conf->wire_protocol != old_conf->wire_protocol)
1911 return ERR_NEED_APV_100; 1911 return ERR_NEED_APV_100;
1912 1912
@@ -1918,15 +1918,15 @@ _check_net_options(struct drbd_tconn *tconn, struct net_conf *old_conf, struct n
1918 } 1918 }
1919 1919
1920 if (!new_conf->two_primaries && 1920 if (!new_conf->two_primaries &&
1921 conn_highest_role(tconn) == R_PRIMARY && 1921 conn_highest_role(connection) == R_PRIMARY &&
1922 conn_highest_peer(tconn) == R_PRIMARY) 1922 conn_highest_peer(connection) == R_PRIMARY)
1923 return ERR_NEED_ALLOW_TWO_PRI; 1923 return ERR_NEED_ALLOW_TWO_PRI;
1924 1924
1925 if (new_conf->two_primaries && 1925 if (new_conf->two_primaries &&
1926 (new_conf->wire_protocol != DRBD_PROT_C)) 1926 (new_conf->wire_protocol != DRBD_PROT_C))
1927 return ERR_NOT_PROTO_C; 1927 return ERR_NOT_PROTO_C;
1928 1928
1929 idr_for_each_entry(&tconn->volumes, device, i) { 1929 idr_for_each_entry(&connection->volumes, device, i) {
1930 if (get_ldev(device)) { 1930 if (get_ldev(device)) {
1931 enum drbd_fencing_p fp = rcu_dereference(device->ldev->disk_conf)->fencing; 1931 enum drbd_fencing_p fp = rcu_dereference(device->ldev->disk_conf)->fencing;
1932 put_ldev(device); 1932 put_ldev(device);
@@ -1944,18 +1944,18 @@ _check_net_options(struct drbd_tconn *tconn, struct net_conf *old_conf, struct n
1944} 1944}
1945 1945
1946static enum drbd_ret_code 1946static enum drbd_ret_code
1947check_net_options(struct drbd_tconn *tconn, struct net_conf *new_conf) 1947check_net_options(struct drbd_connection *connection, struct net_conf *new_conf)
1948{ 1948{
1949 static enum drbd_ret_code rv; 1949 static enum drbd_ret_code rv;
1950 struct drbd_device *device; 1950 struct drbd_device *device;
1951 int i; 1951 int i;
1952 1952
1953 rcu_read_lock(); 1953 rcu_read_lock();
1954 rv = _check_net_options(tconn, rcu_dereference(tconn->net_conf), new_conf); 1954 rv = _check_net_options(connection, rcu_dereference(connection->net_conf), new_conf);
1955 rcu_read_unlock(); 1955 rcu_read_unlock();
1956 1956
1957 /* tconn->volumes protected by genl_lock() here */ 1957 /* connection->volumes protected by genl_lock() here */
1958 idr_for_each_entry(&tconn->volumes, device, i) { 1958 idr_for_each_entry(&connection->volumes, device, i) {
1959 if (!device->bitmap) { 1959 if (!device->bitmap) {
1960 if (drbd_bm_init(device)) 1960 if (drbd_bm_init(device))
1961 return ERR_NOMEM; 1961 return ERR_NOMEM;
@@ -2027,7 +2027,7 @@ static void free_crypto(struct crypto *crypto)
2027int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info) 2027int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
2028{ 2028{
2029 enum drbd_ret_code retcode; 2029 enum drbd_ret_code retcode;
2030 struct drbd_tconn *tconn; 2030 struct drbd_connection *connection;
2031 struct net_conf *old_conf, *new_conf = NULL; 2031 struct net_conf *old_conf, *new_conf = NULL;
2032 int err; 2032 int err;
2033 int ovr; /* online verify running */ 2033 int ovr; /* online verify running */
@@ -2040,7 +2040,7 @@ int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
2040 if (retcode != NO_ERROR) 2040 if (retcode != NO_ERROR)
2041 goto out; 2041 goto out;
2042 2042
2043 tconn = adm_ctx.tconn; 2043 connection = adm_ctx.connection;
2044 2044
2045 new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL); 2045 new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
2046 if (!new_conf) { 2046 if (!new_conf) {
@@ -2048,11 +2048,11 @@ int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
2048 goto out; 2048 goto out;
2049 } 2049 }
2050 2050
2051 conn_reconfig_start(tconn); 2051 conn_reconfig_start(connection);
2052 2052
2053 mutex_lock(&tconn->data.mutex); 2053 mutex_lock(&connection->data.mutex);
2054 mutex_lock(&tconn->conf_update); 2054 mutex_lock(&connection->conf_update);
2055 old_conf = tconn->net_conf; 2055 old_conf = connection->net_conf;
2056 2056
2057 if (!old_conf) { 2057 if (!old_conf) {
2058 drbd_msg_put_info("net conf missing, try connect"); 2058 drbd_msg_put_info("net conf missing, try connect");
@@ -2071,19 +2071,19 @@ int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
2071 goto fail; 2071 goto fail;
2072 } 2072 }
2073 2073
2074 retcode = check_net_options(tconn, new_conf); 2074 retcode = check_net_options(connection, new_conf);
2075 if (retcode != NO_ERROR) 2075 if (retcode != NO_ERROR)
2076 goto fail; 2076 goto fail;
2077 2077
2078 /* re-sync running */ 2078 /* re-sync running */
2079 rsr = conn_resync_running(tconn); 2079 rsr = conn_resync_running(connection);
2080 if (rsr && strcmp(new_conf->csums_alg, old_conf->csums_alg)) { 2080 if (rsr && strcmp(new_conf->csums_alg, old_conf->csums_alg)) {
2081 retcode = ERR_CSUMS_RESYNC_RUNNING; 2081 retcode = ERR_CSUMS_RESYNC_RUNNING;
2082 goto fail; 2082 goto fail;
2083 } 2083 }
2084 2084
2085 /* online verify running */ 2085 /* online verify running */
2086 ovr = conn_ov_running(tconn); 2086 ovr = conn_ov_running(connection);
2087 if (ovr && strcmp(new_conf->verify_alg, old_conf->verify_alg)) { 2087 if (ovr && strcmp(new_conf->verify_alg, old_conf->verify_alg)) {
2088 retcode = ERR_VERIFY_RUNNING; 2088 retcode = ERR_VERIFY_RUNNING;
2089 goto fail; 2089 goto fail;
@@ -2093,45 +2093,45 @@ int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
2093 if (retcode != NO_ERROR) 2093 if (retcode != NO_ERROR)
2094 goto fail; 2094 goto fail;
2095 2095
2096 rcu_assign_pointer(tconn->net_conf, new_conf); 2096 rcu_assign_pointer(connection->net_conf, new_conf);
2097 2097
2098 if (!rsr) { 2098 if (!rsr) {
2099 crypto_free_hash(tconn->csums_tfm); 2099 crypto_free_hash(connection->csums_tfm);
2100 tconn->csums_tfm = crypto.csums_tfm; 2100 connection->csums_tfm = crypto.csums_tfm;
2101 crypto.csums_tfm = NULL; 2101 crypto.csums_tfm = NULL;
2102 } 2102 }
2103 if (!ovr) { 2103 if (!ovr) {
2104 crypto_free_hash(tconn->verify_tfm); 2104 crypto_free_hash(connection->verify_tfm);
2105 tconn->verify_tfm = crypto.verify_tfm; 2105 connection->verify_tfm = crypto.verify_tfm;
2106 crypto.verify_tfm = NULL; 2106 crypto.verify_tfm = NULL;
2107 } 2107 }
2108 2108
2109 crypto_free_hash(tconn->integrity_tfm); 2109 crypto_free_hash(connection->integrity_tfm);
2110 tconn->integrity_tfm = crypto.integrity_tfm; 2110 connection->integrity_tfm = crypto.integrity_tfm;
2111 if (tconn->cstate >= C_WF_REPORT_PARAMS && tconn->agreed_pro_version >= 100) 2111 if (connection->cstate >= C_WF_REPORT_PARAMS && connection->agreed_pro_version >= 100)
2112 /* Do this without trying to take tconn->data.mutex again. */ 2112 /* Do this without trying to take connection->data.mutex again. */
2113 __drbd_send_protocol(tconn, P_PROTOCOL_UPDATE); 2113 __drbd_send_protocol(connection, P_PROTOCOL_UPDATE);
2114 2114
2115 crypto_free_hash(tconn->cram_hmac_tfm); 2115 crypto_free_hash(connection->cram_hmac_tfm);
2116 tconn->cram_hmac_tfm = crypto.cram_hmac_tfm; 2116 connection->cram_hmac_tfm = crypto.cram_hmac_tfm;
2117 2117
2118 mutex_unlock(&tconn->conf_update); 2118 mutex_unlock(&connection->conf_update);
2119 mutex_unlock(&tconn->data.mutex); 2119 mutex_unlock(&connection->data.mutex);
2120 synchronize_rcu(); 2120 synchronize_rcu();
2121 kfree(old_conf); 2121 kfree(old_conf);
2122 2122
2123 if (tconn->cstate >= C_WF_REPORT_PARAMS) 2123 if (connection->cstate >= C_WF_REPORT_PARAMS)
2124 drbd_send_sync_param(minor_to_device(conn_lowest_minor(tconn))); 2124 drbd_send_sync_param(minor_to_device(conn_lowest_minor(connection)));
2125 2125
2126 goto done; 2126 goto done;
2127 2127
2128 fail: 2128 fail:
2129 mutex_unlock(&tconn->conf_update); 2129 mutex_unlock(&connection->conf_update);
2130 mutex_unlock(&tconn->data.mutex); 2130 mutex_unlock(&connection->data.mutex);
2131 free_crypto(&crypto); 2131 free_crypto(&crypto);
2132 kfree(new_conf); 2132 kfree(new_conf);
2133 done: 2133 done:
2134 conn_reconfig_done(tconn); 2134 conn_reconfig_done(connection);
2135 out: 2135 out:
2136 drbd_adm_finish(info, retcode); 2136 drbd_adm_finish(info, retcode);
2137 return 0; 2137 return 0;
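The update side in drbd_adm_net_opts() is the matching half of that read: the new net_conf is published with rcu_assign_pointer() while conf_update is held, and the old one may only be kfree()d after synchronize_rcu() guarantees that no reader still sees it. Reduced to a sketch, again using the hypothetical my_conf:

#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Swap in a new config under the update mutex; free the old one only
 * after every pre-existing RCU reader is guaranteed to be done. */
static void replace_conf(struct mutex *update_lock,
			 struct my_conf __rcu **confp,
			 struct my_conf *new_conf)
{
	struct my_conf *old_conf;

	mutex_lock(update_lock);
	old_conf = rcu_dereference_protected(*confp,
					     lockdep_is_held(update_lock));
	rcu_assign_pointer(*confp, new_conf);
	mutex_unlock(update_lock);

	synchronize_rcu();
	kfree(old_conf);
}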
@@ -2142,7 +2142,7 @@ int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
2142 struct drbd_device *device; 2142 struct drbd_device *device;
2143 struct net_conf *old_conf, *new_conf = NULL; 2143 struct net_conf *old_conf, *new_conf = NULL;
2144 struct crypto crypto = { }; 2144 struct crypto crypto = { };
2145 struct drbd_tconn *tconn; 2145 struct drbd_connection *connection;
2146 enum drbd_ret_code retcode; 2146 enum drbd_ret_code retcode;
2147 int i; 2147 int i;
2148 int err; 2148 int err;
@@ -2162,24 +2162,24 @@ int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
2162 /* No need for _rcu here. All reconfiguration is 2162 /* No need for _rcu here. All reconfiguration is
2163 * strictly serialized on genl_lock(). We are protected against 2163 * strictly serialized on genl_lock(). We are protected against
2164 * concurrent reconfiguration/addition/deletion */ 2164 * concurrent reconfiguration/addition/deletion */
2165 list_for_each_entry(tconn, &drbd_tconns, all_tconn) { 2165 list_for_each_entry(connection, &drbd_connections, connections) {
2166 if (nla_len(adm_ctx.my_addr) == tconn->my_addr_len && 2166 if (nla_len(adm_ctx.my_addr) == connection->my_addr_len &&
2167 !memcmp(nla_data(adm_ctx.my_addr), &tconn->my_addr, tconn->my_addr_len)) { 2167 !memcmp(nla_data(adm_ctx.my_addr), &connection->my_addr, connection->my_addr_len)) {
2168 retcode = ERR_LOCAL_ADDR; 2168 retcode = ERR_LOCAL_ADDR;
2169 goto out; 2169 goto out;
2170 } 2170 }
2171 2171
2172 if (nla_len(adm_ctx.peer_addr) == tconn->peer_addr_len && 2172 if (nla_len(adm_ctx.peer_addr) == connection->peer_addr_len &&
2173 !memcmp(nla_data(adm_ctx.peer_addr), &tconn->peer_addr, tconn->peer_addr_len)) { 2173 !memcmp(nla_data(adm_ctx.peer_addr), &connection->peer_addr, connection->peer_addr_len)) {
2174 retcode = ERR_PEER_ADDR; 2174 retcode = ERR_PEER_ADDR;
2175 goto out; 2175 goto out;
2176 } 2176 }
2177 } 2177 }
2178 2178
2179 tconn = adm_ctx.tconn; 2179 connection = adm_ctx.connection;
2180 conn_reconfig_start(tconn); 2180 conn_reconfig_start(connection);
2181 2181
2182 if (tconn->cstate > C_STANDALONE) { 2182 if (connection->cstate > C_STANDALONE) {
2183 retcode = ERR_NET_CONFIGURED; 2183 retcode = ERR_NET_CONFIGURED;
2184 goto fail; 2184 goto fail;
2185 } 2185 }
@@ -2200,7 +2200,7 @@ int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
2200 goto fail; 2200 goto fail;
2201 } 2201 }
2202 2202
2203 retcode = check_net_options(tconn, new_conf); 2203 retcode = check_net_options(connection, new_conf);
2204 if (retcode != NO_ERROR) 2204 if (retcode != NO_ERROR)
2205 goto fail; 2205 goto fail;
2206 2206
@@ -2210,40 +2210,40 @@ int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
2210 2210
2211 ((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0; 2211 ((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;
2212 2212
2213 conn_flush_workqueue(tconn); 2213 conn_flush_workqueue(connection);
2214 2214
2215 mutex_lock(&tconn->conf_update); 2215 mutex_lock(&connection->conf_update);
2216 old_conf = tconn->net_conf; 2216 old_conf = connection->net_conf;
2217 if (old_conf) { 2217 if (old_conf) {
2218 retcode = ERR_NET_CONFIGURED; 2218 retcode = ERR_NET_CONFIGURED;
2219 mutex_unlock(&tconn->conf_update); 2219 mutex_unlock(&connection->conf_update);
2220 goto fail; 2220 goto fail;
2221 } 2221 }
2222 rcu_assign_pointer(tconn->net_conf, new_conf); 2222 rcu_assign_pointer(connection->net_conf, new_conf);
2223 2223
2224 conn_free_crypto(tconn); 2224 conn_free_crypto(connection);
2225 tconn->cram_hmac_tfm = crypto.cram_hmac_tfm; 2225 connection->cram_hmac_tfm = crypto.cram_hmac_tfm;
2226 tconn->integrity_tfm = crypto.integrity_tfm; 2226 connection->integrity_tfm = crypto.integrity_tfm;
2227 tconn->csums_tfm = crypto.csums_tfm; 2227 connection->csums_tfm = crypto.csums_tfm;
2228 tconn->verify_tfm = crypto.verify_tfm; 2228 connection->verify_tfm = crypto.verify_tfm;
2229 2229
2230 tconn->my_addr_len = nla_len(adm_ctx.my_addr); 2230 connection->my_addr_len = nla_len(adm_ctx.my_addr);
2231 memcpy(&tconn->my_addr, nla_data(adm_ctx.my_addr), tconn->my_addr_len); 2231 memcpy(&connection->my_addr, nla_data(adm_ctx.my_addr), connection->my_addr_len);
2232 tconn->peer_addr_len = nla_len(adm_ctx.peer_addr); 2232 connection->peer_addr_len = nla_len(adm_ctx.peer_addr);
2233 memcpy(&tconn->peer_addr, nla_data(adm_ctx.peer_addr), tconn->peer_addr_len); 2233 memcpy(&connection->peer_addr, nla_data(adm_ctx.peer_addr), connection->peer_addr_len);
2234 2234
2235 mutex_unlock(&tconn->conf_update); 2235 mutex_unlock(&connection->conf_update);
2236 2236
2237 rcu_read_lock(); 2237 rcu_read_lock();
2238 idr_for_each_entry(&tconn->volumes, device, i) { 2238 idr_for_each_entry(&connection->volumes, device, i) {
2239 device->send_cnt = 0; 2239 device->send_cnt = 0;
2240 device->recv_cnt = 0; 2240 device->recv_cnt = 0;
2241 } 2241 }
2242 rcu_read_unlock(); 2242 rcu_read_unlock();
2243 2243
2244 retcode = conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE); 2244 retcode = conn_request_state(connection, NS(conn, C_UNCONNECTED), CS_VERBOSE);
2245 2245
2246 conn_reconfig_done(tconn); 2246 conn_reconfig_done(connection);
2247 drbd_adm_finish(info, retcode); 2247 drbd_adm_finish(info, retcode);
2248 return 0; 2248 return 0;
2249 2249
@@ -2251,17 +2251,17 @@ fail:
2251 free_crypto(&crypto); 2251 free_crypto(&crypto);
2252 kfree(new_conf); 2252 kfree(new_conf);
2253 2253
2254 conn_reconfig_done(tconn); 2254 conn_reconfig_done(connection);
2255out: 2255out:
2256 drbd_adm_finish(info, retcode); 2256 drbd_adm_finish(info, retcode);
2257 return 0; 2257 return 0;
2258} 2258}
2259 2259
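drbd_adm_connect() first scans all existing connections for an endpoint clash; since every reconfiguration path is serialized on genl_lock(), the list walk needs no further locking, and the comparison is just a length check plus memcmp() over the stored sockaddr. The same scan as a self-contained sketch with a hypothetical endpoint type:

#include <linux/list.h>
#include <linux/socket.h>
#include <linux/string.h>
#include <linux/types.h>

struct endpoint {
	struct list_head list;
	struct sockaddr_storage addr;
	int addr_len;
};

/* True if any registered endpoint already uses this address.  The
 * caller must serialize against list changes (genl_lock() in DRBD). */
static bool addr_in_use(struct list_head *head, void *addr, int addr_len)
{
	struct endpoint *ep;

	list_for_each_entry(ep, head, list) {
		if (ep->addr_len == addr_len &&
		    !memcmp(&ep->addr, addr, addr_len))
			return true;
	}
	return false;
}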
2260static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool force) 2260static enum drbd_state_rv conn_try_disconnect(struct drbd_connection *connection, bool force)
2261{ 2261{
2262 enum drbd_state_rv rv; 2262 enum drbd_state_rv rv;
2263 2263
2264 rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING), 2264 rv = conn_request_state(connection, NS(conn, C_DISCONNECTING),
2265 force ? CS_HARD : 0); 2265 force ? CS_HARD : 0);
2266 2266
2267 switch (rv) { 2267 switch (rv) {
@@ -2271,18 +2271,18 @@ static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool for
2271 return SS_SUCCESS; 2271 return SS_SUCCESS;
2272 case SS_PRIMARY_NOP: 2272 case SS_PRIMARY_NOP:
2273 /* Our state checking code wants to see the peer outdated. */ 2273 /* Our state checking code wants to see the peer outdated. */
2274 rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING, pdsk, D_OUTDATED), 0); 2274 rv = conn_request_state(connection, NS2(conn, C_DISCONNECTING, pdsk, D_OUTDATED), 0);
2275 2275
2276 if (rv == SS_OUTDATE_WO_CONN) /* lost connection before graceful disconnect succeeded */ 2276 if (rv == SS_OUTDATE_WO_CONN) /* lost connection before graceful disconnect succeeded */
2277 rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_VERBOSE); 2277 rv = conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_VERBOSE);
2278 2278
2279 break; 2279 break;
2280 case SS_CW_FAILED_BY_PEER: 2280 case SS_CW_FAILED_BY_PEER:
2281 /* The peer probably wants to see us outdated. */ 2281 /* The peer probably wants to see us outdated. */
2282 rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING, 2282 rv = conn_request_state(connection, NS2(conn, C_DISCONNECTING,
2283 disk, D_OUTDATED), 0); 2283 disk, D_OUTDATED), 0);
2284 if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) { 2284 if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) {
2285 rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING), 2285 rv = conn_request_state(connection, NS(conn, C_DISCONNECTING),
2286 CS_HARD); 2286 CS_HARD);
2287 } 2287 }
2288 break; 2288 break;
@@ -2296,7 +2296,7 @@ static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool for
2296 * The state handling only uses drbd_thread_stop_nowait(), 2296 * The state handling only uses drbd_thread_stop_nowait(),
2297 * we want to really wait here until the receiver is no more. 2297 * we want to really wait here until the receiver is no more.
2298 */ 2298 */
2299 drbd_thread_stop(&adm_ctx.tconn->receiver); 2299 drbd_thread_stop(&adm_ctx.connection->receiver);
2300 2300
2301 /* Race breaker. This additional state change request may be 2301 /* Race breaker. This additional state change request may be
2302 * necessary, if this was a forced disconnect during a receiver 2302 * necessary, if this was a forced disconnect during a receiver
@@ -2304,10 +2304,10 @@ static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool for
2304 * after drbdd_init() returned. Typically, we should be 2304 * after drbdd_init() returned. Typically, we should be
2305 * C_STANDALONE already, now, and this becomes a no-op. 2305 * C_STANDALONE already, now, and this becomes a no-op.
2306 */ 2306 */
2307 rv2 = conn_request_state(tconn, NS(conn, C_STANDALONE), 2307 rv2 = conn_request_state(connection, NS(conn, C_STANDALONE),
2308 CS_VERBOSE | CS_HARD); 2308 CS_VERBOSE | CS_HARD);
2309 if (rv2 < SS_SUCCESS) 2309 if (rv2 < SS_SUCCESS)
2310 conn_err(tconn, 2310 conn_err(connection,
2311 "unexpected rv2=%d in conn_try_disconnect()\n", 2311 "unexpected rv2=%d in conn_try_disconnect()\n",
2312 rv2); 2312 rv2);
2313 } 2313 }
@@ -2317,7 +2317,7 @@ static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool for
2317int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info) 2317int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
2318{ 2318{
2319 struct disconnect_parms parms; 2319 struct disconnect_parms parms;
2320 struct drbd_tconn *tconn; 2320 struct drbd_connection *connection;
2321 enum drbd_state_rv rv; 2321 enum drbd_state_rv rv;
2322 enum drbd_ret_code retcode; 2322 enum drbd_ret_code retcode;
2323 int err; 2323 int err;
@@ -2328,7 +2328,7 @@ int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
2328 if (retcode != NO_ERROR) 2328 if (retcode != NO_ERROR)
2329 goto fail; 2329 goto fail;
2330 2330
2331 tconn = adm_ctx.tconn; 2331 connection = adm_ctx.connection;
2332 memset(&parms, 0, sizeof(parms)); 2332 memset(&parms, 0, sizeof(parms));
2333 if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) { 2333 if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
2334 err = disconnect_parms_from_attrs(&parms, info); 2334 err = disconnect_parms_from_attrs(&parms, info);
@@ -2339,7 +2339,7 @@ int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
2339 } 2339 }
2340 } 2340 }
2341 2341
2342 rv = conn_try_disconnect(tconn, parms.force_disconnect); 2342 rv = conn_try_disconnect(connection, parms.force_disconnect);
2343 if (rv < SS_SUCCESS) 2343 if (rv < SS_SUCCESS)
2344 retcode = rv; /* FIXME: Type mismatch. */ 2344 retcode = rv; /* FIXME: Type mismatch. */
2345 else 2345 else
@@ -2357,7 +2357,7 @@ void resync_after_online_grow(struct drbd_device *device)
2357 if (device->state.role != device->state.peer) 2357 if (device->state.role != device->state.peer)
2358 iass = (device->state.role == R_PRIMARY); 2358 iass = (device->state.role == R_PRIMARY);
2359 else 2359 else
2360 iass = test_bit(RESOLVE_CONFLICTS, &device->tconn->flags); 2360 iass = test_bit(RESOLVE_CONFLICTS, &device->connection->flags);
2361 2361
2362 if (iass) 2362 if (iass)
2363 drbd_start_resync(device, C_SYNC_SOURCE); 2363 drbd_start_resync(device, C_SYNC_SOURCE);
@@ -2412,7 +2412,7 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
2412 goto fail_ldev; 2412 goto fail_ldev;
2413 } 2413 }
2414 2414
2415 if (rs.no_resync && device->tconn->agreed_pro_version < 93) { 2415 if (rs.no_resync && device->connection->agreed_pro_version < 93) {
2416 retcode = ERR_NEED_APV_93; 2416 retcode = ERR_NEED_APV_93;
2417 goto fail_ldev; 2417 goto fail_ldev;
2418 } 2418 }
@@ -2454,12 +2454,12 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
2454 device->ldev->known_size = drbd_get_capacity(device->ldev->backing_bdev); 2454 device->ldev->known_size = drbd_get_capacity(device->ldev->backing_bdev);
2455 2455
2456 if (new_disk_conf) { 2456 if (new_disk_conf) {
2457 mutex_lock(&device->tconn->conf_update); 2457 mutex_lock(&device->connection->conf_update);
2458 old_disk_conf = device->ldev->disk_conf; 2458 old_disk_conf = device->ldev->disk_conf;
2459 *new_disk_conf = *old_disk_conf; 2459 *new_disk_conf = *old_disk_conf;
2460 new_disk_conf->disk_size = (sector_t)rs.resize_size; 2460 new_disk_conf->disk_size = (sector_t)rs.resize_size;
2461 rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf); 2461 rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
2462 mutex_unlock(&device->tconn->conf_update); 2462 mutex_unlock(&device->connection->conf_update);
2463 synchronize_rcu(); 2463 synchronize_rcu();
2464 kfree(old_disk_conf); 2464 kfree(old_disk_conf);
2465 } 2465 }
@@ -2499,7 +2499,7 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
2499int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info) 2499int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
2500{ 2500{
2501 enum drbd_ret_code retcode; 2501 enum drbd_ret_code retcode;
2502 struct drbd_tconn *tconn; 2502 struct drbd_connection *connection;
2503 struct res_opts res_opts; 2503 struct res_opts res_opts;
2504 int err; 2504 int err;
2505 2505
@@ -2508,9 +2508,9 @@ int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
2508 return retcode; 2508 return retcode;
2509 if (retcode != NO_ERROR) 2509 if (retcode != NO_ERROR)
2510 goto fail; 2510 goto fail;
2511 tconn = adm_ctx.tconn; 2511 connection = adm_ctx.connection;
2512 2512
2513 res_opts = tconn->res_opts; 2513 res_opts = connection->res_opts;
2514 if (should_set_defaults(info)) 2514 if (should_set_defaults(info))
2515 set_res_opts_defaults(&res_opts); 2515 set_res_opts_defaults(&res_opts);
2516 2516
@@ -2521,7 +2521,7 @@ int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
2521 goto fail; 2521 goto fail;
2522 } 2522 }
2523 2523
2524 err = set_resource_options(tconn, &res_opts); 2524 err = set_resource_options(connection, &res_opts);
2525 if (err) { 2525 if (err) {
2526 retcode = ERR_INVALID_REQUEST; 2526 retcode = ERR_INVALID_REQUEST;
2527 if (err == -ENOMEM) 2527 if (err == -ENOMEM)
@@ -2710,9 +2710,9 @@ int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
2710 retcode = drbd_request_state(device, NS3(susp, 0, susp_nod, 0, susp_fen, 0)); 2710 retcode = drbd_request_state(device, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
2711 if (retcode == SS_SUCCESS) { 2711 if (retcode == SS_SUCCESS) {
2712 if (device->state.conn < C_CONNECTED) 2712 if (device->state.conn < C_CONNECTED)
2713 tl_clear(device->tconn); 2713 tl_clear(device->connection);
2714 if (device->state.disk == D_DISKLESS || device->state.disk == D_FAILED) 2714 if (device->state.disk == D_DISKLESS || device->state.disk == D_FAILED)
2715 tl_restart(device->tconn, FAIL_FROZEN_DISK_IO); 2715 tl_restart(device->connection, FAIL_FROZEN_DISK_IO);
2716 } 2716 }
2717 drbd_resume_io(device); 2717 drbd_resume_io(device);
2718 2718
@@ -2726,8 +2726,7 @@ int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
2726 return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED)); 2726 return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
2727} 2727}
2728 2728
2729static int nla_put_drbd_cfg_context(struct sk_buff *skb, 2729static int nla_put_drbd_cfg_context(struct sk_buff *skb, struct drbd_connection *connection, unsigned vnr)
2730 struct drbd_tconn *tconn, unsigned vnr)
2731{ 2730{
2732 struct nlattr *nla; 2731 struct nlattr *nla;
2733 nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT); 2732 nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
@@ -2736,13 +2735,13 @@ static int nla_put_drbd_cfg_context(struct sk_buff *skb,
2736 if (vnr != VOLUME_UNSPECIFIED && 2735 if (vnr != VOLUME_UNSPECIFIED &&
2737 nla_put_u32(skb, T_ctx_volume, vnr)) 2736 nla_put_u32(skb, T_ctx_volume, vnr))
2738 goto nla_put_failure; 2737 goto nla_put_failure;
2739 if (nla_put_string(skb, T_ctx_resource_name, tconn->name)) 2738 if (nla_put_string(skb, T_ctx_resource_name, connection->name))
2740 goto nla_put_failure; 2739 goto nla_put_failure;
2741 if (tconn->my_addr_len && 2740 if (connection->my_addr_len &&
2742 nla_put(skb, T_ctx_my_addr, tconn->my_addr_len, &tconn->my_addr)) 2741 nla_put(skb, T_ctx_my_addr, connection->my_addr_len, &connection->my_addr))
2743 goto nla_put_failure; 2742 goto nla_put_failure;
2744 if (tconn->peer_addr_len && 2743 if (connection->peer_addr_len &&
2745 nla_put(skb, T_ctx_peer_addr, tconn->peer_addr_len, &tconn->peer_addr)) 2744 nla_put(skb, T_ctx_peer_addr, connection->peer_addr_len, &connection->peer_addr))
2746 goto nla_put_failure; 2745 goto nla_put_failure;
2747 nla_nest_end(skb, nla); 2746 nla_nest_end(skb, nla);
2748 return 0; 2747 return 0;
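
nla_put_drbd_cfg_context() follows the stock netlink nesting idiom: open a nest, emit the member attributes, close the nest, and funnel every skb-tailroom failure to one label. The skeleton, with placeholder attribute types standing in for the DRBD_NLA_*/T_ctx_* constants:

    static int put_cfg_context(struct sk_buff *skb, const char *res_name,
                               unsigned vnr)
    {
            struct nlattr *nla;

            nla = nla_nest_start(skb, 1 /* e.g. DRBD_NLA_CFG_CONTEXT */);
            if (!nla)
                    return -EMSGSIZE;
            if (nla_put_u32(skb, 2 /* e.g. T_ctx_volume */, vnr) ||
                nla_put_string(skb, 3 /* e.g. T_ctx_resource_name */, res_name)) {
                    nla_nest_cancel(skb, nla);       /* undo the partial nest */
                    return -EMSGSIZE;
            }
            nla_nest_end(skb, nla);                  /* patch in the nest len */
            return 0;
    }
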
@@ -2779,10 +2778,10 @@ static int nla_put_status_info(struct sk_buff *skb, struct drbd_device *device,
2779 2778
2780 /* We need to add connection name and volume number information still. 2779 /* We need to add connection name and volume number information still.
2781 * Minor number is in drbd_genlmsghdr. */ 2780 * Minor number is in drbd_genlmsghdr. */
2782 if (nla_put_drbd_cfg_context(skb, device->tconn, device->vnr)) 2781 if (nla_put_drbd_cfg_context(skb, device->connection, device->vnr))
2783 goto nla_put_failure; 2782 goto nla_put_failure;
2784 2783
2785 if (res_opts_to_skb(skb, &device->tconn->res_opts, exclude_sensitive)) 2784 if (res_opts_to_skb(skb, &device->connection->res_opts, exclude_sensitive))
2786 goto nla_put_failure; 2785 goto nla_put_failure;
2787 2786
2788 rcu_read_lock(); 2787 rcu_read_lock();
@@ -2795,7 +2794,7 @@ static int nla_put_status_info(struct sk_buff *skb, struct drbd_device *device,
2795 if (!err) { 2794 if (!err) {
2796 struct net_conf *nc; 2795 struct net_conf *nc;
2797 2796
2798 nc = rcu_dereference(device->tconn->net_conf); 2797 nc = rcu_dereference(device->connection->net_conf);
2799 if (nc) 2798 if (nc)
2800 err = net_conf_to_skb(skb, nc, exclude_sensitive); 2799 err = net_conf_to_skb(skb, nc, exclude_sensitive);
2801 } 2800 }
@@ -2899,18 +2898,18 @@ static int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
2899{ 2898{
2900 struct drbd_device *device; 2899 struct drbd_device *device;
2901 struct drbd_genlmsghdr *dh; 2900 struct drbd_genlmsghdr *dh;
2902 struct drbd_tconn *pos = (struct drbd_tconn*)cb->args[0]; 2901 struct drbd_connection *pos = (struct drbd_connection *)cb->args[0];
2903 struct drbd_tconn *tconn = NULL; 2902 struct drbd_connection *connection = NULL;
2904 struct drbd_tconn *tmp; 2903 struct drbd_connection *tmp;
2905 unsigned volume = cb->args[1]; 2904 unsigned volume = cb->args[1];
2906 2905
2907 /* Open-coded, deferred iteration: 2906 /* Open-coded, deferred iteration:
2908 * list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) { 2907 * list_for_each_entry_safe(connection, tmp, &drbd_connections, connections) {
2909 * idr_for_each_entry(&tconn->volumes, device, i) { 2908 * idr_for_each_entry(&connection->volumes, device, i) {
2910 * ... 2909 * ...
2911 * } 2910 * }
2912 * } 2911 * }
2913 * where tconn is cb->args[0]; 2912 * where connection is cb->args[0];
2914 * and i is cb->args[1]; 2913 * and i is cb->args[1];
2915 * 2914 *
2916 * cb->args[2] indicates if we shall loop over all resources, 2915 * cb->args[2] indicates if we shall loop over all resources,
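
The comment above (it continues past this hunk) describes the standard netlink dump contract: the callback is invoked repeatedly, each call fills one skb, and an empty skb ends the dump, so the iteration state must be parked in the long[] cb->args slots between calls and revalidated on re-entry, because the saved connection may have been destroyed in the meantime. A much-simplified sketch of such a resumable, self-revalidating dump, with emit_one() assumed:

    static LIST_HEAD(items);
    struct item { struct list_head list; };
    static int emit_one(struct sk_buff *skb, struct item *it); /* assumed */

    static int dump_items(struct sk_buff *skb, struct netlink_callback *cb)
    {
            struct item *cursor = (struct item *)cb->args[0];
            struct item *it, *resume = NULL;
            bool found = (cursor == NULL);       /* no cursor: start at head */

            rcu_read_lock();
            list_for_each_entry_rcu(it, &items, list) {
                    if (!found) {
                            found = (it == cursor);  /* revalidate cursor */
                            if (!found)
                                    continue;
                    }
                    if (emit_one(skb, it)) {     /* skb full: resume here */
                            resume = it;
                            break;
                    }
            }
            rcu_read_unlock();

            cb->args[0] = (long)resume;          /* NULL once exhausted   */
            return skb->len;                     /* 0 terminates the dump */
    }

If the parked cursor was removed from the list in the meantime, this sketch simply falls off the end and the dump terminates, much like get_one_status() ending the dump when the parked connection no longer exists.
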
@@ -2927,36 +2926,36 @@ static int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
2927 /* synchronize with conn_create()/conn_destroy() */ 2926 /* synchronize with conn_create()/conn_destroy() */
2928 rcu_read_lock(); 2927 rcu_read_lock();
2929 /* revalidate iterator position */ 2928 /* revalidate iterator position */
2930 list_for_each_entry_rcu(tmp, &drbd_tconns, all_tconn) { 2929 list_for_each_entry_rcu(tmp, &drbd_connections, connections) {
2931 if (pos == NULL) { 2930 if (pos == NULL) {
2932 /* first iteration */ 2931 /* first iteration */
2933 pos = tmp; 2932 pos = tmp;
2934 tconn = pos; 2933 connection = pos;
2935 break; 2934 break;
2936 } 2935 }
2937 if (tmp == pos) { 2936 if (tmp == pos) {
2938 tconn = pos; 2937 connection = pos;
2939 break; 2938 break;
2940 } 2939 }
2941 } 2940 }
2942 if (tconn) { 2941 if (connection) {
2943next_tconn: 2942next_connection:
2944 device = idr_get_next(&tconn->volumes, &volume); 2943 device = idr_get_next(&connection->volumes, &volume);
2945 if (!device) { 2944 if (!device) {
2946 /* No more volumes to dump on this tconn. 2945 /* No more volumes to dump on this connection.
2947 * Advance tconn iterator. */ 2946 * Advance connection iterator. */
2948 pos = list_entry_rcu(tconn->all_tconn.next, 2947 pos = list_entry_rcu(connection->connections.next,
2949 struct drbd_tconn, all_tconn); 2948 struct drbd_connection, connections);
2950 /* Did we dump any volume on this tconn yet? */ 2949 /* Did we dump any volume on this connection yet? */
2951 if (volume != 0) { 2950 if (volume != 0) {
2952 /* If we reached the end of the list, 2951 /* If we reached the end of the list,
2953 * or only a single resource dump was requested, 2952 * or only a single resource dump was requested,
2954 * we are done. */ 2953 * we are done. */
2955 if (&pos->all_tconn == &drbd_tconns || cb->args[2]) 2954 if (&pos->connections == &drbd_connections || cb->args[2])
2956 goto out; 2955 goto out;
2957 volume = 0; 2956 volume = 0;
2958 tconn = pos; 2957 connection = pos;
2959 goto next_tconn; 2958 goto next_connection;
2960 } 2959 }
2961 } 2960 }
2962 2961
@@ -2967,22 +2966,22 @@ next_tconn:
2967 goto out; 2966 goto out;
2968 2967
2969 if (!device) { 2968 if (!device) {
2970 /* This is a tconn without a single volume. 2969 /* This is a connection without a single volume.
2971 * Surprisingly enough, it may have a network 2970 * Surprisingly enough, it may have a network
2972 * configuration. */ 2971 * configuration. */
2973 struct net_conf *nc; 2972 struct net_conf *nc;
2974 dh->minor = -1U; 2973 dh->minor = -1U;
2975 dh->ret_code = NO_ERROR; 2974 dh->ret_code = NO_ERROR;
2976 if (nla_put_drbd_cfg_context(skb, tconn, VOLUME_UNSPECIFIED)) 2975 if (nla_put_drbd_cfg_context(skb, connection, VOLUME_UNSPECIFIED))
2977 goto cancel; 2976 goto cancel;
2978 nc = rcu_dereference(tconn->net_conf); 2977 nc = rcu_dereference(connection->net_conf);
2979 if (nc && net_conf_to_skb(skb, nc, 1) != 0) 2978 if (nc && net_conf_to_skb(skb, nc, 1) != 0)
2980 goto cancel; 2979 goto cancel;
2981 goto done; 2980 goto done;
2982 } 2981 }
2983 2982
2984 D_ASSERT(device->vnr == volume); 2983 D_ASSERT(device->vnr == volume);
2985 D_ASSERT(device->tconn == tconn); 2984 D_ASSERT(device->connection == connection);
2986 2985
2987 dh->minor = device_to_minor(device); 2986 dh->minor = device_to_minor(device);
2988 dh->ret_code = NO_ERROR; 2987 dh->ret_code = NO_ERROR;
@@ -2994,15 +2993,15 @@ cancel:
2994 } 2993 }
2995done: 2994done:
2996 genlmsg_end(skb, dh); 2995 genlmsg_end(skb, dh);
2997 } 2996 }
2998 2997
2999out: 2998out:
3000 rcu_read_unlock(); 2999 rcu_read_unlock();
3001 /* where to start the next iteration */ 3000 /* where to start the next iteration */
3002 cb->args[0] = (long)pos; 3001 cb->args[0] = (long)pos;
3003 cb->args[1] = (pos == tconn) ? volume + 1 : 0; 3002 cb->args[1] = (pos == connection) ? volume + 1 : 0;
3004 3003
3005 /* No more tconns/volumes/minors found results in an empty skb. 3004 /* No more connections/volumes/minors found results in an empty skb.
3006 * Which will terminate the dump. */ 3005 * Which will terminate the dump. */
3007 return skb->len; 3006 return skb->len;
3008} 3007}
@@ -3022,7 +3021,7 @@ int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
3022 const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ; 3021 const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
3023 struct nlattr *nla; 3022 struct nlattr *nla;
3024 const char *resource_name; 3023 const char *resource_name;
3025 struct drbd_tconn *tconn; 3024 struct drbd_connection *connection;
3026 int maxtype; 3025 int maxtype;
3027 3026
3028 /* Is this a followup call? */ 3027 /* Is this a followup call? */
@@ -3051,18 +3050,18 @@ int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
3051 if (!nla) 3050 if (!nla)
3052 return -EINVAL; 3051 return -EINVAL;
3053 resource_name = nla_data(nla); 3052 resource_name = nla_data(nla);
3054 tconn = conn_get_by_name(resource_name); 3053 connection = conn_get_by_name(resource_name);
3055 3054
3056 if (!tconn) 3055 if (!connection)
3057 return -ENODEV; 3056 return -ENODEV;
3058 3057
3059 kref_put(&tconn->kref, &conn_destroy); /* get_one_status() (re)validates tconn by itself */ 3058 kref_put(&connection->kref, &conn_destroy); /* get_one_status() (re)validates connection by itself */
3060 3059
3061 /* prime iterators, and set "filter" mode mark: 3060 /* prime iterators, and set "filter" mode mark:
3062 * only dump this tconn. */ 3061 * only dump this connection. */
3063 cb->args[0] = (long)tconn; 3062 cb->args[0] = (long)connection;
3064 /* cb->args[1] = 0; passed in this way. */ 3063 /* cb->args[1] = 0; passed in this way. */
3065 cb->args[2] = (long)tconn; 3064 cb->args[2] = (long)connection;
3066 3065
3067dump: 3066dump:
3068 return get_one_status(skb, cb); 3067 return get_one_status(skb, cb);
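
Dropping the kref immediately after conn_get_by_name() looks unsafe but is intentional: the value parked in cb->args[0] is only ever used as an opaque cursor that get_one_status() compares against the live drbd_connections list under rcu_read_lock() before touching it, so holding a reference across netlink calls would buy nothing and would leak if userspace abandoned the dump. Storing the same pointer in cb->args[2] doubles as the single-resource filter mark. The revalidation this relies on, schematically:

    /* cursor == the pointer previously parked in cb->args[0] */
    struct drbd_connection *found = NULL, *tmp;

    list_for_each_entry_rcu(tmp, &drbd_connections, connections) {
            if (tmp == cursor) {
                    found = tmp;     /* still on the list: safe to use */
                    break;
            }
    }
    /* !found: destroyed since the last call; the dump just ends */
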
@@ -3169,7 +3168,7 @@ int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
3169 } 3168 }
3170 3169
3171 /* this is "skip initial sync", assume to be clean */ 3170 /* this is "skip initial sync", assume to be clean */
3172 if (device->state.conn == C_CONNECTED && device->tconn->agreed_pro_version >= 90 && 3171 if (device->state.conn == C_CONNECTED && device->connection->agreed_pro_version >= 90 &&
3173 device->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) { 3172 device->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
3174 dev_info(DEV, "Preparing to skip initial sync\n"); 3173 dev_info(DEV, "Preparing to skip initial sync\n");
3175 skip_initial_sync = 1; 3174 skip_initial_sync = 1;
@@ -3192,10 +3191,10 @@ int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
3192 drbd_send_uuids_skip_initial_sync(device); 3191 drbd_send_uuids_skip_initial_sync(device);
3193 _drbd_uuid_set(device, UI_BITMAP, 0); 3192 _drbd_uuid_set(device, UI_BITMAP, 0);
3194 drbd_print_uuids(device, "cleared bitmap UUID"); 3193 drbd_print_uuids(device, "cleared bitmap UUID");
3195 spin_lock_irq(&device->tconn->req_lock); 3194 spin_lock_irq(&device->connection->req_lock);
3196 _drbd_set_state(_NS2(device, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE), 3195 _drbd_set_state(_NS2(device, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
3197 CS_VERBOSE, NULL); 3196 CS_VERBOSE, NULL);
3198 spin_unlock_irq(&device->tconn->req_lock); 3197 spin_unlock_irq(&device->connection->req_lock);
3199 } 3198 }
3200 } 3199 }
3201 3200
@@ -3249,7 +3248,7 @@ int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info)
3249 if (retcode != NO_ERROR) 3248 if (retcode != NO_ERROR)
3250 goto out; 3249 goto out;
3251 3250
3252 if (adm_ctx.tconn) { 3251 if (adm_ctx.connection) {
3253 if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) { 3252 if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
3254 retcode = ERR_INVALID_REQUEST; 3253 retcode = ERR_INVALID_REQUEST;
3255 drbd_msg_put_info("resource exists"); 3254 drbd_msg_put_info("resource exists");
@@ -3288,7 +3287,7 @@ int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info)
3288 } 3287 }
3289 3288
3290 /* drbd_adm_prepare made sure already 3289 /* drbd_adm_prepare made sure already
3291 * that device->tconn and device->vnr match the request. */ 3290 * that device->connection and device->vnr match the request. */
3292 if (adm_ctx.device) { 3291 if (adm_ctx.device) {
3293 if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) 3292 if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
3294 retcode = ERR_MINOR_EXISTS; 3293 retcode = ERR_MINOR_EXISTS;
@@ -3296,7 +3295,7 @@ int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info)
3296 goto out; 3295 goto out;
3297 } 3296 }
3298 3297
3299 retcode = conn_new_minor(adm_ctx.tconn, dh->minor, adm_ctx.volume); 3298 retcode = conn_new_minor(adm_ctx.connection, dh->minor, adm_ctx.volume);
3300out: 3299out:
3301 drbd_adm_finish(info, retcode); 3300 drbd_adm_finish(info, retcode);
3302 return 0; 3301 return 0;
@@ -3311,7 +3310,7 @@ static enum drbd_ret_code adm_delete_minor(struct drbd_device *device)
3311 device->state.role == R_SECONDARY) { 3310 device->state.role == R_SECONDARY) {
3312 _drbd_request_state(device, NS(conn, C_WF_REPORT_PARAMS), 3311 _drbd_request_state(device, NS(conn, C_WF_REPORT_PARAMS),
3313 CS_VERBOSE + CS_WAIT_COMPLETE); 3312 CS_VERBOSE + CS_WAIT_COMPLETE);
3314 idr_remove(&device->tconn->volumes, device->vnr); 3313 idr_remove(&device->connection->volumes, device->vnr);
3315 idr_remove(&minors, device_to_minor(device)); 3314 idr_remove(&minors, device_to_minor(device));
3316 destroy_workqueue(device->submit.wq); 3315 destroy_workqueue(device->submit.wq);
3317 del_gendisk(device->vdisk); 3316 del_gendisk(device->vdisk);
@@ -3350,13 +3349,13 @@ int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
3350 if (retcode != NO_ERROR) 3349 if (retcode != NO_ERROR)
3351 goto out; 3350 goto out;
3352 3351
3353 if (!adm_ctx.tconn) { 3352 if (!adm_ctx.connection) {
3354 retcode = ERR_RES_NOT_KNOWN; 3353 retcode = ERR_RES_NOT_KNOWN;
3355 goto out; 3354 goto out;
3356 } 3355 }
3357 3356
3358 /* demote */ 3357 /* demote */
3359 idr_for_each_entry(&adm_ctx.tconn->volumes, device, i) { 3358 idr_for_each_entry(&adm_ctx.connection->volumes, device, i) {
3360 retcode = drbd_set_role(device, R_SECONDARY, 0); 3359 retcode = drbd_set_role(device, R_SECONDARY, 0);
3361 if (retcode < SS_SUCCESS) { 3360 if (retcode < SS_SUCCESS) {
3362 drbd_msg_put_info("failed to demote"); 3361 drbd_msg_put_info("failed to demote");
@@ -3364,14 +3363,14 @@ int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
3364 } 3363 }
3365 } 3364 }
3366 3365
3367 retcode = conn_try_disconnect(adm_ctx.tconn, 0); 3366 retcode = conn_try_disconnect(adm_ctx.connection, 0);
3368 if (retcode < SS_SUCCESS) { 3367 if (retcode < SS_SUCCESS) {
3369 drbd_msg_put_info("failed to disconnect"); 3368 drbd_msg_put_info("failed to disconnect");
3370 goto out; 3369 goto out;
3371 } 3370 }
3372 3371
3373 /* detach */ 3372 /* detach */
3374 idr_for_each_entry(&adm_ctx.tconn->volumes, device, i) { 3373 idr_for_each_entry(&adm_ctx.connection->volumes, device, i) {
3375 retcode = adm_detach(device, 0); 3374 retcode = adm_detach(device, 0);
3376 if (retcode < SS_SUCCESS || retcode > NO_ERROR) { 3375 if (retcode < SS_SUCCESS || retcode > NO_ERROR) {
3377 drbd_msg_put_info("failed to detach"); 3376 drbd_msg_put_info("failed to detach");
@@ -3379,15 +3378,15 @@ int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
3379 } 3378 }
3380 } 3379 }
3381 3380
3382 /* If we reach this, all volumes (of this tconn) are Secondary, 3381 /* If we reach this, all volumes (of this connection) are Secondary,
3383 * Disconnected, Diskless, aka Unconfigured. Make sure all threads have 3382 * Disconnected, Diskless, aka Unconfigured. Make sure all threads have
3384 * actually stopped, state handling only does drbd_thread_stop_nowait(). */ 3383 * actually stopped, state handling only does drbd_thread_stop_nowait(). */
3385 drbd_thread_stop(&adm_ctx.tconn->worker); 3384 drbd_thread_stop(&adm_ctx.connection->worker);
3386 3385
3387 /* Now, nothing can fail anymore */ 3386 /* Now, nothing can fail anymore */
3388 3387
3389 /* delete volumes */ 3388 /* delete volumes */
3390 idr_for_each_entry(&adm_ctx.tconn->volumes, device, i) { 3389 idr_for_each_entry(&adm_ctx.connection->volumes, device, i) {
3391 retcode = adm_delete_minor(device); 3390 retcode = adm_delete_minor(device);
3392 if (retcode != NO_ERROR) { 3391 if (retcode != NO_ERROR) {
3393 /* "can not happen" */ 3392 /* "can not happen" */
@@ -3397,10 +3396,10 @@ int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
3397 } 3396 }
3398 3397
3399 /* delete connection */ 3398 /* delete connection */
3400 if (conn_lowest_minor(adm_ctx.tconn) < 0) { 3399 if (conn_lowest_minor(adm_ctx.connection) < 0) {
3401 list_del_rcu(&adm_ctx.tconn->all_tconn); 3400 list_del_rcu(&adm_ctx.connection->connections);
3402 synchronize_rcu(); 3401 synchronize_rcu();
3403 kref_put(&adm_ctx.tconn->kref, &conn_destroy); 3402 kref_put(&adm_ctx.connection->kref, &conn_destroy);
3404 3403
3405 retcode = NO_ERROR; 3404 retcode = NO_ERROR;
3406 } else { 3405 } else {
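
drbd_adm_down() is the full teardown ladder, and the ordering is load-bearing: demote first so no new writes can originate, disconnect so the peer observes a clean state change, detach the backing devices, stop the worker only once every volume is Unconfigured (state handling alone only does drbd_thread_stop_nowait()), and delete the connection last, after the minors are gone. As an outline of the sequence above:

    /* drbd_adm_down(), schematically:
     *
     *   for each volume:  drbd_set_role(device, R_SECONDARY, 0)    demote
     *   conn_try_disconnect(connection, 0)                         disconnect
     *   for each volume:  adm_detach(device, 0)                    detach
     *   drbd_thread_stop(&connection->worker)                      quiesce
     *   for each volume:  adm_delete_minor(device)                 delete minors
     *   if (conn_lowest_minor(connection) < 0)                     delete conn:
     *           list_del_rcu(); synchronize_rcu(); kref_put();
     */
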
@@ -3424,10 +3423,10 @@ int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
3424 if (retcode != NO_ERROR) 3423 if (retcode != NO_ERROR)
3425 goto out; 3424 goto out;
3426 3425
3427 if (conn_lowest_minor(adm_ctx.tconn) < 0) { 3426 if (conn_lowest_minor(adm_ctx.connection) < 0) {
3428 list_del_rcu(&adm_ctx.tconn->all_tconn); 3427 list_del_rcu(&adm_ctx.connection->connections);
3429 synchronize_rcu(); 3428 synchronize_rcu();
3430 kref_put(&adm_ctx.tconn->kref, &conn_destroy); 3429 kref_put(&adm_ctx.connection->kref, &conn_destroy);
3431 3430
3432 retcode = NO_ERROR; 3431 retcode = NO_ERROR;
3433 } else { 3432 } else {
@@ -3435,7 +3434,7 @@ int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
3435 } 3434 }
3436 3435
3437 if (retcode == NO_ERROR) 3436 if (retcode == NO_ERROR)
3438 drbd_thread_stop(&adm_ctx.tconn->worker); 3437 drbd_thread_stop(&adm_ctx.connection->worker);
3439out: 3438out:
3440 drbd_adm_finish(info, retcode); 3439 drbd_adm_finish(info, retcode);
3441 return 0; 3440 return 0;
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c
index 3e21322833d9..9c4d413655e3 100644
--- a/drivers/block/drbd/drbd_proc.c
+++ b/drivers/block/drbd/drbd_proc.c
@@ -251,7 +251,7 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
251 /* reset device->congestion_reason */ 251 /* reset device->congestion_reason */
252 bdi_rw_congested(&device->rq_queue->backing_dev_info); 252 bdi_rw_congested(&device->rq_queue->backing_dev_info);
253 253
254 nc = rcu_dereference(device->tconn->net_conf); 254 nc = rcu_dereference(device->connection->net_conf);
255 wp = nc ? nc->wire_protocol - DRBD_PROT_A + 'A' : ' '; 255 wp = nc ? nc->wire_protocol - DRBD_PROT_A + 'A' : ' ';
256 seq_printf(seq, 256 seq_printf(seq,
257 "%2d: cs:%s ro:%s/%s ds:%s/%s %c %c%c%c%c%c%c\n" 257 "%2d: cs:%s ro:%s/%s ds:%s/%s %c %c%c%c%c%c%c\n"
@@ -280,8 +280,8 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
280 atomic_read(&device->rs_pending_cnt), 280 atomic_read(&device->rs_pending_cnt),
281 atomic_read(&device->unacked_cnt), 281 atomic_read(&device->unacked_cnt),
282 atomic_read(&device->ap_bio_cnt), 282 atomic_read(&device->ap_bio_cnt),
283 device->tconn->epochs, 283 device->connection->epochs,
284 write_ordering_chars[device->tconn->write_ordering] 284 write_ordering_chars[device->connection->write_ordering]
285 ); 285 );
286 seq_printf(seq, " oos:%llu\n", 286 seq_printf(seq, " oos:%llu\n",
287 Bit2KB((unsigned long long) 287 Bit2KB((unsigned long long)
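
For orientation, the seq_printf() calls above emit one record per minor in /proc/drbd; a typical record (field values invented) looks roughly like:

     0: cs:Connected ro:Primary/Secondary ds:UpToDate/UpToDate C r-----
        ns:1024 nr:0 dw:1024 dr:2048 al:3 bm:0 lo:0 pe:0 ua:0 ap:0 ep:1 wo:f oos:0

where the ep: and wo: fields are exactly the connection->epochs counter and the write-ordering character this hunk now reads through device->connection.
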
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 281112f32a9e..42dbf5d86a43 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -62,11 +62,11 @@ enum finish_epoch {
62 FE_RECYCLED, 62 FE_RECYCLED,
63}; 63};
64 64
65static int drbd_do_features(struct drbd_tconn *tconn); 65static int drbd_do_features(struct drbd_connection *connection);
66static int drbd_do_auth(struct drbd_tconn *tconn); 66static int drbd_do_auth(struct drbd_connection *connection);
67static int drbd_disconnected(struct drbd_device *device); 67static int drbd_disconnected(struct drbd_device *device);
68 68
69static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *, struct drbd_epoch *, enum epoch_event); 69static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *, struct drbd_epoch *, enum epoch_event);
70static int e_end_block(struct drbd_work *, int); 70static int e_end_block(struct drbd_work *, int);
71 71
72 72
@@ -221,9 +221,9 @@ static void drbd_kick_lo_and_reclaim_net(struct drbd_device *device)
221 LIST_HEAD(reclaimed); 221 LIST_HEAD(reclaimed);
222 struct drbd_peer_request *peer_req, *t; 222 struct drbd_peer_request *peer_req, *t;
223 223
224 spin_lock_irq(&device->tconn->req_lock); 224 spin_lock_irq(&device->connection->req_lock);
225 reclaim_finished_net_peer_reqs(device, &reclaimed); 225 reclaim_finished_net_peer_reqs(device, &reclaimed);
226 spin_unlock_irq(&device->tconn->req_lock); 226 spin_unlock_irq(&device->connection->req_lock);
227 227
228 list_for_each_entry_safe(peer_req, t, &reclaimed, w.list) 228 list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
229 drbd_free_net_peer_req(device, peer_req); 229 drbd_free_net_peer_req(device, peer_req);
@@ -252,7 +252,7 @@ struct page *drbd_alloc_pages(struct drbd_device *device, unsigned int number,
252 /* Yes, we may run up to @number over max_buffers. If we 252 /* Yes, we may run up to @number over max_buffers. If we
253 * follow it strictly, the admin will get it wrong anyway. */ 253 * follow it strictly, the admin will get it wrong anyway. */
254 rcu_read_lock(); 254 rcu_read_lock();
255 nc = rcu_dereference(device->tconn->net_conf); 255 nc = rcu_dereference(device->connection->net_conf);
256 mxb = nc ? nc->max_buffers : 1000000; 256 mxb = nc ? nc->max_buffers : 1000000;
257 rcu_read_unlock(); 257 rcu_read_unlock();
258 258
@@ -288,7 +288,7 @@ struct page *drbd_alloc_pages(struct drbd_device *device, unsigned int number,
288} 288}
289 289
290/* Must not be used from irq, as that may deadlock: see drbd_alloc_pages. 290/* Must not be used from irq, as that may deadlock: see drbd_alloc_pages.
291 * Is also used from inside another spin_lock_irq(&device->tconn->req_lock); 291 * Is also used from inside another spin_lock_irq(&device->connection->req_lock);
292 * Either links the page chain back to the global pool, 292 * Either links the page chain back to the global pool,
293 * or returns all pages to the system. */ 293 * or returns all pages to the system. */
294static void drbd_free_pages(struct drbd_device *device, struct page *page, int is_net) 294static void drbd_free_pages(struct drbd_device *device, struct page *page, int is_net)
@@ -396,9 +396,9 @@ int drbd_free_peer_reqs(struct drbd_device *device, struct list_head *list)
396 int count = 0; 396 int count = 0;
397 int is_net = list == &device->net_ee; 397 int is_net = list == &device->net_ee;
398 398
399 spin_lock_irq(&device->tconn->req_lock); 399 spin_lock_irq(&device->connection->req_lock);
400 list_splice_init(list, &work_list); 400 list_splice_init(list, &work_list);
401 spin_unlock_irq(&device->tconn->req_lock); 401 spin_unlock_irq(&device->connection->req_lock);
402 402
403 list_for_each_entry_safe(peer_req, t, &work_list, w.list) { 403 list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
404 __drbd_free_peer_req(device, peer_req, is_net); 404 __drbd_free_peer_req(device, peer_req, is_net);
@@ -417,10 +417,10 @@ static int drbd_finish_peer_reqs(struct drbd_device *device)
417 struct drbd_peer_request *peer_req, *t; 417 struct drbd_peer_request *peer_req, *t;
418 int err = 0; 418 int err = 0;
419 419
420 spin_lock_irq(&device->tconn->req_lock); 420 spin_lock_irq(&device->connection->req_lock);
421 reclaim_finished_net_peer_reqs(device, &reclaimed); 421 reclaim_finished_net_peer_reqs(device, &reclaimed);
422 list_splice_init(&device->done_ee, &work_list); 422 list_splice_init(&device->done_ee, &work_list);
423 spin_unlock_irq(&device->tconn->req_lock); 423 spin_unlock_irq(&device->connection->req_lock);
424 424
425 list_for_each_entry_safe(peer_req, t, &reclaimed, w.list) 425 list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
426 drbd_free_net_peer_req(device, peer_req); 426 drbd_free_net_peer_req(device, peer_req);
@@ -452,19 +452,19 @@ static void _drbd_wait_ee_list_empty(struct drbd_device *device,
452 * and calling prepare_to_wait in the fast path */ 452 * and calling prepare_to_wait in the fast path */
453 while (!list_empty(head)) { 453 while (!list_empty(head)) {
454 prepare_to_wait(&device->ee_wait, &wait, TASK_UNINTERRUPTIBLE); 454 prepare_to_wait(&device->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
455 spin_unlock_irq(&device->tconn->req_lock); 455 spin_unlock_irq(&device->connection->req_lock);
456 io_schedule(); 456 io_schedule();
457 finish_wait(&device->ee_wait, &wait); 457 finish_wait(&device->ee_wait, &wait);
458 spin_lock_irq(&device->tconn->req_lock); 458 spin_lock_irq(&device->connection->req_lock);
459 } 459 }
460} 460}
461 461
462static void drbd_wait_ee_list_empty(struct drbd_device *device, 462static void drbd_wait_ee_list_empty(struct drbd_device *device,
463 struct list_head *head) 463 struct list_head *head)
464{ 464{
465 spin_lock_irq(&device->tconn->req_lock); 465 spin_lock_irq(&device->connection->req_lock);
466 _drbd_wait_ee_list_empty(device, head); 466 _drbd_wait_ee_list_empty(device, head);
467 spin_unlock_irq(&device->tconn->req_lock); 467 spin_unlock_irq(&device->connection->req_lock);
468} 468}
469 469
470static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flags) 470static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flags)
@@ -489,44 +489,44 @@ static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flag
489 return rv; 489 return rv;
490} 490}
491 491
492static int drbd_recv(struct drbd_tconn *tconn, void *buf, size_t size) 492static int drbd_recv(struct drbd_connection *connection, void *buf, size_t size)
493{ 493{
494 int rv; 494 int rv;
495 495
496 rv = drbd_recv_short(tconn->data.socket, buf, size, 0); 496 rv = drbd_recv_short(connection->data.socket, buf, size, 0);
497 497
498 if (rv < 0) { 498 if (rv < 0) {
499 if (rv == -ECONNRESET) 499 if (rv == -ECONNRESET)
500 conn_info(tconn, "sock was reset by peer\n"); 500 conn_info(connection, "sock was reset by peer\n");
501 else if (rv != -ERESTARTSYS) 501 else if (rv != -ERESTARTSYS)
502 conn_err(tconn, "sock_recvmsg returned %d\n", rv); 502 conn_err(connection, "sock_recvmsg returned %d\n", rv);
503 } else if (rv == 0) { 503 } else if (rv == 0) {
504 if (test_bit(DISCONNECT_SENT, &tconn->flags)) { 504 if (test_bit(DISCONNECT_SENT, &connection->flags)) {
505 long t; 505 long t;
506 rcu_read_lock(); 506 rcu_read_lock();
507 t = rcu_dereference(tconn->net_conf)->ping_timeo * HZ/10; 507 t = rcu_dereference(connection->net_conf)->ping_timeo * HZ/10;
508 rcu_read_unlock(); 508 rcu_read_unlock();
509 509
510 t = wait_event_timeout(tconn->ping_wait, tconn->cstate < C_WF_REPORT_PARAMS, t); 510 t = wait_event_timeout(connection->ping_wait, connection->cstate < C_WF_REPORT_PARAMS, t);
511 511
512 if (t) 512 if (t)
513 goto out; 513 goto out;
514 } 514 }
515 conn_info(tconn, "sock was shut down by peer\n"); 515 conn_info(connection, "sock was shut down by peer\n");
516 } 516 }
517 517
518 if (rv != size) 518 if (rv != size)
519 conn_request_state(tconn, NS(conn, C_BROKEN_PIPE), CS_HARD); 519 conn_request_state(connection, NS(conn, C_BROKEN_PIPE), CS_HARD);
520 520
521out: 521out:
522 return rv; 522 return rv;
523} 523}
524 524
525static int drbd_recv_all(struct drbd_tconn *tconn, void *buf, size_t size) 525static int drbd_recv_all(struct drbd_connection *connection, void *buf, size_t size)
526{ 526{
527 int err; 527 int err;
528 528
529 err = drbd_recv(tconn, buf, size); 529 err = drbd_recv(connection, buf, size);
530 if (err != size) { 530 if (err != size) {
531 if (err >= 0) 531 if (err >= 0)
532 err = -EIO; 532 err = -EIO;
@@ -535,13 +535,13 @@ static int drbd_recv_all(struct drbd_tconn *tconn, void *buf, size_t size)
535 return err; 535 return err;
536} 536}
537 537
538static int drbd_recv_all_warn(struct drbd_tconn *tconn, void *buf, size_t size) 538static int drbd_recv_all_warn(struct drbd_connection *connection, void *buf, size_t size)
539{ 539{
540 int err; 540 int err;
541 541
542 err = drbd_recv_all(tconn, buf, size); 542 err = drbd_recv_all(connection, buf, size);
543 if (err && !signal_pending(current)) 543 if (err && !signal_pending(current))
544 conn_warn(tconn, "short read (expected size %d)\n", (int)size); 544 conn_warn(connection, "short read (expected size %d)\n", (int)size);
545 return err; 545 return err;
546} 546}
547 547
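
The receive helpers stack by policy: drbd_recv_short() is the bare socket-receive wrapper, drbd_recv() layers connection-state handling on top (log a reset, wait out an intentional disconnect, push the connection to C_BROKEN_PIPE on a short read), drbd_recv_all() turns any short read into -EIO, and drbd_recv_all_warn() adds the log line. Callers pick the layer matching how much error handling they want, e.g. an all-or-nothing header read (sketch):

    struct p_header100 hdr;
    int err;

    err = drbd_recv_all_warn(connection, &hdr, sizeof(hdr));
    if (err)
            return err;     /* short reads already normalized to -EIO */
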
@@ -564,7 +564,7 @@ static void drbd_setbufsize(struct socket *sock, unsigned int snd,
564 } 564 }
565} 565}
566 566
567static struct socket *drbd_try_connect(struct drbd_tconn *tconn) 567static struct socket *drbd_try_connect(struct drbd_connection *connection)
568{ 568{
569 const char *what; 569 const char *what;
570 struct socket *sock; 570 struct socket *sock;
@@ -576,7 +576,7 @@ static struct socket *drbd_try_connect(struct drbd_tconn *tconn)
576 int disconnect_on_error = 1; 576 int disconnect_on_error = 1;
577 577
578 rcu_read_lock(); 578 rcu_read_lock();
579 nc = rcu_dereference(tconn->net_conf); 579 nc = rcu_dereference(connection->net_conf);
580 if (!nc) { 580 if (!nc) {
581 rcu_read_unlock(); 581 rcu_read_unlock();
582 return NULL; 582 return NULL;
@@ -586,16 +586,16 @@ static struct socket *drbd_try_connect(struct drbd_tconn *tconn)
586 connect_int = nc->connect_int; 586 connect_int = nc->connect_int;
587 rcu_read_unlock(); 587 rcu_read_unlock();
588 588
589 my_addr_len = min_t(int, tconn->my_addr_len, sizeof(src_in6)); 589 my_addr_len = min_t(int, connection->my_addr_len, sizeof(src_in6));
590 memcpy(&src_in6, &tconn->my_addr, my_addr_len); 590 memcpy(&src_in6, &connection->my_addr, my_addr_len);
591 591
592 if (((struct sockaddr *)&tconn->my_addr)->sa_family == AF_INET6) 592 if (((struct sockaddr *)&connection->my_addr)->sa_family == AF_INET6)
593 src_in6.sin6_port = 0; 593 src_in6.sin6_port = 0;
594 else 594 else
595 ((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */ 595 ((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */
596 596
597 peer_addr_len = min_t(int, tconn->peer_addr_len, sizeof(src_in6)); 597 peer_addr_len = min_t(int, connection->peer_addr_len, sizeof(src_in6));
598 memcpy(&peer_in6, &tconn->peer_addr, peer_addr_len); 598 memcpy(&peer_in6, &connection->peer_addr, peer_addr_len);
599 599
600 what = "sock_create_kern"; 600 what = "sock_create_kern";
601 err = sock_create_kern(((struct sockaddr *)&src_in6)->sa_family, 601 err = sock_create_kern(((struct sockaddr *)&src_in6)->sa_family,
@@ -643,17 +643,17 @@ out:
643 disconnect_on_error = 0; 643 disconnect_on_error = 0;
644 break; 644 break;
645 default: 645 default:
646 conn_err(tconn, "%s failed, err = %d\n", what, err); 646 conn_err(connection, "%s failed, err = %d\n", what, err);
647 } 647 }
648 if (disconnect_on_error) 648 if (disconnect_on_error)
649 conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD); 649 conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
650 } 650 }
651 651
652 return sock; 652 return sock;
653} 653}
654 654
655struct accept_wait_data { 655struct accept_wait_data {
656 struct drbd_tconn *tconn; 656 struct drbd_connection *connection;
657 struct socket *s_listen; 657 struct socket *s_listen;
658 struct completion door_bell; 658 struct completion door_bell;
659 void (*original_sk_state_change)(struct sock *sk); 659 void (*original_sk_state_change)(struct sock *sk);
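
accept_wait_data exists to make the passive side interruptible: prepare_listen_socket() overrides the listen socket's sk_state_change callback with drbd_incoming_connection(), which completes door_bell when a connection arrives, so drbd_wait_for_connect() can sleep on the completion with a timeout instead of blocking in kernel_accept(). The callback-override idiom, schematically (error handling elided):

    static void on_state_change(struct sock *sk)
    {
            struct accept_wait_data *ad = sk->sk_user_data;

            complete(&ad->door_bell);          /* wake the waiter ...       */
            ad->original_sk_state_change(sk);  /* ... then chain to the old */
    }

    /* installation, under the socket's callback lock: */
    write_lock_bh(&sk->sk_callback_lock);
    ad->original_sk_state_change = sk->sk_state_change;
    sk->sk_state_change = on_state_change;
    sk->sk_user_data = ad;
    write_unlock_bh(&sk->sk_callback_lock);
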
@@ -671,7 +671,7 @@ static void drbd_incoming_connection(struct sock *sk)
671 state_change(sk); 671 state_change(sk);
672} 672}
673 673
674static int prepare_listen_socket(struct drbd_tconn *tconn, struct accept_wait_data *ad) 674static int prepare_listen_socket(struct drbd_connection *connection, struct accept_wait_data *ad)
675{ 675{
676 int err, sndbuf_size, rcvbuf_size, my_addr_len; 676 int err, sndbuf_size, rcvbuf_size, my_addr_len;
677 struct sockaddr_in6 my_addr; 677 struct sockaddr_in6 my_addr;
@@ -680,7 +680,7 @@ static int prepare_listen_socket(struct drbd_tconn *tconn, struct accept_wait_da
680 const char *what; 680 const char *what;
681 681
682 rcu_read_lock(); 682 rcu_read_lock();
683 nc = rcu_dereference(tconn->net_conf); 683 nc = rcu_dereference(connection->net_conf);
684 if (!nc) { 684 if (!nc) {
685 rcu_read_unlock(); 685 rcu_read_unlock();
686 return -EIO; 686 return -EIO;
@@ -689,8 +689,8 @@ static int prepare_listen_socket(struct drbd_tconn *tconn, struct accept_wait_da
689 rcvbuf_size = nc->rcvbuf_size; 689 rcvbuf_size = nc->rcvbuf_size;
690 rcu_read_unlock(); 690 rcu_read_unlock();
691 691
692 my_addr_len = min_t(int, tconn->my_addr_len, sizeof(struct sockaddr_in6)); 692 my_addr_len = min_t(int, connection->my_addr_len, sizeof(struct sockaddr_in6));
693 memcpy(&my_addr, &tconn->my_addr, my_addr_len); 693 memcpy(&my_addr, &connection->my_addr, my_addr_len);
694 694
695 what = "sock_create_kern"; 695 what = "sock_create_kern";
696 err = sock_create_kern(((struct sockaddr *)&my_addr)->sa_family, 696 err = sock_create_kern(((struct sockaddr *)&my_addr)->sa_family,
@@ -726,8 +726,8 @@ out:
726 sock_release(s_listen); 726 sock_release(s_listen);
727 if (err < 0) { 727 if (err < 0) {
728 if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) { 728 if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
729 conn_err(tconn, "%s failed, err = %d\n", what, err); 729 conn_err(connection, "%s failed, err = %d\n", what, err);
730 conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD); 730 conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
731 } 731 }
732 } 732 }
733 733
@@ -742,14 +742,14 @@ static void unregister_state_change(struct sock *sk, struct accept_wait_data *ad
742 write_unlock_bh(&sk->sk_callback_lock); 742 write_unlock_bh(&sk->sk_callback_lock);
743} 743}
744 744
745static struct socket *drbd_wait_for_connect(struct drbd_tconn *tconn, struct accept_wait_data *ad) 745static struct socket *drbd_wait_for_connect(struct drbd_connection *connection, struct accept_wait_data *ad)
746{ 746{
747 int timeo, connect_int, err = 0; 747 int timeo, connect_int, err = 0;
748 struct socket *s_estab = NULL; 748 struct socket *s_estab = NULL;
749 struct net_conf *nc; 749 struct net_conf *nc;
750 750
751 rcu_read_lock(); 751 rcu_read_lock();
752 nc = rcu_dereference(tconn->net_conf); 752 nc = rcu_dereference(connection->net_conf);
753 if (!nc) { 753 if (!nc) {
754 rcu_read_unlock(); 754 rcu_read_unlock();
755 return NULL; 755 return NULL;
@@ -768,8 +768,8 @@ static struct socket *drbd_wait_for_connect(struct drbd_tconn *tconn, struct acc
768 err = kernel_accept(ad->s_listen, &s_estab, 0); 768 err = kernel_accept(ad->s_listen, &s_estab, 0);
769 if (err < 0) { 769 if (err < 0) {
770 if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) { 770 if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
771 conn_err(tconn, "accept failed, err = %d\n", err); 771 conn_err(connection, "accept failed, err = %d\n", err);
772 conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD); 772 conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
773 } 773 }
774 } 774 }
775 775
@@ -779,29 +779,29 @@ static struct socket *drbd_wait_for_connect(struct drbd_tconn *tconn, struct acc
779 return s_estab; 779 return s_estab;
780} 780}
781 781
782static int decode_header(struct drbd_tconn *, void *, struct packet_info *); 782static int decode_header(struct drbd_connection *, void *, struct packet_info *);
783 783
784static int send_first_packet(struct drbd_tconn *tconn, struct drbd_socket *sock, 784static int send_first_packet(struct drbd_connection *connection, struct drbd_socket *sock,
785 enum drbd_packet cmd) 785 enum drbd_packet cmd)
786{ 786{
787 if (!conn_prepare_command(tconn, sock)) 787 if (!conn_prepare_command(connection, sock))
788 return -EIO; 788 return -EIO;
789 return conn_send_command(tconn, sock, cmd, 0, NULL, 0); 789 return conn_send_command(connection, sock, cmd, 0, NULL, 0);
790} 790}
791 791
792static int receive_first_packet(struct drbd_tconn *tconn, struct socket *sock) 792static int receive_first_packet(struct drbd_connection *connection, struct socket *sock)
793{ 793{
794 unsigned int header_size = drbd_header_size(tconn); 794 unsigned int header_size = drbd_header_size(connection);
795 struct packet_info pi; 795 struct packet_info pi;
796 int err; 796 int err;
797 797
798 err = drbd_recv_short(sock, tconn->data.rbuf, header_size, 0); 798 err = drbd_recv_short(sock, connection->data.rbuf, header_size, 0);
799 if (err != header_size) { 799 if (err != header_size) {
800 if (err >= 0) 800 if (err >= 0)
801 err = -EIO; 801 err = -EIO;
802 return err; 802 return err;
803 } 803 }
804 err = decode_header(tconn, tconn->data.rbuf, &pi); 804 err = decode_header(connection, connection->data.rbuf, &pi);
805 if (err) 805 if (err)
806 return err; 806 return err;
807 return pi.cmd; 807 return pi.cmd;
@@ -838,8 +838,8 @@ int drbd_connected(struct drbd_device *device)
838 atomic_set(&device->packet_seq, 0); 838 atomic_set(&device->packet_seq, 0);
839 device->peer_seq = 0; 839 device->peer_seq = 0;
840 840
841 device->state_mutex = device->tconn->agreed_pro_version < 100 ? 841 device->state_mutex = device->connection->agreed_pro_version < 100 ?
842 &device->tconn->cstate_mutex : 842 &device->connection->cstate_mutex :
843 &device->own_state_mutex; 843 &device->own_state_mutex;
844 844
845 err = drbd_send_sync_param(device); 845 err = drbd_send_sync_param(device);
@@ -864,7 +864,7 @@ int drbd_connected(struct drbd_device *device)
864 * no point in trying again, please go standalone. 864 * no point in trying again, please go standalone.
865 * -2 We do not have a network config... 865 * -2 We do not have a network config...
866 */ 866 */
867static int conn_connect(struct drbd_tconn *tconn) 867static int conn_connect(struct drbd_connection *connection)
868{ 868{
869 struct drbd_socket sock, msock; 869 struct drbd_socket sock, msock;
870 struct drbd_device *device; 870 struct drbd_device *device;
@@ -873,50 +873,50 @@ static int conn_connect(struct drbd_tconn *tconn)
873 bool discard_my_data; 873 bool discard_my_data;
874 enum drbd_state_rv rv; 874 enum drbd_state_rv rv;
875 struct accept_wait_data ad = { 875 struct accept_wait_data ad = {
876 .tconn = tconn, 876 .connection = connection,
877 .door_bell = COMPLETION_INITIALIZER_ONSTACK(ad.door_bell), 877 .door_bell = COMPLETION_INITIALIZER_ONSTACK(ad.door_bell),
878 }; 878 };
879 879
880 clear_bit(DISCONNECT_SENT, &tconn->flags); 880 clear_bit(DISCONNECT_SENT, &connection->flags);
881 if (conn_request_state(tconn, NS(conn, C_WF_CONNECTION), CS_VERBOSE) < SS_SUCCESS) 881 if (conn_request_state(connection, NS(conn, C_WF_CONNECTION), CS_VERBOSE) < SS_SUCCESS)
882 return -2; 882 return -2;
883 883
884 mutex_init(&sock.mutex); 884 mutex_init(&sock.mutex);
885 sock.sbuf = tconn->data.sbuf; 885 sock.sbuf = connection->data.sbuf;
886 sock.rbuf = tconn->data.rbuf; 886 sock.rbuf = connection->data.rbuf;
887 sock.socket = NULL; 887 sock.socket = NULL;
888 mutex_init(&msock.mutex); 888 mutex_init(&msock.mutex);
889 msock.sbuf = tconn->meta.sbuf; 889 msock.sbuf = connection->meta.sbuf;
890 msock.rbuf = tconn->meta.rbuf; 890 msock.rbuf = connection->meta.rbuf;
891 msock.socket = NULL; 891 msock.socket = NULL;
892 892
893 /* Assume that the peer only understands protocol 80 until we know better. */ 893 /* Assume that the peer only understands protocol 80 until we know better. */
894 tconn->agreed_pro_version = 80; 894 connection->agreed_pro_version = 80;
895 895
896 if (prepare_listen_socket(tconn, &ad)) 896 if (prepare_listen_socket(connection, &ad))
897 return 0; 897 return 0;
898 898
899 do { 899 do {
900 struct socket *s; 900 struct socket *s;
901 901
902 s = drbd_try_connect(tconn); 902 s = drbd_try_connect(connection);
903 if (s) { 903 if (s) {
904 if (!sock.socket) { 904 if (!sock.socket) {
905 sock.socket = s; 905 sock.socket = s;
906 send_first_packet(tconn, &sock, P_INITIAL_DATA); 906 send_first_packet(connection, &sock, P_INITIAL_DATA);
907 } else if (!msock.socket) { 907 } else if (!msock.socket) {
908 clear_bit(RESOLVE_CONFLICTS, &tconn->flags); 908 clear_bit(RESOLVE_CONFLICTS, &connection->flags);
909 msock.socket = s; 909 msock.socket = s;
910 send_first_packet(tconn, &msock, P_INITIAL_META); 910 send_first_packet(connection, &msock, P_INITIAL_META);
911 } else { 911 } else {
912 conn_err(tconn, "Logic error in conn_connect()\n"); 912 conn_err(connection, "Logic error in conn_connect()\n");
913 goto out_release_sockets; 913 goto out_release_sockets;
914 } 914 }
915 } 915 }
916 916
917 if (sock.socket && msock.socket) { 917 if (sock.socket && msock.socket) {
918 rcu_read_lock(); 918 rcu_read_lock();
919 nc = rcu_dereference(tconn->net_conf); 919 nc = rcu_dereference(connection->net_conf);
920 timeout = nc->ping_timeo * HZ / 10; 920 timeout = nc->ping_timeo * HZ / 10;
921 rcu_read_unlock(); 921 rcu_read_unlock();
922 schedule_timeout_interruptible(timeout); 922 schedule_timeout_interruptible(timeout);
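
conn_connect() must end up with two established TCP streams per peer, and either node may be the one whose connect() lands first, so the loop above interleaves active and passive opens. The first socket a node dials out becomes its data stream, announced with P_INITIAL_DATA; the second becomes the meta stream, announced with P_INITIAL_META (acknowledgements ride the meta socket so they are never queued behind bulk data). Once both slots are filled, the ping_timeo pause lets any crossing attempts from the peer land before both sockets are rechecked. In outline:

    /* conn_connect(), schematically:
     *
     *   repeat:
     *     s = drbd_try_connect()                active open
     *       -> fill sock (P_INITIAL_DATA) or msock (P_INITIAL_META)
     *     if (sock && msock): sleep ping_timeo, recheck both, proceed
     *     s = drbd_wait_for_connect()           passive open
     *       -> classify via receive_first_packet(); crossed duplicates
     *          are resolved at the "randomize" label below
     *   until both streams are up or cstate <= C_DISCONNECTING
     */
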
@@ -927,15 +927,15 @@ static int conn_connect(struct drbd_tconn *tconn)
927 } 927 }
928 928
929retry: 929retry:
930 s = drbd_wait_for_connect(tconn, &ad); 930 s = drbd_wait_for_connect(connection, &ad);
931 if (s) { 931 if (s) {
932 int fp = receive_first_packet(tconn, s); 932 int fp = receive_first_packet(connection, s);
933 drbd_socket_okay(&sock.socket); 933 drbd_socket_okay(&sock.socket);
934 drbd_socket_okay(&msock.socket); 934 drbd_socket_okay(&msock.socket);
935 switch (fp) { 935 switch (fp) {
936 case P_INITIAL_DATA: 936 case P_INITIAL_DATA:
937 if (sock.socket) { 937 if (sock.socket) {
938 conn_warn(tconn, "initial packet S crossed\n"); 938 conn_warn(connection, "initial packet S crossed\n");
939 sock_release(sock.socket); 939 sock_release(sock.socket);
940 sock.socket = s; 940 sock.socket = s;
941 goto randomize; 941 goto randomize;
@@ -943,9 +943,9 @@ retry:
943 sock.socket = s; 943 sock.socket = s;
944 break; 944 break;
945 case P_INITIAL_META: 945 case P_INITIAL_META:
946 set_bit(RESOLVE_CONFLICTS, &tconn->flags); 946 set_bit(RESOLVE_CONFLICTS, &connection->flags);
947 if (msock.socket) { 947 if (msock.socket) {
948 conn_warn(tconn, "initial packet M crossed\n"); 948 conn_warn(connection, "initial packet M crossed\n");
949 sock_release(msock.socket); 949 sock_release(msock.socket);
950 msock.socket = s; 950 msock.socket = s;
951 goto randomize; 951 goto randomize;
@@ -953,7 +953,7 @@ retry:
953 msock.socket = s; 953 msock.socket = s;
954 break; 954 break;
955 default: 955 default:
956 conn_warn(tconn, "Error receiving initial packet\n"); 956 conn_warn(connection, "Error receiving initial packet\n");
957 sock_release(s); 957 sock_release(s);
958randomize: 958randomize:
959 if (prandom_u32() & 1) 959 if (prandom_u32() & 1)
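
When both nodes dial out at the same moment, each side can see the same slot claimed twice (the "initial packet S/M crossed" warnings above); the resolution keeps the newly accepted socket, drops the older duplicate, and flips a fair coin (prandom_u32() & 1) on whether to go back and retry; the alternative branch falls outside this hunk. Opposite outcomes on the two nodes break the symmetry, identical outcomes repeat the round, so the race ends with probability 1, after roughly two rounds in expectation:

    /* crossed-connection tie-break, schematically:
     *
     *   keep the newly accepted socket, drop the older duplicate;
     *   if (prandom_u32() & 1): retry the connect/wait round;
     *   P(resolved per round) ~ 1/2  =>  about 2 expected rounds
     */
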
@@ -961,12 +961,12 @@ randomize:
961 } 961 }
962 } 962 }
963 963
964 if (tconn->cstate <= C_DISCONNECTING) 964 if (connection->cstate <= C_DISCONNECTING)
965 goto out_release_sockets; 965 goto out_release_sockets;
966 if (signal_pending(current)) { 966 if (signal_pending(current)) {
967 flush_signals(current); 967 flush_signals(current);
968 smp_rmb(); 968 smp_rmb();
969 if (get_t_state(&tconn->receiver) == EXITING) 969 if (get_t_state(&connection->receiver) == EXITING)
970 goto out_release_sockets; 970 goto out_release_sockets;
971 } 971 }
972 972
@@ -987,12 +987,12 @@ randomize:
987 msock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE; 987 msock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE;
988 988
989 /* NOT YET ... 989 /* NOT YET ...
990 * sock.socket->sk->sk_sndtimeo = tconn->net_conf->timeout*HZ/10; 990 * sock.socket->sk->sk_sndtimeo = connection->net_conf->timeout*HZ/10;
991 * sock.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT; 991 * sock.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
992 * first set it to the P_CONNECTION_FEATURES timeout, 992 * first set it to the P_CONNECTION_FEATURES timeout,
993 * which we set to 4x the configured ping_timeout. */ 993 * which we set to 4x the configured ping_timeout. */
994 rcu_read_lock(); 994 rcu_read_lock();
995 nc = rcu_dereference(tconn->net_conf); 995 nc = rcu_dereference(connection->net_conf);
996 996
997 sock.socket->sk->sk_sndtimeo = 997 sock.socket->sk->sk_sndtimeo =
998 sock.socket->sk->sk_rcvtimeo = nc->ping_timeo*4*HZ/10; 998 sock.socket->sk->sk_rcvtimeo = nc->ping_timeo*4*HZ/10;
@@ -1009,36 +1009,36 @@ randomize:
1009 drbd_tcp_nodelay(sock.socket); 1009 drbd_tcp_nodelay(sock.socket);
1010 drbd_tcp_nodelay(msock.socket); 1010 drbd_tcp_nodelay(msock.socket);
1011 1011
1012 tconn->data.socket = sock.socket; 1012 connection->data.socket = sock.socket;
1013 tconn->meta.socket = msock.socket; 1013 connection->meta.socket = msock.socket;
1014 tconn->last_received = jiffies; 1014 connection->last_received = jiffies;
1015 1015
1016 h = drbd_do_features(tconn); 1016 h = drbd_do_features(connection);
1017 if (h <= 0) 1017 if (h <= 0)
1018 return h; 1018 return h;
1019 1019
1020 if (tconn->cram_hmac_tfm) { 1020 if (connection->cram_hmac_tfm) {
1021 /* drbd_request_state(device, NS(conn, WFAuth)); */ 1021 /* drbd_request_state(device, NS(conn, WFAuth)); */
1022 switch (drbd_do_auth(tconn)) { 1022 switch (drbd_do_auth(connection)) {
1023 case -1: 1023 case -1:
1024 conn_err(tconn, "Authentication of peer failed\n"); 1024 conn_err(connection, "Authentication of peer failed\n");
1025 return -1; 1025 return -1;
1026 case 0: 1026 case 0:
1027 conn_err(tconn, "Authentication of peer failed, trying again.\n"); 1027 conn_err(connection, "Authentication of peer failed, trying again.\n");
1028 return 0; 1028 return 0;
1029 } 1029 }
1030 } 1030 }
1031 1031
1032 tconn->data.socket->sk->sk_sndtimeo = timeout; 1032 connection->data.socket->sk->sk_sndtimeo = timeout;
1033 tconn->data.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT; 1033 connection->data.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
1034 1034
1035 if (drbd_send_protocol(tconn) == -EOPNOTSUPP) 1035 if (drbd_send_protocol(connection) == -EOPNOTSUPP)
1036 return -1; 1036 return -1;
1037 1037
1038 set_bit(STATE_SENT, &tconn->flags); 1038 set_bit(STATE_SENT, &connection->flags);
1039 1039
1040 rcu_read_lock(); 1040 rcu_read_lock();
1041 idr_for_each_entry(&tconn->volumes, device, vnr) { 1041 idr_for_each_entry(&connection->volumes, device, vnr) {
1042 kref_get(&device->kref); 1042 kref_get(&device->kref);
1043 rcu_read_unlock(); 1043 rcu_read_unlock();
1044 1044
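
The idr_for_each_entry() walk above uses the driver's standard escape hatch for sleeping inside an RCU iteration: pin the current element with kref_get(), drop rcu_read_lock() for the blocking work, then re-enter the read side and drop the reference before advancing. The bare pattern, using the drbd_minor_destroy release callback seen later in this file (do_blocking_work() is a stand-in):

    rcu_read_lock();
    idr_for_each_entry(&connection->volumes, device, vnr) {
            kref_get(&device->kref);        /* keep device alive ...       */
            rcu_read_unlock();              /* ... while we may sleep      */

            do_blocking_work(device);       /* e.g. drbd_connected(device) */

            rcu_read_lock();                /* back under RCU to advance   */
            kref_put(&device->kref, &drbd_minor_destroy);
    }
    rcu_read_unlock();
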
@@ -1063,21 +1063,21 @@ randomize:
1063 } 1063 }
1064 rcu_read_unlock(); 1064 rcu_read_unlock();
1065 1065
1066 rv = conn_request_state(tconn, NS(conn, C_WF_REPORT_PARAMS), CS_VERBOSE); 1066 rv = conn_request_state(connection, NS(conn, C_WF_REPORT_PARAMS), CS_VERBOSE);
1067 if (rv < SS_SUCCESS || tconn->cstate != C_WF_REPORT_PARAMS) { 1067 if (rv < SS_SUCCESS || connection->cstate != C_WF_REPORT_PARAMS) {
1068 clear_bit(STATE_SENT, &tconn->flags); 1068 clear_bit(STATE_SENT, &connection->flags);
1069 return 0; 1069 return 0;
1070 } 1070 }
1071 1071
1072 drbd_thread_start(&tconn->asender); 1072 drbd_thread_start(&connection->asender);
1073 1073
1074 mutex_lock(&tconn->conf_update); 1074 mutex_lock(&connection->conf_update);
1075 /* The discard_my_data flag is a single-shot modifier to the next 1075 /* The discard_my_data flag is a single-shot modifier to the next
1076 * connection attempt, the handshake of which is now well underway. 1076 * connection attempt, the handshake of which is now well underway.
1077 * No need for rcu style copying of the whole struct 1077 * No need for rcu style copying of the whole struct
1078 * just to clear a single value. */ 1078 * just to clear a single value. */
1079 tconn->net_conf->discard_my_data = 0; 1079 connection->net_conf->discard_my_data = 0;
1080 mutex_unlock(&tconn->conf_update); 1080 mutex_unlock(&connection->conf_update);
1081 1081
1082 return h; 1082 return h;
1083 1083
@@ -1091,15 +1091,15 @@ out_release_sockets:
1091 return -1; 1091 return -1;
1092} 1092}
1093 1093
1094static int decode_header(struct drbd_tconn *tconn, void *header, struct packet_info *pi) 1094static int decode_header(struct drbd_connection *connection, void *header, struct packet_info *pi)
1095{ 1095{
1096 unsigned int header_size = drbd_header_size(tconn); 1096 unsigned int header_size = drbd_header_size(connection);
1097 1097
1098 if (header_size == sizeof(struct p_header100) && 1098 if (header_size == sizeof(struct p_header100) &&
1099 *(__be32 *)header == cpu_to_be32(DRBD_MAGIC_100)) { 1099 *(__be32 *)header == cpu_to_be32(DRBD_MAGIC_100)) {
1100 struct p_header100 *h = header; 1100 struct p_header100 *h = header;
1101 if (h->pad != 0) { 1101 if (h->pad != 0) {
1102 conn_err(tconn, "Header padding is not zero\n"); 1102 conn_err(connection, "Header padding is not zero\n");
1103 return -EINVAL; 1103 return -EINVAL;
1104 } 1104 }
1105 pi->vnr = be16_to_cpu(h->volume); 1105 pi->vnr = be16_to_cpu(h->volume);
@@ -1118,39 +1118,39 @@ static int decode_header(struct drbd_tconn *tconn, void *header, struct packet_i
1118 pi->size = be16_to_cpu(h->length); 1118 pi->size = be16_to_cpu(h->length);
1119 pi->vnr = 0; 1119 pi->vnr = 0;
1120 } else { 1120 } else {
1121 conn_err(tconn, "Wrong magic value 0x%08x in protocol version %d\n", 1121 conn_err(connection, "Wrong magic value 0x%08x in protocol version %d\n",
1122 be32_to_cpu(*(__be32 *)header), 1122 be32_to_cpu(*(__be32 *)header),
1123 tconn->agreed_pro_version); 1123 connection->agreed_pro_version);
1124 return -EINVAL; 1124 return -EINVAL;
1125 } 1125 }
1126 pi->data = header + header_size; 1126 pi->data = header + header_size;
1127 return 0; 1127 return 0;
1128} 1128}
1129 1129
1130static int drbd_recv_header(struct drbd_tconn *tconn, struct packet_info *pi) 1130static int drbd_recv_header(struct drbd_connection *connection, struct packet_info *pi)
1131{ 1131{
1132 void *buffer = tconn->data.rbuf; 1132 void *buffer = connection->data.rbuf;
1133 int err; 1133 int err;
1134 1134
1135 err = drbd_recv_all_warn(tconn, buffer, drbd_header_size(tconn)); 1135 err = drbd_recv_all_warn(connection, buffer, drbd_header_size(connection));
1136 if (err) 1136 if (err)
1137 return err; 1137 return err;
1138 1138
1139 err = decode_header(tconn, buffer, pi); 1139 err = decode_header(connection, buffer, pi);
1140 tconn->last_received = jiffies; 1140 connection->last_received = jiffies;
1141 1141
1142 return err; 1142 return err;
1143} 1143}
1144 1144
1145static void drbd_flush(struct drbd_tconn *tconn) 1145static void drbd_flush(struct drbd_connection *connection)
1146{ 1146{
1147 int rv; 1147 int rv;
1148 struct drbd_device *device; 1148 struct drbd_device *device;
1149 int vnr; 1149 int vnr;
1150 1150
1151 if (tconn->write_ordering >= WO_bdev_flush) { 1151 if (connection->write_ordering >= WO_bdev_flush) {
1152 rcu_read_lock(); 1152 rcu_read_lock();
1153 idr_for_each_entry(&tconn->volumes, device, vnr) { 1153 idr_for_each_entry(&connection->volumes, device, vnr) {
1154 if (!get_ldev(device)) 1154 if (!get_ldev(device))
1155 continue; 1155 continue;
1156 kref_get(&device->kref); 1156 kref_get(&device->kref);
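
decode_header(), just above, is a pure dispatch on the first 32 bits of the buffer: DRBD_MAGIC_100 selects the protocol-100 header, which carries a volume number and whose padding must be zero, while the older magics map to the smaller pre-100 headers with pi->vnr pinned to 0. Either way pi->data ends up just past the header, so the payload parsers never care which header variant arrived. Condensed:

    __be32 magic = *(__be32 *)header;

    if (magic == cpu_to_be32(DRBD_MAGIC_100)) {
            struct p_header100 *h = header;
            if (h->pad != 0)
                    return -EINVAL;         /* reject damaged frames early */
            pi->vnr = be16_to_cpu(h->volume);
            /* ... command and length follow ... */
    } else {
            pi->vnr = 0;    /* pre-100 headers have no volume on the wire */
            /* ... older magic checks elided ... */
    }
    pi->data = header + header_size;        /* payload starts here */
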
@@ -1163,7 +1163,7 @@ static void drbd_flush(struct drbd_tconn *tconn)
1163 /* would rather check on EOPNOTSUPP, but that is not reliable. 1163 /* would rather check on EOPNOTSUPP, but that is not reliable.
1164 * don't try again for ANY return value != 0 1164 * don't try again for ANY return value != 0
1165 * if (rv == -EOPNOTSUPP) */ 1165 * if (rv == -EOPNOTSUPP) */
1166 drbd_bump_write_ordering(tconn, WO_drain_io); 1166 drbd_bump_write_ordering(connection, WO_drain_io);
1167 } 1167 }
1168 put_ldev(device); 1168 put_ldev(device);
1169 kref_put(&device->kref, &drbd_minor_destroy); 1169 kref_put(&device->kref, &drbd_minor_destroy);
@@ -1182,7 +1182,7 @@ static void drbd_flush(struct drbd_tconn *tconn)
1182 * @epoch: Epoch object. 1182 * @epoch: Epoch object.
1183 * @ev: Epoch event. 1183 * @ev: Epoch event.
1184 */ 1184 */
1185static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn, 1185static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connection,
1186 struct drbd_epoch *epoch, 1186 struct drbd_epoch *epoch,
1187 enum epoch_event ev) 1187 enum epoch_event ev)
1188{ 1188{
@@ -1190,7 +1190,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
1190 struct drbd_epoch *next_epoch; 1190 struct drbd_epoch *next_epoch;
1191 enum finish_epoch rv = FE_STILL_LIVE; 1191 enum finish_epoch rv = FE_STILL_LIVE;
1192 1192
1193 spin_lock(&tconn->epoch_lock); 1193 spin_lock(&connection->epoch_lock);
1194 do { 1194 do {
1195 next_epoch = NULL; 1195 next_epoch = NULL;
1196 1196
@@ -1212,22 +1212,22 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
1212 atomic_read(&epoch->active) == 0 && 1212 atomic_read(&epoch->active) == 0 &&
1213 (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) || ev & EV_CLEANUP)) { 1213 (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) || ev & EV_CLEANUP)) {
1214 if (!(ev & EV_CLEANUP)) { 1214 if (!(ev & EV_CLEANUP)) {
1215 spin_unlock(&tconn->epoch_lock); 1215 spin_unlock(&connection->epoch_lock);
1216 drbd_send_b_ack(epoch->tconn, epoch->barrier_nr, epoch_size); 1216 drbd_send_b_ack(epoch->connection, epoch->barrier_nr, epoch_size);
1217 spin_lock(&tconn->epoch_lock); 1217 spin_lock(&connection->epoch_lock);
1218 } 1218 }
1219#if 0 1219#if 0
1220 /* FIXME: dec unacked on connection, once we have 1220 /* FIXME: dec unacked on connection, once we have
1221 * something to count pending connection packets in. */ 1221 * something to count pending connection packets in. */
1222 if (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags)) 1222 if (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags))
1223 dec_unacked(epoch->tconn); 1223 dec_unacked(epoch->connection);
1224#endif 1224#endif
1225 1225
1226 if (tconn->current_epoch != epoch) { 1226 if (connection->current_epoch != epoch) {
1227 next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list); 1227 next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
1228 list_del(&epoch->list); 1228 list_del(&epoch->list);
1229 ev = EV_BECAME_LAST | (ev & EV_CLEANUP); 1229 ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
1230 tconn->epochs--; 1230 connection->epochs--;
1231 kfree(epoch); 1231 kfree(epoch);
1232 1232
1233 if (rv == FE_STILL_LIVE) 1233 if (rv == FE_STILL_LIVE)
@@ -1247,17 +1247,17 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
1247 epoch = next_epoch; 1247 epoch = next_epoch;
1248 } while (1); 1248 } while (1);
1249 1249
1250 spin_unlock(&tconn->epoch_lock); 1250 spin_unlock(&connection->epoch_lock);
1251 1251
1252 return rv; 1252 return rv;
1253} 1253}
1254 1254
1255/** 1255/**
1256 * drbd_bump_write_ordering() - Fall back to another write ordering method 1256 * drbd_bump_write_ordering() - Fall back to another write ordering method
1257 * @tconn: DRBD connection. 1257 * @connection: DRBD connection.
1258 * @wo: Write ordering method to try. 1258 * @wo: Write ordering method to try.
1259 */ 1259 */
1260void drbd_bump_write_ordering(struct drbd_tconn *tconn, enum write_ordering_e wo) 1260void drbd_bump_write_ordering(struct drbd_connection *connection, enum write_ordering_e wo)
1261{ 1261{
1262 struct disk_conf *dc; 1262 struct disk_conf *dc;
1263 struct drbd_device *device; 1263 struct drbd_device *device;
@@ -1269,10 +1269,10 @@ void drbd_bump_write_ordering(struct drbd_tconn *tconn, enum write_ordering_e wo
1269 [WO_bdev_flush] = "flush", 1269 [WO_bdev_flush] = "flush",
1270 }; 1270 };
1271 1271
1272 pwo = tconn->write_ordering; 1272 pwo = connection->write_ordering;
1273 wo = min(pwo, wo); 1273 wo = min(pwo, wo);
1274 rcu_read_lock(); 1274 rcu_read_lock();
1275 idr_for_each_entry(&tconn->volumes, device, vnr) { 1275 idr_for_each_entry(&connection->volumes, device, vnr) {
1276 if (!get_ldev_if_state(device, D_ATTACHING)) 1276 if (!get_ldev_if_state(device, D_ATTACHING))
1277 continue; 1277 continue;
1278 dc = rcu_dereference(device->ldev->disk_conf); 1278 dc = rcu_dereference(device->ldev->disk_conf);
@@ -1284,9 +1284,9 @@ void drbd_bump_write_ordering(struct drbd_tconn *tconn, enum write_ordering_e wo
1284 put_ldev(device); 1284 put_ldev(device);
1285 } 1285 }
1286 rcu_read_unlock(); 1286 rcu_read_unlock();
1287 tconn->write_ordering = wo; 1287 connection->write_ordering = wo;
1288 if (pwo != tconn->write_ordering || wo == WO_bdev_flush) 1288 if (pwo != connection->write_ordering || wo == WO_bdev_flush)
1289 conn_info(tconn, "Method to ensure write ordering: %s\n", write_ordering_str[tconn->write_ordering]); 1289 conn_info(connection, "Method to ensure write ordering: %s\n", write_ordering_str[connection->write_ordering]);
1290} 1290}
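drbd_bump_write_ordering() only ever falls back (wo = min(pwo, wo)) and additionally clamps the method to what every attached volume's disk_conf allows. A compilable model of that clamping, with a simplified disk_conf holding just the two relevant knobs:

#include <stdio.h>

enum write_ordering_e { WO_none, WO_drain_io, WO_bdev_flush };

static const char *wo_str[] = { "none", "drain", "flush" };

/* simplified per-volume settings, analogous to drbd's disk_conf */
struct disk_conf { int disk_flushes; int disk_drain; };

static enum write_ordering_e
bump_write_ordering(enum write_ordering_e cur, enum write_ordering_e wo,
		    const struct disk_conf *dc, int nvols)
{
	if (wo > cur)
		wo = cur;			/* never upgrade, only fall back */
	for (int i = 0; i < nvols; i++) {	/* clamp to what every volume supports */
		if (wo == WO_bdev_flush && !dc[i].disk_flushes)
			wo = WO_drain_io;
		if (wo == WO_drain_io && !dc[i].disk_drain)
			wo = WO_none;
	}
	return wo;
}

int main(void)
{
	struct disk_conf dc[2] = { { 1, 1 }, { 0, 1 } };  /* one volume has flushes off */
	enum write_ordering_e wo = bump_write_ordering(WO_bdev_flush, WO_bdev_flush, dc, 2);
	printf("Method to ensure write ordering: %s\n", wo_str[wo]);
	return 0;
}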
1291 1291
1292/** 1292/**
@@ -1399,13 +1399,13 @@ static void drbd_remove_epoch_entry_interval(struct drbd_device *device,
1399 wake_up(&device->misc_wait); 1399 wake_up(&device->misc_wait);
1400} 1400}
1401 1401
1402static void conn_wait_active_ee_empty(struct drbd_tconn *tconn) 1402static void conn_wait_active_ee_empty(struct drbd_connection *connection)
1403{ 1403{
1404 struct drbd_device *device; 1404 struct drbd_device *device;
1405 int vnr; 1405 int vnr;
1406 1406
1407 rcu_read_lock(); 1407 rcu_read_lock();
1408 idr_for_each_entry(&tconn->volumes, device, vnr) { 1408 idr_for_each_entry(&connection->volumes, device, vnr) {
1409 kref_get(&device->kref); 1409 kref_get(&device->kref);
1410 rcu_read_unlock(); 1410 rcu_read_unlock();
1411 drbd_wait_ee_list_empty(device, &device->active_ee); 1411 drbd_wait_ee_list_empty(device, &device->active_ee);
@@ -1415,7 +1415,7 @@ static void conn_wait_active_ee_empty(struct drbd_tconn *tconn)
1415 rcu_read_unlock(); 1415 rcu_read_unlock();
1416} 1416}
1417 1417
1418static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi) 1418static int receive_Barrier(struct drbd_connection *connection, struct packet_info *pi)
1419{ 1419{
1420 int rv; 1420 int rv;
1421 struct p_barrier *p = pi->data; 1421 struct p_barrier *p = pi->data;
@@ -1424,16 +1424,16 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
1424 /* FIXME these are unacked on connection, 1424 /* FIXME these are unacked on connection,
1425 * not a specific (peer)device. 1425 * not a specific (peer)device.
1426 */ 1426 */
1427 tconn->current_epoch->barrier_nr = p->barrier; 1427 connection->current_epoch->barrier_nr = p->barrier;
1428 tconn->current_epoch->tconn = tconn; 1428 connection->current_epoch->connection = connection;
1429 rv = drbd_may_finish_epoch(tconn, tconn->current_epoch, EV_GOT_BARRIER_NR); 1429 rv = drbd_may_finish_epoch(connection, connection->current_epoch, EV_GOT_BARRIER_NR);
1430 1430
1431 /* P_BARRIER_ACK may imply that the corresponding extent is dropped from 1431 /* P_BARRIER_ACK may imply that the corresponding extent is dropped from
1432 * the activity log, which means it would not be resynced in case the 1432 * the activity log, which means it would not be resynced in case the
1433 * R_PRIMARY crashes now. 1433 * R_PRIMARY crashes now.
1434 * Therefore we must send the barrier_ack after the barrier request was 1434 * Therefore we must send the barrier_ack after the barrier request was
1435 * completed. */ 1435 * completed. */
1436 switch (tconn->write_ordering) { 1436 switch (connection->write_ordering) {
1437 case WO_none: 1437 case WO_none:
1438 if (rv == FE_RECYCLED) 1438 if (rv == FE_RECYCLED)
1439 return 0; 1439 return 0;
@@ -1444,15 +1444,15 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
1444 if (epoch) 1444 if (epoch)
1445 break; 1445 break;
1446 else 1446 else
1447 conn_warn(tconn, "Allocation of an epoch failed, slowing down\n"); 1447 conn_warn(connection, "Allocation of an epoch failed, slowing down\n");
1448 /* Fall through */ 1448 /* Fall through */
1449 1449
1450 case WO_bdev_flush: 1450 case WO_bdev_flush:
1451 case WO_drain_io: 1451 case WO_drain_io:
1452 conn_wait_active_ee_empty(tconn); 1452 conn_wait_active_ee_empty(connection);
1453 drbd_flush(tconn); 1453 drbd_flush(connection);
1454 1454
1455 if (atomic_read(&tconn->current_epoch->epoch_size)) { 1455 if (atomic_read(&connection->current_epoch->epoch_size)) {
1456 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO); 1456 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1457 if (epoch) 1457 if (epoch)
1458 break; 1458 break;
@@ -1460,7 +1460,7 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
1460 1460
1461 return 0; 1461 return 0;
1462 default: 1462 default:
1463 conn_err(tconn, "Strangeness in tconn->write_ordering %d\n", tconn->write_ordering); 1463 conn_err(connection, "Strangeness in connection->write_ordering %d\n", connection->write_ordering);
1464 return -EIO; 1464 return -EIO;
1465 } 1465 }
1466 1466
@@ -1468,16 +1468,16 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
1468 atomic_set(&epoch->epoch_size, 0); 1468 atomic_set(&epoch->epoch_size, 0);
1469 atomic_set(&epoch->active, 0); 1469 atomic_set(&epoch->active, 0);
1470 1470
1471 spin_lock(&tconn->epoch_lock); 1471 spin_lock(&connection->epoch_lock);
1472 if (atomic_read(&tconn->current_epoch->epoch_size)) { 1472 if (atomic_read(&connection->current_epoch->epoch_size)) {
1473 list_add(&epoch->list, &tconn->current_epoch->list); 1473 list_add(&epoch->list, &connection->current_epoch->list);
1474 tconn->current_epoch = epoch; 1474 connection->current_epoch = epoch;
1475 tconn->epochs++; 1475 connection->epochs++;
1476 } else { 1476 } else {
1477 /* The current_epoch got recycled while we allocated this one... */ 1477 /* The current_epoch got recycled while we allocated this one... */
1478 kfree(epoch); 1478 kfree(epoch);
1479 } 1479 }
1480 spin_unlock(&tconn->epoch_lock); 1480 spin_unlock(&connection->epoch_lock);
1481 1481
1482 return 0; 1482 return 0;
1483} 1483}
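receive_Barrier() pre-allocates a new epoch, but only links it in if the current epoch still has writes accounted to it; otherwise the current epoch is recycled and the allocation is thrown away. A sketch of that decision, assuming a stripped-down epoch struct:

#include <stdlib.h>
#include <stdio.h>

struct epoch {
	int barrier_nr;
	int epoch_size;		/* writes accounted to this epoch */
	struct epoch *prev;
};

static struct epoch *current_epoch;
static int epochs = 1;

/* start a new epoch after a P_BARRIER, unless the current one is empty */
static void start_new_epoch(void)
{
	struct epoch *e = calloc(1, sizeof(*e));
	if (!e)
		return;			/* the real code falls back to draining */
	if (current_epoch->epoch_size) {
		e->prev = current_epoch;
		current_epoch = e;
		epochs++;
	} else {
		/* the current_epoch got recycled while we allocated this one */
		free(e);
	}
}

int main(void)
{
	current_epoch = calloc(1, sizeof(*current_epoch));
	current_epoch->epoch_size = 2;	/* two writes still pending */
	start_new_epoch();
	printf("epochs: %d\n", epochs);
	return 0;
}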
@@ -1492,18 +1492,18 @@ read_in_block(struct drbd_device *device, u64 id, sector_t sector,
1492 struct drbd_peer_request *peer_req; 1492 struct drbd_peer_request *peer_req;
1493 struct page *page; 1493 struct page *page;
1494 int dgs, ds, err; 1494 int dgs, ds, err;
1495 void *dig_in = device->tconn->int_dig_in; 1495 void *dig_in = device->connection->int_dig_in;
1496 void *dig_vv = device->tconn->int_dig_vv; 1496 void *dig_vv = device->connection->int_dig_vv;
1497 unsigned long *data; 1497 unsigned long *data;
1498 1498
1499 dgs = 0; 1499 dgs = 0;
1500 if (device->tconn->peer_integrity_tfm) { 1500 if (device->connection->peer_integrity_tfm) {
1501 dgs = crypto_hash_digestsize(device->tconn->peer_integrity_tfm); 1501 dgs = crypto_hash_digestsize(device->connection->peer_integrity_tfm);
1502 /* 1502 /*
1503 * FIXME: Receive the incoming digest into the receive buffer 1503 * FIXME: Receive the incoming digest into the receive buffer
1504 * here, together with its struct p_data? 1504 * here, together with its struct p_data?
1505 */ 1505 */
1506 err = drbd_recv_all_warn(device->tconn, dig_in, dgs); 1506 err = drbd_recv_all_warn(device->connection, dig_in, dgs);
1507 if (err) 1507 if (err)
1508 return NULL; 1508 return NULL;
1509 data_size -= dgs; 1509 data_size -= dgs;
@@ -1539,7 +1539,7 @@ read_in_block(struct drbd_device *device, u64 id, sector_t sector,
1539 page_chain_for_each(page) { 1539 page_chain_for_each(page) {
1540 unsigned len = min_t(int, ds, PAGE_SIZE); 1540 unsigned len = min_t(int, ds, PAGE_SIZE);
1541 data = kmap(page); 1541 data = kmap(page);
1542 err = drbd_recv_all_warn(device->tconn, data, len); 1542 err = drbd_recv_all_warn(device->connection, data, len);
1543 if (drbd_insert_fault(device, DRBD_FAULT_RECEIVE)) { 1543 if (drbd_insert_fault(device, DRBD_FAULT_RECEIVE)) {
1544 dev_err(DEV, "Fault injection: Corrupting data on receive\n"); 1544 dev_err(DEV, "Fault injection: Corrupting data on receive\n");
1545 data[0] = data[0] ^ (unsigned long)-1; 1545 data[0] = data[0] ^ (unsigned long)-1;
@@ -1553,7 +1553,7 @@ read_in_block(struct drbd_device *device, u64 id, sector_t sector,
1553 } 1553 }
1554 1554
1555 if (dgs) { 1555 if (dgs) {
1556 drbd_csum_ee(device, device->tconn->peer_integrity_tfm, peer_req, dig_vv); 1556 drbd_csum_ee(device, device->connection->peer_integrity_tfm, peer_req, dig_vv);
1557 if (memcmp(dig_in, dig_vv, dgs)) { 1557 if (memcmp(dig_in, dig_vv, dgs)) {
1558 dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n", 1558 dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n",
1559 (unsigned long long)sector, data_size); 1559 (unsigned long long)sector, data_size);
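The digest check above computes a verification digest over the received payload and memcmp()s it against the digest that preceded the data on the wire. A self-contained model, using FNV-1a as a stand-in for the negotiated peer_integrity_tfm:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* FNV-1a as a toy replacement for the negotiated hash transform */
static uint64_t toy_digest(const void *buf, size_t len)
{
	const uint8_t *p = buf;
	uint64_t h = 0xcbf29ce484222325ULL;
	while (len--) {
		h ^= *p++;
		h *= 0x100000001b3ULL;
	}
	return h;
}

/* returns 0 if the payload matches the digest sent ahead of it */
static int verify_block(const void *data, size_t len, uint64_t dig_in)
{
	uint64_t dig_vv = toy_digest(data, len);
	if (dig_in != dig_vv) {
		fprintf(stderr, "Digest integrity check FAILED\n");
		return -1;
	}
	return 0;
}

int main(void)
{
	char block[4096];
	memset(block, 0xab, sizeof(block));
	uint64_t wire_digest = toy_digest(block, sizeof(block));
	block[0] ^= 0xff;	/* simulated corruption on receive */
	return verify_block(block, sizeof(block), wire_digest) ? 1 : 0;
}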
@@ -1583,7 +1583,7 @@ static int drbd_drain_block(struct drbd_device *device, int data_size)
1583 while (data_size) { 1583 while (data_size) {
1584 unsigned int len = min_t(int, data_size, PAGE_SIZE); 1584 unsigned int len = min_t(int, data_size, PAGE_SIZE);
1585 1585
1586 err = drbd_recv_all_warn(device->tconn, data, len); 1586 err = drbd_recv_all_warn(device->connection, data, len);
1587 if (err) 1587 if (err)
1588 break; 1588 break;
1589 data_size -= len; 1589 data_size -= len;
@@ -1600,13 +1600,13 @@ static int recv_dless_read(struct drbd_device *device, struct drbd_request *req,
1600 struct bvec_iter iter; 1600 struct bvec_iter iter;
1601 struct bio *bio; 1601 struct bio *bio;
1602 int dgs, err, expect; 1602 int dgs, err, expect;
1603 void *dig_in = device->tconn->int_dig_in; 1603 void *dig_in = device->connection->int_dig_in;
1604 void *dig_vv = device->tconn->int_dig_vv; 1604 void *dig_vv = device->connection->int_dig_vv;
1605 1605
1606 dgs = 0; 1606 dgs = 0;
1607 if (device->tconn->peer_integrity_tfm) { 1607 if (device->connection->peer_integrity_tfm) {
1608 dgs = crypto_hash_digestsize(device->tconn->peer_integrity_tfm); 1608 dgs = crypto_hash_digestsize(device->connection->peer_integrity_tfm);
1609 err = drbd_recv_all_warn(device->tconn, dig_in, dgs); 1609 err = drbd_recv_all_warn(device->connection, dig_in, dgs);
1610 if (err) 1610 if (err)
1611 return err; 1611 return err;
1612 data_size -= dgs; 1612 data_size -= dgs;
@@ -1622,7 +1622,7 @@ static int recv_dless_read(struct drbd_device *device, struct drbd_request *req,
1622 bio_for_each_segment(bvec, bio, iter) { 1622 bio_for_each_segment(bvec, bio, iter) {
1623 void *mapped = kmap(bvec.bv_page) + bvec.bv_offset; 1623 void *mapped = kmap(bvec.bv_page) + bvec.bv_offset;
1624 expect = min_t(int, data_size, bvec.bv_len); 1624 expect = min_t(int, data_size, bvec.bv_len);
1625 err = drbd_recv_all_warn(device->tconn, mapped, expect); 1625 err = drbd_recv_all_warn(device->connection, mapped, expect);
1626 kunmap(bvec.bv_page); 1626 kunmap(bvec.bv_page);
1627 if (err) 1627 if (err)
1628 return err; 1628 return err;
@@ -1630,7 +1630,7 @@ static int recv_dless_read(struct drbd_device *device, struct drbd_request *req,
1630 } 1630 }
1631 1631
1632 if (dgs) { 1632 if (dgs) {
1633 drbd_csum_bio(device, device->tconn->peer_integrity_tfm, bio, dig_vv); 1633 drbd_csum_bio(device, device->connection->peer_integrity_tfm, bio, dig_vv);
1634 if (memcmp(dig_in, dig_vv, dgs)) { 1634 if (memcmp(dig_in, dig_vv, dgs)) {
1635 dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n"); 1635 dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
1636 return -EINVAL; 1636 return -EINVAL;
@@ -1685,9 +1685,9 @@ static int recv_resync_read(struct drbd_device *device, sector_t sector, int dat
1685 1685
1686 peer_req->w.cb = e_end_resync_block; 1686 peer_req->w.cb = e_end_resync_block;
1687 1687
1688 spin_lock_irq(&device->tconn->req_lock); 1688 spin_lock_irq(&device->connection->req_lock);
1689 list_add(&peer_req->w.list, &device->sync_ee); 1689 list_add(&peer_req->w.list, &device->sync_ee);
1690 spin_unlock_irq(&device->tconn->req_lock); 1690 spin_unlock_irq(&device->connection->req_lock);
1691 1691
1692 atomic_add(data_size >> 9, &device->rs_sect_ev); 1692 atomic_add(data_size >> 9, &device->rs_sect_ev);
1693 if (drbd_submit_peer_request(device, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0) 1693 if (drbd_submit_peer_request(device, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
@@ -1695,9 +1695,9 @@ static int recv_resync_read(struct drbd_device *device, sector_t sector, int dat
1695 1695
1696 /* don't care for the reason here */ 1696 /* don't care for the reason here */
1697 dev_err(DEV, "submit failed, triggering re-connect\n"); 1697 dev_err(DEV, "submit failed, triggering re-connect\n");
1698 spin_lock_irq(&device->tconn->req_lock); 1698 spin_lock_irq(&device->connection->req_lock);
1699 list_del(&peer_req->w.list); 1699 list_del(&peer_req->w.list);
1700 spin_unlock_irq(&device->tconn->req_lock); 1700 spin_unlock_irq(&device->connection->req_lock);
1701 1701
1702 drbd_free_peer_req(device, peer_req); 1702 drbd_free_peer_req(device, peer_req);
1703fail: 1703fail:
@@ -1722,7 +1722,7 @@ find_request(struct drbd_device *device, struct rb_root *root, u64 id,
1722 return NULL; 1722 return NULL;
1723} 1723}
1724 1724
1725static int receive_DataReply(struct drbd_tconn *tconn, struct packet_info *pi) 1725static int receive_DataReply(struct drbd_connection *connection, struct packet_info *pi)
1726{ 1726{
1727 struct drbd_device *device; 1727 struct drbd_device *device;
1728 struct drbd_request *req; 1728 struct drbd_request *req;
@@ -1730,15 +1730,15 @@ static int receive_DataReply(struct drbd_tconn *tconn, struct packet_info *pi)
1730 int err; 1730 int err;
1731 struct p_data *p = pi->data; 1731 struct p_data *p = pi->data;
1732 1732
1733 device = vnr_to_device(tconn, pi->vnr); 1733 device = vnr_to_device(connection, pi->vnr);
1734 if (!device) 1734 if (!device)
1735 return -EIO; 1735 return -EIO;
1736 1736
1737 sector = be64_to_cpu(p->sector); 1737 sector = be64_to_cpu(p->sector);
1738 1738
1739 spin_lock_irq(&device->tconn->req_lock); 1739 spin_lock_irq(&device->connection->req_lock);
1740 req = find_request(device, &device->read_requests, p->block_id, sector, false, __func__); 1740 req = find_request(device, &device->read_requests, p->block_id, sector, false, __func__);
1741 spin_unlock_irq(&device->tconn->req_lock); 1741 spin_unlock_irq(&device->connection->req_lock);
1742 if (unlikely(!req)) 1742 if (unlikely(!req))
1743 return -EIO; 1743 return -EIO;
1744 1744
@@ -1755,14 +1755,14 @@ static int receive_DataReply(struct drbd_tconn *tconn, struct packet_info *pi)
1755 return err; 1755 return err;
1756} 1756}
1757 1757
1758static int receive_RSDataReply(struct drbd_tconn *tconn, struct packet_info *pi) 1758static int receive_RSDataReply(struct drbd_connection *connection, struct packet_info *pi)
1759{ 1759{
1760 struct drbd_device *device; 1760 struct drbd_device *device;
1761 sector_t sector; 1761 sector_t sector;
1762 int err; 1762 int err;
1763 struct p_data *p = pi->data; 1763 struct p_data *p = pi->data;
1764 1764
1765 device = vnr_to_device(tconn, pi->vnr); 1765 device = vnr_to_device(connection, pi->vnr);
1766 if (!device) 1766 if (!device)
1767 return -EIO; 1767 return -EIO;
1768 1768
@@ -1837,16 +1837,16 @@ static int e_end_block(struct drbd_work *w, int cancel)
1837 /* we delete from the conflict detection hash _after_ we sent out the 1837 /* we delete from the conflict detection hash _after_ we sent out the
1838 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */ 1838 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
1839 if (peer_req->flags & EE_IN_INTERVAL_TREE) { 1839 if (peer_req->flags & EE_IN_INTERVAL_TREE) {
1840 spin_lock_irq(&device->tconn->req_lock); 1840 spin_lock_irq(&device->connection->req_lock);
1841 D_ASSERT(!drbd_interval_empty(&peer_req->i)); 1841 D_ASSERT(!drbd_interval_empty(&peer_req->i));
1842 drbd_remove_epoch_entry_interval(device, peer_req); 1842 drbd_remove_epoch_entry_interval(device, peer_req);
1843 if (peer_req->flags & EE_RESTART_REQUESTS) 1843 if (peer_req->flags & EE_RESTART_REQUESTS)
1844 restart_conflicting_writes(device, sector, peer_req->i.size); 1844 restart_conflicting_writes(device, sector, peer_req->i.size);
1845 spin_unlock_irq(&device->tconn->req_lock); 1845 spin_unlock_irq(&device->connection->req_lock);
1846 } else 1846 } else
1847 D_ASSERT(drbd_interval_empty(&peer_req->i)); 1847 D_ASSERT(drbd_interval_empty(&peer_req->i));
1848 1848
1849 drbd_may_finish_epoch(device->tconn, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0)); 1849 drbd_may_finish_epoch(device->connection, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
1850 1850
1851 return err; 1851 return err;
1852} 1852}
@@ -1871,9 +1871,9 @@ static int e_send_superseded(struct drbd_work *w, int unused)
1871 1871
1872static int e_send_retry_write(struct drbd_work *w, int unused) 1872static int e_send_retry_write(struct drbd_work *w, int unused)
1873{ 1873{
1874 struct drbd_tconn *tconn = w->device->tconn; 1874 struct drbd_connection *connection = w->device->connection;
1875 1875
1876 return e_send_ack(w, tconn->agreed_pro_version >= 100 ? 1876 return e_send_ack(w, connection->agreed_pro_version >= 100 ?
1877 P_RETRY_WRITE : P_SUPERSEDED); 1877 P_RETRY_WRITE : P_SUPERSEDED);
1878} 1878}
1879 1879
@@ -1896,7 +1896,7 @@ static void update_peer_seq(struct drbd_device *device, unsigned int peer_seq)
1896{ 1896{
1897 unsigned int newest_peer_seq; 1897 unsigned int newest_peer_seq;
1898 1898
1899 if (test_bit(RESOLVE_CONFLICTS, &device->tconn->flags)) { 1899 if (test_bit(RESOLVE_CONFLICTS, &device->connection->flags)) {
1900 spin_lock(&device->peer_seq_lock); 1900 spin_lock(&device->peer_seq_lock);
1901 newest_peer_seq = seq_max(device->peer_seq, peer_seq); 1901 newest_peer_seq = seq_max(device->peer_seq, peer_seq);
1902 device->peer_seq = newest_peer_seq; 1902 device->peer_seq = newest_peer_seq;
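seq_max() is defined earlier in drbd_receiver.c; the point is that peer sequence numbers are compared wraparound-safe, via signed distance. A plausible definition and a wraparound case:

#include <stdint.h>
#include <stdio.h>

/* wraparound-safe: "a is newer than b" iff the signed distance is positive */
static int seq_greater(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) > 0;
}

static uint32_t seq_max(uint32_t a, uint32_t b)
{
	return seq_greater(a, b) ? a : b;
}

int main(void)
{
	uint32_t peer_seq = 0xfffffffe;	 /* about to wrap */
	peer_seq = seq_max(peer_seq, 3); /* 3 is "newer" across the wrap */
	printf("newest peer seq: %u\n", peer_seq);
	return 0;
}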
@@ -1918,7 +1918,7 @@ static bool overlapping_resync_write(struct drbd_device *device, struct drbd_pee
1918 struct drbd_peer_request *rs_req; 1918 struct drbd_peer_request *rs_req;
1919 bool rv = 0; 1919 bool rv = 0;
1920 1920
1921 spin_lock_irq(&device->tconn->req_lock); 1921 spin_lock_irq(&device->connection->req_lock);
1922 list_for_each_entry(rs_req, &device->sync_ee, w.list) { 1922 list_for_each_entry(rs_req, &device->sync_ee, w.list) {
1923 if (overlaps(peer_req->i.sector, peer_req->i.size, 1923 if (overlaps(peer_req->i.sector, peer_req->i.size,
1924 rs_req->i.sector, rs_req->i.size)) { 1924 rs_req->i.sector, rs_req->i.size)) {
@@ -1926,7 +1926,7 @@ static bool overlapping_resync_write(struct drbd_device *device, struct drbd_pee
1926 break; 1926 break;
1927 } 1927 }
1928 } 1928 }
1929 spin_unlock_irq(&device->tconn->req_lock); 1929 spin_unlock_irq(&device->connection->req_lock);
1930 1930
1931 return rv; 1931 return rv;
1932} 1932}
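overlapping_resync_write() relies on the overlaps() helper from drbd_int.h; assuming, as elsewhere in drbd, sizes in bytes over 512-byte sectors, an equivalent predicate looks like this:

#include <stdio.h>

typedef unsigned long long sector_t;

/* two sector ranges overlap iff each starts before the other ends;
 * sizes are in bytes, sectors are 512 bytes */
static int overlaps(sector_t s1, unsigned l1, sector_t s2, unsigned l2)
{
	return s1 < s2 + (l2 >> 9) && s2 < s1 + (l1 >> 9);
}

int main(void)
{
	/* a 4 KiB write at sector 8 vs. a 4 KiB resync request at sector 12 or 16 */
	printf("overlap: %d\n", overlaps(8, 4096, 12, 4096));	/* -> 1 */
	printf("overlap: %d\n", overlaps(8, 4096, 16, 4096));	/* -> 0, merely adjacent */
	return 0;
}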
@@ -1958,7 +1958,7 @@ static int wait_for_and_update_peer_seq(struct drbd_device *device, const u32 pe
1958 long timeout; 1958 long timeout;
1959 int ret = 0, tp; 1959 int ret = 0, tp;
1960 1960
1961 if (!test_bit(RESOLVE_CONFLICTS, &device->tconn->flags)) 1961 if (!test_bit(RESOLVE_CONFLICTS, &device->connection->flags))
1962 return 0; 1962 return 0;
1963 1963
1964 spin_lock(&device->peer_seq_lock); 1964 spin_lock(&device->peer_seq_lock);
@@ -1974,7 +1974,7 @@ static int wait_for_and_update_peer_seq(struct drbd_device *device, const u32 pe
1974 } 1974 }
1975 1975
1976 rcu_read_lock(); 1976 rcu_read_lock();
1977 tp = rcu_dereference(device->tconn->net_conf)->two_primaries; 1977 tp = rcu_dereference(device->connection->net_conf)->two_primaries;
1978 rcu_read_unlock(); 1978 rcu_read_unlock();
1979 1979
1980 if (!tp) 1980 if (!tp)
@@ -1984,7 +1984,7 @@ static int wait_for_and_update_peer_seq(struct drbd_device *device, const u32 pe
1984 prepare_to_wait(&device->seq_wait, &wait, TASK_INTERRUPTIBLE); 1984 prepare_to_wait(&device->seq_wait, &wait, TASK_INTERRUPTIBLE);
1985 spin_unlock(&device->peer_seq_lock); 1985 spin_unlock(&device->peer_seq_lock);
1986 rcu_read_lock(); 1986 rcu_read_lock();
1987 timeout = rcu_dereference(device->tconn->net_conf)->ping_timeo*HZ/10; 1987 timeout = rcu_dereference(device->connection->net_conf)->ping_timeo*HZ/10;
1988 rcu_read_unlock(); 1988 rcu_read_unlock();
1989 timeout = schedule_timeout(timeout); 1989 timeout = schedule_timeout(timeout);
1990 spin_lock(&device->peer_seq_lock); 1990 spin_lock(&device->peer_seq_lock);
@@ -2027,10 +2027,10 @@ static void fail_postponed_requests(struct drbd_device *device, sector_t sector,
2027 continue; 2027 continue;
2028 req->rq_state &= ~RQ_POSTPONED; 2028 req->rq_state &= ~RQ_POSTPONED;
2029 __req_mod(req, NEG_ACKED, &m); 2029 __req_mod(req, NEG_ACKED, &m);
2030 spin_unlock_irq(&device->tconn->req_lock); 2030 spin_unlock_irq(&device->connection->req_lock);
2031 if (m.bio) 2031 if (m.bio)
2032 complete_master_bio(device, &m); 2032 complete_master_bio(device, &m);
2033 spin_lock_irq(&device->tconn->req_lock); 2033 spin_lock_irq(&device->connection->req_lock);
2034 goto repeat; 2034 goto repeat;
2035 } 2035 }
2036} 2036}
@@ -2038,8 +2038,8 @@ static void fail_postponed_requests(struct drbd_device *device, sector_t sector,
2038static int handle_write_conflicts(struct drbd_device *device, 2038static int handle_write_conflicts(struct drbd_device *device,
2039 struct drbd_peer_request *peer_req) 2039 struct drbd_peer_request *peer_req)
2040{ 2040{
2041 struct drbd_tconn *tconn = device->tconn; 2041 struct drbd_connection *connection = device->connection;
2042 bool resolve_conflicts = test_bit(RESOLVE_CONFLICTS, &tconn->flags); 2042 bool resolve_conflicts = test_bit(RESOLVE_CONFLICTS, &connection->flags);
2043 sector_t sector = peer_req->i.sector; 2043 sector_t sector = peer_req->i.sector;
2044 const unsigned int size = peer_req->i.size; 2044 const unsigned int size = peer_req->i.size;
2045 struct drbd_interval *i; 2045 struct drbd_interval *i;
@@ -2092,7 +2092,7 @@ static int handle_write_conflicts(struct drbd_device *device,
2092 peer_req->w.cb = superseded ? e_send_superseded : 2092 peer_req->w.cb = superseded ? e_send_superseded :
2093 e_send_retry_write; 2093 e_send_retry_write;
2094 list_add_tail(&peer_req->w.list, &device->done_ee); 2094 list_add_tail(&peer_req->w.list, &device->done_ee);
2095 wake_asender(device->tconn); 2095 wake_asender(device->connection);
2096 2096
2097 err = -ENOENT; 2097 err = -ENOENT;
2098 goto out; 2098 goto out;
@@ -2121,7 +2121,7 @@ static int handle_write_conflicts(struct drbd_device *device,
2121 */ 2121 */
2122 err = drbd_wait_misc(device, &req->i); 2122 err = drbd_wait_misc(device, &req->i);
2123 if (err) { 2123 if (err) {
2124 _conn_request_state(device->tconn, 2124 _conn_request_state(device->connection,
2125 NS(conn, C_TIMEOUT), 2125 NS(conn, C_TIMEOUT),
2126 CS_HARD); 2126 CS_HARD);
2127 fail_postponed_requests(device, sector, size); 2127 fail_postponed_requests(device, sector, size);
@@ -2145,7 +2145,7 @@ static int handle_write_conflicts(struct drbd_device *device,
2145} 2145}
2146 2146
2147/* mirrored write */ 2147/* mirrored write */
2148static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi) 2148static int receive_Data(struct drbd_connection *connection, struct packet_info *pi)
2149{ 2149{
2150 struct drbd_device *device; 2150 struct drbd_device *device;
2151 sector_t sector; 2151 sector_t sector;
@@ -2156,7 +2156,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
2156 u32 dp_flags; 2156 u32 dp_flags;
2157 int err, tp; 2157 int err, tp;
2158 2158
2159 device = vnr_to_device(tconn, pi->vnr); 2159 device = vnr_to_device(connection, pi->vnr);
2160 if (!device) 2160 if (!device)
2161 return -EIO; 2161 return -EIO;
2162 2162
@@ -2165,7 +2165,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
2165 2165
2166 err = wait_for_and_update_peer_seq(device, peer_seq); 2166 err = wait_for_and_update_peer_seq(device, peer_seq);
2167 drbd_send_ack_dp(device, P_NEG_ACK, p, pi->size); 2167 drbd_send_ack_dp(device, P_NEG_ACK, p, pi->size);
2168 atomic_inc(&tconn->current_epoch->epoch_size); 2168 atomic_inc(&connection->current_epoch->epoch_size);
2169 err2 = drbd_drain_block(device, pi->size); 2169 err2 = drbd_drain_block(device, pi->size);
2170 if (!err) 2170 if (!err)
2171 err = err2; 2171 err = err2;
@@ -2197,24 +2197,24 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
2197 if (dp_flags & DP_MAY_SET_IN_SYNC) 2197 if (dp_flags & DP_MAY_SET_IN_SYNC)
2198 peer_req->flags |= EE_MAY_SET_IN_SYNC; 2198 peer_req->flags |= EE_MAY_SET_IN_SYNC;
2199 2199
2200 spin_lock(&tconn->epoch_lock); 2200 spin_lock(&connection->epoch_lock);
2201 peer_req->epoch = tconn->current_epoch; 2201 peer_req->epoch = connection->current_epoch;
2202 atomic_inc(&peer_req->epoch->epoch_size); 2202 atomic_inc(&peer_req->epoch->epoch_size);
2203 atomic_inc(&peer_req->epoch->active); 2203 atomic_inc(&peer_req->epoch->active);
2204 spin_unlock(&tconn->epoch_lock); 2204 spin_unlock(&connection->epoch_lock);
2205 2205
2206 rcu_read_lock(); 2206 rcu_read_lock();
2207 tp = rcu_dereference(device->tconn->net_conf)->two_primaries; 2207 tp = rcu_dereference(device->connection->net_conf)->two_primaries;
2208 rcu_read_unlock(); 2208 rcu_read_unlock();
2209 if (tp) { 2209 if (tp) {
2210 peer_req->flags |= EE_IN_INTERVAL_TREE; 2210 peer_req->flags |= EE_IN_INTERVAL_TREE;
2211 err = wait_for_and_update_peer_seq(device, peer_seq); 2211 err = wait_for_and_update_peer_seq(device, peer_seq);
2212 if (err) 2212 if (err)
2213 goto out_interrupted; 2213 goto out_interrupted;
2214 spin_lock_irq(&device->tconn->req_lock); 2214 spin_lock_irq(&device->connection->req_lock);
2215 err = handle_write_conflicts(device, peer_req); 2215 err = handle_write_conflicts(device, peer_req);
2216 if (err) { 2216 if (err) {
2217 spin_unlock_irq(&device->tconn->req_lock); 2217 spin_unlock_irq(&device->connection->req_lock);
2218 if (err == -ENOENT) { 2218 if (err == -ENOENT) {
2219 put_ldev(device); 2219 put_ldev(device);
2220 return 0; 2220 return 0;
@@ -2223,17 +2223,17 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
2223 } 2223 }
2224 } else { 2224 } else {
2225 update_peer_seq(device, peer_seq); 2225 update_peer_seq(device, peer_seq);
2226 spin_lock_irq(&device->tconn->req_lock); 2226 spin_lock_irq(&device->connection->req_lock);
2227 } 2227 }
2228 list_add(&peer_req->w.list, &device->active_ee); 2228 list_add(&peer_req->w.list, &device->active_ee);
2229 spin_unlock_irq(&device->tconn->req_lock); 2229 spin_unlock_irq(&device->connection->req_lock);
2230 2230
2231 if (device->state.conn == C_SYNC_TARGET) 2231 if (device->state.conn == C_SYNC_TARGET)
2232 wait_event(device->ee_wait, !overlapping_resync_write(device, peer_req)); 2232 wait_event(device->ee_wait, !overlapping_resync_write(device, peer_req));
2233 2233
2234 if (device->tconn->agreed_pro_version < 100) { 2234 if (device->connection->agreed_pro_version < 100) {
2235 rcu_read_lock(); 2235 rcu_read_lock();
2236 switch (rcu_dereference(device->tconn->net_conf)->wire_protocol) { 2236 switch (rcu_dereference(device->connection->net_conf)->wire_protocol) {
2237 case DRBD_PROT_C: 2237 case DRBD_PROT_C:
2238 dp_flags |= DP_SEND_WRITE_ACK; 2238 dp_flags |= DP_SEND_WRITE_ACK;
2239 break; 2239 break;
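For peers older than protocol version 100, the ack mode is derived from the configured wire protocol: C wants a write ack, B a receive ack, A none. A sketch of that mapping; the flag values here are illustrative, the real ones live in the protocol headers:

#include <stdio.h>

enum wire_protocol { DRBD_PROT_A = 1, DRBD_PROT_B, DRBD_PROT_C };

#define DP_SEND_RECEIVE_ACK 1	/* protocol B: ack once the data is received */
#define DP_SEND_WRITE_ACK   2	/* protocol C: ack once the data hit stable storage */

static unsigned ack_flags(enum wire_protocol proto)
{
	switch (proto) {
	case DRBD_PROT_C: return DP_SEND_WRITE_ACK;
	case DRBD_PROT_B: return DP_SEND_RECEIVE_ACK;
	case DRBD_PROT_A: return 0;	/* fire and forget */
	}
	return 0;
}

int main(void)
{
	printf("protocol C flags: %u\n", ack_flags(DRBD_PROT_C));
	return 0;
}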
@@ -2271,15 +2271,15 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
2271 2271
2272 /* don't care for the reason here */ 2272 /* don't care for the reason here */
2273 dev_err(DEV, "submit failed, triggering re-connect\n"); 2273 dev_err(DEV, "submit failed, triggering re-connect\n");
2274 spin_lock_irq(&device->tconn->req_lock); 2274 spin_lock_irq(&device->connection->req_lock);
2275 list_del(&peer_req->w.list); 2275 list_del(&peer_req->w.list);
2276 drbd_remove_epoch_entry_interval(device, peer_req); 2276 drbd_remove_epoch_entry_interval(device, peer_req);
2277 spin_unlock_irq(&device->tconn->req_lock); 2277 spin_unlock_irq(&device->connection->req_lock);
2278 if (peer_req->flags & EE_CALL_AL_COMPLETE_IO) 2278 if (peer_req->flags & EE_CALL_AL_COMPLETE_IO)
2279 drbd_al_complete_io(device, &peer_req->i); 2279 drbd_al_complete_io(device, &peer_req->i);
2280 2280
2281out_interrupted: 2281out_interrupted:
2282 drbd_may_finish_epoch(tconn, peer_req->epoch, EV_PUT + EV_CLEANUP); 2282 drbd_may_finish_epoch(connection, peer_req->epoch, EV_PUT + EV_CLEANUP);
2283 put_ldev(device); 2283 put_ldev(device);
2284 drbd_free_peer_req(device, peer_req); 2284 drbd_free_peer_req(device, peer_req);
2285 return err; 2285 return err;
@@ -2357,7 +2357,7 @@ int drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector)
2357} 2357}
2358 2358
2359 2359
2360static int receive_DataRequest(struct drbd_tconn *tconn, struct packet_info *pi) 2360static int receive_DataRequest(struct drbd_connection *connection, struct packet_info *pi)
2361{ 2361{
2362 struct drbd_device *device; 2362 struct drbd_device *device;
2363 sector_t sector; 2363 sector_t sector;
@@ -2368,7 +2368,7 @@ static int receive_DataRequest(struct drbd_tconn *tconn, struct packet_info *pi)
2368 unsigned int fault_type; 2368 unsigned int fault_type;
2369 struct p_block_req *p = pi->data; 2369 struct p_block_req *p = pi->data;
2370 2370
2371 device = vnr_to_device(tconn, pi->vnr); 2371 device = vnr_to_device(connection, pi->vnr);
2372 if (!device) 2372 if (!device)
2373 return -EIO; 2373 return -EIO;
2374 capacity = drbd_get_capacity(device->this_bdev); 2374 capacity = drbd_get_capacity(device->this_bdev);
@@ -2450,11 +2450,11 @@ static int receive_DataRequest(struct drbd_tconn *tconn, struct packet_info *pi)
2450 peer_req->digest = di; 2450 peer_req->digest = di;
2451 peer_req->flags |= EE_HAS_DIGEST; 2451 peer_req->flags |= EE_HAS_DIGEST;
2452 2452
2453 if (drbd_recv_all(device->tconn, di->digest, pi->size)) 2453 if (drbd_recv_all(device->connection, di->digest, pi->size))
2454 goto out_free_e; 2454 goto out_free_e;
2455 2455
2456 if (pi->cmd == P_CSUM_RS_REQUEST) { 2456 if (pi->cmd == P_CSUM_RS_REQUEST) {
2457 D_ASSERT(device->tconn->agreed_pro_version >= 89); 2457 D_ASSERT(device->connection->agreed_pro_version >= 89);
2458 peer_req->w.cb = w_e_end_csum_rs_req; 2458 peer_req->w.cb = w_e_end_csum_rs_req;
2459 /* used in the sector offset progress display */ 2459 /* used in the sector offset progress display */
2460 device->bm_resync_fo = BM_SECT_TO_BIT(sector); 2460 device->bm_resync_fo = BM_SECT_TO_BIT(sector);
@@ -2471,7 +2471,7 @@ static int receive_DataRequest(struct drbd_tconn *tconn, struct packet_info *pi)
2471 2471
2472 case P_OV_REQUEST: 2472 case P_OV_REQUEST:
2473 if (device->ov_start_sector == ~(sector_t)0 && 2473 if (device->ov_start_sector == ~(sector_t)0 &&
2474 device->tconn->agreed_pro_version >= 90) { 2474 device->connection->agreed_pro_version >= 90) {
2475 unsigned long now = jiffies; 2475 unsigned long now = jiffies;
2476 int i; 2476 int i;
2477 device->ov_start_sector = sector; 2477 device->ov_start_sector = sector;
@@ -2525,18 +2525,18 @@ submit_for_resync:
2525 2525
2526submit: 2526submit:
2527 inc_unacked(device); 2527 inc_unacked(device);
2528 spin_lock_irq(&device->tconn->req_lock); 2528 spin_lock_irq(&device->connection->req_lock);
2529 list_add_tail(&peer_req->w.list, &device->read_ee); 2529 list_add_tail(&peer_req->w.list, &device->read_ee);
2530 spin_unlock_irq(&device->tconn->req_lock); 2530 spin_unlock_irq(&device->connection->req_lock);
2531 2531
2532 if (drbd_submit_peer_request(device, peer_req, READ, fault_type) == 0) 2532 if (drbd_submit_peer_request(device, peer_req, READ, fault_type) == 0)
2533 return 0; 2533 return 0;
2534 2534
2535 /* don't care for the reason here */ 2535 /* don't care for the reason here */
2536 dev_err(DEV, "submit failed, triggering re-connect\n"); 2536 dev_err(DEV, "submit failed, triggering re-connect\n");
2537 spin_lock_irq(&device->tconn->req_lock); 2537 spin_lock_irq(&device->connection->req_lock);
2538 list_del(&peer_req->w.list); 2538 list_del(&peer_req->w.list);
2539 spin_unlock_irq(&device->tconn->req_lock); 2539 spin_unlock_irq(&device->connection->req_lock);
2540 /* no drbd_rs_complete_io(), we are dropping the connection anyway */ 2540 /* no drbd_rs_complete_io(), we are dropping the connection anyway */
2541 2541
2542out_free_e: 2542out_free_e:
@@ -2558,7 +2558,7 @@ static int drbd_asb_recover_0p(struct drbd_device *device) __must_hold(local)
2558 ch_self = device->comm_bm_set; 2558 ch_self = device->comm_bm_set;
2559 2559
2560 rcu_read_lock(); 2560 rcu_read_lock();
2561 after_sb_0p = rcu_dereference(device->tconn->net_conf)->after_sb_0p; 2561 after_sb_0p = rcu_dereference(device->connection->net_conf)->after_sb_0p;
2562 rcu_read_unlock(); 2562 rcu_read_unlock();
2563 switch (after_sb_0p) { 2563 switch (after_sb_0p) {
2564 case ASB_CONSENSUS: 2564 case ASB_CONSENSUS:
@@ -2593,7 +2593,7 @@ static int drbd_asb_recover_0p(struct drbd_device *device) __must_hold(local)
2593 "Using discard-least-changes instead\n"); 2593 "Using discard-least-changes instead\n");
2594 case ASB_DISCARD_ZERO_CHG: 2594 case ASB_DISCARD_ZERO_CHG:
2595 if (ch_peer == 0 && ch_self == 0) { 2595 if (ch_peer == 0 && ch_self == 0) {
2596 rv = test_bit(RESOLVE_CONFLICTS, &device->tconn->flags) 2596 rv = test_bit(RESOLVE_CONFLICTS, &device->connection->flags)
2597 ? -1 : 1; 2597 ? -1 : 1;
2598 break; 2598 break;
2599 } else { 2599 } else {
@@ -2609,7 +2609,7 @@ static int drbd_asb_recover_0p(struct drbd_device *device) __must_hold(local)
2609 rv = 1; 2609 rv = 1;
2610 else /* ( ch_self == ch_peer ) */ 2610 else /* ( ch_self == ch_peer ) */
2611 /* Well, then use something else. */ 2611 /* Well, then use something else. */
2612 rv = test_bit(RESOLVE_CONFLICTS, &device->tconn->flags) 2612 rv = test_bit(RESOLVE_CONFLICTS, &device->connection->flags)
2613 ? -1 : 1; 2613 ? -1 : 1;
2614 break; 2614 break;
2615 case ASB_DISCARD_LOCAL: 2615 case ASB_DISCARD_LOCAL:
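drbd_asb_recover_0p() resolves a split brain by, among other policies, discarding the side with fewer changed blocks, and breaks a tie with the RESOLVE_CONFLICTS bit so both nodes reach the same verdict. A compilable model using the sign convention visible above (negative: discard our own data):

#include <stdio.h>

/* -1: we become sync target; 1: peer becomes sync target */
static int discard_least_changes(unsigned ch_self, unsigned ch_peer,
				 int resolve_conflicts)
{
	if (ch_self < ch_peer)
		return -1;		/* we changed less, discard our changes */
	if (ch_self > ch_peer)
		return 1;
	/* dead even: fall back to an arbitrary but agreed-on tie break */
	return resolve_conflicts ? -1 : 1;
}

int main(void)
{
	printf("%d\n", discard_least_changes(10, 250, 0));	/* -> -1 */
	return 0;
}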
@@ -2628,7 +2628,7 @@ static int drbd_asb_recover_1p(struct drbd_device *device) __must_hold(local)
2628 enum drbd_after_sb_p after_sb_1p; 2628 enum drbd_after_sb_p after_sb_1p;
2629 2629
2630 rcu_read_lock(); 2630 rcu_read_lock();
2631 after_sb_1p = rcu_dereference(device->tconn->net_conf)->after_sb_1p; 2631 after_sb_1p = rcu_dereference(device->connection->net_conf)->after_sb_1p;
2632 rcu_read_unlock(); 2632 rcu_read_unlock();
2633 switch (after_sb_1p) { 2633 switch (after_sb_1p) {
2634 case ASB_DISCARD_YOUNGER_PRI: 2634 case ASB_DISCARD_YOUNGER_PRI:
@@ -2681,7 +2681,7 @@ static int drbd_asb_recover_2p(struct drbd_device *device) __must_hold(local)
2681 enum drbd_after_sb_p after_sb_2p; 2681 enum drbd_after_sb_p after_sb_2p;
2682 2682
2683 rcu_read_lock(); 2683 rcu_read_lock();
2684 after_sb_2p = rcu_dereference(device->tconn->net_conf)->after_sb_2p; 2684 after_sb_2p = rcu_dereference(device->connection->net_conf)->after_sb_2p;
2685 rcu_read_unlock(); 2685 rcu_read_unlock();
2686 switch (after_sb_2p) { 2686 switch (after_sb_2p) {
2687 case ASB_DISCARD_YOUNGER_PRI: 2687 case ASB_DISCARD_YOUNGER_PRI:
@@ -2777,7 +2777,7 @@ static int drbd_uuid_compare(struct drbd_device *device, int *rule_nr) __must_ho
2777 2777
2778 if (device->p_uuid[UI_BITMAP] == (u64)0 && device->ldev->md.uuid[UI_BITMAP] != (u64)0) { 2778 if (device->p_uuid[UI_BITMAP] == (u64)0 && device->ldev->md.uuid[UI_BITMAP] != (u64)0) {
2779 2779
2780 if (device->tconn->agreed_pro_version < 91) 2780 if (device->connection->agreed_pro_version < 91)
2781 return -1091; 2781 return -1091;
2782 2782
2783 if ((device->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START] & ~((u64)1)) && 2783 if ((device->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
@@ -2800,7 +2800,7 @@ static int drbd_uuid_compare(struct drbd_device *device, int *rule_nr) __must_ho
2800 2800
2801 if (device->ldev->md.uuid[UI_BITMAP] == (u64)0 && device->p_uuid[UI_BITMAP] != (u64)0) { 2801 if (device->ldev->md.uuid[UI_BITMAP] == (u64)0 && device->p_uuid[UI_BITMAP] != (u64)0) {
2802 2802
2803 if (device->tconn->agreed_pro_version < 91) 2803 if (device->connection->agreed_pro_version < 91)
2804 return -1091; 2804 return -1091;
2805 2805
2806 if ((device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (device->p_uuid[UI_BITMAP] & ~((u64)1)) && 2806 if ((device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (device->p_uuid[UI_BITMAP] & ~((u64)1)) &&
@@ -2833,7 +2833,7 @@ static int drbd_uuid_compare(struct drbd_device *device, int *rule_nr) __must_ho
2833 case 1: /* self_pri && !peer_pri */ return 1; 2833 case 1: /* self_pri && !peer_pri */ return 1;
2834 case 2: /* !self_pri && peer_pri */ return -1; 2834 case 2: /* !self_pri && peer_pri */ return -1;
2835 case 3: /* self_pri && peer_pri */ 2835 case 3: /* self_pri && peer_pri */
2836 dc = test_bit(RESOLVE_CONFLICTS, &device->tconn->flags); 2836 dc = test_bit(RESOLVE_CONFLICTS, &device->connection->flags);
2837 return dc ? -1 : 1; 2837 return dc ? -1 : 1;
2838 } 2838 }
2839 } 2839 }
@@ -2846,14 +2846,14 @@ static int drbd_uuid_compare(struct drbd_device *device, int *rule_nr) __must_ho
2846 *rule_nr = 51; 2846 *rule_nr = 51;
2847 peer = device->p_uuid[UI_HISTORY_START] & ~((u64)1); 2847 peer = device->p_uuid[UI_HISTORY_START] & ~((u64)1);
2848 if (self == peer) { 2848 if (self == peer) {
2849 if (device->tconn->agreed_pro_version < 96 ? 2849 if (device->connection->agreed_pro_version < 96 ?
2850 (device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == 2850 (device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
2851 (device->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) : 2851 (device->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
2852 peer + UUID_NEW_BM_OFFSET == (device->p_uuid[UI_BITMAP] & ~((u64)1))) { 2852 peer + UUID_NEW_BM_OFFSET == (device->p_uuid[UI_BITMAP] & ~((u64)1))) {
2853 /* The last P_SYNC_UUID did not get through. Undo the UUID 2853 /* The last P_SYNC_UUID did not get through. Undo the UUID
2854 modifications the peer made when it last started a resync as sync source. */ 2854 modifications the peer made when it last started a resync as sync source. */
2855 2855
2856 if (device->tconn->agreed_pro_version < 91) 2856 if (device->connection->agreed_pro_version < 91)
2857 return -1091; 2857 return -1091;
2858 2858
2859 device->p_uuid[UI_BITMAP] = device->p_uuid[UI_HISTORY_START]; 2859 device->p_uuid[UI_BITMAP] = device->p_uuid[UI_HISTORY_START];
@@ -2883,14 +2883,14 @@ static int drbd_uuid_compare(struct drbd_device *device, int *rule_nr) __must_ho
2883 *rule_nr = 71; 2883 *rule_nr = 71;
2884 self = device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1); 2884 self = device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2885 if (self == peer) { 2885 if (self == peer) {
2886 if (device->tconn->agreed_pro_version < 96 ? 2886 if (device->connection->agreed_pro_version < 96 ?
2887 (device->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == 2887 (device->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
2888 (device->p_uuid[UI_HISTORY_START] & ~((u64)1)) : 2888 (device->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
2889 self + UUID_NEW_BM_OFFSET == (device->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) { 2889 self + UUID_NEW_BM_OFFSET == (device->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
2890 /* The last P_SYNC_UUID did not get through. Undo the UUID 2890 /* The last P_SYNC_UUID did not get through. Undo the UUID
2891 modifications we made when we last started a resync as sync source. */ 2891 modifications we made when we last started a resync as sync source. */
2892 2892
2893 if (device->tconn->agreed_pro_version < 91) 2893 if (device->connection->agreed_pro_version < 91)
2894 return -1091; 2894 return -1091;
2895 2895
2896 __drbd_uuid_set(device, UI_BITMAP, device->ldev->md.uuid[UI_HISTORY_START]); 2896 __drbd_uuid_set(device, UI_BITMAP, device->ldev->md.uuid[UI_HISTORY_START]);
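Every UUID comparison in drbd_uuid_compare() masks out bit 0 (~((u64)1)) first; the lowest bit carries a flag rather than identity, so it must not influence the match. A small model of that masking:

#include <stdint.h>
#include <stdio.h>

/* bit 0 of a data-generation UUID carries a flag, so identity
 * comparisons mask it out, as in the rules above */
#define UUID_FLAG_MASK (~(uint64_t)1)

static int same_data_generation(uint64_t self, uint64_t peer)
{
	return (self & UUID_FLAG_MASK) == (peer & UUID_FLAG_MASK);
}

int main(void)
{
	uint64_t self = 0x1122334455667700ULL;
	uint64_t peer = self | 1;	/* same generation, flag bit differs */
	printf("same generation: %d\n", same_data_generation(self, peer));
	return 0;
}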
@@ -2982,7 +2982,7 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_device *device, enum drbd
2982 drbd_khelper(device, "initial-split-brain"); 2982 drbd_khelper(device, "initial-split-brain");
2983 2983
2984 rcu_read_lock(); 2984 rcu_read_lock();
2985 nc = rcu_dereference(device->tconn->net_conf); 2985 nc = rcu_dereference(device->connection->net_conf);
2986 2986
2987 if (hg == 100 || (hg == -100 && nc->always_asbp)) { 2987 if (hg == 100 || (hg == -100 && nc->always_asbp)) {
2988 int pcount = (device->state.role == R_PRIMARY) 2988 int pcount = (device->state.role == R_PRIMARY)
@@ -3057,7 +3057,7 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_device *device, enum drbd
3057 } 3057 }
3058 } 3058 }
3059 3059
3060 if (tentative || test_bit(CONN_DRY_RUN, &device->tconn->flags)) { 3060 if (tentative || test_bit(CONN_DRY_RUN, &device->connection->flags)) {
3061 if (hg == 0) 3061 if (hg == 0)
3062 dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n"); 3062 dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
3063 else 3063 else
@@ -3103,7 +3103,7 @@ static enum drbd_after_sb_p convert_after_sb(enum drbd_after_sb_p peer)
3103 return peer; 3103 return peer;
3104} 3104}
3105 3105
3106static int receive_protocol(struct drbd_tconn *tconn, struct packet_info *pi) 3106static int receive_protocol(struct drbd_connection *connection, struct packet_info *pi)
3107{ 3107{
3108 struct p_protocol *p = pi->data; 3108 struct p_protocol *p = pi->data;
3109 enum drbd_after_sb_p p_after_sb_0p, p_after_sb_1p, p_after_sb_2p; 3109 enum drbd_after_sb_p p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
@@ -3121,58 +3121,58 @@ static int receive_protocol(struct drbd_tconn *tconn, struct packet_info *pi)
3121 cf = be32_to_cpu(p->conn_flags); 3121 cf = be32_to_cpu(p->conn_flags);
3122 p_discard_my_data = cf & CF_DISCARD_MY_DATA; 3122 p_discard_my_data = cf & CF_DISCARD_MY_DATA;
3123 3123
3124 if (tconn->agreed_pro_version >= 87) { 3124 if (connection->agreed_pro_version >= 87) {
3125 int err; 3125 int err;
3126 3126
3127 if (pi->size > sizeof(integrity_alg)) 3127 if (pi->size > sizeof(integrity_alg))
3128 return -EIO; 3128 return -EIO;
3129 err = drbd_recv_all(tconn, integrity_alg, pi->size); 3129 err = drbd_recv_all(connection, integrity_alg, pi->size);
3130 if (err) 3130 if (err)
3131 return err; 3131 return err;
3132 integrity_alg[SHARED_SECRET_MAX - 1] = 0; 3132 integrity_alg[SHARED_SECRET_MAX - 1] = 0;
3133 } 3133 }
3134 3134
3135 if (pi->cmd != P_PROTOCOL_UPDATE) { 3135 if (pi->cmd != P_PROTOCOL_UPDATE) {
3136 clear_bit(CONN_DRY_RUN, &tconn->flags); 3136 clear_bit(CONN_DRY_RUN, &connection->flags);
3137 3137
3138 if (cf & CF_DRY_RUN) 3138 if (cf & CF_DRY_RUN)
3139 set_bit(CONN_DRY_RUN, &tconn->flags); 3139 set_bit(CONN_DRY_RUN, &connection->flags);
3140 3140
3141 rcu_read_lock(); 3141 rcu_read_lock();
3142 nc = rcu_dereference(tconn->net_conf); 3142 nc = rcu_dereference(connection->net_conf);
3143 3143
3144 if (p_proto != nc->wire_protocol) { 3144 if (p_proto != nc->wire_protocol) {
3145 conn_err(tconn, "incompatible %s settings\n", "protocol"); 3145 conn_err(connection, "incompatible %s settings\n", "protocol");
3146 goto disconnect_rcu_unlock; 3146 goto disconnect_rcu_unlock;
3147 } 3147 }
3148 3148
3149 if (convert_after_sb(p_after_sb_0p) != nc->after_sb_0p) { 3149 if (convert_after_sb(p_after_sb_0p) != nc->after_sb_0p) {
3150 conn_err(tconn, "incompatible %s settings\n", "after-sb-0pri"); 3150 conn_err(connection, "incompatible %s settings\n", "after-sb-0pri");
3151 goto disconnect_rcu_unlock; 3151 goto disconnect_rcu_unlock;
3152 } 3152 }
3153 3153
3154 if (convert_after_sb(p_after_sb_1p) != nc->after_sb_1p) { 3154 if (convert_after_sb(p_after_sb_1p) != nc->after_sb_1p) {
3155 conn_err(tconn, "incompatible %s settings\n", "after-sb-1pri"); 3155 conn_err(connection, "incompatible %s settings\n", "after-sb-1pri");
3156 goto disconnect_rcu_unlock; 3156 goto disconnect_rcu_unlock;
3157 } 3157 }
3158 3158
3159 if (convert_after_sb(p_after_sb_2p) != nc->after_sb_2p) { 3159 if (convert_after_sb(p_after_sb_2p) != nc->after_sb_2p) {
3160 conn_err(tconn, "incompatible %s settings\n", "after-sb-2pri"); 3160 conn_err(connection, "incompatible %s settings\n", "after-sb-2pri");
3161 goto disconnect_rcu_unlock; 3161 goto disconnect_rcu_unlock;
3162 } 3162 }
3163 3163
3164 if (p_discard_my_data && nc->discard_my_data) { 3164 if (p_discard_my_data && nc->discard_my_data) {
3165 conn_err(tconn, "incompatible %s settings\n", "discard-my-data"); 3165 conn_err(connection, "incompatible %s settings\n", "discard-my-data");
3166 goto disconnect_rcu_unlock; 3166 goto disconnect_rcu_unlock;
3167 } 3167 }
3168 3168
3169 if (p_two_primaries != nc->two_primaries) { 3169 if (p_two_primaries != nc->two_primaries) {
3170 conn_err(tconn, "incompatible %s settings\n", "allow-two-primaries"); 3170 conn_err(connection, "incompatible %s settings\n", "allow-two-primaries");
3171 goto disconnect_rcu_unlock; 3171 goto disconnect_rcu_unlock;
3172 } 3172 }
3173 3173
3174 if (strcmp(integrity_alg, nc->integrity_alg)) { 3174 if (strcmp(integrity_alg, nc->integrity_alg)) {
3175 conn_err(tconn, "incompatible %s settings\n", "data-integrity-alg"); 3175 conn_err(connection, "incompatible %s settings\n", "data-integrity-alg");
3176 goto disconnect_rcu_unlock; 3176 goto disconnect_rcu_unlock;
3177 } 3177 }
3178 3178
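receive_protocol() rejects the connection on the first mismatching setting and names it in the log. A userspace sketch of that compatibility walk over a reduced net_conf:

#include <stdio.h>
#include <string.h>

/* subset of net_conf fields that must match on both sides */
struct net_conf_lite {
	int wire_protocol;
	int two_primaries;
	char integrity_alg[64];
};

/* returns the name of the first incompatible setting, or NULL */
static const char *check_compat(const struct net_conf_lite *local,
				const struct net_conf_lite *peer)
{
	if (peer->wire_protocol != local->wire_protocol)
		return "protocol";
	if (peer->two_primaries != local->two_primaries)
		return "allow-two-primaries";
	if (strcmp(peer->integrity_alg, local->integrity_alg))
		return "data-integrity-alg";
	return NULL;
}

int main(void)
{
	struct net_conf_lite a = { 3, 0, "crc32c" }, b = { 3, 0, "md5" };
	const char *bad = check_compat(&a, &b);
	if (bad)
		fprintf(stderr, "incompatible %s settings\n", bad);
	return bad != NULL;
}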
@@ -3193,7 +3193,7 @@ static int receive_protocol(struct drbd_tconn *tconn, struct packet_info *pi)
3193 3193
3194 peer_integrity_tfm = crypto_alloc_hash(integrity_alg, 0, CRYPTO_ALG_ASYNC); 3194 peer_integrity_tfm = crypto_alloc_hash(integrity_alg, 0, CRYPTO_ALG_ASYNC);
3195 if (!peer_integrity_tfm) { 3195 if (!peer_integrity_tfm) {
3196 conn_err(tconn, "peer data-integrity-alg %s not supported\n", 3196 conn_err(connection, "peer data-integrity-alg %s not supported\n",
3197 integrity_alg); 3197 integrity_alg);
3198 goto disconnect; 3198 goto disconnect;
3199 } 3199 }
@@ -3202,20 +3202,20 @@ static int receive_protocol(struct drbd_tconn *tconn, struct packet_info *pi)
3202 int_dig_in = kmalloc(hash_size, GFP_KERNEL); 3202 int_dig_in = kmalloc(hash_size, GFP_KERNEL);
3203 int_dig_vv = kmalloc(hash_size, GFP_KERNEL); 3203 int_dig_vv = kmalloc(hash_size, GFP_KERNEL);
3204 if (!(int_dig_in && int_dig_vv)) { 3204 if (!(int_dig_in && int_dig_vv)) {
3205 conn_err(tconn, "Allocation of buffers for data integrity checking failed\n"); 3205 conn_err(connection, "Allocation of buffers for data integrity checking failed\n");
3206 goto disconnect; 3206 goto disconnect;
3207 } 3207 }
3208 } 3208 }
3209 3209
3210 new_net_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL); 3210 new_net_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL);
3211 if (!new_net_conf) { 3211 if (!new_net_conf) {
3212 conn_err(tconn, "Allocation of new net_conf failed\n"); 3212 conn_err(connection, "Allocation of new net_conf failed\n");
3213 goto disconnect; 3213 goto disconnect;
3214 } 3214 }
3215 3215
3216 mutex_lock(&tconn->data.mutex); 3216 mutex_lock(&connection->data.mutex);
3217 mutex_lock(&tconn->conf_update); 3217 mutex_lock(&connection->conf_update);
3218 old_net_conf = tconn->net_conf; 3218 old_net_conf = connection->net_conf;
3219 *new_net_conf = *old_net_conf; 3219 *new_net_conf = *old_net_conf;
3220 3220
3221 new_net_conf->wire_protocol = p_proto; 3221 new_net_conf->wire_protocol = p_proto;
@@ -3224,19 +3224,19 @@ static int receive_protocol(struct drbd_tconn *tconn, struct packet_info *pi)
3224 new_net_conf->after_sb_2p = convert_after_sb(p_after_sb_2p); 3224 new_net_conf->after_sb_2p = convert_after_sb(p_after_sb_2p);
3225 new_net_conf->two_primaries = p_two_primaries; 3225 new_net_conf->two_primaries = p_two_primaries;
3226 3226
3227 rcu_assign_pointer(tconn->net_conf, new_net_conf); 3227 rcu_assign_pointer(connection->net_conf, new_net_conf);
3228 mutex_unlock(&tconn->conf_update); 3228 mutex_unlock(&connection->conf_update);
3229 mutex_unlock(&tconn->data.mutex); 3229 mutex_unlock(&connection->data.mutex);
3230 3230
3231 crypto_free_hash(tconn->peer_integrity_tfm); 3231 crypto_free_hash(connection->peer_integrity_tfm);
3232 kfree(tconn->int_dig_in); 3232 kfree(connection->int_dig_in);
3233 kfree(tconn->int_dig_vv); 3233 kfree(connection->int_dig_vv);
3234 tconn->peer_integrity_tfm = peer_integrity_tfm; 3234 connection->peer_integrity_tfm = peer_integrity_tfm;
3235 tconn->int_dig_in = int_dig_in; 3235 connection->int_dig_in = int_dig_in;
3236 tconn->int_dig_vv = int_dig_vv; 3236 connection->int_dig_vv = int_dig_vv;
3237 3237
3238 if (strcmp(old_net_conf->integrity_alg, integrity_alg)) 3238 if (strcmp(old_net_conf->integrity_alg, integrity_alg))
3239 conn_info(tconn, "peer data-integrity-alg: %s\n", 3239 conn_info(connection, "peer data-integrity-alg: %s\n",
3240 integrity_alg[0] ? integrity_alg : "(none)"); 3240 integrity_alg[0] ? integrity_alg : "(none)");
3241 3241
3242 synchronize_rcu(); 3242 synchronize_rcu();
@@ -3249,7 +3249,7 @@ disconnect:
3249 crypto_free_hash(peer_integrity_tfm); 3249 crypto_free_hash(peer_integrity_tfm);
3250 kfree(int_dig_in); 3250 kfree(int_dig_in);
3251 kfree(int_dig_vv); 3251 kfree(int_dig_vv);
3252 conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD); 3252 conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
3253 return -EIO; 3253 return -EIO;
3254} 3254}
3255 3255
@@ -3276,14 +3276,14 @@ struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_device *devi
3276 return tfm; 3276 return tfm;
3277} 3277}
3278 3278
3279static int ignore_remaining_packet(struct drbd_tconn *tconn, struct packet_info *pi) 3279static int ignore_remaining_packet(struct drbd_connection *connection, struct packet_info *pi)
3280{ 3280{
3281 void *buffer = tconn->data.rbuf; 3281 void *buffer = connection->data.rbuf;
3282 int size = pi->size; 3282 int size = pi->size;
3283 3283
3284 while (size) { 3284 while (size) {
3285 int s = min_t(int, size, DRBD_SOCKET_BUFFER_SIZE); 3285 int s = min_t(int, size, DRBD_SOCKET_BUFFER_SIZE);
3286 s = drbd_recv(tconn, buffer, s); 3286 s = drbd_recv(connection, buffer, s);
3287 if (s <= 0) { 3287 if (s <= 0) {
3288 if (s < 0) 3288 if (s < 0)
3289 return s; 3289 return s;
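ignore_remaining_packet() drains a payload it cannot attribute to a configured volume, one receive-buffer-sized chunk at a time. A self-contained model with a mock receive function:

#include <stdio.h>
#include <string.h>

#define SOCKET_BUFFER_SIZE 4096

/* stand-in for drbd_recv(); pretend the peer always delivers in full */
static int mock_recv(void *buf, int len) { memset(buf, 0, len); return len; }

/* read and discard 'size' payload bytes, one bounce buffer at a time */
static int ignore_remaining_packet(int size)
{
	char buffer[SOCKET_BUFFER_SIZE];

	while (size) {
		int s = size < SOCKET_BUFFER_SIZE ? size : SOCKET_BUFFER_SIZE;
		s = mock_recv(buffer, s);
		if (s <= 0)
			return s < 0 ? s : -1;	/* error or unexpected EOF */
		size -= s;
	}
	return 0;
}

int main(void)
{
	printf("drained: %d\n", ignore_remaining_packet(10000));
	return 0;
}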
@@ -3307,14 +3307,14 @@ static int ignore_remaining_packet(struct drbd_tconn *tconn, struct packet_info
3307 * 3307 *
3308 * (We can also end up here if drbd is misconfigured.) 3308 * (We can also end up here if drbd is misconfigured.)
3309 */ 3309 */
3310static int config_unknown_volume(struct drbd_tconn *tconn, struct packet_info *pi) 3310static int config_unknown_volume(struct drbd_connection *connection, struct packet_info *pi)
3311{ 3311{
3312 conn_warn(tconn, "%s packet received for volume %u, which is not configured locally\n", 3312 conn_warn(connection, "%s packet received for volume %u, which is not configured locally\n",
3313 cmdname(pi->cmd), pi->vnr); 3313 cmdname(pi->cmd), pi->vnr);
3314 return ignore_remaining_packet(tconn, pi); 3314 return ignore_remaining_packet(connection, pi);
3315} 3315}
3316 3316
3317static int receive_SyncParam(struct drbd_tconn *tconn, struct packet_info *pi) 3317static int receive_SyncParam(struct drbd_connection *connection, struct packet_info *pi)
3318{ 3318{
3319 struct drbd_device *device; 3319 struct drbd_device *device;
3320 struct p_rs_param_95 *p; 3320 struct p_rs_param_95 *p;
@@ -3323,14 +3323,14 @@ static int receive_SyncParam(struct drbd_tconn *tconn, struct packet_info *pi)
3323 struct crypto_hash *csums_tfm = NULL; 3323 struct crypto_hash *csums_tfm = NULL;
3324 struct net_conf *old_net_conf, *new_net_conf = NULL; 3324 struct net_conf *old_net_conf, *new_net_conf = NULL;
3325 struct disk_conf *old_disk_conf = NULL, *new_disk_conf = NULL; 3325 struct disk_conf *old_disk_conf = NULL, *new_disk_conf = NULL;
3326 const int apv = tconn->agreed_pro_version; 3326 const int apv = connection->agreed_pro_version;
3327 struct fifo_buffer *old_plan = NULL, *new_plan = NULL; 3327 struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
3328 int fifo_size = 0; 3328 int fifo_size = 0;
3329 int err; 3329 int err;
3330 3330
3331 device = vnr_to_device(tconn, pi->vnr); 3331 device = vnr_to_device(connection, pi->vnr);
3332 if (!device) 3332 if (!device)
3333 return config_unknown_volume(tconn, pi); 3333 return config_unknown_volume(connection, pi);
3334 3334
3335 exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param) 3335 exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param)
3336 : apv == 88 ? sizeof(struct p_rs_param) 3336 : apv == 88 ? sizeof(struct p_rs_param)
@@ -3361,17 +3361,17 @@ static int receive_SyncParam(struct drbd_tconn *tconn, struct packet_info *pi)
3361 p = pi->data; 3361 p = pi->data;
3362 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX); 3362 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
3363 3363
3364 err = drbd_recv_all(device->tconn, p, header_size); 3364 err = drbd_recv_all(device->connection, p, header_size);
3365 if (err) 3365 if (err)
3366 return err; 3366 return err;
3367 3367
3368 mutex_lock(&device->tconn->conf_update); 3368 mutex_lock(&device->connection->conf_update);
3369 old_net_conf = device->tconn->net_conf; 3369 old_net_conf = device->connection->net_conf;
3370 if (get_ldev(device)) { 3370 if (get_ldev(device)) {
3371 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL); 3371 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
3372 if (!new_disk_conf) { 3372 if (!new_disk_conf) {
3373 put_ldev(device); 3373 put_ldev(device);
3374 mutex_unlock(&device->tconn->conf_update); 3374 mutex_unlock(&device->connection->conf_update);
3375 dev_err(DEV, "Allocation of new disk_conf failed\n"); 3375 dev_err(DEV, "Allocation of new disk_conf failed\n");
3376 return -ENOMEM; 3376 return -ENOMEM;
3377 } 3377 }
@@ -3392,7 +3392,7 @@ static int receive_SyncParam(struct drbd_tconn *tconn, struct packet_info *pi)
3392 goto reconnect; 3392 goto reconnect;
3393 } 3393 }
3394 3394
3395 err = drbd_recv_all(device->tconn, p->verify_alg, data_size); 3395 err = drbd_recv_all(device->connection, p->verify_alg, data_size);
3396 if (err) 3396 if (err)
3397 goto reconnect; 3397 goto reconnect;
3398 /* we expect a NUL-terminated string */ 3398 /* we expect a NUL-terminated string */
@@ -3466,18 +3466,18 @@ static int receive_SyncParam(struct drbd_tconn *tconn, struct packet_info *pi)
3466 if (verify_tfm) { 3466 if (verify_tfm) {
3467 strcpy(new_net_conf->verify_alg, p->verify_alg); 3467 strcpy(new_net_conf->verify_alg, p->verify_alg);
3468 new_net_conf->verify_alg_len = strlen(p->verify_alg) + 1; 3468 new_net_conf->verify_alg_len = strlen(p->verify_alg) + 1;
3469 crypto_free_hash(device->tconn->verify_tfm); 3469 crypto_free_hash(device->connection->verify_tfm);
3470 device->tconn->verify_tfm = verify_tfm; 3470 device->connection->verify_tfm = verify_tfm;
3471 dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg); 3471 dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
3472 } 3472 }
3473 if (csums_tfm) { 3473 if (csums_tfm) {
3474 strcpy(new_net_conf->csums_alg, p->csums_alg); 3474 strcpy(new_net_conf->csums_alg, p->csums_alg);
3475 new_net_conf->csums_alg_len = strlen(p->csums_alg) + 1; 3475 new_net_conf->csums_alg_len = strlen(p->csums_alg) + 1;
3476 crypto_free_hash(device->tconn->csums_tfm); 3476 crypto_free_hash(device->connection->csums_tfm);
3477 device->tconn->csums_tfm = csums_tfm; 3477 device->connection->csums_tfm = csums_tfm;
3478 dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg); 3478 dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
3479 } 3479 }
3480 rcu_assign_pointer(tconn->net_conf, new_net_conf); 3480 rcu_assign_pointer(connection->net_conf, new_net_conf);
3481 } 3481 }
3482 } 3482 }
3483 3483
@@ -3491,7 +3491,7 @@ static int receive_SyncParam(struct drbd_tconn *tconn, struct packet_info *pi)
3491 rcu_assign_pointer(device->rs_plan_s, new_plan); 3491 rcu_assign_pointer(device->rs_plan_s, new_plan);
3492 } 3492 }
3493 3493
3494 mutex_unlock(&device->tconn->conf_update); 3494 mutex_unlock(&device->connection->conf_update);
3495 synchronize_rcu(); 3495 synchronize_rcu();
3496 if (new_net_conf) 3496 if (new_net_conf)
3497 kfree(old_net_conf); 3497 kfree(old_net_conf);
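
[Editor's note — illustrative aside, not part of the patch.] The conf_update path above is the usual RCU publish-then-reclaim sequence: take the mutex, build a new copy, rcu_assign_pointer() it in, drop the mutex, synchronize_rcu(), then kfree() the old copy. A minimal standalone sketch of that pattern (struct cfg and cfg_mutex are hypothetical names, not DRBD symbols):

	#include <linux/mutex.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct cfg { int timeout; };

	static struct cfg __rcu *active_cfg;
	static DEFINE_MUTEX(cfg_mutex);

	/* Reader side: dereference under rcu_read_lock(); copies are never
	 * modified in place, so readers always see a consistent snapshot. */
	static int cfg_read_timeout(void)
	{
		struct cfg *c;
		int t = 0;

		rcu_read_lock();
		c = rcu_dereference(active_cfg);
		if (c)
			t = c->timeout;
		rcu_read_unlock();
		return t;
	}

	/* Updater side: publish a new copy, wait out pre-existing readers,
	 * then reclaim the old copy -- mirroring the sequence above. */
	static int cfg_set_timeout(int t)
	{
		struct cfg *new_cfg, *old_cfg;

		new_cfg = kzalloc(sizeof(*new_cfg), GFP_KERNEL);
		if (!new_cfg)
			return -ENOMEM;
		new_cfg->timeout = t;

		mutex_lock(&cfg_mutex);
		old_cfg = rcu_dereference_protected(active_cfg,
						    lockdep_is_held(&cfg_mutex));
		rcu_assign_pointer(active_cfg, new_cfg);
		mutex_unlock(&cfg_mutex);

		synchronize_rcu();	/* no reader can still hold old_cfg */
		kfree(old_cfg);
		return 0;
	}
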
@@ -3505,7 +3505,7 @@ reconnect:
3505 put_ldev(device); 3505 put_ldev(device);
3506 kfree(new_disk_conf); 3506 kfree(new_disk_conf);
3507 } 3507 }
3508 mutex_unlock(&device->tconn->conf_update); 3508 mutex_unlock(&device->connection->conf_update);
3509 return -EIO; 3509 return -EIO;
3510 3510
3511disconnect: 3511disconnect:
@@ -3514,13 +3514,13 @@ disconnect:
3514 put_ldev(device); 3514 put_ldev(device);
3515 kfree(new_disk_conf); 3515 kfree(new_disk_conf);
3516 } 3516 }
3517 mutex_unlock(&device->tconn->conf_update); 3517 mutex_unlock(&device->connection->conf_update);
3518 /* just for completeness: actually not needed, 3518 /* just for completeness: actually not needed,
3519 * as this is not reached if csums_tfm was ok. */ 3519 * as this is not reached if csums_tfm was ok. */
3520 crypto_free_hash(csums_tfm); 3520 crypto_free_hash(csums_tfm);
3521 /* but free the verify_tfm again, if csums_tfm did not work out */ 3521 /* but free the verify_tfm again, if csums_tfm did not work out */
3522 crypto_free_hash(verify_tfm); 3522 crypto_free_hash(verify_tfm);
3523 conn_request_state(device->tconn, NS(conn, C_DISCONNECTING), CS_HARD); 3523 conn_request_state(device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
3524 return -EIO; 3524 return -EIO;
3525} 3525}
3526 3526
@@ -3537,7 +3537,7 @@ static void warn_if_differ_considerably(struct drbd_device *device,
3537 (unsigned long long)a, (unsigned long long)b); 3537 (unsigned long long)a, (unsigned long long)b);
3538} 3538}
3539 3539
3540static int receive_sizes(struct drbd_tconn *tconn, struct packet_info *pi) 3540static int receive_sizes(struct drbd_connection *connection, struct packet_info *pi)
3541{ 3541{
3542 struct drbd_device *device; 3542 struct drbd_device *device;
3543 struct p_sizes *p = pi->data; 3543 struct p_sizes *p = pi->data;
@@ -3546,9 +3546,9 @@ static int receive_sizes(struct drbd_tconn *tconn, struct packet_info *pi)
3546 int ldsc = 0; /* local disk size changed */ 3546 int ldsc = 0; /* local disk size changed */
3547 enum dds_flags ddsf; 3547 enum dds_flags ddsf;
3548 3548
3549 device = vnr_to_device(tconn, pi->vnr); 3549 device = vnr_to_device(connection, pi->vnr);
3550 if (!device) 3550 if (!device)
3551 return config_unknown_volume(tconn, pi); 3551 return config_unknown_volume(connection, pi);
3552 3552
3553 p_size = be64_to_cpu(p->d_size); 3553 p_size = be64_to_cpu(p->d_size);
3554 p_usize = be64_to_cpu(p->u_size); 3554 p_usize = be64_to_cpu(p->u_size);
@@ -3579,7 +3579,7 @@ static int receive_sizes(struct drbd_tconn *tconn, struct packet_info *pi)
3579 device->state.disk >= D_OUTDATED && 3579 device->state.disk >= D_OUTDATED &&
3580 device->state.conn < C_CONNECTED) { 3580 device->state.conn < C_CONNECTED) {
3581 dev_err(DEV, "The peer's disk size is too small!\n"); 3581 dev_err(DEV, "The peer's disk size is too small!\n");
3582 conn_request_state(device->tconn, NS(conn, C_DISCONNECTING), CS_HARD); 3582 conn_request_state(device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
3583 put_ldev(device); 3583 put_ldev(device);
3584 return -EIO; 3584 return -EIO;
3585 } 3585 }
@@ -3594,13 +3594,13 @@ static int receive_sizes(struct drbd_tconn *tconn, struct packet_info *pi)
3594 return -ENOMEM; 3594 return -ENOMEM;
3595 } 3595 }
3596 3596
3597 mutex_lock(&device->tconn->conf_update); 3597 mutex_lock(&device->connection->conf_update);
3598 old_disk_conf = device->ldev->disk_conf; 3598 old_disk_conf = device->ldev->disk_conf;
3599 *new_disk_conf = *old_disk_conf; 3599 *new_disk_conf = *old_disk_conf;
3600 new_disk_conf->disk_size = p_usize; 3600 new_disk_conf->disk_size = p_usize;
3601 3601
3602 rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf); 3602 rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
3603 mutex_unlock(&device->tconn->conf_update); 3603 mutex_unlock(&device->connection->conf_update);
3604 synchronize_rcu(); 3604 synchronize_rcu();
3605 kfree(old_disk_conf); 3605 kfree(old_disk_conf);
3606 3606
@@ -3658,16 +3658,16 @@ static int receive_sizes(struct drbd_tconn *tconn, struct packet_info *pi)
3658 return 0; 3658 return 0;
3659} 3659}
3660 3660
3661static int receive_uuids(struct drbd_tconn *tconn, struct packet_info *pi) 3661static int receive_uuids(struct drbd_connection *connection, struct packet_info *pi)
3662{ 3662{
3663 struct drbd_device *device; 3663 struct drbd_device *device;
3664 struct p_uuids *p = pi->data; 3664 struct p_uuids *p = pi->data;
3665 u64 *p_uuid; 3665 u64 *p_uuid;
3666 int i, updated_uuids = 0; 3666 int i, updated_uuids = 0;
3667 3667
3668 device = vnr_to_device(tconn, pi->vnr); 3668 device = vnr_to_device(connection, pi->vnr);
3669 if (!device) 3669 if (!device)
3670 return config_unknown_volume(tconn, pi); 3670 return config_unknown_volume(connection, pi);
3671 3671
3672 p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO); 3672 p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
3673 if (!p_uuid) { 3673 if (!p_uuid) {
@@ -3687,14 +3687,14 @@ static int receive_uuids(struct drbd_tconn *tconn, struct packet_info *pi)
3687 (device->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) { 3687 (device->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
3688 dev_err(DEV, "Can only connect to data with current UUID=%016llX\n", 3688 dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
3689 (unsigned long long)device->ed_uuid); 3689 (unsigned long long)device->ed_uuid);
3690 conn_request_state(device->tconn, NS(conn, C_DISCONNECTING), CS_HARD); 3690 conn_request_state(device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
3691 return -EIO; 3691 return -EIO;
3692 } 3692 }
3693 3693
3694 if (get_ldev(device)) { 3694 if (get_ldev(device)) {
3695 int skip_initial_sync = 3695 int skip_initial_sync =
3696 device->state.conn == C_CONNECTED && 3696 device->state.conn == C_CONNECTED &&
3697 device->tconn->agreed_pro_version >= 90 && 3697 device->connection->agreed_pro_version >= 90 &&
3698 device->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && 3698 device->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
3699 (p_uuid[UI_FLAGS] & 8); 3699 (p_uuid[UI_FLAGS] & 8);
3700 if (skip_initial_sync) { 3700 if (skip_initial_sync) {
@@ -3763,21 +3763,21 @@ static union drbd_state convert_state(union drbd_state ps)
3763 return ms; 3763 return ms;
3764} 3764}
3765 3765
3766static int receive_req_state(struct drbd_tconn *tconn, struct packet_info *pi) 3766static int receive_req_state(struct drbd_connection *connection, struct packet_info *pi)
3767{ 3767{
3768 struct drbd_device *device; 3768 struct drbd_device *device;
3769 struct p_req_state *p = pi->data; 3769 struct p_req_state *p = pi->data;
3770 union drbd_state mask, val; 3770 union drbd_state mask, val;
3771 enum drbd_state_rv rv; 3771 enum drbd_state_rv rv;
3772 3772
3773 device = vnr_to_device(tconn, pi->vnr); 3773 device = vnr_to_device(connection, pi->vnr);
3774 if (!device) 3774 if (!device)
3775 return -EIO; 3775 return -EIO;
3776 3776
3777 mask.i = be32_to_cpu(p->mask); 3777 mask.i = be32_to_cpu(p->mask);
3778 val.i = be32_to_cpu(p->val); 3778 val.i = be32_to_cpu(p->val);
3779 3779
3780 if (test_bit(RESOLVE_CONFLICTS, &device->tconn->flags) && 3780 if (test_bit(RESOLVE_CONFLICTS, &device->connection->flags) &&
3781 mutex_is_locked(device->state_mutex)) { 3781 mutex_is_locked(device->state_mutex)) {
3782 drbd_send_sr_reply(device, SS_CONCURRENT_ST_CHG); 3782 drbd_send_sr_reply(device, SS_CONCURRENT_ST_CHG);
3783 return 0; 3783 return 0;
@@ -3794,7 +3794,7 @@ static int receive_req_state(struct drbd_tconn *tconn, struct packet_info *pi)
3794 return 0; 3794 return 0;
3795} 3795}
3796 3796
3797static int receive_req_conn_state(struct drbd_tconn *tconn, struct packet_info *pi) 3797static int receive_req_conn_state(struct drbd_connection *connection, struct packet_info *pi)
3798{ 3798{
3799 struct p_req_state *p = pi->data; 3799 struct p_req_state *p = pi->data;
3800 union drbd_state mask, val; 3800 union drbd_state mask, val;
@@ -3803,22 +3803,22 @@ static int receive_req_conn_state(struct drbd_tconn *tconn, struct packet_info *
3803 mask.i = be32_to_cpu(p->mask); 3803 mask.i = be32_to_cpu(p->mask);
3804 val.i = be32_to_cpu(p->val); 3804 val.i = be32_to_cpu(p->val);
3805 3805
3806 if (test_bit(RESOLVE_CONFLICTS, &tconn->flags) && 3806 if (test_bit(RESOLVE_CONFLICTS, &connection->flags) &&
3807 mutex_is_locked(&tconn->cstate_mutex)) { 3807 mutex_is_locked(&connection->cstate_mutex)) {
3808 conn_send_sr_reply(tconn, SS_CONCURRENT_ST_CHG); 3808 conn_send_sr_reply(connection, SS_CONCURRENT_ST_CHG);
3809 return 0; 3809 return 0;
3810 } 3810 }
3811 3811
3812 mask = convert_state(mask); 3812 mask = convert_state(mask);
3813 val = convert_state(val); 3813 val = convert_state(val);
3814 3814
3815 rv = conn_request_state(tconn, mask, val, CS_VERBOSE | CS_LOCAL_ONLY | CS_IGN_OUTD_FAIL); 3815 rv = conn_request_state(connection, mask, val, CS_VERBOSE | CS_LOCAL_ONLY | CS_IGN_OUTD_FAIL);
3816 conn_send_sr_reply(tconn, rv); 3816 conn_send_sr_reply(connection, rv);
3817 3817
3818 return 0; 3818 return 0;
3819} 3819}
3820 3820
3821static int receive_state(struct drbd_tconn *tconn, struct packet_info *pi) 3821static int receive_state(struct drbd_connection *connection, struct packet_info *pi)
3822{ 3822{
3823 struct drbd_device *device; 3823 struct drbd_device *device;
3824 struct p_state *p = pi->data; 3824 struct p_state *p = pi->data;
@@ -3827,9 +3827,9 @@ static int receive_state(struct drbd_tconn *tconn, struct packet_info *pi)
3827 enum chg_state_flags cs_flags; 3827 enum chg_state_flags cs_flags;
3828 int rv; 3828 int rv;
3829 3829
3830 device = vnr_to_device(tconn, pi->vnr); 3830 device = vnr_to_device(connection, pi->vnr);
3831 if (!device) 3831 if (!device)
3832 return config_unknown_volume(tconn, pi); 3832 return config_unknown_volume(connection, pi);
3833 3833
3834 peer_state.i = be32_to_cpu(p->state); 3834 peer_state.i = be32_to_cpu(p->state);
3835 3835
@@ -3839,10 +3839,10 @@ static int receive_state(struct drbd_tconn *tconn, struct packet_info *pi)
3839 dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk)); 3839 dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
3840 } 3840 }
3841 3841
3842 spin_lock_irq(&device->tconn->req_lock); 3842 spin_lock_irq(&device->connection->req_lock);
3843 retry: 3843 retry:
3844 os = ns = drbd_read_state(device); 3844 os = ns = drbd_read_state(device);
3845 spin_unlock_irq(&device->tconn->req_lock); 3845 spin_unlock_irq(&device->connection->req_lock);
3846 3846
3847 /* If some other part of the code (asender thread, timeout) 3847 /* If some other part of the code (asender thread, timeout)
3848 * already decided to close the connection again, 3848 * already decided to close the connection again,
@@ -3936,16 +3936,16 @@ static int receive_state(struct drbd_tconn *tconn, struct packet_info *pi)
3936 peer_state.disk = D_DISKLESS; 3936 peer_state.disk = D_DISKLESS;
3937 real_peer_disk = D_DISKLESS; 3937 real_peer_disk = D_DISKLESS;
3938 } else { 3938 } else {
3939 if (test_and_clear_bit(CONN_DRY_RUN, &device->tconn->flags)) 3939 if (test_and_clear_bit(CONN_DRY_RUN, &device->connection->flags))
3940 return -EIO; 3940 return -EIO;
3941 D_ASSERT(os.conn == C_WF_REPORT_PARAMS); 3941 D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
3942 conn_request_state(device->tconn, NS(conn, C_DISCONNECTING), CS_HARD); 3942 conn_request_state(device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
3943 return -EIO; 3943 return -EIO;
3944 } 3944 }
3945 } 3945 }
3946 } 3946 }
3947 3947
3948 spin_lock_irq(&device->tconn->req_lock); 3948 spin_lock_irq(&device->connection->req_lock);
3949 if (os.i != drbd_read_state(device).i) 3949 if (os.i != drbd_read_state(device).i)
3950 goto retry; 3950 goto retry;
3951 clear_bit(CONSIDER_RESYNC, &device->flags); 3951 clear_bit(CONSIDER_RESYNC, &device->flags);
@@ -3959,20 +3959,20 @@ static int receive_state(struct drbd_tconn *tconn, struct packet_info *pi)
3959 test_bit(NEW_CUR_UUID, &device->flags)) { 3959 test_bit(NEW_CUR_UUID, &device->flags)) {
3960 /* Do not allow tl_restart(RESEND) for a rebooted peer. We can only allow this 3960 /* Do not allow tl_restart(RESEND) for a rebooted peer. We can only allow this
3961 for temporary network outages! */ 3961 for temporary network outages! */
3962 spin_unlock_irq(&device->tconn->req_lock); 3962 spin_unlock_irq(&device->connection->req_lock);
3963 dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n"); 3963 dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
3964 tl_clear(device->tconn); 3964 tl_clear(device->connection);
3965 drbd_uuid_new_current(device); 3965 drbd_uuid_new_current(device);
3966 clear_bit(NEW_CUR_UUID, &device->flags); 3966 clear_bit(NEW_CUR_UUID, &device->flags);
3967 conn_request_state(device->tconn, NS2(conn, C_PROTOCOL_ERROR, susp, 0), CS_HARD); 3967 conn_request_state(device->connection, NS2(conn, C_PROTOCOL_ERROR, susp, 0), CS_HARD);
3968 return -EIO; 3968 return -EIO;
3969 } 3969 }
3970 rv = _drbd_set_state(device, ns, cs_flags, NULL); 3970 rv = _drbd_set_state(device, ns, cs_flags, NULL);
3971 ns = drbd_read_state(device); 3971 ns = drbd_read_state(device);
3972 spin_unlock_irq(&device->tconn->req_lock); 3972 spin_unlock_irq(&device->connection->req_lock);
3973 3973
3974 if (rv < SS_SUCCESS) { 3974 if (rv < SS_SUCCESS) {
3975 conn_request_state(device->tconn, NS(conn, C_DISCONNECTING), CS_HARD); 3975 conn_request_state(device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
3976 return -EIO; 3976 return -EIO;
3977 } 3977 }
3978 3978
@@ -3994,12 +3994,12 @@ static int receive_state(struct drbd_tconn *tconn, struct packet_info *pi)
3994 return 0; 3994 return 0;
3995} 3995}
3996 3996
3997static int receive_sync_uuid(struct drbd_tconn *tconn, struct packet_info *pi) 3997static int receive_sync_uuid(struct drbd_connection *connection, struct packet_info *pi)
3998{ 3998{
3999 struct drbd_device *device; 3999 struct drbd_device *device;
4000 struct p_rs_uuid *p = pi->data; 4000 struct p_rs_uuid *p = pi->data;
4001 4001
4002 device = vnr_to_device(tconn, pi->vnr); 4002 device = vnr_to_device(connection, pi->vnr);
4003 if (!device) 4003 if (!device)
4004 return -EIO; 4004 return -EIO;
4005 4005
@@ -4038,7 +4038,7 @@ receive_bitmap_plain(struct drbd_device *device, unsigned int size,
4038 unsigned long *p, struct bm_xfer_ctx *c) 4038 unsigned long *p, struct bm_xfer_ctx *c)
4039{ 4039{
4040 unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE - 4040 unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE -
4041 drbd_header_size(device->tconn); 4041 drbd_header_size(device->connection);
4042 unsigned int num_words = min_t(size_t, data_size / sizeof(*p), 4042 unsigned int num_words = min_t(size_t, data_size / sizeof(*p),
4043 c->bm_words - c->word_offset); 4043 c->bm_words - c->word_offset);
4044 unsigned int want = num_words * sizeof(*p); 4044 unsigned int want = num_words * sizeof(*p);
@@ -4050,7 +4050,7 @@ receive_bitmap_plain(struct drbd_device *device, unsigned int size,
4050 } 4050 }
4051 if (want == 0) 4051 if (want == 0)
4052 return 0; 4052 return 0;
4053 err = drbd_recv_all(device->tconn, p, want); 4053 err = drbd_recv_all(device->connection, p, want);
4054 if (err) 4054 if (err)
4055 return err; 4055 return err;
4056 4056
@@ -4168,7 +4168,7 @@ decode_bitmap_c(struct drbd_device *device,
4168 * during all our tests. */ 4168 * during all our tests. */
4169 4169
4170 dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding); 4170 dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
4171 conn_request_state(device->tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD); 4171 conn_request_state(device->connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
4172 return -EIO; 4172 return -EIO;
4173} 4173}
4174 4174
@@ -4176,7 +4176,7 @@ void INFO_bm_xfer_stats(struct drbd_device *device,
4176 const char *direction, struct bm_xfer_ctx *c) 4176 const char *direction, struct bm_xfer_ctx *c)
4177{ 4177{
4178 /* what would it take to transfer it "plaintext" */ 4178 /* what would it take to transfer it "plaintext" */
4179 unsigned int header_size = drbd_header_size(device->tconn); 4179 unsigned int header_size = drbd_header_size(device->connection);
4180 unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE - header_size; 4180 unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
4181 unsigned int plain = 4181 unsigned int plain =
4182 header_size * (DIV_ROUND_UP(c->bm_words, data_size) + 1) + 4182 header_size * (DIV_ROUND_UP(c->bm_words, data_size) + 1) +
@@ -4216,13 +4216,13 @@ void INFO_bm_xfer_stats(struct drbd_device *device,
4216 in order to be agnostic to the 32 vs 64 bits issue. 4216 in order to be agnostic to the 32 vs 64 bits issue.
4217 4217
4218 returns 0 on failure, 1 if we successfully received it. */ 4218 returns 0 on failure, 1 if we successfully received it. */
4219static int receive_bitmap(struct drbd_tconn *tconn, struct packet_info *pi) 4219static int receive_bitmap(struct drbd_connection *connection, struct packet_info *pi)
4220{ 4220{
4221 struct drbd_device *device; 4221 struct drbd_device *device;
4222 struct bm_xfer_ctx c; 4222 struct bm_xfer_ctx c;
4223 int err; 4223 int err;
4224 4224
4225 device = vnr_to_device(tconn, pi->vnr); 4225 device = vnr_to_device(connection, pi->vnr);
4226 if (!device) 4226 if (!device)
4227 return -EIO; 4227 return -EIO;
4228 4228
@@ -4243,7 +4243,7 @@ static int receive_bitmap(struct drbd_tconn *tconn, struct packet_info *pi)
4243 * and the feature is enabled! */ 4243 * and the feature is enabled! */
4244 struct p_compressed_bm *p = pi->data; 4244 struct p_compressed_bm *p = pi->data;
4245 4245
4246 if (pi->size > DRBD_SOCKET_BUFFER_SIZE - drbd_header_size(tconn)) { 4246 if (pi->size > DRBD_SOCKET_BUFFER_SIZE - drbd_header_size(connection)) {
4247 dev_err(DEV, "ReportCBitmap packet too large\n"); 4247 dev_err(DEV, "ReportCBitmap packet too large\n");
4248 err = -EIO; 4248 err = -EIO;
4249 goto out; 4249 goto out;
@@ -4253,7 +4253,7 @@ static int receive_bitmap(struct drbd_tconn *tconn, struct packet_info *pi)
4253 err = -EIO; 4253 err = -EIO;
4254 goto out; 4254 goto out;
4255 } 4255 }
4256 err = drbd_recv_all(device->tconn, p, pi->size); 4256 err = drbd_recv_all(device->connection, p, pi->size);
4257 if (err) 4257 if (err)
4258 goto out; 4258 goto out;
4259 err = decode_bitmap_c(device, p, &c, pi->size); 4259 err = decode_bitmap_c(device, p, &c, pi->size);
@@ -4264,14 +4264,14 @@ static int receive_bitmap(struct drbd_tconn *tconn, struct packet_info *pi)
4264 } 4264 }
4265 4265
4266 c.packets[pi->cmd == P_BITMAP]++; 4266 c.packets[pi->cmd == P_BITMAP]++;
4267 c.bytes[pi->cmd == P_BITMAP] += drbd_header_size(tconn) + pi->size; 4267 c.bytes[pi->cmd == P_BITMAP] += drbd_header_size(connection) + pi->size;
4268 4268
4269 if (err <= 0) { 4269 if (err <= 0) {
4270 if (err < 0) 4270 if (err < 0)
4271 goto out; 4271 goto out;
4272 break; 4272 break;
4273 } 4273 }
4274 err = drbd_recv_header(device->tconn, pi); 4274 err = drbd_recv_header(device->connection, pi);
4275 if (err) 4275 if (err)
4276 goto out; 4276 goto out;
4277 } 4277 }
@@ -4302,29 +4302,29 @@ static int receive_bitmap(struct drbd_tconn *tconn, struct packet_info *pi)
4302 return err; 4302 return err;
4303} 4303}
4304 4304
4305static int receive_skip(struct drbd_tconn *tconn, struct packet_info *pi) 4305static int receive_skip(struct drbd_connection *connection, struct packet_info *pi)
4306{ 4306{
4307 conn_warn(tconn, "skipping unknown optional packet type %d, l: %d!\n", 4307 conn_warn(connection, "skipping unknown optional packet type %d, l: %d!\n",
4308 pi->cmd, pi->size); 4308 pi->cmd, pi->size);
4309 4309
4310 return ignore_remaining_packet(tconn, pi); 4310 return ignore_remaining_packet(connection, pi);
4311} 4311}
4312 4312
4313static int receive_UnplugRemote(struct drbd_tconn *tconn, struct packet_info *pi) 4313static int receive_UnplugRemote(struct drbd_connection *connection, struct packet_info *pi)
4314{ 4314{
4315 /* Make sure we've acked all the TCP data associated 4315 /* Make sure we've acked all the TCP data associated
4316 * with the data requests being unplugged */ 4316 * with the data requests being unplugged */
4317 drbd_tcp_quickack(tconn->data.socket); 4317 drbd_tcp_quickack(connection->data.socket);
4318 4318
4319 return 0; 4319 return 0;
4320} 4320}
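
[Editor's note — illustrative aside, not part of the patch.] drbd_tcp_quickack() above exists to flush a pending delayed ACK so the peer is not left waiting for acknowledgement of the unplugged requests. A userspace illustration of the same socket option (an assumption about the in-kernel effect; this is not DRBD's wrapper itself):

	#include <netinet/in.h>
	#include <netinet/tcp.h>
	#include <sys/socket.h>

	/* TCP_QUICKACK pushes any pending delayed ACK out immediately.
	 * It is not sticky: the kernel may fall back to delayed ACKs
	 * later, which is why it is re-armed at points like the unplug
	 * handler above. */
	static int tcp_quickack(int fd)
	{
		int one = 1;

		return setsockopt(fd, IPPROTO_TCP, TCP_QUICKACK,
				  &one, sizeof(one));
	}
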
4321 4321
4322static int receive_out_of_sync(struct drbd_tconn *tconn, struct packet_info *pi) 4322static int receive_out_of_sync(struct drbd_connection *connection, struct packet_info *pi)
4323{ 4323{
4324 struct drbd_device *device; 4324 struct drbd_device *device;
4325 struct p_block_desc *p = pi->data; 4325 struct p_block_desc *p = pi->data;
4326 4326
4327 device = vnr_to_device(tconn, pi->vnr); 4327 device = vnr_to_device(connection, pi->vnr);
4328 if (!device) 4328 if (!device)
4329 return -EIO; 4329 return -EIO;
4330 4330
@@ -4346,7 +4346,7 @@ static int receive_out_of_sync(struct drbd_tconn *tconn, struct packet_info *pi)
4346struct data_cmd { 4346struct data_cmd {
4347 int expect_payload; 4347 int expect_payload;
4348 size_t pkt_size; 4348 size_t pkt_size;
4349 int (*fn)(struct drbd_tconn *, struct packet_info *); 4349 int (*fn)(struct drbd_connection *, struct packet_info *);
4350}; 4350};
4351 4351
4352static struct data_cmd drbd_cmd_handler[] = { 4352static struct data_cmd drbd_cmd_handler[] = {
@@ -4376,43 +4376,43 @@ static struct data_cmd drbd_cmd_handler[] = {
4376 [P_PROTOCOL_UPDATE] = { 1, sizeof(struct p_protocol), receive_protocol }, 4376 [P_PROTOCOL_UPDATE] = { 1, sizeof(struct p_protocol), receive_protocol },
4377}; 4377};
4378 4378
4379static void drbdd(struct drbd_tconn *tconn) 4379static void drbdd(struct drbd_connection *connection)
4380{ 4380{
4381 struct packet_info pi; 4381 struct packet_info pi;
4382 size_t shs; /* sub header size */ 4382 size_t shs; /* sub header size */
4383 int err; 4383 int err;
4384 4384
4385 while (get_t_state(&tconn->receiver) == RUNNING) { 4385 while (get_t_state(&connection->receiver) == RUNNING) {
4386 struct data_cmd *cmd; 4386 struct data_cmd *cmd;
4387 4387
4388 drbd_thread_current_set_cpu(&tconn->receiver); 4388 drbd_thread_current_set_cpu(&connection->receiver);
4389 if (drbd_recv_header(tconn, &pi)) 4389 if (drbd_recv_header(connection, &pi))
4390 goto err_out; 4390 goto err_out;
4391 4391
4392 cmd = &drbd_cmd_handler[pi.cmd]; 4392 cmd = &drbd_cmd_handler[pi.cmd];
4393 if (unlikely(pi.cmd >= ARRAY_SIZE(drbd_cmd_handler) || !cmd->fn)) { 4393 if (unlikely(pi.cmd >= ARRAY_SIZE(drbd_cmd_handler) || !cmd->fn)) {
4394 conn_err(tconn, "Unexpected data packet %s (0x%04x)", 4394 conn_err(connection, "Unexpected data packet %s (0x%04x)",
4395 cmdname(pi.cmd), pi.cmd); 4395 cmdname(pi.cmd), pi.cmd);
4396 goto err_out; 4396 goto err_out;
4397 } 4397 }
4398 4398
4399 shs = cmd->pkt_size; 4399 shs = cmd->pkt_size;
4400 if (pi.size > shs && !cmd->expect_payload) { 4400 if (pi.size > shs && !cmd->expect_payload) {
4401 conn_err(tconn, "No payload expected %s l:%d\n", 4401 conn_err(connection, "No payload expected %s l:%d\n",
4402 cmdname(pi.cmd), pi.size); 4402 cmdname(pi.cmd), pi.size);
4403 goto err_out; 4403 goto err_out;
4404 } 4404 }
4405 4405
4406 if (shs) { 4406 if (shs) {
4407 err = drbd_recv_all_warn(tconn, pi.data, shs); 4407 err = drbd_recv_all_warn(connection, pi.data, shs);
4408 if (err) 4408 if (err)
4409 goto err_out; 4409 goto err_out;
4410 pi.size -= shs; 4410 pi.size -= shs;
4411 } 4411 }
4412 4412
4413 err = cmd->fn(tconn, &pi); 4413 err = cmd->fn(connection, &pi);
4414 if (err) { 4414 if (err) {
4415 conn_err(tconn, "error receiving %s, e: %d l: %d!\n", 4415 conn_err(connection, "error receiving %s, e: %d l: %d!\n",
4416 cmdname(pi.cmd), err, pi.size); 4416 cmdname(pi.cmd), err, pi.size);
4417 goto err_out; 4417 goto err_out;
4418 } 4418 }
@@ -4420,27 +4420,27 @@ static void drbdd(struct drbd_tconn *tconn)
4420 return; 4420 return;
4421 4421
4422 err_out: 4422 err_out:
4423 conn_request_state(tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD); 4423 conn_request_state(connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
4424} 4424}
4425 4425
4426void conn_flush_workqueue(struct drbd_tconn *tconn) 4426void conn_flush_workqueue(struct drbd_connection *connection)
4427{ 4427{
4428 struct drbd_wq_barrier barr; 4428 struct drbd_wq_barrier barr;
4429 4429
4430 barr.w.cb = w_prev_work_done; 4430 barr.w.cb = w_prev_work_done;
4431 barr.w.tconn = tconn; 4431 barr.w.connection = connection;
4432 init_completion(&barr.done); 4432 init_completion(&barr.done);
4433 drbd_queue_work(&tconn->sender_work, &barr.w); 4433 drbd_queue_work(&connection->sender_work, &barr.w);
4434 wait_for_completion(&barr.done); 4434 wait_for_completion(&barr.done);
4435} 4435}
4436 4436
4437static void conn_disconnect(struct drbd_tconn *tconn) 4437static void conn_disconnect(struct drbd_connection *connection)
4438{ 4438{
4439 struct drbd_device *device; 4439 struct drbd_device *device;
4440 enum drbd_conns oc; 4440 enum drbd_conns oc;
4441 int vnr; 4441 int vnr;
4442 4442
4443 if (tconn->cstate == C_STANDALONE) 4443 if (connection->cstate == C_STANDALONE)
4444 return; 4444 return;
4445 4445
4446 /* We are about to start the cleanup after connection loss. 4446 /* We are about to start the cleanup after connection loss.
@@ -4448,14 +4448,14 @@ static void conn_disconnect(struct drbd_tconn *tconn)
4448 * Usually we should be in some network failure state already, 4448 * Usually we should be in some network failure state already,
4449 * but just in case we are not, we fix it up here. 4449 * but just in case we are not, we fix it up here.
4450 */ 4450 */
4451 conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD); 4451 conn_request_state(connection, NS(conn, C_NETWORK_FAILURE), CS_HARD);
4452 4452
4453 /* asender does not clean up anything. it must not interfere, either */ 4453 /* asender does not clean up anything. it must not interfere, either */
4454 drbd_thread_stop(&tconn->asender); 4454 drbd_thread_stop(&connection->asender);
4455 drbd_free_sock(tconn); 4455 drbd_free_sock(connection);
4456 4456
4457 rcu_read_lock(); 4457 rcu_read_lock();
4458 idr_for_each_entry(&tconn->volumes, device, vnr) { 4458 idr_for_each_entry(&connection->volumes, device, vnr) {
4459 kref_get(&device->kref); 4459 kref_get(&device->kref);
4460 rcu_read_unlock(); 4460 rcu_read_unlock();
4461 drbd_disconnected(device); 4461 drbd_disconnected(device);
@@ -4464,26 +4464,26 @@ static void conn_disconnect(struct drbd_tconn *tconn)
4464 } 4464 }
4465 rcu_read_unlock(); 4465 rcu_read_unlock();
4466 4466
4467 if (!list_empty(&tconn->current_epoch->list)) 4467 if (!list_empty(&connection->current_epoch->list))
4468 conn_err(tconn, "ASSERTION FAILED: tconn->current_epoch->list not empty\n"); 4468 conn_err(connection, "ASSERTION FAILED: connection->current_epoch->list not empty\n");
4469 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */ 4469 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
4470 atomic_set(&tconn->current_epoch->epoch_size, 0); 4470 atomic_set(&connection->current_epoch->epoch_size, 0);
4471 tconn->send.seen_any_write_yet = false; 4471 connection->send.seen_any_write_yet = false;
4472 4472
4473 conn_info(tconn, "Connection closed\n"); 4473 conn_info(connection, "Connection closed\n");
4474 4474
4475 if (conn_highest_role(tconn) == R_PRIMARY && conn_highest_pdsk(tconn) >= D_UNKNOWN) 4475 if (conn_highest_role(connection) == R_PRIMARY && conn_highest_pdsk(connection) >= D_UNKNOWN)
4476 conn_try_outdate_peer_async(tconn); 4476 conn_try_outdate_peer_async(connection);
4477 4477
4478 spin_lock_irq(&tconn->req_lock); 4478 spin_lock_irq(&connection->req_lock);
4479 oc = tconn->cstate; 4479 oc = connection->cstate;
4480 if (oc >= C_UNCONNECTED) 4480 if (oc >= C_UNCONNECTED)
4481 _conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE); 4481 _conn_request_state(connection, NS(conn, C_UNCONNECTED), CS_VERBOSE);
4482 4482
4483 spin_unlock_irq(&tconn->req_lock); 4483 spin_unlock_irq(&connection->req_lock);
4484 4484
4485 if (oc == C_DISCONNECTING) 4485 if (oc == C_DISCONNECTING)
4486 conn_request_state(tconn, NS(conn, C_STANDALONE), CS_VERBOSE | CS_HARD); 4486 conn_request_state(connection, NS(conn, C_STANDALONE), CS_VERBOSE | CS_HARD);
4487} 4487}
4488 4488
4489static int drbd_disconnected(struct drbd_device *device) 4489static int drbd_disconnected(struct drbd_device *device)
@@ -4491,11 +4491,11 @@ static int drbd_disconnected(struct drbd_device *device)
4491 unsigned int i; 4491 unsigned int i;
4492 4492
4493 /* wait for current activity to cease. */ 4493 /* wait for current activity to cease. */
4494 spin_lock_irq(&device->tconn->req_lock); 4494 spin_lock_irq(&device->connection->req_lock);
4495 _drbd_wait_ee_list_empty(device, &device->active_ee); 4495 _drbd_wait_ee_list_empty(device, &device->active_ee);
4496 _drbd_wait_ee_list_empty(device, &device->sync_ee); 4496 _drbd_wait_ee_list_empty(device, &device->sync_ee);
4497 _drbd_wait_ee_list_empty(device, &device->read_ee); 4497 _drbd_wait_ee_list_empty(device, &device->read_ee);
4498 spin_unlock_irq(&device->tconn->req_lock); 4498 spin_unlock_irq(&device->connection->req_lock);
4499 4499
4500 /* We do not have data structures that would allow us to 4500 /* We do not have data structures that would allow us to
4501 * get the rs_pending_cnt down to 0 again. 4501 * get the rs_pending_cnt down to 0 again.
@@ -4536,7 +4536,7 @@ static int drbd_disconnected(struct drbd_device *device)
4536 device->p_uuid = NULL; 4536 device->p_uuid = NULL;
4537 4537
4538 if (!drbd_suspended(device)) 4538 if (!drbd_suspended(device))
4539 tl_clear(device->tconn); 4539 tl_clear(device->connection);
4540 4540
4541 drbd_md_sync(device); 4541 drbd_md_sync(device);
4542 4542
@@ -4578,19 +4578,19 @@ static int drbd_disconnected(struct drbd_device *device)
4578 * 4578 *
4579 * for now, they are expected to be zero, but ignored. 4579 * for now, they are expected to be zero, but ignored.
4580 */ 4580 */
4581static int drbd_send_features(struct drbd_tconn *tconn) 4581static int drbd_send_features(struct drbd_connection *connection)
4582{ 4582{
4583 struct drbd_socket *sock; 4583 struct drbd_socket *sock;
4584 struct p_connection_features *p; 4584 struct p_connection_features *p;
4585 4585
4586 sock = &tconn->data; 4586 sock = &connection->data;
4587 p = conn_prepare_command(tconn, sock); 4587 p = conn_prepare_command(connection, sock);
4588 if (!p) 4588 if (!p)
4589 return -EIO; 4589 return -EIO;
4590 memset(p, 0, sizeof(*p)); 4590 memset(p, 0, sizeof(*p));
4591 p->protocol_min = cpu_to_be32(PRO_VERSION_MIN); 4591 p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
4592 p->protocol_max = cpu_to_be32(PRO_VERSION_MAX); 4592 p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
4593 return conn_send_command(tconn, sock, P_CONNECTION_FEATURES, sizeof(*p), NULL, 0); 4593 return conn_send_command(connection, sock, P_CONNECTION_FEATURES, sizeof(*p), NULL, 0);
4594} 4594}
4595 4595
4596/* 4596/*
@@ -4600,36 +4600,36 @@ static int drbd_send_features(struct drbd_tconn *tconn)
4600 * -1 peer talks different language, 4600 * -1 peer talks different language,
4601 * no point in trying again, please go standalone. 4601 * no point in trying again, please go standalone.
4602 */ 4602 */
4603static int drbd_do_features(struct drbd_tconn *tconn) 4603static int drbd_do_features(struct drbd_connection *connection)
4604{ 4604{
4605 /* ASSERT current == tconn->receiver ... */ 4605 /* ASSERT current == connection->receiver ... */
4606 struct p_connection_features *p; 4606 struct p_connection_features *p;
4607 const int expect = sizeof(struct p_connection_features); 4607 const int expect = sizeof(struct p_connection_features);
4608 struct packet_info pi; 4608 struct packet_info pi;
4609 int err; 4609 int err;
4610 4610
4611 err = drbd_send_features(tconn); 4611 err = drbd_send_features(connection);
4612 if (err) 4612 if (err)
4613 return 0; 4613 return 0;
4614 4614
4615 err = drbd_recv_header(tconn, &pi); 4615 err = drbd_recv_header(connection, &pi);
4616 if (err) 4616 if (err)
4617 return 0; 4617 return 0;
4618 4618
4619 if (pi.cmd != P_CONNECTION_FEATURES) { 4619 if (pi.cmd != P_CONNECTION_FEATURES) {
4620 conn_err(tconn, "expected ConnectionFeatures packet, received: %s (0x%04x)\n", 4620 conn_err(connection, "expected ConnectionFeatures packet, received: %s (0x%04x)\n",
4621 cmdname(pi.cmd), pi.cmd); 4621 cmdname(pi.cmd), pi.cmd);
4622 return -1; 4622 return -1;
4623 } 4623 }
4624 4624
4625 if (pi.size != expect) { 4625 if (pi.size != expect) {
4626 conn_err(tconn, "expected ConnectionFeatures length: %u, received: %u\n", 4626 conn_err(connection, "expected ConnectionFeatures length: %u, received: %u\n",
4627 expect, pi.size); 4627 expect, pi.size);
4628 return -1; 4628 return -1;
4629 } 4629 }
4630 4630
4631 p = pi.data; 4631 p = pi.data;
4632 err = drbd_recv_all_warn(tconn, p, expect); 4632 err = drbd_recv_all_warn(connection, p, expect);
4633 if (err) 4633 if (err)
4634 return 0; 4634 return 0;
4635 4635
@@ -4642,15 +4642,15 @@ static int drbd_do_features(struct drbd_tconn *tconn)
4642 PRO_VERSION_MIN > p->protocol_max) 4642 PRO_VERSION_MIN > p->protocol_max)
4643 goto incompat; 4643 goto incompat;
4644 4644
4645 tconn->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max); 4645 connection->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
4646 4646
4647 conn_info(tconn, "Handshake successful: " 4647 conn_info(connection, "Handshake successful: "
4648 "Agreed network protocol version %d\n", tconn->agreed_pro_version); 4648 "Agreed network protocol version %d\n", connection->agreed_pro_version);
4649 4649
4650 return 1; 4650 return 1;
4651 4651
4652 incompat: 4652 incompat:
4653 conn_err(tconn, "incompatible DRBD dialects: " 4653 conn_err(connection, "incompatible DRBD dialects: "
4654 "I support %d-%d, peer supports %d-%d\n", 4654 "I support %d-%d, peer supports %d-%d\n",
4655 PRO_VERSION_MIN, PRO_VERSION_MAX, 4655 PRO_VERSION_MIN, PRO_VERSION_MAX,
4656 p->protocol_min, p->protocol_max); 4656 p->protocol_min, p->protocol_max);
@@ -4658,10 +4658,10 @@ static int drbd_do_features(struct drbd_tconn *tconn)
4658} 4658}
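
[Editor's note — illustrative aside, not part of the patch.] The feature handshake reduces to an interval-overlap check: each side advertises [protocol_min, protocol_max], the connection fails if the two ranges are disjoint, and otherwise the agreed version is the highest common one — exactly the min_t() above. The same arithmetic as a standalone sketch (hypothetical helper name):

	/* Returns the agreed protocol version, or -1 if the advertised
	 * ranges [my_min, my_max] and [peer_min, peer_max] do not overlap. */
	static int agree_pro_version(int my_min, int my_max,
				     int peer_min, int peer_max)
	{
		if (my_max < peer_min || my_min > peer_max)
			return -1;	/* incompatible dialects */
		return my_max < peer_max ? my_max : peer_max;
	}
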
4659 4659
4660#if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE) 4660#if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
4661static int drbd_do_auth(struct drbd_tconn *tconn) 4661static int drbd_do_auth(struct drbd_connection *connection)
4662{ 4662{
4663 conn_err(tconn, "This kernel was build without CONFIG_CRYPTO_HMAC.\n"); 4663 conn_err(connection, "This kernel was build without CONFIG_CRYPTO_HMAC.\n");
4664 conn_err(tconn, "You need to disable 'cram-hmac-alg' in drbd.conf.\n"); 4664 conn_err(connection, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
4665 return -1; 4665 return -1;
4666} 4666}
4667#else 4667#else
@@ -4673,7 +4673,7 @@ static int drbd_do_auth(struct drbd_tconn *tconn)
4673 -1 - auth failed, don't try again. 4673 -1 - auth failed, don't try again.
4674*/ 4674*/
4675 4675
4676static int drbd_do_auth(struct drbd_tconn *tconn) 4676static int drbd_do_auth(struct drbd_connection *connection)
4677{ 4677{
4678 struct drbd_socket *sock; 4678 struct drbd_socket *sock;
4679 char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... */ 4679 char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... */
@@ -4692,69 +4692,69 @@ static int drbd_do_auth(struct drbd_tconn *tconn)
4692 /* FIXME: Put the challenge/response into the preallocated socket buffer. */ 4692 /* FIXME: Put the challenge/response into the preallocated socket buffer. */
4693 4693
4694 rcu_read_lock(); 4694 rcu_read_lock();
4695 nc = rcu_dereference(tconn->net_conf); 4695 nc = rcu_dereference(connection->net_conf);
4696 key_len = strlen(nc->shared_secret); 4696 key_len = strlen(nc->shared_secret);
4697 memcpy(secret, nc->shared_secret, key_len); 4697 memcpy(secret, nc->shared_secret, key_len);
4698 rcu_read_unlock(); 4698 rcu_read_unlock();
4699 4699
4700 desc.tfm = tconn->cram_hmac_tfm; 4700 desc.tfm = connection->cram_hmac_tfm;
4701 desc.flags = 0; 4701 desc.flags = 0;
4702 4702
4703 rv = crypto_hash_setkey(tconn->cram_hmac_tfm, (u8 *)secret, key_len); 4703 rv = crypto_hash_setkey(connection->cram_hmac_tfm, (u8 *)secret, key_len);
4704 if (rv) { 4704 if (rv) {
4705 conn_err(tconn, "crypto_hash_setkey() failed with %d\n", rv); 4705 conn_err(connection, "crypto_hash_setkey() failed with %d\n", rv);
4706 rv = -1; 4706 rv = -1;
4707 goto fail; 4707 goto fail;
4708 } 4708 }
4709 4709
4710 get_random_bytes(my_challenge, CHALLENGE_LEN); 4710 get_random_bytes(my_challenge, CHALLENGE_LEN);
4711 4711
4712 sock = &tconn->data; 4712 sock = &connection->data;
4713 if (!conn_prepare_command(tconn, sock)) { 4713 if (!conn_prepare_command(connection, sock)) {
4714 rv = 0; 4714 rv = 0;
4715 goto fail; 4715 goto fail;
4716 } 4716 }
4717 rv = !conn_send_command(tconn, sock, P_AUTH_CHALLENGE, 0, 4717 rv = !conn_send_command(connection, sock, P_AUTH_CHALLENGE, 0,
4718 my_challenge, CHALLENGE_LEN); 4718 my_challenge, CHALLENGE_LEN);
4719 if (!rv) 4719 if (!rv)
4720 goto fail; 4720 goto fail;
4721 4721
4722 err = drbd_recv_header(tconn, &pi); 4722 err = drbd_recv_header(connection, &pi);
4723 if (err) { 4723 if (err) {
4724 rv = 0; 4724 rv = 0;
4725 goto fail; 4725 goto fail;
4726 } 4726 }
4727 4727
4728 if (pi.cmd != P_AUTH_CHALLENGE) { 4728 if (pi.cmd != P_AUTH_CHALLENGE) {
4729 conn_err(tconn, "expected AuthChallenge packet, received: %s (0x%04x)\n", 4729 conn_err(connection, "expected AuthChallenge packet, received: %s (0x%04x)\n",
4730 cmdname(pi.cmd), pi.cmd); 4730 cmdname(pi.cmd), pi.cmd);
4731 rv = 0; 4731 rv = 0;
4732 goto fail; 4732 goto fail;
4733 } 4733 }
4734 4734
4735 if (pi.size > CHALLENGE_LEN * 2) { 4735 if (pi.size > CHALLENGE_LEN * 2) {
4736 conn_err(tconn, "expected AuthChallenge payload too big.\n"); 4736 conn_err(connection, "expected AuthChallenge payload too big.\n");
4737 rv = -1; 4737 rv = -1;
4738 goto fail; 4738 goto fail;
4739 } 4739 }
4740 4740
4741 peers_ch = kmalloc(pi.size, GFP_NOIO); 4741 peers_ch = kmalloc(pi.size, GFP_NOIO);
4742 if (peers_ch == NULL) { 4742 if (peers_ch == NULL) {
4743 conn_err(tconn, "kmalloc of peers_ch failed\n"); 4743 conn_err(connection, "kmalloc of peers_ch failed\n");
4744 rv = -1; 4744 rv = -1;
4745 goto fail; 4745 goto fail;
4746 } 4746 }
4747 4747
4748 err = drbd_recv_all_warn(tconn, peers_ch, pi.size); 4748 err = drbd_recv_all_warn(connection, peers_ch, pi.size);
4749 if (err) { 4749 if (err) {
4750 rv = 0; 4750 rv = 0;
4751 goto fail; 4751 goto fail;
4752 } 4752 }
4753 4753
4754 resp_size = crypto_hash_digestsize(tconn->cram_hmac_tfm); 4754 resp_size = crypto_hash_digestsize(connection->cram_hmac_tfm);
4755 response = kmalloc(resp_size, GFP_NOIO); 4755 response = kmalloc(resp_size, GFP_NOIO);
4756 if (response == NULL) { 4756 if (response == NULL) {
4757 conn_err(tconn, "kmalloc of response failed\n"); 4757 conn_err(connection, "kmalloc of response failed\n");
4758 rv = -1; 4758 rv = -1;
4759 goto fail; 4759 goto fail;
4760 } 4760 }
@@ -4764,40 +4764,40 @@ static int drbd_do_auth(struct drbd_tconn *tconn)
4764 4764
4765 rv = crypto_hash_digest(&desc, &sg, sg.length, response); 4765 rv = crypto_hash_digest(&desc, &sg, sg.length, response);
4766 if (rv) { 4766 if (rv) {
4767 conn_err(tconn, "crypto_hash_digest() failed with %d\n", rv); 4767 conn_err(connection, "crypto_hash_digest() failed with %d\n", rv);
4768 rv = -1; 4768 rv = -1;
4769 goto fail; 4769 goto fail;
4770 } 4770 }
4771 4771
4772 if (!conn_prepare_command(tconn, sock)) { 4772 if (!conn_prepare_command(connection, sock)) {
4773 rv = 0; 4773 rv = 0;
4774 goto fail; 4774 goto fail;
4775 } 4775 }
4776 rv = !conn_send_command(tconn, sock, P_AUTH_RESPONSE, 0, 4776 rv = !conn_send_command(connection, sock, P_AUTH_RESPONSE, 0,
4777 response, resp_size); 4777 response, resp_size);
4778 if (!rv) 4778 if (!rv)
4779 goto fail; 4779 goto fail;
4780 4780
4781 err = drbd_recv_header(tconn, &pi); 4781 err = drbd_recv_header(connection, &pi);
4782 if (err) { 4782 if (err) {
4783 rv = 0; 4783 rv = 0;
4784 goto fail; 4784 goto fail;
4785 } 4785 }
4786 4786
4787 if (pi.cmd != P_AUTH_RESPONSE) { 4787 if (pi.cmd != P_AUTH_RESPONSE) {
4788 conn_err(tconn, "expected AuthResponse packet, received: %s (0x%04x)\n", 4788 conn_err(connection, "expected AuthResponse packet, received: %s (0x%04x)\n",
4789 cmdname(pi.cmd), pi.cmd); 4789 cmdname(pi.cmd), pi.cmd);
4790 rv = 0; 4790 rv = 0;
4791 goto fail; 4791 goto fail;
4792 } 4792 }
4793 4793
4794 if (pi.size != resp_size) { 4794 if (pi.size != resp_size) {
4795 conn_err(tconn, "expected AuthResponse payload of wrong size\n"); 4795 conn_err(connection, "expected AuthResponse payload of wrong size\n");
4796 rv = 0; 4796 rv = 0;
4797 goto fail; 4797 goto fail;
4798 } 4798 }
4799 4799
4800 err = drbd_recv_all_warn(tconn, response, resp_size); 4800 err = drbd_recv_all_warn(connection, response, resp_size);
4801 if (err) { 4801 if (err) {
4802 rv = 0; 4802 rv = 0;
4803 goto fail; 4803 goto fail;
@@ -4805,7 +4805,7 @@ static int drbd_do_auth(struct drbd_tconn *tconn)
4805 4805
4806 right_response = kmalloc(resp_size, GFP_NOIO); 4806 right_response = kmalloc(resp_size, GFP_NOIO);
4807 if (right_response == NULL) { 4807 if (right_response == NULL) {
4808 conn_err(tconn, "kmalloc of right_response failed\n"); 4808 conn_err(connection, "kmalloc of right_response failed\n");
4809 rv = -1; 4809 rv = -1;
4810 goto fail; 4810 goto fail;
4811 } 4811 }
@@ -4814,7 +4814,7 @@ static int drbd_do_auth(struct drbd_tconn *tconn)
4814 4814
4815 rv = crypto_hash_digest(&desc, &sg, sg.length, right_response); 4815 rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
4816 if (rv) { 4816 if (rv) {
4817 conn_err(tconn, "crypto_hash_digest() failed with %d\n", rv); 4817 conn_err(connection, "crypto_hash_digest() failed with %d\n", rv);
4818 rv = -1; 4818 rv = -1;
4819 goto fail; 4819 goto fail;
4820 } 4820 }
@@ -4822,7 +4822,7 @@ static int drbd_do_auth(struct drbd_tconn *tconn)
4822 rv = !memcmp(response, right_response, resp_size); 4822 rv = !memcmp(response, right_response, resp_size);
4823 4823
4824 if (rv) 4824 if (rv)
4825 conn_info(tconn, "Peer authenticated using %d bytes HMAC\n", 4825 conn_info(connection, "Peer authenticated using %d bytes HMAC\n",
4826 resp_size); 4826 resp_size);
4827 else 4827 else
4828 rv = -1; 4828 rv = -1;
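
[Editor's note — illustrative aside, not part of the patch.] The exchange above is a symmetric challenge-response: each side sends a random challenge, answers the peer's challenge with HMAC(shared_secret, challenge), and checks the peer's answer against a locally computed digest, so the secret itself never crosses the wire. A userspace sketch of the verification half (assumes OpenSSL's HMAC() and SHA-256 purely for illustration; the kernel code uses the crypto_hash API with the configured cram-hmac-alg):

	#include <openssl/evp.h>
	#include <openssl/hmac.h>
	#include <string.h>

	/* Returns 0 iff peer_response == HMAC(secret, my_challenge). */
	static int verify_peer_response(const unsigned char *secret, int secret_len,
					const unsigned char *my_challenge,
					size_t challenge_len,
					const unsigned char *peer_response,
					unsigned int response_len)
	{
		unsigned char expected[EVP_MAX_MD_SIZE];
		unsigned int expected_len = 0;

		if (!HMAC(EVP_sha256(), secret, secret_len,
			  my_challenge, challenge_len, expected, &expected_len))
			return -1;
		if (expected_len != response_len)
			return -1;
		/* Like the code above, a plain memcmp(); a constant-time
		 * compare would avoid leaking information through timing. */
		return memcmp(expected, peer_response, response_len) == 0 ? 0 : -1;
	}
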
@@ -4838,64 +4838,64 @@ static int drbd_do_auth(struct drbd_tconn *tconn)
4838 4838
4839int drbdd_init(struct drbd_thread *thi) 4839int drbdd_init(struct drbd_thread *thi)
4840{ 4840{
4841 struct drbd_tconn *tconn = thi->tconn; 4841 struct drbd_connection *connection = thi->connection;
4842 int h; 4842 int h;
4843 4843
4844 conn_info(tconn, "receiver (re)started\n"); 4844 conn_info(connection, "receiver (re)started\n");
4845 4845
4846 do { 4846 do {
4847 h = conn_connect(tconn); 4847 h = conn_connect(connection);
4848 if (h == 0) { 4848 if (h == 0) {
4849 conn_disconnect(tconn); 4849 conn_disconnect(connection);
4850 schedule_timeout_interruptible(HZ); 4850 schedule_timeout_interruptible(HZ);
4851 } 4851 }
4852 if (h == -1) { 4852 if (h == -1) {
4853 conn_warn(tconn, "Discarding network configuration.\n"); 4853 conn_warn(connection, "Discarding network configuration.\n");
4854 conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD); 4854 conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
4855 } 4855 }
4856 } while (h == 0); 4856 } while (h == 0);
4857 4857
4858 if (h > 0) 4858 if (h > 0)
4859 drbdd(tconn); 4859 drbdd(connection);
4860 4860
4861 conn_disconnect(tconn); 4861 conn_disconnect(connection);
4862 4862
4863 conn_info(tconn, "receiver terminated\n"); 4863 conn_info(connection, "receiver terminated\n");
4864 return 0; 4864 return 0;
4865} 4865}
4866 4866
4867/* ********* acknowledge sender ******** */ 4867/* ********* acknowledge sender ******** */
4868 4868
4869static int got_conn_RqSReply(struct drbd_tconn *tconn, struct packet_info *pi) 4869static int got_conn_RqSReply(struct drbd_connection *connection, struct packet_info *pi)
4870{ 4870{
4871 struct p_req_state_reply *p = pi->data; 4871 struct p_req_state_reply *p = pi->data;
4872 int retcode = be32_to_cpu(p->retcode); 4872 int retcode = be32_to_cpu(p->retcode);
4873 4873
4874 if (retcode >= SS_SUCCESS) { 4874 if (retcode >= SS_SUCCESS) {
4875 set_bit(CONN_WD_ST_CHG_OKAY, &tconn->flags); 4875 set_bit(CONN_WD_ST_CHG_OKAY, &connection->flags);
4876 } else { 4876 } else {
4877 set_bit(CONN_WD_ST_CHG_FAIL, &tconn->flags); 4877 set_bit(CONN_WD_ST_CHG_FAIL, &connection->flags);
4878 conn_err(tconn, "Requested state change failed by peer: %s (%d)\n", 4878 conn_err(connection, "Requested state change failed by peer: %s (%d)\n",
4879 drbd_set_st_err_str(retcode), retcode); 4879 drbd_set_st_err_str(retcode), retcode);
4880 } 4880 }
4881 wake_up(&tconn->ping_wait); 4881 wake_up(&connection->ping_wait);
4882 4882
4883 return 0; 4883 return 0;
4884} 4884}
4885 4885
4886static int got_RqSReply(struct drbd_tconn *tconn, struct packet_info *pi) 4886static int got_RqSReply(struct drbd_connection *connection, struct packet_info *pi)
4887{ 4887{
4888 struct drbd_device *device; 4888 struct drbd_device *device;
4889 struct p_req_state_reply *p = pi->data; 4889 struct p_req_state_reply *p = pi->data;
4890 int retcode = be32_to_cpu(p->retcode); 4890 int retcode = be32_to_cpu(p->retcode);
4891 4891
4892 device = vnr_to_device(tconn, pi->vnr); 4892 device = vnr_to_device(connection, pi->vnr);
4893 if (!device) 4893 if (!device)
4894 return -EIO; 4894 return -EIO;
4895 4895
4896 if (test_bit(CONN_WD_ST_CHG_REQ, &tconn->flags)) { 4896 if (test_bit(CONN_WD_ST_CHG_REQ, &connection->flags)) {
4897 D_ASSERT(tconn->agreed_pro_version < 100); 4897 D_ASSERT(connection->agreed_pro_version < 100);
4898 return got_conn_RqSReply(tconn, pi); 4898 return got_conn_RqSReply(connection, pi);
4899 } 4899 }
4900 4900
4901 if (retcode >= SS_SUCCESS) { 4901 if (retcode >= SS_SUCCESS) {
@@ -4910,34 +4910,34 @@ static int got_RqSReply(struct drbd_tconn *tconn, struct packet_info *pi)
4910 return 0; 4910 return 0;
4911} 4911}
4912 4912
4913static int got_Ping(struct drbd_tconn *tconn, struct packet_info *pi) 4913static int got_Ping(struct drbd_connection *connection, struct packet_info *pi)
4914{ 4914{
4915 return drbd_send_ping_ack(tconn); 4915 return drbd_send_ping_ack(connection);
4916 4916
4917} 4917}
4918 4918
4919static int got_PingAck(struct drbd_tconn *tconn, struct packet_info *pi) 4919static int got_PingAck(struct drbd_connection *connection, struct packet_info *pi)
4920{ 4920{
4921 /* restore idle timeout */ 4921 /* restore idle timeout */
4922 tconn->meta.socket->sk->sk_rcvtimeo = tconn->net_conf->ping_int*HZ; 4922 connection->meta.socket->sk->sk_rcvtimeo = connection->net_conf->ping_int*HZ;
4923 if (!test_and_set_bit(GOT_PING_ACK, &tconn->flags)) 4923 if (!test_and_set_bit(GOT_PING_ACK, &connection->flags))
4924 wake_up(&tconn->ping_wait); 4924 wake_up(&connection->ping_wait);
4925 4925
4926 return 0; 4926 return 0;
4927} 4927}
4928 4928
4929static int got_IsInSync(struct drbd_tconn *tconn, struct packet_info *pi) 4929static int got_IsInSync(struct drbd_connection *connection, struct packet_info *pi)
4930{ 4930{
4931 struct drbd_device *device; 4931 struct drbd_device *device;
4932 struct p_block_ack *p = pi->data; 4932 struct p_block_ack *p = pi->data;
4933 sector_t sector = be64_to_cpu(p->sector); 4933 sector_t sector = be64_to_cpu(p->sector);
4934 int blksize = be32_to_cpu(p->blksize); 4934 int blksize = be32_to_cpu(p->blksize);
4935 4935
4936 device = vnr_to_device(tconn, pi->vnr); 4936 device = vnr_to_device(connection, pi->vnr);
4937 if (!device) 4937 if (!device)
4938 return -EIO; 4938 return -EIO;
4939 4939
4940 D_ASSERT(device->tconn->agreed_pro_version >= 89); 4940 D_ASSERT(device->connection->agreed_pro_version >= 89);
4941 4941
4942 update_peer_seq(device, be32_to_cpu(p->seq_num)); 4942 update_peer_seq(device, be32_to_cpu(p->seq_num));
4943 4943
@@ -4962,21 +4962,21 @@ validate_req_change_req_state(struct drbd_device *device, u64 id, sector_t secto
4962 struct drbd_request *req; 4962 struct drbd_request *req;
4963 struct bio_and_error m; 4963 struct bio_and_error m;
4964 4964
4965 spin_lock_irq(&device->tconn->req_lock); 4965 spin_lock_irq(&device->connection->req_lock);
4966 req = find_request(device, root, id, sector, missing_ok, func); 4966 req = find_request(device, root, id, sector, missing_ok, func);
4967 if (unlikely(!req)) { 4967 if (unlikely(!req)) {
4968 spin_unlock_irq(&device->tconn->req_lock); 4968 spin_unlock_irq(&device->connection->req_lock);
4969 return -EIO; 4969 return -EIO;
4970 } 4970 }
4971 __req_mod(req, what, &m); 4971 __req_mod(req, what, &m);
4972 spin_unlock_irq(&device->tconn->req_lock); 4972 spin_unlock_irq(&device->connection->req_lock);
4973 4973
4974 if (m.bio) 4974 if (m.bio)
4975 complete_master_bio(device, &m); 4975 complete_master_bio(device, &m);
4976 return 0; 4976 return 0;
4977} 4977}
4978 4978
4979static int got_BlockAck(struct drbd_tconn *tconn, struct packet_info *pi) 4979static int got_BlockAck(struct drbd_connection *connection, struct packet_info *pi)
4980{ 4980{
4981 struct drbd_device *device; 4981 struct drbd_device *device;
4982 struct p_block_ack *p = pi->data; 4982 struct p_block_ack *p = pi->data;
@@ -4984,7 +4984,7 @@ static int got_BlockAck(struct drbd_tconn *tconn, struct packet_info *pi)
4984 int blksize = be32_to_cpu(p->blksize); 4984 int blksize = be32_to_cpu(p->blksize);
4985 enum drbd_req_event what; 4985 enum drbd_req_event what;
4986 4986
4987 device = vnr_to_device(tconn, pi->vnr); 4987 device = vnr_to_device(connection, pi->vnr);
4988 if (!device) 4988 if (!device)
4989 return -EIO; 4989 return -EIO;
4990 4990
@@ -5020,7 +5020,7 @@ static int got_BlockAck(struct drbd_tconn *tconn, struct packet_info *pi)
5020 what, false); 5020 what, false);
5021} 5021}
5022 5022
5023static int got_NegAck(struct drbd_tconn *tconn, struct packet_info *pi) 5023static int got_NegAck(struct drbd_connection *connection, struct packet_info *pi)
5024{ 5024{
5025 struct drbd_device *device; 5025 struct drbd_device *device;
5026 struct p_block_ack *p = pi->data; 5026 struct p_block_ack *p = pi->data;
@@ -5028,7 +5028,7 @@ static int got_NegAck(struct drbd_tconn *tconn, struct packet_info *pi)
5028 int size = be32_to_cpu(p->blksize); 5028 int size = be32_to_cpu(p->blksize);
5029 int err; 5029 int err;
5030 5030
5031 device = vnr_to_device(tconn, pi->vnr); 5031 device = vnr_to_device(connection, pi->vnr);
5032 if (!device) 5032 if (!device)
5033 return -EIO; 5033 return -EIO;
5034 5034
@@ -5054,13 +5054,13 @@ static int got_NegAck(struct drbd_tconn *tconn, struct packet_info *pi)
5054 return 0; 5054 return 0;
5055} 5055}
5056 5056
5057static int got_NegDReply(struct drbd_tconn *tconn, struct packet_info *pi) 5057static int got_NegDReply(struct drbd_connection *connection, struct packet_info *pi)
5058{ 5058{
5059 struct drbd_device *device; 5059 struct drbd_device *device;
5060 struct p_block_ack *p = pi->data; 5060 struct p_block_ack *p = pi->data;
5061 sector_t sector = be64_to_cpu(p->sector); 5061 sector_t sector = be64_to_cpu(p->sector);
5062 5062
5063 device = vnr_to_device(tconn, pi->vnr); 5063 device = vnr_to_device(connection, pi->vnr);
5064 if (!device) 5064 if (!device)
5065 return -EIO; 5065 return -EIO;
5066 5066
@@ -5074,14 +5074,14 @@ static int got_NegDReply(struct drbd_tconn *tconn, struct packet_info *pi)
5074 NEG_ACKED, false); 5074 NEG_ACKED, false);
5075} 5075}
5076 5076
5077static int got_NegRSDReply(struct drbd_tconn *tconn, struct packet_info *pi) 5077static int got_NegRSDReply(struct drbd_connection *connection, struct packet_info *pi)
5078{ 5078{
5079 struct drbd_device *device; 5079 struct drbd_device *device;
5080 sector_t sector; 5080 sector_t sector;
5081 int size; 5081 int size;
5082 struct p_block_ack *p = pi->data; 5082 struct p_block_ack *p = pi->data;
5083 5083
5084 device = vnr_to_device(tconn, pi->vnr); 5084 device = vnr_to_device(connection, pi->vnr);
5085 if (!device) 5085 if (!device)
5086 return -EIO; 5086 return -EIO;
5087 5087
@@ -5108,16 +5108,16 @@ static int got_NegRSDReply(struct drbd_tconn *tconn, struct packet_info *pi)
5108 return 0; 5108 return 0;
5109} 5109}
5110 5110
5111static int got_BarrierAck(struct drbd_tconn *tconn, struct packet_info *pi) 5111static int got_BarrierAck(struct drbd_connection *connection, struct packet_info *pi)
5112{ 5112{
5113 struct p_barrier_ack *p = pi->data; 5113 struct p_barrier_ack *p = pi->data;
5114 struct drbd_device *device; 5114 struct drbd_device *device;
5115 int vnr; 5115 int vnr;
5116 5116
5117 tl_release(tconn, p->barrier, be32_to_cpu(p->set_size)); 5117 tl_release(connection, p->barrier, be32_to_cpu(p->set_size));
5118 5118
5119 rcu_read_lock(); 5119 rcu_read_lock();
5120 idr_for_each_entry(&tconn->volumes, device, vnr) { 5120 idr_for_each_entry(&connection->volumes, device, vnr) {
5121 if (device->state.conn == C_AHEAD && 5121 if (device->state.conn == C_AHEAD &&
5122 atomic_read(&device->ap_in_flight) == 0 && 5122 atomic_read(&device->ap_in_flight) == 0 &&
5123 !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &device->flags)) { 5123 !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &device->flags)) {
@@ -5130,7 +5130,7 @@ static int got_BarrierAck(struct drbd_tconn *tconn, struct packet_info *pi)
5130 return 0; 5130 return 0;
5131} 5131}
5132 5132
5133static int got_OVResult(struct drbd_tconn *tconn, struct packet_info *pi) 5133static int got_OVResult(struct drbd_connection *connection, struct packet_info *pi)
5134{ 5134{
5135 struct drbd_device *device; 5135 struct drbd_device *device;
5136 struct p_block_ack *p = pi->data; 5136 struct p_block_ack *p = pi->data;
@@ -5138,7 +5138,7 @@ static int got_OVResult(struct drbd_tconn *tconn, struct packet_info *pi)
5138 sector_t sector; 5138 sector_t sector;
5139 int size; 5139 int size;
5140 5140
5141 device = vnr_to_device(tconn, pi->vnr); 5141 device = vnr_to_device(connection, pi->vnr);
5142 if (!device) 5142 if (!device)
5143 return -EIO; 5143 return -EIO;
5144 5144
@@ -5169,7 +5169,7 @@ static int got_OVResult(struct drbd_tconn *tconn, struct packet_info *pi)
5169 if (w) { 5169 if (w) {
5170 w->cb = w_ov_finished; 5170 w->cb = w_ov_finished;
5171 w->device = device; 5171 w->device = device;
5172 drbd_queue_work(&device->tconn->sender_work, w); 5172 drbd_queue_work(&device->connection->sender_work, w);
5173 } else { 5173 } else {
5174 dev_err(DEV, "kmalloc(w) failed."); 5174 dev_err(DEV, "kmalloc(w) failed.");
5175 ov_out_of_sync_print(device); 5175 ov_out_of_sync_print(device);
@@ -5180,22 +5180,22 @@ static int got_OVResult(struct drbd_tconn *tconn, struct packet_info *pi)
5180 return 0; 5180 return 0;
5181} 5181}
5182 5182
5183static int got_skip(struct drbd_tconn *tconn, struct packet_info *pi) 5183static int got_skip(struct drbd_connection *connection, struct packet_info *pi)
5184{ 5184{
5185 return 0; 5185 return 0;
5186} 5186}
5187 5187
5188static int tconn_finish_peer_reqs(struct drbd_tconn *tconn) 5188static int connection_finish_peer_reqs(struct drbd_connection *connection)
5189{ 5189{
5190 struct drbd_device *device; 5190 struct drbd_device *device;
5191 int vnr, not_empty = 0; 5191 int vnr, not_empty = 0;
5192 5192
5193 do { 5193 do {
5194 clear_bit(SIGNAL_ASENDER, &tconn->flags); 5194 clear_bit(SIGNAL_ASENDER, &connection->flags);
5195 flush_signals(current); 5195 flush_signals(current);
5196 5196
5197 rcu_read_lock(); 5197 rcu_read_lock();
5198 idr_for_each_entry(&tconn->volumes, device, vnr) { 5198 idr_for_each_entry(&connection->volumes, device, vnr) {
5199 kref_get(&device->kref); 5199 kref_get(&device->kref);
5200 rcu_read_unlock(); 5200 rcu_read_unlock();
5201 if (drbd_finish_peer_reqs(device)) { 5201 if (drbd_finish_peer_reqs(device)) {
@@ -5205,15 +5205,15 @@ static int tconn_finish_peer_reqs(struct drbd_tconn *tconn)
5205 kref_put(&device->kref, &drbd_minor_destroy); 5205 kref_put(&device->kref, &drbd_minor_destroy);
5206 rcu_read_lock(); 5206 rcu_read_lock();
5207 } 5207 }
5208 set_bit(SIGNAL_ASENDER, &tconn->flags); 5208 set_bit(SIGNAL_ASENDER, &connection->flags);
5209 5209
5210 spin_lock_irq(&tconn->req_lock); 5210 spin_lock_irq(&connection->req_lock);
5211 idr_for_each_entry(&tconn->volumes, device, vnr) { 5211 idr_for_each_entry(&connection->volumes, device, vnr) {
5212 not_empty = !list_empty(&device->done_ee); 5212 not_empty = !list_empty(&device->done_ee);
5213 if (not_empty) 5213 if (not_empty)
5214 break; 5214 break;
5215 } 5215 }
5216 spin_unlock_irq(&tconn->req_lock); 5216 spin_unlock_irq(&connection->req_lock);
5217 rcu_read_unlock(); 5217 rcu_read_unlock();
5218 } while (not_empty); 5218 } while (not_empty);
5219 5219
@@ -5222,7 +5222,7 @@ static int tconn_finish_peer_reqs(struct drbd_tconn *tconn)
5222 5222
5223struct asender_cmd { 5223struct asender_cmd {
5224 size_t pkt_size; 5224 size_t pkt_size;
5225 int (*fn)(struct drbd_tconn *tconn, struct packet_info *); 5225 int (*fn)(struct drbd_connection *connection, struct packet_info *);
5226}; 5226};
5227 5227
5228static struct asender_cmd asender_tbl[] = { 5228static struct asender_cmd asender_tbl[] = {
@@ -5247,13 +5247,13 @@ static struct asender_cmd asender_tbl[] = {
5247 5247
5248int drbd_asender(struct drbd_thread *thi) 5248int drbd_asender(struct drbd_thread *thi)
5249{ 5249{
5250 struct drbd_tconn *tconn = thi->tconn; 5250 struct drbd_connection *connection = thi->connection;
5251 struct asender_cmd *cmd = NULL; 5251 struct asender_cmd *cmd = NULL;
5252 struct packet_info pi; 5252 struct packet_info pi;
5253 int rv; 5253 int rv;
5254 void *buf = tconn->meta.rbuf; 5254 void *buf = connection->meta.rbuf;
5255 int received = 0; 5255 int received = 0;
5256 unsigned int header_size = drbd_header_size(tconn); 5256 unsigned int header_size = drbd_header_size(connection);
5257 int expect = header_size; 5257 int expect = header_size;
5258 bool ping_timeout_active = false; 5258 bool ping_timeout_active = false;
5259 struct net_conf *nc; 5259 struct net_conf *nc;
@@ -5262,45 +5262,45 @@ int drbd_asender(struct drbd_thread *thi)
5262 5262
5263 rv = sched_setscheduler(current, SCHED_RR, &param); 5263 rv = sched_setscheduler(current, SCHED_RR, &param);
5264 if (rv < 0) 5264 if (rv < 0)
5265 conn_err(tconn, "drbd_asender: ERROR set priority, ret=%d\n", rv); 5265 conn_err(connection, "drbd_asender: ERROR set priority, ret=%d\n", rv);
5266 5266
5267 while (get_t_state(thi) == RUNNING) { 5267 while (get_t_state(thi) == RUNNING) {
5268 drbd_thread_current_set_cpu(thi); 5268 drbd_thread_current_set_cpu(thi);
5269 5269
5270 rcu_read_lock(); 5270 rcu_read_lock();
5271 nc = rcu_dereference(tconn->net_conf); 5271 nc = rcu_dereference(connection->net_conf);
5272 ping_timeo = nc->ping_timeo; 5272 ping_timeo = nc->ping_timeo;
5273 tcp_cork = nc->tcp_cork; 5273 tcp_cork = nc->tcp_cork;
5274 ping_int = nc->ping_int; 5274 ping_int = nc->ping_int;
5275 rcu_read_unlock(); 5275 rcu_read_unlock();
5276 5276
5277 if (test_and_clear_bit(SEND_PING, &tconn->flags)) { 5277 if (test_and_clear_bit(SEND_PING, &connection->flags)) {
5278 if (drbd_send_ping(tconn)) { 5278 if (drbd_send_ping(connection)) {
5279 conn_err(tconn, "drbd_send_ping has failed\n"); 5279 conn_err(connection, "drbd_send_ping has failed\n");
5280 goto reconnect; 5280 goto reconnect;
5281 } 5281 }
5282 tconn->meta.socket->sk->sk_rcvtimeo = ping_timeo * HZ / 10; 5282 connection->meta.socket->sk->sk_rcvtimeo = ping_timeo * HZ / 10;
5283 ping_timeout_active = true; 5283 ping_timeout_active = true;
5284 } 5284 }
5285 5285
5286 /* TODO: conditionally cork; it may hurt latency if we cork without 5286 /* TODO: conditionally cork; it may hurt latency if we cork without
5287 much to send */ 5287 much to send */
5288 if (tcp_cork) 5288 if (tcp_cork)
5289 drbd_tcp_cork(tconn->meta.socket); 5289 drbd_tcp_cork(connection->meta.socket);
5290 if (tconn_finish_peer_reqs(tconn)) { 5290 if (connection_finish_peer_reqs(connection)) {
5291 conn_err(tconn, "tconn_finish_peer_reqs() failed\n"); 5291 conn_err(connection, "connection_finish_peer_reqs() failed\n");
5292 goto reconnect; 5292 goto reconnect;
5293 } 5293 }
5294 /* but unconditionally uncork unless disabled */ 5294 /* but unconditionally uncork unless disabled */
5295 if (tcp_cork) 5295 if (tcp_cork)
5296 drbd_tcp_uncork(tconn->meta.socket); 5296 drbd_tcp_uncork(connection->meta.socket);
5297 5297
5298 /* short circuit, recv_msg would return EINTR anyway. */ 5298 /* short circuit, recv_msg would return EINTR anyway. */
5299 if (signal_pending(current)) 5299 if (signal_pending(current))
5300 continue; 5300 continue;
5301 5301
5302 rv = drbd_recv_short(tconn->meta.socket, buf, expect-received, 0); 5302 rv = drbd_recv_short(connection->meta.socket, buf, expect-received, 0);
5303 clear_bit(SIGNAL_ASENDER, &tconn->flags); 5303 clear_bit(SIGNAL_ASENDER, &connection->flags);
5304 5304
5305 flush_signals(current); 5305 flush_signals(current);
5306 5306
@@ -5318,51 +5318,51 @@ int drbd_asender(struct drbd_thread *thi)
5318 received += rv; 5318 received += rv;
5319 buf += rv; 5319 buf += rv;
5320 } else if (rv == 0) { 5320 } else if (rv == 0) {
5321 if (test_bit(DISCONNECT_SENT, &tconn->flags)) { 5321 if (test_bit(DISCONNECT_SENT, &connection->flags)) {
5322 long t; 5322 long t;
5323 rcu_read_lock(); 5323 rcu_read_lock();
5324 t = rcu_dereference(tconn->net_conf)->ping_timeo * HZ/10; 5324 t = rcu_dereference(connection->net_conf)->ping_timeo * HZ/10;
5325 rcu_read_unlock(); 5325 rcu_read_unlock();
5326 5326
5327 t = wait_event_timeout(tconn->ping_wait, 5327 t = wait_event_timeout(connection->ping_wait,
5328 tconn->cstate < C_WF_REPORT_PARAMS, 5328 connection->cstate < C_WF_REPORT_PARAMS,
5329 t); 5329 t);
5330 if (t) 5330 if (t)
5331 break; 5331 break;
5332 } 5332 }
5333 conn_err(tconn, "meta connection shut down by peer.\n"); 5333 conn_err(connection, "meta connection shut down by peer.\n");
5334 goto reconnect; 5334 goto reconnect;
5335 } else if (rv == -EAGAIN) { 5335 } else if (rv == -EAGAIN) {
5336 /* If the data socket received something meanwhile, 5336 /* If the data socket received something meanwhile,
5337 * that is good enough: peer is still alive. */ 5337 * that is good enough: peer is still alive. */
5338 if (time_after(tconn->last_received, 5338 if (time_after(connection->last_received,
5339 jiffies - tconn->meta.socket->sk->sk_rcvtimeo)) 5339 jiffies - connection->meta.socket->sk->sk_rcvtimeo))
5340 continue; 5340 continue;
5341 if (ping_timeout_active) { 5341 if (ping_timeout_active) {
5342 conn_err(tconn, "PingAck did not arrive in time.\n"); 5342 conn_err(connection, "PingAck did not arrive in time.\n");
5343 goto reconnect; 5343 goto reconnect;
5344 } 5344 }
5345 set_bit(SEND_PING, &tconn->flags); 5345 set_bit(SEND_PING, &connection->flags);
5346 continue; 5346 continue;
5347 } else if (rv == -EINTR) { 5347 } else if (rv == -EINTR) {
5348 continue; 5348 continue;
5349 } else { 5349 } else {
5350 conn_err(tconn, "sock_recvmsg returned %d\n", rv); 5350 conn_err(connection, "sock_recvmsg returned %d\n", rv);
5351 goto reconnect; 5351 goto reconnect;
5352 } 5352 }
5353 5353
5354 if (received == expect && cmd == NULL) { 5354 if (received == expect && cmd == NULL) {
5355 if (decode_header(tconn, tconn->meta.rbuf, &pi)) 5355 if (decode_header(connection, connection->meta.rbuf, &pi))
5356 goto reconnect; 5356 goto reconnect;
5357 cmd = &asender_tbl[pi.cmd]; 5357 cmd = &asender_tbl[pi.cmd];
5358 if (pi.cmd >= ARRAY_SIZE(asender_tbl) || !cmd->fn) { 5358 if (pi.cmd >= ARRAY_SIZE(asender_tbl) || !cmd->fn) {
5359 conn_err(tconn, "Unexpected meta packet %s (0x%04x)\n", 5359 conn_err(connection, "Unexpected meta packet %s (0x%04x)\n",
5360 cmdname(pi.cmd), pi.cmd); 5360 cmdname(pi.cmd), pi.cmd);
5361 goto disconnect; 5361 goto disconnect;
5362 } 5362 }
5363 expect = header_size + cmd->pkt_size; 5363 expect = header_size + cmd->pkt_size;
5364 if (pi.size != expect - header_size) { 5364 if (pi.size != expect - header_size) {
5365 conn_err(tconn, "Wrong packet size on meta (c: %d, l: %d)\n", 5365 conn_err(connection, "Wrong packet size on meta (c: %d, l: %d)\n",
5366 pi.cmd, pi.size); 5366 pi.cmd, pi.size);
5367 goto reconnect; 5367 goto reconnect;
5368 } 5368 }
@@ -5370,21 +5370,21 @@ int drbd_asender(struct drbd_thread *thi)
5370 if (received == expect) { 5370 if (received == expect) {
5371 bool err; 5371 bool err;
5372 5372
5373 err = cmd->fn(tconn, &pi); 5373 err = cmd->fn(connection, &pi);
5374 if (err) { 5374 if (err) {
5375 conn_err(tconn, "%pf failed\n", cmd->fn); 5375 conn_err(connection, "%pf failed\n", cmd->fn);
5376 goto reconnect; 5376 goto reconnect;
5377 } 5377 }
5378 5378
5379 tconn->last_received = jiffies; 5379 connection->last_received = jiffies;
5380 5380
5381 if (cmd == &asender_tbl[P_PING_ACK]) { 5381 if (cmd == &asender_tbl[P_PING_ACK]) {
5382 /* restore idle timeout */ 5382 /* restore idle timeout */
5383 tconn->meta.socket->sk->sk_rcvtimeo = ping_int * HZ; 5383 connection->meta.socket->sk->sk_rcvtimeo = ping_int * HZ;
5384 ping_timeout_active = false; 5384 ping_timeout_active = false;
5385 } 5385 }
5386 5386
5387 buf = tconn->meta.rbuf; 5387 buf = connection->meta.rbuf;
5388 received = 0; 5388 received = 0;
5389 expect = header_size; 5389 expect = header_size;
5390 cmd = NULL; 5390 cmd = NULL;
@@ -5393,16 +5393,16 @@ int drbd_asender(struct drbd_thread *thi)
5393 5393
5394 if (0) { 5394 if (0) {
5395reconnect: 5395reconnect:
5396 conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD); 5396 conn_request_state(connection, NS(conn, C_NETWORK_FAILURE), CS_HARD);
5397 conn_md_sync(tconn); 5397 conn_md_sync(connection);
5398 } 5398 }
5399 if (0) { 5399 if (0) {
5400disconnect: 5400disconnect:
5401 conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD); 5401 conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
5402 } 5402 }
5403 clear_bit(SIGNAL_ASENDER, &tconn->flags); 5403 clear_bit(SIGNAL_ASENDER, &connection->flags);
5404 5404
5405 conn_info(tconn, "asender terminated\n"); 5405 conn_info(connection, "asender terminated\n");
5406 5406
5407 return 0; 5407 return 0;
5408} 5408}
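
The asender loop above is an accumulate-then-dispatch state machine: read until a full header has arrived, decode it to pick the handler and learn that command's fixed payload size, keep reading until header plus payload are complete, dispatch, then rearm for the next header. Below is a minimal user-space sketch of that pattern; the toy two-byte header, recv_some(), handler_tbl, and got_ping()/got_ack() are illustrative stand-ins, not DRBD's wire format or API.

#include <stdio.h>
#include <stddef.h>

#define HEADER_SIZE 2            /* toy header: [cmd byte][payload-size byte] */

struct handler {
	size_t pkt_size;                         /* fixed payload size, as in asender_cmd */
	int (*fn)(const unsigned char *payload);
};

static int got_ping(const unsigned char *p) { (void)p; puts("ping"); return 0; }
static int got_ack(const unsigned char *p)  { printf("ack %u\n", p[0]); return 0; }

static struct handler handler_tbl[] = {
	{ 0, got_ping },                         /* zero-byte payload, like P_PING */
	{ 1, got_ack },                          /* one-byte payload */
};

/* Stand-in for the socket read: a canned byte stream delivered one byte
 * at a time, to exercise the partial-read path the real loop must handle. */
static const unsigned char wire[] = { 0, 0,  1, 1, 42,  0, 0 };
static size_t wire_pos;

static int recv_some(unsigned char *dst, size_t want)
{
	(void)want;
	if (wire_pos >= sizeof(wire))
		return 0;                        /* "connection closed by peer" */
	*dst = wire[wire_pos++];
	return 1;                                /* deliberately short reads */
}

int main(void)
{
	unsigned char rbuf[16];
	size_t received = 0, expect = HEADER_SIZE;
	struct handler *cmd = NULL;

	for (;;) {
		int rv = recv_some(rbuf + received, expect - received);
		if (rv == 0)
			break;                       /* stream ended cleanly */
		received += rv;

		/* Full header in hand but no handler chosen yet: pick one
		 * and grow the expectation by that command's payload size. */
		if (received == expect && !cmd) {
			unsigned c = rbuf[0];
			if (c >= sizeof(handler_tbl) / sizeof(handler_tbl[0]))
				return 1;            /* unexpected meta packet */
			cmd = &handler_tbl[c];
			if (rbuf[1] != cmd->pkt_size)
				return 1;            /* wrong packet size on meta */
			expect = HEADER_SIZE + cmd->pkt_size;
		}

		/* Header plus payload complete: dispatch, then rearm. */
		if (received == expect && cmd) {
			if (cmd->fn(rbuf + HEADER_SIZE))
				return 1;
			received = 0;
			expect = HEADER_SIZE;
			cmd = NULL;
		}
	}
	return 0;
}

The same shape explains the reconnect paths in the real loop: any decode failure, size mismatch, or handler error abandons the partially read stream, because resynchronizing mid-packet is not possible.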
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index ab6abcbea6ab..a33a35e4655d 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -163,20 +163,21 @@ void drbd_req_destroy(struct kref *kref)
163 mempool_free(req, drbd_request_mempool); 163 mempool_free(req, drbd_request_mempool);
164} 164}
165 165
166static void wake_all_senders(struct drbd_tconn *tconn) { 166static void wake_all_senders(struct drbd_connection *connection)
167 wake_up(&tconn->sender_work.q_wait); 167{
168 wake_up(&connection->sender_work.q_wait);
168} 169}
169 170
170/* must hold resource->req_lock */ 171/* must hold resource->req_lock */
171void start_new_tl_epoch(struct drbd_tconn *tconn) 172void start_new_tl_epoch(struct drbd_connection *connection)
172{ 173{
173 /* no point closing an epoch if it is empty, anyway. */ 174 /* no point closing an epoch if it is empty, anyway. */
174 if (tconn->current_tle_writes == 0) 175 if (connection->current_tle_writes == 0)
175 return; 176 return;
176 177
177 tconn->current_tle_writes = 0; 178 connection->current_tle_writes = 0;
178 atomic_inc(&tconn->current_tle_nr); 179 atomic_inc(&connection->current_tle_nr);
179 wake_all_senders(tconn); 180 wake_all_senders(connection);
180} 181}
181 182
182void complete_master_bio(struct drbd_device *device, 183void complete_master_bio(struct drbd_device *device,
@@ -273,8 +274,8 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
273 * and reset the transfer log epoch write_cnt. 274 * and reset the transfer log epoch write_cnt.
274 */ 275 */
275 if (rw == WRITE && 276 if (rw == WRITE &&
276 req->epoch == atomic_read(&device->tconn->current_tle_nr)) 277 req->epoch == atomic_read(&device->connection->current_tle_nr))
277 start_new_tl_epoch(device->tconn); 278 start_new_tl_epoch(device->connection);
278 279
279 /* Update disk stats */ 280 /* Update disk stats */
280 _drbd_end_io_acct(device, req); 281 _drbd_end_io_acct(device, req);
@@ -476,7 +477,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
476 * and from w_read_retry_remote */ 477 * and from w_read_retry_remote */
477 D_ASSERT(!(req->rq_state & RQ_NET_MASK)); 478 D_ASSERT(!(req->rq_state & RQ_NET_MASK));
478 rcu_read_lock(); 479 rcu_read_lock();
479 nc = rcu_dereference(device->tconn->net_conf); 480 nc = rcu_dereference(device->connection->net_conf);
480 p = nc->wire_protocol; 481 p = nc->wire_protocol;
481 rcu_read_unlock(); 482 rcu_read_unlock();
482 req->rq_state |= 483 req->rq_state |=
@@ -541,7 +542,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
541 D_ASSERT((req->rq_state & RQ_LOCAL_MASK) == 0); 542 D_ASSERT((req->rq_state & RQ_LOCAL_MASK) == 0);
542 mod_rq_state(req, m, 0, RQ_NET_QUEUED); 543 mod_rq_state(req, m, 0, RQ_NET_QUEUED);
543 req->w.cb = w_send_read_req; 544 req->w.cb = w_send_read_req;
544 drbd_queue_work(&device->tconn->sender_work, &req->w); 545 drbd_queue_work(&device->connection->sender_work, &req->w);
545 break; 546 break;
546 547
547 case QUEUE_FOR_NET_WRITE: 548 case QUEUE_FOR_NET_WRITE:
@@ -576,22 +577,22 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
576 D_ASSERT(req->rq_state & RQ_NET_PENDING); 577 D_ASSERT(req->rq_state & RQ_NET_PENDING);
577 mod_rq_state(req, m, 0, RQ_NET_QUEUED|RQ_EXP_BARR_ACK); 578 mod_rq_state(req, m, 0, RQ_NET_QUEUED|RQ_EXP_BARR_ACK);
578 req->w.cb = w_send_dblock; 579 req->w.cb = w_send_dblock;
579 drbd_queue_work(&device->tconn->sender_work, &req->w); 580 drbd_queue_work(&device->connection->sender_work, &req->w);
580 581
581 /* close the epoch, in case it outgrew the limit */ 582 /* close the epoch, in case it outgrew the limit */
582 rcu_read_lock(); 583 rcu_read_lock();
583 nc = rcu_dereference(device->tconn->net_conf); 584 nc = rcu_dereference(device->connection->net_conf);
584 p = nc->max_epoch_size; 585 p = nc->max_epoch_size;
585 rcu_read_unlock(); 586 rcu_read_unlock();
586 if (device->tconn->current_tle_writes >= p) 587 if (device->connection->current_tle_writes >= p)
587 start_new_tl_epoch(device->tconn); 588 start_new_tl_epoch(device->connection);
588 589
589 break; 590 break;
590 591
591 case QUEUE_FOR_SEND_OOS: 592 case QUEUE_FOR_SEND_OOS:
592 mod_rq_state(req, m, 0, RQ_NET_QUEUED); 593 mod_rq_state(req, m, 0, RQ_NET_QUEUED);
593 req->w.cb = w_send_out_of_sync; 594 req->w.cb = w_send_out_of_sync;
594 drbd_queue_work(&device->tconn->sender_work, &req->w); 595 drbd_queue_work(&device->connection->sender_work, &req->w);
595 break; 596 break;
596 597
597 case READ_RETRY_REMOTE_CANCELED: 598 case READ_RETRY_REMOTE_CANCELED:
@@ -703,7 +704,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
703 704
704 get_ldev(device); /* always succeeds in this call path */ 705 get_ldev(device); /* always succeeds in this call path */
705 req->w.cb = w_restart_disk_io; 706 req->w.cb = w_restart_disk_io;
706 drbd_queue_work(&device->tconn->sender_work, &req->w); 707 drbd_queue_work(&device->connection->sender_work, &req->w);
707 break; 708 break;
708 709
709 case RESEND: 710 case RESEND:
@@ -724,7 +725,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
724 725
725 mod_rq_state(req, m, RQ_COMPLETION_SUSP, RQ_NET_QUEUED|RQ_NET_PENDING); 726 mod_rq_state(req, m, RQ_COMPLETION_SUSP, RQ_NET_QUEUED|RQ_NET_PENDING);
726 if (req->w.cb) { 727 if (req->w.cb) {
727 drbd_queue_work(&device->tconn->sender_work, &req->w); 728 drbd_queue_work(&device->connection->sender_work, &req->w);
728 rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ; 729 rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ;
729 } /* else: FIXME can this happen? */ 730 } /* else: FIXME can this happen? */
730 break; 731 break;
@@ -756,7 +757,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
756 break; 757 break;
757 758
758 case QUEUE_AS_DRBD_BARRIER: 759 case QUEUE_AS_DRBD_BARRIER:
759 start_new_tl_epoch(device->tconn); 760 start_new_tl_epoch(device->connection);
760 mod_rq_state(req, m, 0, RQ_NET_OK|RQ_NET_DONE); 761 mod_rq_state(req, m, 0, RQ_NET_OK|RQ_NET_DONE);
761 break; 762 break;
762 }; 763 };
@@ -850,9 +851,9 @@ static void complete_conflicting_writes(struct drbd_request *req)
850 break; 851 break;
851 /* Indicate to wake up device->misc_wait on progress. */ 852 /* Indicate to wake up device->misc_wait on progress. */
852 i->waiting = true; 853 i->waiting = true;
853 spin_unlock_irq(&device->tconn->req_lock); 854 spin_unlock_irq(&device->connection->req_lock);
854 schedule(); 855 schedule();
855 spin_lock_irq(&device->tconn->req_lock); 856 spin_lock_irq(&device->connection->req_lock);
856 } 857 }
857 finish_wait(&device->misc_wait, &wait); 858 finish_wait(&device->misc_wait, &wait);
858} 859}
@@ -860,17 +861,17 @@ static void complete_conflicting_writes(struct drbd_request *req)
860/* called within req_lock and rcu_read_lock() */ 861/* called within req_lock and rcu_read_lock() */
861static void maybe_pull_ahead(struct drbd_device *device) 862static void maybe_pull_ahead(struct drbd_device *device)
862{ 863{
863 struct drbd_tconn *tconn = device->tconn; 864 struct drbd_connection *connection = device->connection;
864 struct net_conf *nc; 865 struct net_conf *nc;
865 bool congested = false; 866 bool congested = false;
866 enum drbd_on_congestion on_congestion; 867 enum drbd_on_congestion on_congestion;
867 868
868 rcu_read_lock(); 869 rcu_read_lock();
869 nc = rcu_dereference(tconn->net_conf); 870 nc = rcu_dereference(connection->net_conf);
870 on_congestion = nc ? nc->on_congestion : OC_BLOCK; 871 on_congestion = nc ? nc->on_congestion : OC_BLOCK;
871 rcu_read_unlock(); 872 rcu_read_unlock();
872 if (on_congestion == OC_BLOCK || 873 if (on_congestion == OC_BLOCK ||
873 tconn->agreed_pro_version < 96) 874 connection->agreed_pro_version < 96)
874 return; 875 return;
875 876
876 /* If I don't even have good local storage, we can not reasonably try 877 /* If I don't even have good local storage, we can not reasonably try
@@ -893,7 +894,7 @@ static void maybe_pull_ahead(struct drbd_device *device)
893 894
894 if (congested) { 895 if (congested) {
895 /* start a new epoch for non-mirrored writes */ 896 /* start a new epoch for non-mirrored writes */
896 start_new_tl_epoch(device->tconn); 897 start_new_tl_epoch(device->connection);
897 898
898 if (on_congestion == OC_PULL_AHEAD) 899 if (on_congestion == OC_PULL_AHEAD)
899 _drbd_set_state(_NS(device, conn, C_AHEAD), 0, NULL); 900 _drbd_set_state(_NS(device, conn, C_AHEAD), 0, NULL);
@@ -1077,7 +1078,7 @@ static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request
1077 struct bio_and_error m = { NULL, }; 1078 struct bio_and_error m = { NULL, };
1078 bool no_remote = false; 1079 bool no_remote = false;
1079 1080
1080 spin_lock_irq(&device->tconn->req_lock); 1081 spin_lock_irq(&device->connection->req_lock);
1081 if (rw == WRITE) { 1082 if (rw == WRITE) {
1082 /* This may temporarily give up the req_lock, 1083 /* This may temporarily give up the req_lock,
1083 * but will re-acquire it before it returns here. 1084 * but will re-acquire it before it returns here.
@@ -1111,15 +1112,15 @@ static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request
1111 } 1112 }
1112 1113
1113 /* which transfer log epoch does this belong to? */ 1114 /* which transfer log epoch does this belong to? */
1114 req->epoch = atomic_read(&device->tconn->current_tle_nr); 1115 req->epoch = atomic_read(&device->connection->current_tle_nr);
1115 1116
1116 /* no point in adding empty flushes to the transfer log, 1117 /* no point in adding empty flushes to the transfer log,
1117 * they are mapped to drbd barriers already. */ 1118 * they are mapped to drbd barriers already. */
1118 if (likely(req->i.size!=0)) { 1119 if (likely(req->i.size!=0)) {
1119 if (rw == WRITE) 1120 if (rw == WRITE)
1120 device->tconn->current_tle_writes++; 1121 device->connection->current_tle_writes++;
1121 1122
1122 list_add_tail(&req->tl_requests, &device->tconn->transfer_log); 1123 list_add_tail(&req->tl_requests, &device->connection->transfer_log);
1123 } 1124 }
1124 1125
1125 if (rw == WRITE) { 1126 if (rw == WRITE) {
@@ -1139,9 +1140,9 @@ static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request
1139 /* needs to be marked within the same spinlock */ 1140 /* needs to be marked within the same spinlock */
1140 _req_mod(req, TO_BE_SUBMITTED); 1141 _req_mod(req, TO_BE_SUBMITTED);
1141 /* but we need to give up the spinlock to submit */ 1142 /* but we need to give up the spinlock to submit */
1142 spin_unlock_irq(&device->tconn->req_lock); 1143 spin_unlock_irq(&device->connection->req_lock);
1143 drbd_submit_req_private_bio(req); 1144 drbd_submit_req_private_bio(req);
1144 spin_lock_irq(&device->tconn->req_lock); 1145 spin_lock_irq(&device->connection->req_lock);
1145 } else if (no_remote) { 1146 } else if (no_remote) {
1146nodata: 1147nodata:
1147 if (__ratelimit(&drbd_ratelimit_state)) 1148 if (__ratelimit(&drbd_ratelimit_state))
@@ -1154,7 +1155,7 @@ nodata:
1154out: 1155out:
1155 if (drbd_req_put_completion_ref(req, &m, 1)) 1156 if (drbd_req_put_completion_ref(req, &m, 1))
1156 kref_put(&req->kref, drbd_req_destroy); 1157 kref_put(&req->kref, drbd_req_destroy);
1157 spin_unlock_irq(&device->tconn->req_lock); 1158 spin_unlock_irq(&device->connection->req_lock);
1158 1159
1159 if (m.bio) 1160 if (m.bio)
1160 complete_master_bio(device, &m); 1161 complete_master_bio(device, &m);
@@ -1320,12 +1321,12 @@ int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct
1320 return limit; 1321 return limit;
1321} 1322}
1322 1323
1323static struct drbd_request *find_oldest_request(struct drbd_tconn *tconn) 1324static struct drbd_request *find_oldest_request(struct drbd_connection *connection)
1324{ 1325{
1325 /* Walk the transfer log, 1326 /* Walk the transfer log,
1326 * and find the oldest not yet completed request */ 1327 * and find the oldest not yet completed request */
1327 struct drbd_request *r; 1328 struct drbd_request *r;
1328 list_for_each_entry(r, &tconn->transfer_log, tl_requests) { 1329 list_for_each_entry(r, &connection->transfer_log, tl_requests) {
1329 if (atomic_read(&r->completion_ref)) 1330 if (atomic_read(&r->completion_ref))
1330 return r; 1331 return r;
1331 } 1332 }
@@ -1335,14 +1336,14 @@ static struct drbd_request *find_oldest_request(struct drbd_tconn *tconn)
1335void request_timer_fn(unsigned long data) 1336void request_timer_fn(unsigned long data)
1336{ 1337{
1337 struct drbd_device *device = (struct drbd_device *) data; 1338 struct drbd_device *device = (struct drbd_device *) data;
1338 struct drbd_tconn *tconn = device->tconn; 1339 struct drbd_connection *connection = device->connection;
1339 struct drbd_request *req; /* oldest request */ 1340 struct drbd_request *req; /* oldest request */
1340 struct net_conf *nc; 1341 struct net_conf *nc;
1341 unsigned long ent = 0, dt = 0, et, nt; /* effective timeout = ko_count * timeout */ 1342 unsigned long ent = 0, dt = 0, et, nt; /* effective timeout = ko_count * timeout */
1342 unsigned long now; 1343 unsigned long now;
1343 1344
1344 rcu_read_lock(); 1345 rcu_read_lock();
1345 nc = rcu_dereference(tconn->net_conf); 1346 nc = rcu_dereference(connection->net_conf);
1346 if (nc && device->state.conn >= C_WF_REPORT_PARAMS) 1347 if (nc && device->state.conn >= C_WF_REPORT_PARAMS)
1347 ent = nc->timeout * HZ/10 * nc->ko_count; 1348 ent = nc->timeout * HZ/10 * nc->ko_count;
1348 1349
@@ -1359,10 +1360,10 @@ void request_timer_fn(unsigned long data)
1359 1360
1360 now = jiffies; 1361 now = jiffies;
1361 1362
1362 spin_lock_irq(&tconn->req_lock); 1363 spin_lock_irq(&connection->req_lock);
1363 req = find_oldest_request(tconn); 1364 req = find_oldest_request(connection);
1364 if (!req) { 1365 if (!req) {
1365 spin_unlock_irq(&tconn->req_lock); 1366 spin_unlock_irq(&connection->req_lock);
1366 mod_timer(&device->request_timer, now + et); 1367 mod_timer(&device->request_timer, now + et);
1367 return; 1368 return;
1368 } 1369 }
@@ -1385,7 +1386,7 @@ void request_timer_fn(unsigned long data)
1385 */ 1386 */
1386 if (ent && req->rq_state & RQ_NET_PENDING && 1387 if (ent && req->rq_state & RQ_NET_PENDING &&
1387 time_after(now, req->start_time + ent) && 1388 time_after(now, req->start_time + ent) &&
1388 !time_in_range(now, tconn->last_reconnect_jif, tconn->last_reconnect_jif + ent)) { 1389 !time_in_range(now, connection->last_reconnect_jif, connection->last_reconnect_jif + ent)) {
1389 dev_warn(DEV, "Remote failed to finish a request within ko-count * timeout\n"); 1390 dev_warn(DEV, "Remote failed to finish a request within ko-count * timeout\n");
1390 _drbd_set_state(_NS(device, conn, C_TIMEOUT), CS_VERBOSE | CS_HARD, NULL); 1391 _drbd_set_state(_NS(device, conn, C_TIMEOUT), CS_VERBOSE | CS_HARD, NULL);
1391 } 1392 }
@@ -1396,6 +1397,6 @@ void request_timer_fn(unsigned long data)
1396 __drbd_chk_io_error(device, DRBD_FORCE_DETACH); 1397 __drbd_chk_io_error(device, DRBD_FORCE_DETACH);
1397 } 1398 }
1398 nt = (time_after(now, req->start_time + et) ? now : req->start_time) + et; 1399 nt = (time_after(now, req->start_time + et) ? now : req->start_time) + et;
1399 spin_unlock_irq(&tconn->req_lock); 1400 spin_unlock_irq(&connection->req_lock);
1400 mod_timer(&device->request_timer, nt); 1401 mod_timer(&device->request_timer, nt);
1401} 1402}
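
The epoch handling threaded through the drbd_req.c hunks above reduces to three rules: every write is stamped with current_tle_nr, current_tle_writes counts the writes in the open epoch, and start_new_tl_epoch() closes a non-empty epoch by resetting that counter, bumping the epoch number, and waking the senders (the QUEUE_FOR_NET_WRITE case also closes an epoch once it outgrows max_epoch_size). A toy model of that bookkeeping follows; struct tl and submit_write() are hypothetical names, and a single-threaded caller stands in for code paths that would hold req_lock.

#include <stdatomic.h>
#include <stdio.h>

struct tl {
	int current_tle_writes;      /* writes in the currently open epoch */
	atomic_int current_tle_nr;   /* epoch id stamped into each request */
	int max_epoch_size;          /* close the epoch beyond this many writes */
};

static void start_new_tl_epoch(struct tl *tl)
{
	if (tl->current_tle_writes == 0)
		return;                            /* nothing to close */
	tl->current_tle_writes = 0;
	atomic_fetch_add(&tl->current_tle_nr, 1);
	/* the driver wakes the sender threads here */
}

static int submit_write(struct tl *tl)
{
	int epoch = atomic_load(&tl->current_tle_nr); /* tag the request */
	tl->current_tle_writes++;
	if (tl->current_tle_writes >= tl->max_epoch_size)
		start_new_tl_epoch(tl);            /* epoch outgrew the limit */
	return epoch;
}

int main(void)
{
	struct tl tl = { 0, 0, 3 };
	for (int i = 0; i < 7; i++)
		printf("write %d tagged with epoch %d\n", i, submit_write(&tl));
	return 0;
}

With max_epoch_size = 3, writes 0-2 land in epoch 0, writes 3-5 in epoch 1, and so on; in the driver the same counters live on the connection, only the epoch number is read locklessly, and the rest is serialized by req_lock.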
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
index 3e32a7b8c6d9..407404bb8807 100644
--- a/drivers/block/drbd/drbd_req.h
+++ b/drivers/block/drbd/drbd_req.h
@@ -275,7 +275,7 @@ struct bio_and_error {
275 int error; 275 int error;
276}; 276};
277 277
278extern void start_new_tl_epoch(struct drbd_tconn *tconn); 278extern void start_new_tl_epoch(struct drbd_connection *connection);
279extern void drbd_req_destroy(struct kref *kref); 279extern void drbd_req_destroy(struct kref *kref);
280extern void _req_may_be_done(struct drbd_request *req, 280extern void _req_may_be_done(struct drbd_request *req,
281 struct bio_and_error *m); 281 struct bio_and_error *m);
@@ -284,8 +284,8 @@ extern int __req_mod(struct drbd_request *req, enum drbd_req_event what,
284extern void complete_master_bio(struct drbd_device *device, 284extern void complete_master_bio(struct drbd_device *device,
285 struct bio_and_error *m); 285 struct bio_and_error *m);
286extern void request_timer_fn(unsigned long data); 286extern void request_timer_fn(unsigned long data);
287extern void tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what); 287extern void tl_restart(struct drbd_connection *connection, enum drbd_req_event what);
288extern void _tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what); 288extern void _tl_restart(struct drbd_connection *connection, enum drbd_req_event what);
289 289
290/* this is in drbd_main.c */ 290/* this is in drbd_main.c */
291extern void drbd_restart_request(struct drbd_request *req); 291extern void drbd_restart_request(struct drbd_request *req);
@@ -318,9 +318,9 @@ static inline int req_mod(struct drbd_request *req,
318 struct bio_and_error m; 318 struct bio_and_error m;
319 int rv; 319 int rv;
320 320
321 spin_lock_irqsave(&device->tconn->req_lock, flags); 321 spin_lock_irqsave(&device->connection->req_lock, flags);
322 rv = __req_mod(req, what, &m); 322 rv = __req_mod(req, what, &m);
323 spin_unlock_irqrestore(&device->tconn->req_lock, flags); 323 spin_unlock_irqrestore(&device->connection->req_lock, flags);
324 324
325 if (m.bio) 325 if (m.bio)
326 complete_master_bio(device, &m); 326 complete_master_bio(device, &m);
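
req_mod() above shows a pattern used throughout this driver: mutate request state under req_lock, record any pending completion in a local struct bio_and_error, and complete the master bio only after the lock is dropped, since completion work must not run under a spinlock. A compressed illustration, assuming a pthread mutex in place of the irq-safe spinlock and a stubbed-out state machine; the names mirror the driver but the bodies are placeholders.

#include <pthread.h>
#include <stdio.h>
#include <stddef.h>

struct bio_and_error { const char *bio; int error; };

static pthread_mutex_t req_lock = PTHREAD_MUTEX_INITIALIZER;

/* State transition happens here, under req_lock; a completion, if any,
 * is only recorded into *m, never executed while the lock is held. */
static int __req_mod(int event, struct bio_and_error *m)
{
	if (event == 1) {                /* request finished */
		m->bio = "master-bio";
		m->error = 0;
	}
	return 0;
}

static void complete_master_bio(const struct bio_and_error *m)
{
	printf("completing %s, error=%d\n", m->bio, m->error);
}

static int req_mod(int event)
{
	struct bio_and_error m = { NULL, 0 };
	int rv;

	pthread_mutex_lock(&req_lock);
	rv = __req_mod(event, &m);
	pthread_mutex_unlock(&req_lock);

	if (m.bio)                       /* act on side effects lock-free */
		complete_master_bio(&m);
	return rv;
}

int main(void)
{
	req_mod(1);
	return 0;
}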
diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c
index ec0df4063b81..ecc63cf85d85 100644
--- a/drivers/block/drbd/drbd_state.c
+++ b/drivers/block/drbd/drbd_state.c
@@ -51,7 +51,7 @@ static int w_after_state_ch(struct drbd_work *w, int unused);
51static void after_state_ch(struct drbd_device *device, union drbd_state os, 51static void after_state_ch(struct drbd_device *device, union drbd_state os,
52 union drbd_state ns, enum chg_state_flags flags); 52 union drbd_state ns, enum chg_state_flags flags);
53static enum drbd_state_rv is_valid_state(struct drbd_device *, union drbd_state); 53static enum drbd_state_rv is_valid_state(struct drbd_device *, union drbd_state);
54static enum drbd_state_rv is_valid_soft_transition(union drbd_state, union drbd_state, struct drbd_tconn *); 54static enum drbd_state_rv is_valid_soft_transition(union drbd_state, union drbd_state, struct drbd_connection *);
55static enum drbd_state_rv is_valid_transition(union drbd_state os, union drbd_state ns); 55static enum drbd_state_rv is_valid_transition(union drbd_state os, union drbd_state ns);
56static union drbd_state sanitize_state(struct drbd_device *device, union drbd_state ns, 56static union drbd_state sanitize_state(struct drbd_device *device, union drbd_state ns,
57 enum sanitize_state_warnings *warn); 57 enum sanitize_state_warnings *warn);
@@ -61,14 +61,14 @@ static inline bool is_susp(union drbd_state s)
61 return s.susp || s.susp_nod || s.susp_fen; 61 return s.susp || s.susp_nod || s.susp_fen;
62} 62}
63 63
64bool conn_all_vols_unconf(struct drbd_tconn *tconn) 64bool conn_all_vols_unconf(struct drbd_connection *connection)
65{ 65{
66 struct drbd_device *device; 66 struct drbd_device *device;
67 bool rv = true; 67 bool rv = true;
68 int vnr; 68 int vnr;
69 69
70 rcu_read_lock(); 70 rcu_read_lock();
71 idr_for_each_entry(&tconn->volumes, device, vnr) { 71 idr_for_each_entry(&connection->volumes, device, vnr) {
72 if (device->state.disk != D_DISKLESS || 72 if (device->state.disk != D_DISKLESS ||
73 device->state.conn != C_STANDALONE || 73 device->state.conn != C_STANDALONE ||
74 device->state.role != R_SECONDARY) { 74 device->state.role != R_SECONDARY) {
@@ -100,98 +100,98 @@ static enum drbd_role min_role(enum drbd_role role1, enum drbd_role role2)
100 return R_PRIMARY; 100 return R_PRIMARY;
101} 101}
102 102
103enum drbd_role conn_highest_role(struct drbd_tconn *tconn) 103enum drbd_role conn_highest_role(struct drbd_connection *connection)
104{ 104{
105 enum drbd_role role = R_UNKNOWN; 105 enum drbd_role role = R_UNKNOWN;
106 struct drbd_device *device; 106 struct drbd_device *device;
107 int vnr; 107 int vnr;
108 108
109 rcu_read_lock(); 109 rcu_read_lock();
110 idr_for_each_entry(&tconn->volumes, device, vnr) 110 idr_for_each_entry(&connection->volumes, device, vnr)
111 role = max_role(role, device->state.role); 111 role = max_role(role, device->state.role);
112 rcu_read_unlock(); 112 rcu_read_unlock();
113 113
114 return role; 114 return role;
115} 115}
116 116
117enum drbd_role conn_highest_peer(struct drbd_tconn *tconn) 117enum drbd_role conn_highest_peer(struct drbd_connection *connection)
118{ 118{
119 enum drbd_role peer = R_UNKNOWN; 119 enum drbd_role peer = R_UNKNOWN;
120 struct drbd_device *device; 120 struct drbd_device *device;
121 int vnr; 121 int vnr;
122 122
123 rcu_read_lock(); 123 rcu_read_lock();
124 idr_for_each_entry(&tconn->volumes, device, vnr) 124 idr_for_each_entry(&connection->volumes, device, vnr)
125 peer = max_role(peer, device->state.peer); 125 peer = max_role(peer, device->state.peer);
126 rcu_read_unlock(); 126 rcu_read_unlock();
127 127
128 return peer; 128 return peer;
129} 129}
130 130
131enum drbd_disk_state conn_highest_disk(struct drbd_tconn *tconn) 131enum drbd_disk_state conn_highest_disk(struct drbd_connection *connection)
132{ 132{
133 enum drbd_disk_state ds = D_DISKLESS; 133 enum drbd_disk_state ds = D_DISKLESS;
134 struct drbd_device *device; 134 struct drbd_device *device;
135 int vnr; 135 int vnr;
136 136
137 rcu_read_lock(); 137 rcu_read_lock();
138 idr_for_each_entry(&tconn->volumes, device, vnr) 138 idr_for_each_entry(&connection->volumes, device, vnr)
139 ds = max_t(enum drbd_disk_state, ds, device->state.disk); 139 ds = max_t(enum drbd_disk_state, ds, device->state.disk);
140 rcu_read_unlock(); 140 rcu_read_unlock();
141 141
142 return ds; 142 return ds;
143} 143}
144 144
145enum drbd_disk_state conn_lowest_disk(struct drbd_tconn *tconn) 145enum drbd_disk_state conn_lowest_disk(struct drbd_connection *connection)
146{ 146{
147 enum drbd_disk_state ds = D_MASK; 147 enum drbd_disk_state ds = D_MASK;
148 struct drbd_device *device; 148 struct drbd_device *device;
149 int vnr; 149 int vnr;
150 150
151 rcu_read_lock(); 151 rcu_read_lock();
152 idr_for_each_entry(&tconn->volumes, device, vnr) 152 idr_for_each_entry(&connection->volumes, device, vnr)
153 ds = min_t(enum drbd_disk_state, ds, device->state.disk); 153 ds = min_t(enum drbd_disk_state, ds, device->state.disk);
154 rcu_read_unlock(); 154 rcu_read_unlock();
155 155
156 return ds; 156 return ds;
157} 157}
158 158
159enum drbd_disk_state conn_highest_pdsk(struct drbd_tconn *tconn) 159enum drbd_disk_state conn_highest_pdsk(struct drbd_connection *connection)
160{ 160{
161 enum drbd_disk_state ds = D_DISKLESS; 161 enum drbd_disk_state ds = D_DISKLESS;
162 struct drbd_device *device; 162 struct drbd_device *device;
163 int vnr; 163 int vnr;
164 164
165 rcu_read_lock(); 165 rcu_read_lock();
166 idr_for_each_entry(&tconn->volumes, device, vnr) 166 idr_for_each_entry(&connection->volumes, device, vnr)
167 ds = max_t(enum drbd_disk_state, ds, device->state.pdsk); 167 ds = max_t(enum drbd_disk_state, ds, device->state.pdsk);
168 rcu_read_unlock(); 168 rcu_read_unlock();
169 169
170 return ds; 170 return ds;
171} 171}
172 172
173enum drbd_conns conn_lowest_conn(struct drbd_tconn *tconn) 173enum drbd_conns conn_lowest_conn(struct drbd_connection *connection)
174{ 174{
175 enum drbd_conns conn = C_MASK; 175 enum drbd_conns conn = C_MASK;
176 struct drbd_device *device; 176 struct drbd_device *device;
177 int vnr; 177 int vnr;
178 178
179 rcu_read_lock(); 179 rcu_read_lock();
180 idr_for_each_entry(&tconn->volumes, device, vnr) 180 idr_for_each_entry(&connection->volumes, device, vnr)
181 conn = min_t(enum drbd_conns, conn, device->state.conn); 181 conn = min_t(enum drbd_conns, conn, device->state.conn);
182 rcu_read_unlock(); 182 rcu_read_unlock();
183 183
184 return conn; 184 return conn;
185} 185}
186 186
187static bool no_peer_wf_report_params(struct drbd_tconn *tconn) 187static bool no_peer_wf_report_params(struct drbd_connection *connection)
188{ 188{
189 struct drbd_device *device; 189 struct drbd_device *device;
190 int vnr; 190 int vnr;
191 bool rv = true; 191 bool rv = true;
192 192
193 rcu_read_lock(); 193 rcu_read_lock();
194 idr_for_each_entry(&tconn->volumes, device, vnr) 194 idr_for_each_entry(&connection->volumes, device, vnr)
195 if (device->state.conn == C_WF_REPORT_PARAMS) { 195 if (device->state.conn == C_WF_REPORT_PARAMS) {
196 rv = false; 196 rv = false;
197 break; 197 break;
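
All of the conn_highest_*/conn_lowest_* helpers in this hunk are the same fold: iterate every volume on the connection under rcu_read_lock() and keep the maximum (or minimum) of one state field. A condensed version of conn_highest_disk() over a plain array, with a reduced enum standing in for drbd_disk_state and the array replacing the idr walk:

#include <stdio.h>

enum disk_state { D_DISKLESS, D_INCONSISTENT, D_OUTDATED, D_UP_TO_DATE };

/* Fold over all volumes, keeping the "best" disk state seen; D_DISKLESS
 * is the identity element for max, mirroring the kernel helper. */
static enum disk_state conn_highest_disk(const enum disk_state *vols, int n)
{
	enum disk_state ds = D_DISKLESS;
	for (int i = 0; i < n; i++)
		if (vols[i] > ds)
			ds = vols[i];
	return ds;
}

int main(void)
{
	enum disk_state vols[] = { D_UP_TO_DATE, D_OUTDATED, D_INCONSISTENT };
	printf("highest disk state: %d\n", (int)conn_highest_disk(vols, 3));
	return 0;
}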
@@ -237,10 +237,10 @@ drbd_change_state(struct drbd_device *device, enum chg_state_flags f,
237 union drbd_state ns; 237 union drbd_state ns;
238 enum drbd_state_rv rv; 238 enum drbd_state_rv rv;
239 239
240 spin_lock_irqsave(&device->tconn->req_lock, flags); 240 spin_lock_irqsave(&device->connection->req_lock, flags);
241 ns = apply_mask_val(drbd_read_state(device), mask, val); 241 ns = apply_mask_val(drbd_read_state(device), mask, val);
242 rv = _drbd_set_state(device, ns, f, NULL); 242 rv = _drbd_set_state(device, ns, f, NULL);
243 spin_unlock_irqrestore(&device->tconn->req_lock, flags); 243 spin_unlock_irqrestore(&device->connection->req_lock, flags);
244 244
245 return rv; 245 return rv;
246} 246}
@@ -271,7 +271,7 @@ _req_st_cond(struct drbd_device *device, union drbd_state mask,
271 if (test_and_clear_bit(CL_ST_CHG_FAIL, &device->flags)) 271 if (test_and_clear_bit(CL_ST_CHG_FAIL, &device->flags))
272 return SS_CW_FAILED_BY_PEER; 272 return SS_CW_FAILED_BY_PEER;
273 273
274 spin_lock_irqsave(&device->tconn->req_lock, flags); 274 spin_lock_irqsave(&device->connection->req_lock, flags);
275 os = drbd_read_state(device); 275 os = drbd_read_state(device);
276 ns = sanitize_state(device, apply_mask_val(os, mask, val), NULL); 276 ns = sanitize_state(device, apply_mask_val(os, mask, val), NULL);
277 rv = is_valid_transition(os, ns); 277 rv = is_valid_transition(os, ns);
@@ -283,12 +283,12 @@ _req_st_cond(struct drbd_device *device, union drbd_state mask,
283 if (rv == SS_UNKNOWN_ERROR) { 283 if (rv == SS_UNKNOWN_ERROR) {
284 rv = is_valid_state(device, ns); 284 rv = is_valid_state(device, ns);
285 if (rv >= SS_SUCCESS) { 285 if (rv >= SS_SUCCESS) {
286 rv = is_valid_soft_transition(os, ns, device->tconn); 286 rv = is_valid_soft_transition(os, ns, device->connection);
287 if (rv >= SS_SUCCESS) 287 if (rv >= SS_SUCCESS)
288 rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */ 288 rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */
289 } 289 }
290 } 290 }
291 spin_unlock_irqrestore(&device->tconn->req_lock, flags); 291 spin_unlock_irqrestore(&device->connection->req_lock, flags);
292 292
293 return rv; 293 return rv;
294} 294}
@@ -317,20 +317,20 @@ drbd_req_state(struct drbd_device *device, union drbd_state mask,
317 if (f & CS_SERIALIZE) 317 if (f & CS_SERIALIZE)
318 mutex_lock(device->state_mutex); 318 mutex_lock(device->state_mutex);
319 319
320 spin_lock_irqsave(&device->tconn->req_lock, flags); 320 spin_lock_irqsave(&device->connection->req_lock, flags);
321 os = drbd_read_state(device); 321 os = drbd_read_state(device);
322 ns = sanitize_state(device, apply_mask_val(os, mask, val), NULL); 322 ns = sanitize_state(device, apply_mask_val(os, mask, val), NULL);
323 rv = is_valid_transition(os, ns); 323 rv = is_valid_transition(os, ns);
324 if (rv < SS_SUCCESS) { 324 if (rv < SS_SUCCESS) {
325 spin_unlock_irqrestore(&device->tconn->req_lock, flags); 325 spin_unlock_irqrestore(&device->connection->req_lock, flags);
326 goto abort; 326 goto abort;
327 } 327 }
328 328
329 if (cl_wide_st_chg(device, os, ns)) { 329 if (cl_wide_st_chg(device, os, ns)) {
330 rv = is_valid_state(device, ns); 330 rv = is_valid_state(device, ns);
331 if (rv == SS_SUCCESS) 331 if (rv == SS_SUCCESS)
332 rv = is_valid_soft_transition(os, ns, device->tconn); 332 rv = is_valid_soft_transition(os, ns, device->connection);
333 spin_unlock_irqrestore(&device->tconn->req_lock, flags); 333 spin_unlock_irqrestore(&device->connection->req_lock, flags);
334 334
335 if (rv < SS_SUCCESS) { 335 if (rv < SS_SUCCESS) {
336 if (f & CS_VERBOSE) 336 if (f & CS_VERBOSE)
@@ -353,17 +353,17 @@ drbd_req_state(struct drbd_device *device, union drbd_state mask,
353 print_st_err(device, os, ns, rv); 353 print_st_err(device, os, ns, rv);
354 goto abort; 354 goto abort;
355 } 355 }
356 spin_lock_irqsave(&device->tconn->req_lock, flags); 356 spin_lock_irqsave(&device->connection->req_lock, flags);
357 ns = apply_mask_val(drbd_read_state(device), mask, val); 357 ns = apply_mask_val(drbd_read_state(device), mask, val);
358 rv = _drbd_set_state(device, ns, f, &done); 358 rv = _drbd_set_state(device, ns, f, &done);
359 } else { 359 } else {
360 rv = _drbd_set_state(device, ns, f, &done); 360 rv = _drbd_set_state(device, ns, f, &done);
361 } 361 }
362 362
363 spin_unlock_irqrestore(&device->tconn->req_lock, flags); 363 spin_unlock_irqrestore(&device->connection->req_lock, flags);
364 364
365 if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) { 365 if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) {
366 D_ASSERT(current != device->tconn->worker.task); 366 D_ASSERT(current != device->connection->worker.task);
367 wait_for_completion(&done); 367 wait_for_completion(&done);
368 } 368 }
369 369
@@ -480,7 +480,7 @@ static void drbd_pr_state_change(struct drbd_device *device, union drbd_state os
480 dev_info(DEV, "%s\n", pb); 480 dev_info(DEV, "%s\n", pb);
481} 481}
482 482
483static void conn_pr_state_change(struct drbd_tconn *tconn, union drbd_state os, union drbd_state ns, 483static void conn_pr_state_change(struct drbd_connection *connection, union drbd_state os, union drbd_state ns,
484 enum chg_state_flags flags) 484 enum chg_state_flags flags)
485{ 485{
486 char pb[300]; 486 char pb[300];
@@ -494,7 +494,7 @@ static void conn_pr_state_change(struct drbd_tconn *tconn, union drbd_state os,
494 is_susp(ns)); 494 is_susp(ns));
495 495
496 if (pbp != pb) 496 if (pbp != pb)
497 conn_info(tconn, "%s\n", pb); 497 conn_info(connection, "%s\n", pb);
498} 498}
499 499
500 500
@@ -519,12 +519,12 @@ is_valid_state(struct drbd_device *device, union drbd_state ns)
519 put_ldev(device); 519 put_ldev(device);
520 } 520 }
521 521
522 nc = rcu_dereference(device->tconn->net_conf); 522 nc = rcu_dereference(device->connection->net_conf);
523 if (nc) { 523 if (nc) {
524 if (!nc->two_primaries && ns.role == R_PRIMARY) { 524 if (!nc->two_primaries && ns.role == R_PRIMARY) {
525 if (ns.peer == R_PRIMARY) 525 if (ns.peer == R_PRIMARY)
526 rv = SS_TWO_PRIMARIES; 526 rv = SS_TWO_PRIMARIES;
527 else if (conn_highest_peer(device->tconn) == R_PRIMARY) 527 else if (conn_highest_peer(device->connection) == R_PRIMARY)
528 rv = SS_O_VOL_PEER_PRI; 528 rv = SS_O_VOL_PEER_PRI;
529 } 529 }
530 } 530 }
@@ -565,7 +565,7 @@ is_valid_state(struct drbd_device *device, union drbd_state ns)
565 rv = SS_NO_VERIFY_ALG; 565 rv = SS_NO_VERIFY_ALG;
566 566
567 else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && 567 else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
568 device->tconn->agreed_pro_version < 88) 568 device->connection->agreed_pro_version < 88)
569 rv = SS_NOT_SUPPORTED; 569 rv = SS_NOT_SUPPORTED;
570 570
571 else if (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE) 571 else if (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)
@@ -592,7 +592,7 @@ is_valid_state(struct drbd_device *device, union drbd_state ns)
592 * @os: old state. 592 * @os: old state.
593 */ 593 */
594static enum drbd_state_rv 594static enum drbd_state_rv
595is_valid_soft_transition(union drbd_state os, union drbd_state ns, struct drbd_tconn *tconn) 595is_valid_soft_transition(union drbd_state os, union drbd_state ns, struct drbd_connection *connection)
596{ 596{
597 enum drbd_state_rv rv = SS_SUCCESS; 597 enum drbd_state_rv rv = SS_SUCCESS;
598 598
@@ -620,7 +620,7 @@ is_valid_soft_transition(union drbd_state os, union drbd_state ns, struct drbd_t
620 620
621 /* While establishing a connection only allow cstate to change. 621 /* While establishing a connection only allow cstate to change.
622 Delay/refuse role changes, detach attach etc... */ 622 Delay/refuse role changes, detach attach etc... */
623 if (test_bit(STATE_SENT, &tconn->flags) && 623 if (test_bit(STATE_SENT, &connection->flags) &&
624 !(os.conn == C_WF_REPORT_PARAMS || 624 !(os.conn == C_WF_REPORT_PARAMS ||
625 (ns.conn == C_WF_REPORT_PARAMS && os.conn == C_WF_CONNECTION))) 625 (ns.conn == C_WF_REPORT_PARAMS && os.conn == C_WF_CONNECTION)))
626 rv = SS_IN_TRANSIENT_STATE; 626 rv = SS_IN_TRANSIENT_STATE;
@@ -871,7 +871,7 @@ static union drbd_state sanitize_state(struct drbd_device *device, union drbd_st
871 (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED)) 871 (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED))
872 ns.susp_fen = 1; /* Suspend IO while fence-peer handler runs (peer lost) */ 872 ns.susp_fen = 1; /* Suspend IO while fence-peer handler runs (peer lost) */
873 873
874 if (device->tconn->res_opts.on_no_data == OND_SUSPEND_IO && 874 if (device->connection->res_opts.on_no_data == OND_SUSPEND_IO &&
875 (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)) 875 (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
876 ns.susp_nod = 1; /* Suspend IO while no data available (no accessible data available) */ 876 ns.susp_nod = 1; /* Suspend IO while no data available (no accessible data available) */
877 877
@@ -899,7 +899,7 @@ void drbd_resume_al(struct drbd_device *device)
899/* helper for __drbd_set_state */ 899/* helper for __drbd_set_state */
900static void set_ov_position(struct drbd_device *device, enum drbd_conns cs) 900static void set_ov_position(struct drbd_device *device, enum drbd_conns cs)
901{ 901{
902 if (device->tconn->agreed_pro_version < 90) 902 if (device->connection->agreed_pro_version < 90)
903 device->ov_start_sector = 0; 903 device->ov_start_sector = 0;
904 device->rs_total = drbd_bm_bits(device); 904 device->rs_total = drbd_bm_bits(device);
905 device->ov_position = 0; 905 device->ov_position = 0;
@@ -962,9 +962,9 @@ __drbd_set_state(struct drbd_device *device, union drbd_state ns,
962 this happen...*/ 962 this happen...*/
963 963
964 if (is_valid_state(device, os) == rv) 964 if (is_valid_state(device, os) == rv)
965 rv = is_valid_soft_transition(os, ns, device->tconn); 965 rv = is_valid_soft_transition(os, ns, device->connection);
966 } else 966 } else
967 rv = is_valid_soft_transition(os, ns, device->tconn); 967 rv = is_valid_soft_transition(os, ns, device->connection);
968 } 968 }
969 969
970 if (rv < SS_SUCCESS) { 970 if (rv < SS_SUCCESS) {
@@ -981,7 +981,7 @@ __drbd_set_state(struct drbd_device *device, union drbd_state ns,
981 sanitize_state(). Only display it here if we were not called from 981 sanitize_state(). Only display it here if we were not called from
982 _conn_request_state() */ 982 _conn_request_state() */
983 if (!(flags & CS_DC_SUSP)) 983 if (!(flags & CS_DC_SUSP))
984 conn_pr_state_change(device->tconn, os, ns, (flags & ~CS_DC_MASK) | CS_DC_SUSP); 984 conn_pr_state_change(device->connection, os, ns, (flags & ~CS_DC_MASK) | CS_DC_SUSP);
985 985
986 /* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference 986 /* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
987 * on the ldev here, to be sure the transition -> D_DISKLESS resp. 987 * on the ldev here, to be sure the transition -> D_DISKLESS resp.
@@ -994,25 +994,25 @@ __drbd_set_state(struct drbd_device *device, union drbd_state ns,
994 did_remote = drbd_should_do_remote(device->state); 994 did_remote = drbd_should_do_remote(device->state);
995 device->state.i = ns.i; 995 device->state.i = ns.i;
996 should_do_remote = drbd_should_do_remote(device->state); 996 should_do_remote = drbd_should_do_remote(device->state);
997 device->tconn->susp = ns.susp; 997 device->connection->susp = ns.susp;
998 device->tconn->susp_nod = ns.susp_nod; 998 device->connection->susp_nod = ns.susp_nod;
999 device->tconn->susp_fen = ns.susp_fen; 999 device->connection->susp_fen = ns.susp_fen;
1000 1000
1001 /* put replicated vs not-replicated requests in separate epochs */ 1001 /* put replicated vs not-replicated requests in separate epochs */
1002 if (did_remote != should_do_remote) 1002 if (did_remote != should_do_remote)
1003 start_new_tl_epoch(device->tconn); 1003 start_new_tl_epoch(device->connection);
1004 1004
1005 if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING) 1005 if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING)
1006 drbd_print_uuids(device, "attached to UUIDs"); 1006 drbd_print_uuids(device, "attached to UUIDs");
1007 1007
1008 /* Wake up role changes that were delayed because of connection establishing */ 1008 /* Wake up role changes that were delayed because of connection establishing */
1009 if (os.conn == C_WF_REPORT_PARAMS && ns.conn != C_WF_REPORT_PARAMS && 1009 if (os.conn == C_WF_REPORT_PARAMS && ns.conn != C_WF_REPORT_PARAMS &&
1010 no_peer_wf_report_params(device->tconn)) 1010 no_peer_wf_report_params(device->connection))
1011 clear_bit(STATE_SENT, &device->tconn->flags); 1011 clear_bit(STATE_SENT, &device->connection->flags);
1012 1012
1013 wake_up(&device->misc_wait); 1013 wake_up(&device->misc_wait);
1014 wake_up(&device->state_wait); 1014 wake_up(&device->state_wait);
1015 wake_up(&device->tconn->ping_wait); 1015 wake_up(&device->connection->ping_wait);
1016 1016
1017 /* Aborted verify run, or we reached the stop sector. 1017 /* Aborted verify run, or we reached the stop sector.
1018 * Log the last position, unless end-of-device. */ 1018 * Log the last position, unless end-of-device. */
@@ -1101,21 +1101,21 @@ __drbd_set_state(struct drbd_device *device, union drbd_state ns,
1101 1101
1102 /* Receiver should clean up itself */ 1102 /* Receiver should clean up itself */
1103 if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING) 1103 if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING)
1104 drbd_thread_stop_nowait(&device->tconn->receiver); 1104 drbd_thread_stop_nowait(&device->connection->receiver);
1105 1105
1106 /* Now the receiver finished cleaning up itself, it should die */ 1106 /* Now the receiver finished cleaning up itself, it should die */
1107 if (os.conn != C_STANDALONE && ns.conn == C_STANDALONE) 1107 if (os.conn != C_STANDALONE && ns.conn == C_STANDALONE)
1108 drbd_thread_stop_nowait(&device->tconn->receiver); 1108 drbd_thread_stop_nowait(&device->connection->receiver);
1109 1109
1110 /* Upon network failure, we need to restart the receiver. */ 1110 /* Upon network failure, we need to restart the receiver. */
1111 if (os.conn > C_WF_CONNECTION && 1111 if (os.conn > C_WF_CONNECTION &&
1112 ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT) 1112 ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT)
1113 drbd_thread_restart_nowait(&device->tconn->receiver); 1113 drbd_thread_restart_nowait(&device->connection->receiver);
1114 1114
1115 /* Resume AL writing if we get a connection */ 1115 /* Resume AL writing if we get a connection */
1116 if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) { 1116 if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
1117 drbd_resume_al(device); 1117 drbd_resume_al(device);
1118 device->tconn->connect_cnt++; 1118 device->connection->connect_cnt++;
1119 } 1119 }
1120 1120
1121 /* remember last attach time so request_timer_fn() won't 1121 /* remember last attach time so request_timer_fn() won't
@@ -1133,7 +1133,7 @@ __drbd_set_state(struct drbd_device *device, union drbd_state ns,
1133 ascw->w.cb = w_after_state_ch; 1133 ascw->w.cb = w_after_state_ch;
1134 ascw->w.device = device; 1134 ascw->w.device = device;
1135 ascw->done = done; 1135 ascw->done = done;
1136 drbd_queue_work(&device->tconn->sender_work, &ascw->w); 1136 drbd_queue_work(&device->connection->sender_work, &ascw->w);
1137 } else { 1137 } else {
1138 dev_err(DEV, "Could not kmalloc an ascw\n"); 1138 dev_err(DEV, "Could not kmalloc an ascw\n");
1139 } 1139 }
@@ -1181,7 +1181,7 @@ int drbd_bitmap_io_from_worker(struct drbd_device *device,
1181{ 1181{
1182 int rv; 1182 int rv;
1183 1183
1184 D_ASSERT(current == device->tconn->worker.task); 1184 D_ASSERT(current == device->connection->worker.task);
1185 1185
1186 /* open coded non-blocking drbd_suspend_io(device); */ 1186 /* open coded non-blocking drbd_suspend_io(device); */
1187 set_bit(SUSPEND_IO, &device->flags); 1187 set_bit(SUSPEND_IO, &device->flags);
@@ -1228,47 +1228,47 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
1228 state change. This function might sleep */ 1228 state change. This function might sleep */
1229 1229
1230 if (ns.susp_nod) { 1230 if (ns.susp_nod) {
1231 struct drbd_tconn *tconn = device->tconn; 1231 struct drbd_connection *connection = device->connection;
1232 enum drbd_req_event what = NOTHING; 1232 enum drbd_req_event what = NOTHING;
1233 1233
1234 spin_lock_irq(&tconn->req_lock); 1234 spin_lock_irq(&connection->req_lock);
1235 if (os.conn < C_CONNECTED && conn_lowest_conn(tconn) >= C_CONNECTED) 1235 if (os.conn < C_CONNECTED && conn_lowest_conn(connection) >= C_CONNECTED)
1236 what = RESEND; 1236 what = RESEND;
1237 1237
1238 if ((os.disk == D_ATTACHING || os.disk == D_NEGOTIATING) && 1238 if ((os.disk == D_ATTACHING || os.disk == D_NEGOTIATING) &&
1239 conn_lowest_disk(tconn) > D_NEGOTIATING) 1239 conn_lowest_disk(connection) > D_NEGOTIATING)
1240 what = RESTART_FROZEN_DISK_IO; 1240 what = RESTART_FROZEN_DISK_IO;
1241 1241
1242 if (tconn->susp_nod && what != NOTHING) { 1242 if (connection->susp_nod && what != NOTHING) {
1243 _tl_restart(tconn, what); 1243 _tl_restart(connection, what);
1244 _conn_request_state(tconn, 1244 _conn_request_state(connection,
1245 (union drbd_state) { { .susp_nod = 1 } }, 1245 (union drbd_state) { { .susp_nod = 1 } },
1246 (union drbd_state) { { .susp_nod = 0 } }, 1246 (union drbd_state) { { .susp_nod = 0 } },
1247 CS_VERBOSE); 1247 CS_VERBOSE);
1248 } 1248 }
1249 spin_unlock_irq(&tconn->req_lock); 1249 spin_unlock_irq(&connection->req_lock);
1250 } 1250 }
1251 1251
1252 if (ns.susp_fen) { 1252 if (ns.susp_fen) {
1253 struct drbd_tconn *tconn = device->tconn; 1253 struct drbd_connection *connection = device->connection;
1254 1254
1255 spin_lock_irq(&tconn->req_lock); 1255 spin_lock_irq(&connection->req_lock);
1256 if (tconn->susp_fen && conn_lowest_conn(tconn) >= C_CONNECTED) { 1256 if (connection->susp_fen && conn_lowest_conn(connection) >= C_CONNECTED) {
1257 /* case2: The connection was established again: */ 1257 /* case2: The connection was established again: */
1258 struct drbd_device *odev; 1258 struct drbd_device *odev;
1259 int vnr; 1259 int vnr;
1260 1260
1261 rcu_read_lock(); 1261 rcu_read_lock();
1262 idr_for_each_entry(&tconn->volumes, odev, vnr) 1262 idr_for_each_entry(&connection->volumes, odev, vnr)
1263 clear_bit(NEW_CUR_UUID, &odev->flags); 1263 clear_bit(NEW_CUR_UUID, &odev->flags);
1264 rcu_read_unlock(); 1264 rcu_read_unlock();
1265 _tl_restart(tconn, RESEND); 1265 _tl_restart(connection, RESEND);
1266 _conn_request_state(tconn, 1266 _conn_request_state(connection,
1267 (union drbd_state) { { .susp_fen = 1 } }, 1267 (union drbd_state) { { .susp_fen = 1 } },
1268 (union drbd_state) { { .susp_fen = 0 } }, 1268 (union drbd_state) { { .susp_fen = 0 } },
1269 CS_VERBOSE); 1269 CS_VERBOSE);
1270 } 1270 }
1271 spin_unlock_irq(&tconn->req_lock); 1271 spin_unlock_irq(&connection->req_lock);
1272 } 1272 }
1273 1273
1274 /* Became sync source. With protocol >= 96, we still need to send out 1274 /* Became sync source. With protocol >= 96, we still need to send out
@@ -1277,7 +1277,7 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
1277 * which is unexpected. */ 1277 * which is unexpected. */
1278 if ((os.conn != C_SYNC_SOURCE && os.conn != C_PAUSED_SYNC_S) && 1278 if ((os.conn != C_SYNC_SOURCE && os.conn != C_PAUSED_SYNC_S) &&
1279 (ns.conn == C_SYNC_SOURCE || ns.conn == C_PAUSED_SYNC_S) && 1279 (ns.conn == C_SYNC_SOURCE || ns.conn == C_PAUSED_SYNC_S) &&
1280 device->tconn->agreed_pro_version >= 96 && get_ldev(device)) { 1280 device->connection->agreed_pro_version >= 96 && get_ldev(device)) {
1281 drbd_gen_and_send_sync_uuid(device); 1281 drbd_gen_and_send_sync_uuid(device);
1282 put_ldev(device); 1282 put_ldev(device);
1283 } 1283 }
@@ -1526,7 +1526,7 @@ static int w_after_conn_state_ch(struct drbd_work *w, int unused)
1526{ 1526{
1527 struct after_conn_state_chg_work *acscw = 1527 struct after_conn_state_chg_work *acscw =
1528 container_of(w, struct after_conn_state_chg_work, w); 1528 container_of(w, struct after_conn_state_chg_work, w);
1529 struct drbd_tconn *tconn = w->tconn; 1529 struct drbd_connection *connection = w->connection;
1530 enum drbd_conns oc = acscw->oc; 1530 enum drbd_conns oc = acscw->oc;
1531 union drbd_state ns_max = acscw->ns_max; 1531 union drbd_state ns_max = acscw->ns_max;
1532 struct drbd_device *device; 1532 struct drbd_device *device;
@@ -1536,18 +1536,18 @@ static int w_after_conn_state_ch(struct drbd_work *w, int unused)
1536 1536
1537 /* Upon network configuration, we need to start the receiver */ 1537 /* Upon network configuration, we need to start the receiver */
1538 if (oc == C_STANDALONE && ns_max.conn == C_UNCONNECTED) 1538 if (oc == C_STANDALONE && ns_max.conn == C_UNCONNECTED)
1539 drbd_thread_start(&tconn->receiver); 1539 drbd_thread_start(&connection->receiver);
1540 1540
1541 if (oc == C_DISCONNECTING && ns_max.conn == C_STANDALONE) { 1541 if (oc == C_DISCONNECTING && ns_max.conn == C_STANDALONE) {
1542 struct net_conf *old_conf; 1542 struct net_conf *old_conf;
1543 1543
1544 mutex_lock(&tconn->conf_update); 1544 mutex_lock(&connection->conf_update);
1545 old_conf = tconn->net_conf; 1545 old_conf = connection->net_conf;
1546 tconn->my_addr_len = 0; 1546 connection->my_addr_len = 0;
1547 tconn->peer_addr_len = 0; 1547 connection->peer_addr_len = 0;
1548 rcu_assign_pointer(tconn->net_conf, NULL); 1548 rcu_assign_pointer(connection->net_conf, NULL);
1549 conn_free_crypto(tconn); 1549 conn_free_crypto(connection);
1550 mutex_unlock(&tconn->conf_update); 1550 mutex_unlock(&connection->conf_update);
1551 1551
1552 synchronize_rcu(); 1552 synchronize_rcu();
1553 kfree(old_conf); 1553 kfree(old_conf);
@@ -1557,30 +1557,30 @@ static int w_after_conn_state_ch(struct drbd_work *w, int unused)
1557 /* case1: The outdate peer handler is successful: */ 1557 /* case1: The outdate peer handler is successful: */
1558 if (ns_max.pdsk <= D_OUTDATED) { 1558 if (ns_max.pdsk <= D_OUTDATED) {
1559 rcu_read_lock(); 1559 rcu_read_lock();
1560 idr_for_each_entry(&tconn->volumes, device, vnr) { 1560 idr_for_each_entry(&connection->volumes, device, vnr) {
1561 if (test_bit(NEW_CUR_UUID, &device->flags)) { 1561 if (test_bit(NEW_CUR_UUID, &device->flags)) {
1562 drbd_uuid_new_current(device); 1562 drbd_uuid_new_current(device);
1563 clear_bit(NEW_CUR_UUID, &device->flags); 1563 clear_bit(NEW_CUR_UUID, &device->flags);
1564 } 1564 }
1565 } 1565 }
1566 rcu_read_unlock(); 1566 rcu_read_unlock();
1567 spin_lock_irq(&tconn->req_lock); 1567 spin_lock_irq(&connection->req_lock);
1568 _tl_restart(tconn, CONNECTION_LOST_WHILE_PENDING); 1568 _tl_restart(connection, CONNECTION_LOST_WHILE_PENDING);
1569 _conn_request_state(tconn, 1569 _conn_request_state(connection,
1570 (union drbd_state) { { .susp_fen = 1 } }, 1570 (union drbd_state) { { .susp_fen = 1 } },
1571 (union drbd_state) { { .susp_fen = 0 } }, 1571 (union drbd_state) { { .susp_fen = 0 } },
1572 CS_VERBOSE); 1572 CS_VERBOSE);
1573 spin_unlock_irq(&tconn->req_lock); 1573 spin_unlock_irq(&connection->req_lock);
1574 } 1574 }
1575 } 1575 }
1576 kref_put(&tconn->kref, &conn_destroy); 1576 kref_put(&connection->kref, &conn_destroy);
1577 1577
1578 conn_md_sync(tconn); 1578 conn_md_sync(connection);
1579 1579
1580 return 0; 1580 return 0;
1581} 1581}
1582 1582
1583void conn_old_common_state(struct drbd_tconn *tconn, union drbd_state *pcs, enum chg_state_flags *pf) 1583void conn_old_common_state(struct drbd_connection *connection, union drbd_state *pcs, enum chg_state_flags *pf)
1584{ 1584{
1585 enum chg_state_flags flags = ~0; 1585 enum chg_state_flags flags = ~0;
1586 struct drbd_device *device; 1586 struct drbd_device *device;
@@ -1588,13 +1588,13 @@ void conn_old_common_state(struct drbd_tconn *tconn, union drbd_state *pcs, enum
1588 union drbd_dev_state os, cs = { 1588 union drbd_dev_state os, cs = {
1589 { .role = R_SECONDARY, 1589 { .role = R_SECONDARY,
1590 .peer = R_UNKNOWN, 1590 .peer = R_UNKNOWN,
1591 .conn = tconn->cstate, 1591 .conn = connection->cstate,
1592 .disk = D_DISKLESS, 1592 .disk = D_DISKLESS,
1593 .pdsk = D_UNKNOWN, 1593 .pdsk = D_UNKNOWN,
1594 } }; 1594 } };
1595 1595
1596 rcu_read_lock(); 1596 rcu_read_lock();
1597 idr_for_each_entry(&tconn->volumes, device, vnr) { 1597 idr_for_each_entry(&connection->volumes, device, vnr) {
1598 os = device->state; 1598 os = device->state;
1599 1599
1600 if (first_vol) { 1600 if (first_vol) {
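
conn_old_common_state() must report one connection-wide state although every volume carries its own, so it reduces over the volumes IDR under rcu_read_lock(). A stripped-down sketch of that reduction; struct vol and the "highest disk state" metric are illustrative stand-ins:

#include <linux/idr.h>
#include <linux/rcupdate.h>

struct vol { int disk_state; };

static int highest_disk_state(struct idr *volumes)
{
	struct vol *v;
	int vnr, max = 0;

	rcu_read_lock();
	idr_for_each_entry(volumes, v, vnr)
		if (v->disk_state > max)
			max = v->disk_state;
	rcu_read_unlock();

	return max;
}
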
@@ -1626,7 +1626,7 @@ void conn_old_common_state(struct drbd_tconn *tconn, union drbd_state *pcs, enum
1626} 1626}
1627 1627
1628static enum drbd_state_rv 1628static enum drbd_state_rv
1629conn_is_valid_transition(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val, 1629conn_is_valid_transition(struct drbd_connection *connection, union drbd_state mask, union drbd_state val,
1630 enum chg_state_flags flags) 1630 enum chg_state_flags flags)
1631{ 1631{
1632 enum drbd_state_rv rv = SS_SUCCESS; 1632 enum drbd_state_rv rv = SS_SUCCESS;
@@ -1635,7 +1635,7 @@ conn_is_valid_transition(struct drbd_tconn *tconn, union drbd_state mask, union
1635 int vnr; 1635 int vnr;
1636 1636
1637 rcu_read_lock(); 1637 rcu_read_lock();
1638 idr_for_each_entry(&tconn->volumes, device, vnr) { 1638 idr_for_each_entry(&connection->volumes, device, vnr) {
1639 os = drbd_read_state(device); 1639 os = drbd_read_state(device);
1640 ns = sanitize_state(device, apply_mask_val(os, mask, val), NULL); 1640 ns = sanitize_state(device, apply_mask_val(os, mask, val), NULL);
1641 1641
@@ -1653,9 +1653,9 @@ conn_is_valid_transition(struct drbd_tconn *tconn, union drbd_state mask, union
1653 rv = is_valid_state(device, ns); 1653 rv = is_valid_state(device, ns);
1654 if (rv < SS_SUCCESS) { 1654 if (rv < SS_SUCCESS) {
1655 if (is_valid_state(device, os) == rv) 1655 if (is_valid_state(device, os) == rv)
1656 rv = is_valid_soft_transition(os, ns, tconn); 1656 rv = is_valid_soft_transition(os, ns, connection);
1657 } else 1657 } else
1658 rv = is_valid_soft_transition(os, ns, tconn); 1658 rv = is_valid_soft_transition(os, ns, connection);
1659 } 1659 }
1660 if (rv < SS_SUCCESS) 1660 if (rv < SS_SUCCESS)
1661 break; 1661 break;
@@ -1669,7 +1669,7 @@ conn_is_valid_transition(struct drbd_tconn *tconn, union drbd_state mask, union
1669} 1669}
1670 1670
1671void 1671void
1672conn_set_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val, 1672conn_set_state(struct drbd_connection *connection, union drbd_state mask, union drbd_state val,
1673 union drbd_state *pns_min, union drbd_state *pns_max, enum chg_state_flags flags) 1673 union drbd_state *pns_min, union drbd_state *pns_max, enum chg_state_flags flags)
1674{ 1674{
1675 union drbd_state ns, os, ns_max = { }; 1675 union drbd_state ns, os, ns_max = { };
@@ -1688,14 +1688,14 @@ conn_set_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state
1688 /* remember last connect time so request_timer_fn() won't 1688 /* remember last connect time so request_timer_fn() won't
1689 * kill newly established sessions while we are still trying to thaw 1689 * kill newly established sessions while we are still trying to thaw
1690 * previously frozen IO */ 1690 * previously frozen IO */
1691 if (tconn->cstate != C_WF_REPORT_PARAMS && val.conn == C_WF_REPORT_PARAMS) 1691 if (connection->cstate != C_WF_REPORT_PARAMS && val.conn == C_WF_REPORT_PARAMS)
1692 tconn->last_reconnect_jif = jiffies; 1692 connection->last_reconnect_jif = jiffies;
1693 1693
1694 tconn->cstate = val.conn; 1694 connection->cstate = val.conn;
1695 } 1695 }
1696 1696
1697 rcu_read_lock(); 1697 rcu_read_lock();
1698 idr_for_each_entry(&tconn->volumes, device, vnr) { 1698 idr_for_each_entry(&connection->volumes, device, vnr) {
1699 number_of_volumes++; 1699 number_of_volumes++;
1700 os = drbd_read_state(device); 1700 os = drbd_read_state(device);
1701 ns = apply_mask_val(os, mask, val); 1701 ns = apply_mask_val(os, mask, val);
@@ -1733,39 +1733,39 @@ conn_set_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state
1733 } }; 1733 } };
1734 } 1734 }
1735 1735
1736 ns_min.susp = ns_max.susp = tconn->susp; 1736 ns_min.susp = ns_max.susp = connection->susp;
1737 ns_min.susp_nod = ns_max.susp_nod = tconn->susp_nod; 1737 ns_min.susp_nod = ns_max.susp_nod = connection->susp_nod;
1738 ns_min.susp_fen = ns_max.susp_fen = tconn->susp_fen; 1738 ns_min.susp_fen = ns_max.susp_fen = connection->susp_fen;
1739 1739
1740 *pns_min = ns_min; 1740 *pns_min = ns_min;
1741 *pns_max = ns_max; 1741 *pns_max = ns_max;
1742} 1742}
1743 1743
1744static enum drbd_state_rv 1744static enum drbd_state_rv
1745_conn_rq_cond(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val) 1745_conn_rq_cond(struct drbd_connection *connection, union drbd_state mask, union drbd_state val)
1746{ 1746{
1747 enum drbd_state_rv rv; 1747 enum drbd_state_rv rv;
1748 1748
1749 if (test_and_clear_bit(CONN_WD_ST_CHG_OKAY, &tconn->flags)) 1749 if (test_and_clear_bit(CONN_WD_ST_CHG_OKAY, &connection->flags))
1750 return SS_CW_SUCCESS; 1750 return SS_CW_SUCCESS;
1751 1751
1752 if (test_and_clear_bit(CONN_WD_ST_CHG_FAIL, &tconn->flags)) 1752 if (test_and_clear_bit(CONN_WD_ST_CHG_FAIL, &connection->flags))
1753 return SS_CW_FAILED_BY_PEER; 1753 return SS_CW_FAILED_BY_PEER;
1754 1754
1755 rv = conn_is_valid_transition(tconn, mask, val, 0); 1755 rv = conn_is_valid_transition(connection, mask, val, 0);
1756 if (rv == SS_SUCCESS && tconn->cstate == C_WF_REPORT_PARAMS) 1756 if (rv == SS_SUCCESS && connection->cstate == C_WF_REPORT_PARAMS)
1757 rv = SS_UNKNOWN_ERROR; /* continue waiting */ 1757 rv = SS_UNKNOWN_ERROR; /* continue waiting */
1758 1758
1759 return rv; 1759 return rv;
1760} 1760}
1761 1761
1762enum drbd_state_rv 1762enum drbd_state_rv
1763_conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val, 1763_conn_request_state(struct drbd_connection *connection, union drbd_state mask, union drbd_state val,
1764 enum chg_state_flags flags) 1764 enum chg_state_flags flags)
1765{ 1765{
1766 enum drbd_state_rv rv = SS_SUCCESS; 1766 enum drbd_state_rv rv = SS_SUCCESS;
1767 struct after_conn_state_chg_work *acscw; 1767 struct after_conn_state_chg_work *acscw;
1768 enum drbd_conns oc = tconn->cstate; 1768 enum drbd_conns oc = connection->cstate;
1769 union drbd_state ns_max, ns_min, os; 1769 union drbd_state ns_max, ns_min, os;
1770 bool have_mutex = false; 1770 bool have_mutex = false;
1771 1771
@@ -1775,7 +1775,7 @@ _conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_
1775 goto abort; 1775 goto abort;
1776 } 1776 }
1777 1777
1778 rv = conn_is_valid_transition(tconn, mask, val, flags); 1778 rv = conn_is_valid_transition(connection, mask, val, flags);
1779 if (rv < SS_SUCCESS) 1779 if (rv < SS_SUCCESS)
1780 goto abort; 1780 goto abort;
1781 1781
@@ -1785,38 +1785,38 @@ _conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_
1785 /* This will be a cluster-wide state change. 1785 /* This will be a cluster-wide state change.
1786 * Need to give up the spinlock, grab the mutex, 1786 * Need to give up the spinlock, grab the mutex,
1787 * then send the state change request, ... */ 1787 * then send the state change request, ... */
1788 spin_unlock_irq(&tconn->req_lock); 1788 spin_unlock_irq(&connection->req_lock);
1789 mutex_lock(&tconn->cstate_mutex); 1789 mutex_lock(&connection->cstate_mutex);
1790 have_mutex = true; 1790 have_mutex = true;
1791 1791
1792 set_bit(CONN_WD_ST_CHG_REQ, &tconn->flags); 1792 set_bit(CONN_WD_ST_CHG_REQ, &connection->flags);
1793 if (conn_send_state_req(tconn, mask, val)) { 1793 if (conn_send_state_req(connection, mask, val)) {
1794 /* sending failed. */ 1794 /* sending failed. */
1795 clear_bit(CONN_WD_ST_CHG_REQ, &tconn->flags); 1795 clear_bit(CONN_WD_ST_CHG_REQ, &connection->flags);
1796 rv = SS_CW_FAILED_BY_PEER; 1796 rv = SS_CW_FAILED_BY_PEER;
1797 /* need to re-acquire the spin lock, though */ 1797 /* need to re-acquire the spin lock, though */
1798 goto abort_unlocked; 1798 goto abort_unlocked;
1799 } 1799 }
1800 1800
1801 if (val.conn == C_DISCONNECTING) 1801 if (val.conn == C_DISCONNECTING)
1802 set_bit(DISCONNECT_SENT, &tconn->flags); 1802 set_bit(DISCONNECT_SENT, &connection->flags);
1803 1803
1804 /* ... and re-acquire the spinlock. 1804 /* ... and re-acquire the spinlock.
1805 * If _conn_rq_cond() returned >= SS_SUCCESS, we must call 1805 * If _conn_rq_cond() returned >= SS_SUCCESS, we must call
1806 * conn_set_state() within the same spinlock. */ 1806 * conn_set_state() within the same spinlock. */
1807 spin_lock_irq(&tconn->req_lock); 1807 spin_lock_irq(&connection->req_lock);
1808 wait_event_lock_irq(tconn->ping_wait, 1808 wait_event_lock_irq(connection->ping_wait,
1809 (rv = _conn_rq_cond(tconn, mask, val)), 1809 (rv = _conn_rq_cond(connection, mask, val)),
1810 tconn->req_lock); 1810 connection->req_lock);
1811 clear_bit(CONN_WD_ST_CHG_REQ, &tconn->flags); 1811 clear_bit(CONN_WD_ST_CHG_REQ, &connection->flags);
1812 if (rv < SS_SUCCESS) 1812 if (rv < SS_SUCCESS)
1813 goto abort; 1813 goto abort;
1814 } 1814 }
1815 1815
1816 conn_old_common_state(tconn, &os, &flags); 1816 conn_old_common_state(connection, &os, &flags);
1817 flags |= CS_DC_SUSP; 1817 flags |= CS_DC_SUSP;
1818 conn_set_state(tconn, mask, val, &ns_min, &ns_max, flags); 1818 conn_set_state(connection, mask, val, &ns_min, &ns_max, flags);
1819 conn_pr_state_change(tconn, os, ns_max, flags); 1819 conn_pr_state_change(connection, os, ns_max, flags);
1820 1820
1821 acscw = kmalloc(sizeof(*acscw), GFP_ATOMIC); 1821 acscw = kmalloc(sizeof(*acscw), GFP_ATOMIC);
1822 if (acscw) { 1822 if (acscw) {
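
The heart of the hunk above is lock juggling: the caller holds req_lock (a spinlock), but a cluster-wide change must sleep, so the code drops the spinlock, takes cstate_mutex, sends the request, re-takes the spinlock, and then waits with wait_event_lock_irq(), which releases and re-acquires the lock around each sleep. A compressed sketch of that ordering; the helpers are stand-ins and all error paths are omitted:

#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

static DEFINE_SPINLOCK(req_lock);
static DEFINE_MUTEX(cstate_mutex);
static DECLARE_WAIT_QUEUE_HEAD(ping_wait);

static bool peer_replied(void) { return true; }	/* stand-in for the flag checks */
static void send_request(void) { }		/* stand-in for the socket send */

static void cluster_wide_change(void)	/* entered with req_lock held */
{
	spin_unlock_irq(&req_lock);
	mutex_lock(&cstate_mutex);	/* may sleep: only legal now */

	send_request();

	spin_lock_irq(&req_lock);
	/* drops and re-takes req_lock around every sleep */
	wait_event_lock_irq(ping_wait, peer_replied(), req_lock);

	spin_unlock_irq(&req_lock);	/* mutex_unlock() may not run under a spinlock */
	mutex_unlock(&cstate_mutex);
	spin_lock_irq(&req_lock);	/* leave with req_lock held, as entered */
}
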
@@ -1825,39 +1825,39 @@ _conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_
1825 acscw->ns_max = ns_max; 1825 acscw->ns_max = ns_max;
1826 acscw->flags = flags; 1826 acscw->flags = flags;
1827 acscw->w.cb = w_after_conn_state_ch; 1827 acscw->w.cb = w_after_conn_state_ch;
1828 kref_get(&tconn->kref); 1828 kref_get(&connection->kref);
1829 acscw->w.tconn = tconn; 1829 acscw->w.connection = connection;
1830 drbd_queue_work(&tconn->sender_work, &acscw->w); 1830 drbd_queue_work(&connection->sender_work, &acscw->w);
1831 } else { 1831 } else {
1832 conn_err(tconn, "Could not kmalloc an acscw\n"); 1832 conn_err(connection, "Could not kmalloc an acscw\n");
1833 } 1833 }
1834 1834
1835 abort: 1835 abort:
1836 if (have_mutex) { 1836 if (have_mutex) {
1837 /* mutex_unlock() "... must not be used in interrupt context.", 1837 /* mutex_unlock() "... must not be used in interrupt context.",
1838 * so give up the spinlock, then re-acquire it */ 1838 * so give up the spinlock, then re-acquire it */
1839 spin_unlock_irq(&tconn->req_lock); 1839 spin_unlock_irq(&connection->req_lock);
1840 abort_unlocked: 1840 abort_unlocked:
1841 mutex_unlock(&tconn->cstate_mutex); 1841 mutex_unlock(&connection->cstate_mutex);
1842 spin_lock_irq(&tconn->req_lock); 1842 spin_lock_irq(&connection->req_lock);
1843 } 1843 }
1844 if (rv < SS_SUCCESS && flags & CS_VERBOSE) { 1844 if (rv < SS_SUCCESS && flags & CS_VERBOSE) {
1845 conn_err(tconn, "State change failed: %s\n", drbd_set_st_err_str(rv)); 1845 conn_err(connection, "State change failed: %s\n", drbd_set_st_err_str(rv));
1846 conn_err(tconn, " mask = 0x%x val = 0x%x\n", mask.i, val.i); 1846 conn_err(connection, " mask = 0x%x val = 0x%x\n", mask.i, val.i);
1847 conn_err(tconn, " old_conn:%s wanted_conn:%s\n", drbd_conn_str(oc), drbd_conn_str(val.conn)); 1847 conn_err(connection, " old_conn:%s wanted_conn:%s\n", drbd_conn_str(oc), drbd_conn_str(val.conn));
1848 } 1848 }
1849 return rv; 1849 return rv;
1850} 1850}
1851 1851
1852enum drbd_state_rv 1852enum drbd_state_rv
1853conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val, 1853conn_request_state(struct drbd_connection *connection, union drbd_state mask, union drbd_state val,
1854 enum chg_state_flags flags) 1854 enum chg_state_flags flags)
1855{ 1855{
1856 enum drbd_state_rv rv; 1856 enum drbd_state_rv rv;
1857 1857
1858 spin_lock_irq(&tconn->req_lock); 1858 spin_lock_irq(&connection->req_lock);
1859 rv = _conn_request_state(tconn, mask, val, flags); 1859 rv = _conn_request_state(connection, mask, val, flags);
1860 spin_unlock_irq(&tconn->req_lock); 1860 spin_unlock_irq(&connection->req_lock);
1861 1861
1862 return rv; 1862 return rv;
1863} 1863}
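
conn_request_state()/_conn_request_state() follow the usual kernel pairing convention: the underscore-less function takes the lock and delegates to the underscore-prefixed one, which expects the lock to be held already and can therefore also be called from contexts that own it. A generic sketch of the pairing, with placeholder names:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(req_lock);

/* Caller must hold req_lock. */
static int _do_change(int val)
{
	return val;	/* the real work happens under the lock */
}

static int do_change(int val)
{
	int rv;

	spin_lock_irq(&req_lock);
	rv = _do_change(val);
	spin_unlock_irq(&req_lock);

	return rv;
}
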
diff --git a/drivers/block/drbd/drbd_state.h b/drivers/block/drbd/drbd_state.h
index 033668a64b45..cc41605ba21c 100644
--- a/drivers/block/drbd/drbd_state.h
+++ b/drivers/block/drbd/drbd_state.h
@@ -2,7 +2,7 @@
2#define DRBD_STATE_H 2#define DRBD_STATE_H
3 3
4struct drbd_device; 4struct drbd_device;
5struct drbd_tconn; 5struct drbd_connection;
6 6
7/** 7/**
8 * DOC: DRBD State macros 8 * DOC: DRBD State macros
@@ -124,15 +124,15 @@ extern void print_st_err(struct drbd_device *, union drbd_state,
124 union drbd_state, int); 124 union drbd_state, int);
125 125
126enum drbd_state_rv 126enum drbd_state_rv
127_conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val, 127_conn_request_state(struct drbd_connection *connection, union drbd_state mask, union drbd_state val,
128 enum chg_state_flags flags); 128 enum chg_state_flags flags);
129 129
130enum drbd_state_rv 130enum drbd_state_rv
131conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val, 131conn_request_state(struct drbd_connection *connection, union drbd_state mask, union drbd_state val,
132 enum chg_state_flags flags); 132 enum chg_state_flags flags);
133 133
134extern void drbd_resume_al(struct drbd_device *device); 134extern void drbd_resume_al(struct drbd_device *device);
135extern bool conn_all_vols_unconf(struct drbd_tconn *tconn); 135extern bool conn_all_vols_unconf(struct drbd_connection *connection);
136 136
137/** 137/**
138 * drbd_request_state() - Request a state change 138 * drbd_request_state() - Request a state change
@@ -151,11 +151,11 @@ static inline int drbd_request_state(struct drbd_device *device,
151 return _drbd_request_state(device, mask, val, CS_VERBOSE + CS_ORDERED); 151 return _drbd_request_state(device, mask, val, CS_VERBOSE + CS_ORDERED);
152} 152}
153 153
154enum drbd_role conn_highest_role(struct drbd_tconn *tconn); 154enum drbd_role conn_highest_role(struct drbd_connection *connection);
155enum drbd_role conn_highest_peer(struct drbd_tconn *tconn); 155enum drbd_role conn_highest_peer(struct drbd_connection *connection);
156enum drbd_disk_state conn_highest_disk(struct drbd_tconn *tconn); 156enum drbd_disk_state conn_highest_disk(struct drbd_connection *connection);
157enum drbd_disk_state conn_lowest_disk(struct drbd_tconn *tconn); 157enum drbd_disk_state conn_lowest_disk(struct drbd_connection *connection);
158enum drbd_disk_state conn_highest_pdsk(struct drbd_tconn *tconn); 158enum drbd_disk_state conn_highest_pdsk(struct drbd_connection *connection);
159enum drbd_conns conn_lowest_conn(struct drbd_tconn *tconn); 159enum drbd_conns conn_lowest_conn(struct drbd_connection *connection);
160 160
161#endif 161#endif
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 1d230b506c86..5b3f12a42230 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -102,16 +102,16 @@ static void drbd_endio_read_sec_final(struct drbd_peer_request *peer_req) __rele
102 unsigned long flags = 0; 102 unsigned long flags = 0;
103 struct drbd_device *device = peer_req->w.device; 103 struct drbd_device *device = peer_req->w.device;
104 104
105 spin_lock_irqsave(&device->tconn->req_lock, flags); 105 spin_lock_irqsave(&device->connection->req_lock, flags);
106 device->read_cnt += peer_req->i.size >> 9; 106 device->read_cnt += peer_req->i.size >> 9;
107 list_del(&peer_req->w.list); 107 list_del(&peer_req->w.list);
108 if (list_empty(&device->read_ee)) 108 if (list_empty(&device->read_ee))
109 wake_up(&device->ee_wait); 109 wake_up(&device->ee_wait);
110 if (test_bit(__EE_WAS_ERROR, &peer_req->flags)) 110 if (test_bit(__EE_WAS_ERROR, &peer_req->flags))
111 __drbd_chk_io_error(device, DRBD_READ_ERROR); 111 __drbd_chk_io_error(device, DRBD_READ_ERROR);
112 spin_unlock_irqrestore(&device->tconn->req_lock, flags); 112 spin_unlock_irqrestore(&device->connection->req_lock, flags);
113 113
114 drbd_queue_work(&device->tconn->sender_work, &peer_req->w); 114 drbd_queue_work(&device->connection->sender_work, &peer_req->w);
115 put_ldev(device); 115 put_ldev(device);
116} 116}
117 117
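
drbd_endio_read_sec_final() runs in bio completion context, which can be hard-IRQ context, so it uses the irqsave/irqrestore spinlock variants: the saved flags restore whatever interrupt state the callback was entered with, instead of unconditionally re-enabling IRQs as spin_unlock_irq() would. A sketch of the pattern; the counter mirrors read_cnt above:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(req_lock);
static unsigned long read_cnt;

/* Safe whether the caller runs with IRQs enabled or disabled. */
static void endio_final(unsigned int bytes)
{
	unsigned long flags;

	spin_lock_irqsave(&req_lock, flags);
	read_cnt += bytes >> 9;	/* account in 512-byte sectors */
	spin_unlock_irqrestore(&req_lock, flags);
}
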
@@ -134,7 +134,7 @@ static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __rel
134 do_al_complete_io = peer_req->flags & EE_CALL_AL_COMPLETE_IO; 134 do_al_complete_io = peer_req->flags & EE_CALL_AL_COMPLETE_IO;
135 block_id = peer_req->block_id; 135 block_id = peer_req->block_id;
136 136
137 spin_lock_irqsave(&device->tconn->req_lock, flags); 137 spin_lock_irqsave(&device->connection->req_lock, flags);
138 device->writ_cnt += peer_req->i.size >> 9; 138 device->writ_cnt += peer_req->i.size >> 9;
139 list_move_tail(&peer_req->w.list, &device->done_ee); 139 list_move_tail(&peer_req->w.list, &device->done_ee);
140 140
@@ -150,7 +150,7 @@ static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __rel
150 150
151 if (test_bit(__EE_WAS_ERROR, &peer_req->flags)) 151 if (test_bit(__EE_WAS_ERROR, &peer_req->flags))
152 __drbd_chk_io_error(device, DRBD_WRITE_ERROR); 152 __drbd_chk_io_error(device, DRBD_WRITE_ERROR);
153 spin_unlock_irqrestore(&device->tconn->req_lock, flags); 153 spin_unlock_irqrestore(&device->connection->req_lock, flags);
154 154
155 if (block_id == ID_SYNCER) 155 if (block_id == ID_SYNCER)
156 drbd_rs_complete_io(device, i.sector); 156 drbd_rs_complete_io(device, i.sector);
@@ -161,7 +161,7 @@ static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __rel
161 if (do_al_complete_io) 161 if (do_al_complete_io)
162 drbd_al_complete_io(device, &i); 162 drbd_al_complete_io(device, &i);
163 163
164 wake_asender(device->tconn); 164 wake_asender(device->connection);
165 put_ldev(device); 165 put_ldev(device);
166} 166}
167 167
@@ -273,9 +273,9 @@ void drbd_request_endio(struct bio *bio, int error)
273 req->private_bio = ERR_PTR(error); 273 req->private_bio = ERR_PTR(error);
274 274
275 /* not req_mod(), we need irqsave here! */ 275 /* not req_mod(), we need irqsave here! */
276 spin_lock_irqsave(&device->tconn->req_lock, flags); 276 spin_lock_irqsave(&device->connection->req_lock, flags);
277 __req_mod(req, what, &m); 277 __req_mod(req, what, &m);
278 spin_unlock_irqrestore(&device->tconn->req_lock, flags); 278 spin_unlock_irqrestore(&device->connection->req_lock, flags);
279 put_ldev(device); 279 put_ldev(device);
280 280
281 if (m.bio) 281 if (m.bio)
@@ -345,12 +345,12 @@ static int w_e_send_csum(struct drbd_work *w, int cancel)
345 if (unlikely((peer_req->flags & EE_WAS_ERROR) != 0)) 345 if (unlikely((peer_req->flags & EE_WAS_ERROR) != 0))
346 goto out; 346 goto out;
347 347
348 digest_size = crypto_hash_digestsize(device->tconn->csums_tfm); 348 digest_size = crypto_hash_digestsize(device->connection->csums_tfm);
349 digest = kmalloc(digest_size, GFP_NOIO); 349 digest = kmalloc(digest_size, GFP_NOIO);
350 if (digest) { 350 if (digest) {
351 sector_t sector = peer_req->i.sector; 351 sector_t sector = peer_req->i.sector;
352 unsigned int size = peer_req->i.size; 352 unsigned int size = peer_req->i.size;
353 drbd_csum_ee(device, device->tconn->csums_tfm, peer_req, digest); 353 drbd_csum_ee(device, device->connection->csums_tfm, peer_req, digest);
354 /* Free peer_req and pages before send. 354 /* Free peer_req and pages before send.
355 * In case we block on congestion, we could otherwise run into 355 * In case we block on congestion, we could otherwise run into
356 * some distributed deadlock, if the other side blocks on 356 * some distributed deadlock, if the other side blocks on
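
The comment cut off by the hunk boundary is the point of w_e_send_csum(): the digest is copied out and the peer request, with its page references, is released before the potentially blocking network send, because holding pages while blocked on a congested socket can deadlock against a peer doing the same. A sketch of that ordering; the types and helpers are hypothetical stand-ins, not the driver's:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>

struct peer_req { char data[64]; /* plus page references in the real thing */ };

static int send_digest(const void *digest, size_t len)
{
	return 0;	/* stand-in; the real send may block on the socket */
}

static int send_csum(struct peer_req *req, size_t digest_len)
{
	void *digest = kmalloc(digest_len, GFP_NOIO);
	int err;

	if (!digest)
		return -ENOMEM;
	memcpy(digest, req->data, digest_len);	/* stand-in for drbd_csum_ee() */

	kfree(req);	/* release buffers before we may block ... */
	err = send_digest(digest, digest_len);	/* ... on a congested socket */
	kfree(digest);

	return err;
}
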
@@ -397,9 +397,9 @@ static int read_for_csum(struct drbd_device *device, sector_t sector, int size)
397 goto defer; 397 goto defer;
398 398
399 peer_req->w.cb = w_e_send_csum; 399 peer_req->w.cb = w_e_send_csum;
400 spin_lock_irq(&device->tconn->req_lock); 400 spin_lock_irq(&device->connection->req_lock);
401 list_add(&peer_req->w.list, &device->read_ee); 401 list_add(&peer_req->w.list, &device->read_ee);
402 spin_unlock_irq(&device->tconn->req_lock); 402 spin_unlock_irq(&device->connection->req_lock);
403 403
404 atomic_add(size >> 9, &device->rs_sect_ev); 404 atomic_add(size >> 9, &device->rs_sect_ev);
405 if (drbd_submit_peer_request(device, peer_req, READ, DRBD_FAULT_RS_RD) == 0) 405 if (drbd_submit_peer_request(device, peer_req, READ, DRBD_FAULT_RS_RD) == 0)
@@ -409,9 +409,9 @@ static int read_for_csum(struct drbd_device *device, sector_t sector, int size)
409 * because bio_add_page failed (probably broken lower level driver), 409 * because bio_add_page failed (probably broken lower level driver),
410 * retry may or may not help. 410 * retry may or may not help.
411 * If it does not, you may need to force disconnect. */ 411 * If it does not, you may need to force disconnect. */
412 spin_lock_irq(&device->tconn->req_lock); 412 spin_lock_irq(&device->connection->req_lock);
413 list_del(&peer_req->w.list); 413 list_del(&peer_req->w.list);
414 spin_unlock_irq(&device->tconn->req_lock); 414 spin_unlock_irq(&device->connection->req_lock);
415 415
416 drbd_free_peer_req(device, peer_req); 416 drbd_free_peer_req(device, peer_req);
417defer: 417defer:
@@ -439,7 +439,7 @@ void resync_timer_fn(unsigned long data)
439 struct drbd_device *device = (struct drbd_device *) data; 439 struct drbd_device *device = (struct drbd_device *) data;
440 440
441 if (list_empty(&device->resync_work.list)) 441 if (list_empty(&device->resync_work.list))
442 drbd_queue_work(&device->tconn->sender_work, &device->resync_work); 442 drbd_queue_work(&device->connection->sender_work, &device->resync_work);
443} 443}
444 444
445static void fifo_set(struct fifo_buffer *fb, int value) 445static void fifo_set(struct fifo_buffer *fb, int value)
@@ -597,15 +597,15 @@ int w_make_resync_request(struct drbd_work *w, int cancel)
597 597
598 for (i = 0; i < number; i++) { 598 for (i = 0; i < number; i++) {
599 /* Stop generating RS requests, when half of the send buffer is filled */ 599 /* Stop generating RS requests, when half of the send buffer is filled */
600 mutex_lock(&device->tconn->data.mutex); 600 mutex_lock(&device->connection->data.mutex);
601 if (device->tconn->data.socket) { 601 if (device->connection->data.socket) {
602 queued = device->tconn->data.socket->sk->sk_wmem_queued; 602 queued = device->connection->data.socket->sk->sk_wmem_queued;
603 sndbuf = device->tconn->data.socket->sk->sk_sndbuf; 603 sndbuf = device->connection->data.socket->sk->sk_sndbuf;
604 } else { 604 } else {
605 queued = 1; 605 queued = 1;
606 sndbuf = 0; 606 sndbuf = 0;
607 } 607 }
608 mutex_unlock(&device->tconn->data.mutex); 608 mutex_unlock(&device->connection->data.mutex);
609 if (queued > sndbuf / 2) 609 if (queued > sndbuf / 2)
610 goto requeue; 610 goto requeue;
611 611
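
The first stanza of the loop above is flow control: resync request generation backs off ("goto requeue") once the socket has more than half of its send buffer queued, so resync traffic cannot crowd out everything else. The check, reduced to its essentials; in the driver the socket is sampled under data.mutex, which is omitted here:

#include <linux/types.h>
#include <net/sock.h>

/* true: back off and retry later; false: OK to queue another request */
static bool send_buffer_half_full(struct sock *sk)
{
	if (!sk)
		return true;	/* no socket: definitely requeue */

	return sk->sk_wmem_queued > sk->sk_sndbuf / 2;
}
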
@@ -675,7 +675,7 @@ next_sector:
675 /* adjust very last sectors, in case we are oddly sized */ 675 /* adjust very last sectors, in case we are oddly sized */
676 if (sector + (size>>9) > capacity) 676 if (sector + (size>>9) > capacity)
677 size = (capacity-sector)<<9; 677 size = (capacity-sector)<<9;
678 if (device->tconn->agreed_pro_version >= 89 && device->tconn->csums_tfm) { 678 if (device->connection->agreed_pro_version >= 89 && device->connection->csums_tfm) {
679 switch (read_for_csum(device, sector, size)) { 679 switch (read_for_csum(device, sector, size)) {
680 case -EIO: /* Disk failure */ 680 case -EIO: /* Disk failure */
681 put_ldev(device); 681 put_ldev(device);
@@ -800,12 +800,12 @@ static int w_resync_finished(struct drbd_work *w, int cancel)
800 800
801static void ping_peer(struct drbd_device *device) 801static void ping_peer(struct drbd_device *device)
802{ 802{
803 struct drbd_tconn *tconn = device->tconn; 803 struct drbd_connection *connection = device->connection;
804 804
805 clear_bit(GOT_PING_ACK, &tconn->flags); 805 clear_bit(GOT_PING_ACK, &connection->flags);
806 request_ping(tconn); 806 request_ping(connection);
807 wait_event(tconn->ping_wait, 807 wait_event(connection->ping_wait,
808 test_bit(GOT_PING_ACK, &tconn->flags) || device->state.conn < C_CONNECTED); 808 test_bit(GOT_PING_ACK, &connection->flags) || device->state.conn < C_CONNECTED);
809} 809}
810 810
811int drbd_resync_finished(struct drbd_device *device) 811int drbd_resync_finished(struct drbd_device *device)
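
ping_peer() above is a small request/acknowledge handshake built from one flag bit and a wait queue: clear the ack bit, fire the ping, then sleep until the ack arrives or the connection drops below C_CONNECTED, so the waiter cannot hang on a dead link. A generic sketch of the idiom, with stand-in helpers:

#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/wait.h>

#define GOT_PING_ACK 0

static unsigned long conn_flags;
static DECLARE_WAIT_QUEUE_HEAD(ping_wait);

static void request_ping(void) { }		/* stand-in for the network send */
static bool link_dead(void) { return false; }	/* stand-in for the state check */

static void ping_peer(void)
{
	clear_bit(GOT_PING_ACK, &conn_flags);
	request_ping();
	wait_event(ping_wait,
		   test_bit(GOT_PING_ACK, &conn_flags) || link_dead());
}

/* The ack receiver completes the handshake: */
static void got_ping_ack(void)
{
	set_bit(GOT_PING_ACK, &conn_flags);
	wake_up(&ping_wait);
}
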
@@ -831,7 +831,7 @@ int drbd_resync_finished(struct drbd_device *device)
831 if (w) { 831 if (w) {
832 w->cb = w_resync_finished; 832 w->cb = w_resync_finished;
833 w->device = device; 833 w->device = device;
834 drbd_queue_work(&device->tconn->sender_work, w); 834 drbd_queue_work(&device->connection->sender_work, w);
835 return 1; 835 return 1;
836 } 836 }
837 dev_err(DEV, "Warn failed to drbd_rs_del_all() and to kmalloc(w).\n"); 837 dev_err(DEV, "Warn failed to drbd_rs_del_all() and to kmalloc(w).\n");
@@ -854,7 +854,7 @@ int drbd_resync_finished(struct drbd_device *device)
854 854
855 ping_peer(device); 855 ping_peer(device);
856 856
857 spin_lock_irq(&device->tconn->req_lock); 857 spin_lock_irq(&device->connection->req_lock);
858 os = drbd_read_state(device); 858 os = drbd_read_state(device);
859 859
860 verify_done = (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T); 860 verify_done = (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T);
@@ -885,7 +885,7 @@ int drbd_resync_finished(struct drbd_device *device)
885 if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T) 885 if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T)
886 khelper_cmd = "after-resync-target"; 886 khelper_cmd = "after-resync-target";
887 887
888 if (device->tconn->csums_tfm && device->rs_total) { 888 if (device->connection->csums_tfm && device->rs_total) {
889 const unsigned long s = device->rs_same_csum; 889 const unsigned long s = device->rs_same_csum;
890 const unsigned long t = device->rs_total; 890 const unsigned long t = device->rs_total;
891 const int ratio = 891 const int ratio =
@@ -943,7 +943,7 @@ int drbd_resync_finished(struct drbd_device *device)
943 943
944 _drbd_set_state(device, ns, CS_VERBOSE, NULL); 944 _drbd_set_state(device, ns, CS_VERBOSE, NULL);
945out_unlock: 945out_unlock:
946 spin_unlock_irq(&device->tconn->req_lock); 946 spin_unlock_irq(&device->connection->req_lock);
947 put_ldev(device); 947 put_ldev(device);
948out: 948out:
949 device->rs_total = 0; 949 device->rs_total = 0;
@@ -970,9 +970,9 @@ static void move_to_net_ee_or_free(struct drbd_device *device, struct drbd_peer_
970 int i = (peer_req->i.size + PAGE_SIZE -1) >> PAGE_SHIFT; 970 int i = (peer_req->i.size + PAGE_SIZE -1) >> PAGE_SHIFT;
971 atomic_add(i, &device->pp_in_use_by_net); 971 atomic_add(i, &device->pp_in_use_by_net);
972 atomic_sub(i, &device->pp_in_use); 972 atomic_sub(i, &device->pp_in_use);
973 spin_lock_irq(&device->tconn->req_lock); 973 spin_lock_irq(&device->connection->req_lock);
974 list_add_tail(&peer_req->w.list, &device->net_ee); 974 list_add_tail(&peer_req->w.list, &device->net_ee);
975 spin_unlock_irq(&device->tconn->req_lock); 975 spin_unlock_irq(&device->connection->req_lock);
976 wake_up(&drbd_pp_wait); 976 wake_up(&drbd_pp_wait);
977 } else 977 } else
978 drbd_free_peer_req(device, peer_req); 978 drbd_free_peer_req(device, peer_req);
@@ -1096,13 +1096,13 @@ int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
1096 /* quick hack to try to avoid a race against reconfiguration. 1096 /* quick hack to try to avoid a race against reconfiguration.
1097 * a real fix would be much more involved, 1097 * a real fix would be much more involved,
1098 * introducing more locking mechanisms */ 1098 * introducing more locking mechanisms */
1099 if (device->tconn->csums_tfm) { 1099 if (device->connection->csums_tfm) {
1100 digest_size = crypto_hash_digestsize(device->tconn->csums_tfm); 1100 digest_size = crypto_hash_digestsize(device->connection->csums_tfm);
1101 D_ASSERT(digest_size == di->digest_size); 1101 D_ASSERT(digest_size == di->digest_size);
1102 digest = kmalloc(digest_size, GFP_NOIO); 1102 digest = kmalloc(digest_size, GFP_NOIO);
1103 } 1103 }
1104 if (digest) { 1104 if (digest) {
1105 drbd_csum_ee(device, device->tconn->csums_tfm, peer_req, digest); 1105 drbd_csum_ee(device, device->connection->csums_tfm, peer_req, digest);
1106 eq = !memcmp(digest, di->digest, digest_size); 1106 eq = !memcmp(digest, di->digest, digest_size);
1107 kfree(digest); 1107 kfree(digest);
1108 } 1108 }
@@ -1146,7 +1146,7 @@ int w_e_end_ov_req(struct drbd_work *w, int cancel)
1146 if (unlikely(cancel)) 1146 if (unlikely(cancel))
1147 goto out; 1147 goto out;
1148 1148
1149 digest_size = crypto_hash_digestsize(device->tconn->verify_tfm); 1149 digest_size = crypto_hash_digestsize(device->connection->verify_tfm);
1150 digest = kmalloc(digest_size, GFP_NOIO); 1150 digest = kmalloc(digest_size, GFP_NOIO);
1151 if (!digest) { 1151 if (!digest) {
1152 err = 1; /* terminate the connection in case the allocation failed */ 1152 err = 1; /* terminate the connection in case the allocation failed */
@@ -1154,7 +1154,7 @@ int w_e_end_ov_req(struct drbd_work *w, int cancel)
1154 } 1154 }
1155 1155
1156 if (likely(!(peer_req->flags & EE_WAS_ERROR))) 1156 if (likely(!(peer_req->flags & EE_WAS_ERROR)))
1157 drbd_csum_ee(device, device->tconn->verify_tfm, peer_req, digest); 1157 drbd_csum_ee(device, device->connection->verify_tfm, peer_req, digest);
1158 else 1158 else
1159 memset(digest, 0, digest_size); 1159 memset(digest, 0, digest_size);
1160 1160
@@ -1217,10 +1217,10 @@ int w_e_end_ov_reply(struct drbd_work *w, int cancel)
1217 di = peer_req->digest; 1217 di = peer_req->digest;
1218 1218
1219 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) { 1219 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
1220 digest_size = crypto_hash_digestsize(device->tconn->verify_tfm); 1220 digest_size = crypto_hash_digestsize(device->connection->verify_tfm);
1221 digest = kmalloc(digest_size, GFP_NOIO); 1221 digest = kmalloc(digest_size, GFP_NOIO);
1222 if (digest) { 1222 if (digest) {
1223 drbd_csum_ee(device, device->tconn->verify_tfm, peer_req, digest); 1223 drbd_csum_ee(device, device->connection->verify_tfm, peer_req, digest);
1224 1224
1225 D_ASSERT(digest_size == di->digest_size); 1225 D_ASSERT(digest_size == di->digest_size);
1226 eq = !memcmp(digest, di->digest, digest_size); 1226 eq = !memcmp(digest, di->digest, digest_size);
@@ -1274,20 +1274,20 @@ int w_prev_work_done(struct drbd_work *w, int cancel)
1274 * and to be able to wait for them. 1274 * and to be able to wait for them.
1275 * See also comment in drbd_adm_attach before drbd_suspend_io. 1275 * See also comment in drbd_adm_attach before drbd_suspend_io.
1276 */ 1276 */
1277static int drbd_send_barrier(struct drbd_tconn *tconn) 1277static int drbd_send_barrier(struct drbd_connection *connection)
1278{ 1278{
1279 struct p_barrier *p; 1279 struct p_barrier *p;
1280 struct drbd_socket *sock; 1280 struct drbd_socket *sock;
1281 1281
1282 sock = &tconn->data; 1282 sock = &connection->data;
1283 p = conn_prepare_command(tconn, sock); 1283 p = conn_prepare_command(connection, sock);
1284 if (!p) 1284 if (!p)
1285 return -EIO; 1285 return -EIO;
1286 p->barrier = tconn->send.current_epoch_nr; 1286 p->barrier = connection->send.current_epoch_nr;
1287 p->pad = 0; 1287 p->pad = 0;
1288 tconn->send.current_epoch_writes = 0; 1288 connection->send.current_epoch_writes = 0;
1289 1289
1290 return conn_send_command(tconn, sock, P_BARRIER, sizeof(*p), NULL, 0); 1290 return conn_send_command(connection, sock, P_BARRIER, sizeof(*p), NULL, 0);
1291} 1291}
1292 1292
1293int w_send_write_hint(struct drbd_work *w, int cancel) 1293int w_send_write_hint(struct drbd_work *w, int cancel)
@@ -1297,30 +1297,30 @@ int w_send_write_hint(struct drbd_work *w, int cancel)
1297 1297
1298 if (cancel) 1298 if (cancel)
1299 return 0; 1299 return 0;
1300 sock = &device->tconn->data; 1300 sock = &device->connection->data;
1301 if (!drbd_prepare_command(device, sock)) 1301 if (!drbd_prepare_command(device, sock))
1302 return -EIO; 1302 return -EIO;
1303 return drbd_send_command(device, sock, P_UNPLUG_REMOTE, 0, NULL, 0); 1303 return drbd_send_command(device, sock, P_UNPLUG_REMOTE, 0, NULL, 0);
1304} 1304}
1305 1305
1306static void re_init_if_first_write(struct drbd_tconn *tconn, unsigned int epoch) 1306static void re_init_if_first_write(struct drbd_connection *connection, unsigned int epoch)
1307{ 1307{
1308 if (!tconn->send.seen_any_write_yet) { 1308 if (!connection->send.seen_any_write_yet) {
1309 tconn->send.seen_any_write_yet = true; 1309 connection->send.seen_any_write_yet = true;
1310 tconn->send.current_epoch_nr = epoch; 1310 connection->send.current_epoch_nr = epoch;
1311 tconn->send.current_epoch_writes = 0; 1311 connection->send.current_epoch_writes = 0;
1312 } 1312 }
1313} 1313}
1314 1314
1315static void maybe_send_barrier(struct drbd_tconn *tconn, unsigned int epoch) 1315static void maybe_send_barrier(struct drbd_connection *connection, unsigned int epoch)
1316{ 1316{
1317 /* re-init if first write on this connection */ 1317 /* re-init if first write on this connection */
1318 if (!tconn->send.seen_any_write_yet) 1318 if (!connection->send.seen_any_write_yet)
1319 return; 1319 return;
1320 if (tconn->send.current_epoch_nr != epoch) { 1320 if (connection->send.current_epoch_nr != epoch) {
1321 if (tconn->send.current_epoch_writes) 1321 if (connection->send.current_epoch_writes)
1322 drbd_send_barrier(tconn); 1322 drbd_send_barrier(connection);
1323 tconn->send.current_epoch_nr = epoch; 1323 connection->send.current_epoch_nr = epoch;
1324 } 1324 }
1325} 1325}
1326 1326
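
Together, re_init_if_first_write() and maybe_send_barrier() implement lazy barrier insertion: the first write seeds the epoch counters, and a P_BARRIER is emitted only when a request belongs to a newer epoch than the current one and the current epoch actually saw writes. A condensed model of that bookkeeping; send_barrier() stands in for drbd_send_barrier() above:

#include <linux/types.h>

struct send_state {
	bool seen_any_write_yet;
	unsigned int current_epoch_nr;
	unsigned int current_epoch_writes;
};

static void send_barrier(struct send_state *s)
{
	s->current_epoch_writes = 0;	/* the real thing also sends P_BARRIER */
}

/* Writes only: the first write on a connection seeds the counters. */
static void re_init_if_first_write(struct send_state *s, unsigned int epoch)
{
	if (!s->seen_any_write_yet) {
		s->seen_any_write_yet = true;
		s->current_epoch_nr = epoch;
		s->current_epoch_writes = 0;
	}
}

/* Reads and writes: close the previous epoch if it had any writes. */
static void maybe_send_barrier(struct send_state *s, unsigned int epoch)
{
	if (!s->seen_any_write_yet)
		return;		/* nothing to close yet */

	if (s->current_epoch_nr != epoch) {
		if (s->current_epoch_writes)
			send_barrier(s);
		s->current_epoch_nr = epoch;
	}
}
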
@@ -1328,7 +1328,7 @@ int w_send_out_of_sync(struct drbd_work *w, int cancel)
1328{ 1328{
1329 struct drbd_request *req = container_of(w, struct drbd_request, w); 1329 struct drbd_request *req = container_of(w, struct drbd_request, w);
1330 struct drbd_device *device = w->device; 1330 struct drbd_device *device = w->device;
1331 struct drbd_tconn *tconn = device->tconn; 1331 struct drbd_connection *connection = device->connection;
1332 int err; 1332 int err;
1333 1333
1334 if (unlikely(cancel)) { 1334 if (unlikely(cancel)) {
@@ -1336,11 +1336,11 @@ int w_send_out_of_sync(struct drbd_work *w, int cancel)
1336 return 0; 1336 return 0;
1337 } 1337 }
1338 1338
1339 /* this time, no tconn->send.current_epoch_writes++; 1339 /* this time, no connection->send.current_epoch_writes++;
1340 * If it was sent, it was the closing barrier for the last 1340 * If it was sent, it was the closing barrier for the last
1341 * replicated epoch, before we went into AHEAD mode. 1341 * replicated epoch, before we went into AHEAD mode.
1342 * No more barriers will be sent, until we leave AHEAD mode again. */ 1342 * No more barriers will be sent, until we leave AHEAD mode again. */
1343 maybe_send_barrier(tconn, req->epoch); 1343 maybe_send_barrier(connection, req->epoch);
1344 1344
1345 err = drbd_send_out_of_sync(device, req); 1345 err = drbd_send_out_of_sync(device, req);
1346 req_mod(req, OOS_HANDED_TO_NETWORK); 1346 req_mod(req, OOS_HANDED_TO_NETWORK);
@@ -1358,7 +1358,7 @@ int w_send_dblock(struct drbd_work *w, int cancel)
1358{ 1358{
1359 struct drbd_request *req = container_of(w, struct drbd_request, w); 1359 struct drbd_request *req = container_of(w, struct drbd_request, w);
1360 struct drbd_device *device = w->device; 1360 struct drbd_device *device = w->device;
1361 struct drbd_tconn *tconn = device->tconn; 1361 struct drbd_connection *connection = device->connection;
1362 int err; 1362 int err;
1363 1363
1364 if (unlikely(cancel)) { 1364 if (unlikely(cancel)) {
@@ -1366,9 +1366,9 @@ int w_send_dblock(struct drbd_work *w, int cancel)
1366 return 0; 1366 return 0;
1367 } 1367 }
1368 1368
1369 re_init_if_first_write(tconn, req->epoch); 1369 re_init_if_first_write(connection, req->epoch);
1370 maybe_send_barrier(tconn, req->epoch); 1370 maybe_send_barrier(connection, req->epoch);
1371 tconn->send.current_epoch_writes++; 1371 connection->send.current_epoch_writes++;
1372 1372
1373 err = drbd_send_dblock(device, req); 1373 err = drbd_send_dblock(device, req);
1374 req_mod(req, err ? SEND_FAILED : HANDED_OVER_TO_NETWORK); 1374 req_mod(req, err ? SEND_FAILED : HANDED_OVER_TO_NETWORK);
@@ -1386,7 +1386,7 @@ int w_send_read_req(struct drbd_work *w, int cancel)
1386{ 1386{
1387 struct drbd_request *req = container_of(w, struct drbd_request, w); 1387 struct drbd_request *req = container_of(w, struct drbd_request, w);
1388 struct drbd_device *device = w->device; 1388 struct drbd_device *device = w->device;
1389 struct drbd_tconn *tconn = device->tconn; 1389 struct drbd_connection *connection = device->connection;
1390 int err; 1390 int err;
1391 1391
1392 if (unlikely(cancel)) { 1392 if (unlikely(cancel)) {
@@ -1396,7 +1396,7 @@ int w_send_read_req(struct drbd_work *w, int cancel)
1396 1396
1397 /* Even read requests may close a write epoch, 1397 /* Even read requests may close a write epoch,
1398 * if there was any yet. */ 1398 * if there was any yet. */
1399 maybe_send_barrier(tconn, req->epoch); 1399 maybe_send_barrier(connection, req->epoch);
1400 1400
1401 err = drbd_send_drequest(device, P_DATA_REQUEST, req->i.sector, req->i.size, 1401 err = drbd_send_drequest(device, P_DATA_REQUEST, req->i.sector, req->i.size,
1402 (unsigned long)req); 1402 (unsigned long)req);
@@ -1581,7 +1581,7 @@ void start_resync_timer_fn(unsigned long data)
1581{ 1581{
1582 struct drbd_device *device = (struct drbd_device *) data; 1582 struct drbd_device *device = (struct drbd_device *) data;
1583 1583
1584 drbd_queue_work(&device->tconn->sender_work, &device->start_resync_work); 1584 drbd_queue_work(&device->connection->sender_work, &device->start_resync_work);
1585} 1585}
1586 1586
1587int w_start_resync(struct drbd_work *w, int cancel) 1587int w_start_resync(struct drbd_work *w, int cancel)
@@ -1628,7 +1628,7 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side)
1628 if (r > 0) { 1628 if (r > 0) {
1629 dev_info(DEV, "before-resync-target handler returned %d, " 1629 dev_info(DEV, "before-resync-target handler returned %d, "
1630 "dropping connection.\n", r); 1630 "dropping connection.\n", r);
1631 conn_request_state(device->tconn, NS(conn, C_DISCONNECTING), CS_HARD); 1631 conn_request_state(device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
1632 return; 1632 return;
1633 } 1633 }
1634 } else /* C_SYNC_SOURCE */ { 1634 } else /* C_SYNC_SOURCE */ {
@@ -1641,14 +1641,14 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side)
1641 } else { 1641 } else {
1642 dev_info(DEV, "before-resync-source handler returned %d, " 1642 dev_info(DEV, "before-resync-source handler returned %d, "
1643 "dropping connection.\n", r); 1643 "dropping connection.\n", r);
1644 conn_request_state(device->tconn, NS(conn, C_DISCONNECTING), CS_HARD); 1644 conn_request_state(device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
1645 return; 1645 return;
1646 } 1646 }
1647 } 1647 }
1648 } 1648 }
1649 } 1649 }
1650 1650
1651 if (current == device->tconn->worker.task) { 1651 if (current == device->connection->worker.task) {
1652 /* The worker should not sleep waiting for state_mutex, 1652 /* The worker should not sleep waiting for state_mutex,
1653 that can take long */ 1653 that can take long */
1654 if (!mutex_trylock(device->state_mutex)) { 1654 if (!mutex_trylock(device->state_mutex)) {
@@ -1727,10 +1727,10 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side)
1727 * drbd_resync_finished from here in that case. 1727 * drbd_resync_finished from here in that case.
1728 * We drbd_gen_and_send_sync_uuid here for protocol < 96, 1728 * We drbd_gen_and_send_sync_uuid here for protocol < 96,
1729 * and from after_state_ch otherwise. */ 1729 * and from after_state_ch otherwise. */
1730 if (side == C_SYNC_SOURCE && device->tconn->agreed_pro_version < 96) 1730 if (side == C_SYNC_SOURCE && device->connection->agreed_pro_version < 96)
1731 drbd_gen_and_send_sync_uuid(device); 1731 drbd_gen_and_send_sync_uuid(device);
1732 1732
1733 if (device->tconn->agreed_pro_version < 95 && device->rs_total == 0) { 1733 if (device->connection->agreed_pro_version < 95 && device->rs_total == 0) {
1734 /* This still has a race (about when exactly the peers 1734 /* This still has a race (about when exactly the peers
1735 * detect connection loss) that can lead to a full sync 1735 * detect connection loss) that can lead to a full sync
1736 * on next handshake. In 8.3.9 we fixed this with explicit 1736 * on next handshake. In 8.3.9 we fixed this with explicit
@@ -1746,7 +1746,7 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side)
1746 int timeo; 1746 int timeo;
1747 1747
1748 rcu_read_lock(); 1748 rcu_read_lock();
1749 nc = rcu_dereference(device->tconn->net_conf); 1749 nc = rcu_dereference(device->connection->net_conf);
1750 timeo = nc->ping_int * HZ + nc->ping_timeo * HZ / 9; 1750 timeo = nc->ping_int * HZ + nc->ping_timeo * HZ / 9;
1751 rcu_read_unlock(); 1751 rcu_read_unlock();
1752 schedule_timeout_interruptible(timeo); 1752 schedule_timeout_interruptible(timeo);
@@ -1772,7 +1772,7 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side)
1772 * (because we have not yet seen new requests), we should send the 1772 * (because we have not yet seen new requests), we should send the
1773 * corresponding barrier now. Must be checked within the same spinlock 1773 * corresponding barrier now. Must be checked within the same spinlock
1774 * that is used to check for new requests. */ 1774 * that is used to check for new requests. */
1775static bool need_to_send_barrier(struct drbd_tconn *connection) 1775static bool need_to_send_barrier(struct drbd_connection *connection)
1776{ 1776{
1777 if (!connection->send.seen_any_write_yet) 1777 if (!connection->send.seen_any_write_yet)
1778 return false; 1778 return false;
@@ -1813,7 +1813,7 @@ static bool dequeue_work_item(struct drbd_work_queue *queue, struct list_head *w
1813 return !list_empty(work_list); 1813 return !list_empty(work_list);
1814} 1814}
1815 1815
1816static void wait_for_work(struct drbd_tconn *connection, struct list_head *work_list) 1816static void wait_for_work(struct drbd_connection *connection, struct list_head *work_list)
1817{ 1817{
1818 DEFINE_WAIT(wait); 1818 DEFINE_WAIT(wait);
1819 struct net_conf *nc; 1819 struct net_conf *nc;
@@ -1884,7 +1884,7 @@ static void wait_for_work(struct drbd_tconn *connection, struct list_head *work_
1884 1884
1885int drbd_worker(struct drbd_thread *thi) 1885int drbd_worker(struct drbd_thread *thi)
1886{ 1886{
1887 struct drbd_tconn *tconn = thi->tconn; 1887 struct drbd_connection *connection = thi->connection;
1888 struct drbd_work *w = NULL; 1888 struct drbd_work *w = NULL;
1889 struct drbd_device *device; 1889 struct drbd_device *device;
1890 LIST_HEAD(work_list); 1890 LIST_HEAD(work_list);
@@ -1896,12 +1896,12 @@ int drbd_worker(struct drbd_thread *thi)
1896 /* as long as we use drbd_queue_work_front(), 1896 /* as long as we use drbd_queue_work_front(),
1897 * we may only dequeue single work items here, not batches. */ 1897 * we may only dequeue single work items here, not batches. */
1898 if (list_empty(&work_list)) 1898 if (list_empty(&work_list))
1899 wait_for_work(tconn, &work_list); 1899 wait_for_work(connection, &work_list);
1900 1900
1901 if (signal_pending(current)) { 1901 if (signal_pending(current)) {
1902 flush_signals(current); 1902 flush_signals(current);
1903 if (get_t_state(thi) == RUNNING) { 1903 if (get_t_state(thi) == RUNNING) {
1904 conn_warn(tconn, "Worker got an unexpected signal\n"); 1904 conn_warn(connection, "Worker got an unexpected signal\n");
1905 continue; 1905 continue;
1906 } 1906 }
1907 break; 1907 break;
@@ -1913,10 +1913,10 @@ int drbd_worker(struct drbd_thread *thi)
1913 while (!list_empty(&work_list)) { 1913 while (!list_empty(&work_list)) {
1914 w = list_first_entry(&work_list, struct drbd_work, list); 1914 w = list_first_entry(&work_list, struct drbd_work, list);
1915 list_del_init(&w->list); 1915 list_del_init(&w->list);
1916 if (w->cb(w, tconn->cstate < C_WF_REPORT_PARAMS) == 0) 1916 if (w->cb(w, connection->cstate < C_WF_REPORT_PARAMS) == 0)
1917 continue; 1917 continue;
1918 if (tconn->cstate >= C_WF_REPORT_PARAMS) 1918 if (connection->cstate >= C_WF_REPORT_PARAMS)
1919 conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD); 1919 conn_request_state(connection, NS(conn, C_NETWORK_FAILURE), CS_HARD);
1920 } 1920 }
1921 } 1921 }
1922 1922
@@ -1926,11 +1926,11 @@ int drbd_worker(struct drbd_thread *thi)
1926 list_del_init(&w->list); 1926 list_del_init(&w->list);
1927 w->cb(w, 1); 1927 w->cb(w, 1);
1928 } 1928 }
1929 dequeue_work_batch(&tconn->sender_work, &work_list); 1929 dequeue_work_batch(&connection->sender_work, &work_list);
1930 } while (!list_empty(&work_list)); 1930 } while (!list_empty(&work_list));
1931 1931
1932 rcu_read_lock(); 1932 rcu_read_lock();
1933 idr_for_each_entry(&tconn->volumes, device, vnr) { 1933 idr_for_each_entry(&connection->volumes, device, vnr) {
1934 D_ASSERT(device->state.disk == D_DISKLESS && device->state.conn == C_STANDALONE); 1934 D_ASSERT(device->state.disk == D_DISKLESS && device->state.conn == C_STANDALONE);
1935 kref_get(&device->kref); 1935 kref_get(&device->kref);
1936 rcu_read_unlock(); 1936 rcu_read_unlock();