 drivers/block/drbd/drbd_int.h      |  20
 drivers/block/drbd/drbd_main.c     | 100
 drivers/block/drbd/drbd_nl.c       |  34
 drivers/block/drbd/drbd_receiver.c |  96
 drivers/block/drbd/drbd_req.c      |  48
 drivers/block/drbd/drbd_req.h      |   4
 drivers/block/drbd/drbd_worker.c   |  38
 7 files changed, 170 insertions(+), 170 deletions(-)
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 9efe499b1122..b440ffd14989 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -976,6 +976,12 @@ struct drbd_tconn { /* is a resource from the config file */
 	unsigned long last_received;	/* in jiffies, either socket */
 	unsigned int ko_count;
 
+	spinlock_t req_lock;
+	struct drbd_tl_epoch *unused_spare_tle; /* for pre-allocation */
+	struct drbd_tl_epoch *newest_tle;
+	struct drbd_tl_epoch *oldest_tle;
+	struct list_head out_of_sequence_requests;
+
 	struct drbd_thread receiver;
 	struct drbd_thread worker;
 	struct drbd_thread asender;
@@ -1031,12 +1037,6 @@ struct drbd_conf {
 	atomic_t unacked_cnt;	 /* Need to send replys for */
 	atomic_t local_cnt;	 /* Waiting for local completion */
 
-	spinlock_t req_lock;
-	struct drbd_tl_epoch *unused_spare_tle; /* for pre-allocation */
-	struct drbd_tl_epoch *newest_tle;
-	struct drbd_tl_epoch *oldest_tle;
-	struct list_head out_of_sequence_requests;
-
 	/* Interval tree of pending local requests */
 	struct rb_root read_requests;
 	struct rb_root write_requests;
@@ -1868,9 +1868,9 @@ static inline void drbd_chk_io_error_(struct drbd_conf *mdev,
 {
 	if (error) {
 		unsigned long flags;
-		spin_lock_irqsave(&mdev->req_lock, flags);
+		spin_lock_irqsave(&mdev->tconn->req_lock, flags);
 		__drbd_chk_io_error_(mdev, forcedetach, where);
-		spin_unlock_irqrestore(&mdev->req_lock, flags);
+		spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
 	}
 }
 
@@ -2366,11 +2366,11 @@ static inline bool inc_ap_bio_cond(struct drbd_conf *mdev, int count)
 {
 	bool rv = false;
 
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
 	rv = may_inc_ap_bio(mdev);
 	if (rv)
 		atomic_add(count, &mdev->ap_bio_cnt);
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 
 	return rv;
 }
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index e06ca4a0d906..c063cd513223 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -185,7 +185,7 @@ int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
  * DOC: The transfer log
  *
  * The transfer log is a single linked list of &struct drbd_tl_epoch objects.
- * mdev->newest_tle points to the head, mdev->oldest_tle points to the tail
+ * mdev->tconn->newest_tle points to the head, mdev->tconn->oldest_tle points to the tail
  * of the list. There is always at least one &struct drbd_tl_epoch object.
  *
  * Each &struct drbd_tl_epoch has a circular double linked list of requests
@@ -206,21 +206,21 @@ static int tl_init(struct drbd_conf *mdev)
 	b->n_writes = 0;
 	b->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
 
-	mdev->oldest_tle = b;
-	mdev->newest_tle = b;
-	INIT_LIST_HEAD(&mdev->out_of_sequence_requests);
+	mdev->tconn->oldest_tle = b;
+	mdev->tconn->newest_tle = b;
+	INIT_LIST_HEAD(&mdev->tconn->out_of_sequence_requests);
 
 	return 1;
 }
 
 static void tl_cleanup(struct drbd_conf *mdev)
 {
-	D_ASSERT(mdev->oldest_tle == mdev->newest_tle);
-	D_ASSERT(list_empty(&mdev->out_of_sequence_requests));
-	kfree(mdev->oldest_tle);
-	mdev->oldest_tle = NULL;
-	kfree(mdev->unused_spare_tle);
-	mdev->unused_spare_tle = NULL;
+	D_ASSERT(mdev->tconn->oldest_tle == mdev->tconn->newest_tle);
+	D_ASSERT(list_empty(&mdev->tconn->out_of_sequence_requests));
+	kfree(mdev->tconn->oldest_tle);
+	mdev->tconn->oldest_tle = NULL;
+	kfree(mdev->tconn->unused_spare_tle);
+	mdev->tconn->unused_spare_tle = NULL;
 }
 
 /**
@@ -240,13 +240,13 @@ void _tl_add_barrier(struct drbd_conf *mdev, struct drbd_tl_epoch *new)
 	new->next = NULL;
 	new->n_writes = 0;
 
-	newest_before = mdev->newest_tle;
+	newest_before = mdev->tconn->newest_tle;
 	/* never send a barrier number == 0, because that is special-cased
 	 * when using TCQ for our write ordering code */
 	new->br_number = (newest_before->br_number+1) ?: 1;
-	if (mdev->newest_tle != new) {
-		mdev->newest_tle->next = new;
-		mdev->newest_tle = new;
+	if (mdev->tconn->newest_tle != new) {
+		mdev->tconn->newest_tle->next = new;
+		mdev->tconn->newest_tle = new;
 	}
 }
 
@@ -267,9 +267,9 @@ void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
 	struct list_head *le, *tle;
 	struct drbd_request *r;
 
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
 
-	b = mdev->oldest_tle;
+	b = mdev->tconn->oldest_tle;
 
 	/* first some paranoia code */
 	if (b == NULL) {
@@ -312,22 +312,22 @@ void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
 	if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
 		_tl_add_barrier(mdev, b);
 		if (nob)
-			mdev->oldest_tle = nob;
+			mdev->tconn->oldest_tle = nob;
 		/* if nob == NULL b was the only barrier, and becomes the new
-		   barrier. Therefore mdev->oldest_tle points already to b */
+		   barrier. Therefore mdev->tconn->oldest_tle points already to b */
 	} else {
 		D_ASSERT(nob != NULL);
-		mdev->oldest_tle = nob;
+		mdev->tconn->oldest_tle = nob;
 		kfree(b);
 	}
 
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 	dec_ap_pending(mdev);
 
 	return;
 
 bail:
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 	drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
 }
 
@@ -347,8 +347,8 @@ static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
 	struct drbd_request *req;
 	int rv, n_writes, n_reads;
 
-	b = mdev->oldest_tle;
-	pn = &mdev->oldest_tle;
+	b = mdev->tconn->oldest_tle;
+	pn = &mdev->tconn->oldest_tle;
 	while (b) {
 		n_writes = 0;
 		n_reads = 0;
@@ -387,7 +387,7 @@ static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
 		if (b->w.cb != NULL)
 			dec_ap_pending(mdev);
 
-		if (b == mdev->newest_tle) {
+		if (b == mdev->tconn->newest_tle) {
 			/* recycle, but reinit! */
 			D_ASSERT(tmp == NULL);
 			INIT_LIST_HEAD(&b->requests);
@@ -422,15 +422,15 @@ void tl_clear(struct drbd_conf *mdev)
 	struct list_head *le, *tle;
 	struct drbd_request *r;
 
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
 
 	_tl_restart(mdev, CONNECTION_LOST_WHILE_PENDING);
 
 	/* we expect this list to be empty. */
-	D_ASSERT(list_empty(&mdev->out_of_sequence_requests));
+	D_ASSERT(list_empty(&mdev->tconn->out_of_sequence_requests));
 
 	/* but just in case, clean it up anyways! */
-	list_for_each_safe(le, tle, &mdev->out_of_sequence_requests) {
+	list_for_each_safe(le, tle, &mdev->tconn->out_of_sequence_requests) {
 		r = list_entry(le, struct drbd_request, tl_requests);
 		/* It would be nice to complete outside of spinlock.
 		 * But this is easier for now. */
@@ -440,14 +440,14 @@ void tl_clear(struct drbd_conf *mdev)
 	/* ensure bit indicating barrier is required is clear */
 	clear_bit(CREATE_BARRIER, &mdev->flags);
 
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 }
 
 void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
 {
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
 	_tl_restart(mdev, what);
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 }
 
 /**
@@ -476,12 +476,12 @@ drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
 	union drbd_state os, ns;
 	enum drbd_state_rv rv;
 
-	spin_lock_irqsave(&mdev->req_lock, flags);
+	spin_lock_irqsave(&mdev->tconn->req_lock, flags);
 	os = mdev->state;
 	ns.i = (os.i & ~mask.i) | val.i;
 	rv = _drbd_set_state(mdev, ns, f, NULL);
 	ns = mdev->state;
-	spin_unlock_irqrestore(&mdev->req_lock, flags);
+	spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
 
 	return rv;
 }
@@ -522,7 +522,7 @@ _req_st_cond(struct drbd_conf *mdev, union drbd_state mask,
 		return SS_CW_FAILED_BY_PEER;
 
 	rv = 0;
-	spin_lock_irqsave(&mdev->req_lock, flags);
+	spin_lock_irqsave(&mdev->tconn->req_lock, flags);
 	os = mdev->state;
 	ns.i = (os.i & ~mask.i) | val.i;
 	ns = sanitize_state(mdev, os, ns, NULL);
@@ -537,7 +537,7 @@ _req_st_cond(struct drbd_conf *mdev, union drbd_state mask,
 				rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */
 		}
 	}
-	spin_unlock_irqrestore(&mdev->req_lock, flags);
+	spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
 
 	return rv;
 }
@@ -566,7 +566,7 @@ drbd_req_state(struct drbd_conf *mdev, union drbd_state mask,
 	if (f & CS_SERIALIZE)
 		mutex_lock(&mdev->state_mutex);
 
-	spin_lock_irqsave(&mdev->req_lock, flags);
+	spin_lock_irqsave(&mdev->tconn->req_lock, flags);
 	os = mdev->state;
 	ns.i = (os.i & ~mask.i) | val.i;
 	ns = sanitize_state(mdev, os, ns, NULL);
@@ -575,7 +575,7 @@ drbd_req_state(struct drbd_conf *mdev, union drbd_state mask,
 		rv = is_valid_state(mdev, ns);
 		if (rv == SS_SUCCESS)
 			rv = is_valid_state_transition(mdev, ns, os);
-		spin_unlock_irqrestore(&mdev->req_lock, flags);
+		spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
 
 		if (rv < SS_SUCCESS) {
 			if (f & CS_VERBOSE)
@@ -601,7 +601,7 @@ drbd_req_state(struct drbd_conf *mdev, union drbd_state mask,
 				print_st_err(mdev, os, ns, rv);
 			goto abort;
 		}
-		spin_lock_irqsave(&mdev->req_lock, flags);
+		spin_lock_irqsave(&mdev->tconn->req_lock, flags);
 		os = mdev->state;
 		ns.i = (os.i & ~mask.i) | val.i;
 		rv = _drbd_set_state(mdev, ns, f, &done);
@@ -610,7 +610,7 @@ drbd_req_state(struct drbd_conf *mdev, union drbd_state mask,
 		rv = _drbd_set_state(mdev, ns, f, &done);
 	}
 
-	spin_unlock_irqrestore(&mdev->req_lock, flags);
+	spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
 
 	if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) {
 		D_ASSERT(current != mdev->tconn->worker.task);
@@ -1367,9 +1367,9 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
 				drbd_uuid_new_current(mdev);
 				clear_bit(NEW_CUR_UUID, &mdev->flags);
 			}
-			spin_lock_irq(&mdev->req_lock);
+			spin_lock_irq(&mdev->tconn->req_lock);
 			_drbd_set_state(_NS(mdev, susp_fen, 0), CS_VERBOSE, NULL);
-			spin_unlock_irq(&mdev->req_lock);
+			spin_unlock_irq(&mdev->tconn->req_lock);
 		}
 		/* case2: The connection was established again: */
 		if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
@@ -1380,11 +1380,11 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
 	}
 
 	if (what != NOTHING) {
-		spin_lock_irq(&mdev->req_lock);
+		spin_lock_irq(&mdev->tconn->req_lock);
 		_tl_restart(mdev, what);
 		nsm.i &= mdev->state.i;
 		_drbd_set_state(mdev, nsm, CS_VERBOSE, NULL);
-		spin_unlock_irq(&mdev->req_lock);
+		spin_unlock_irq(&mdev->tconn->req_lock);
 	}
 
 	/* Became sync source. With protocol >= 96, we still need to send out
@@ -2898,7 +2898,7 @@ static int drbd_open(struct block_device *bdev, fmode_t mode)
 	int rv = 0;
 
 	mutex_lock(&drbd_main_mutex);
-	spin_lock_irqsave(&mdev->req_lock, flags);
+	spin_lock_irqsave(&mdev->tconn->req_lock, flags);
 	/* to have a stable mdev->state.role
 	 * and no race with updating open_cnt */
 
@@ -2911,7 +2911,7 @@ static int drbd_open(struct block_device *bdev, fmode_t mode)
 
 	if (!rv)
 		mdev->open_cnt++;
-	spin_unlock_irqrestore(&mdev->req_lock, flags);
+	spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
 	mutex_unlock(&drbd_main_mutex);
 
 	return rv;
@@ -2990,7 +2990,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
 	spin_lock_init(&mdev->tconn->meta.work.q_lock);
 
 	spin_lock_init(&mdev->al_lock);
-	spin_lock_init(&mdev->req_lock);
+	spin_lock_init(&mdev->tconn->req_lock);
 	spin_lock_init(&mdev->peer_seq_lock);
 	spin_lock_init(&mdev->epoch_lock);
 
@@ -3451,7 +3451,7 @@ struct drbd_conf *drbd_new_device(unsigned int minor)
 	blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
 	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
 	blk_queue_merge_bvec(q, drbd_merge_bvec);
-	q->queue_lock = &mdev->req_lock;
+	q->queue_lock = &mdev->tconn->req_lock; /* needed since we use */
 
 	mdev->md_io_page = alloc_page(GFP_KERNEL);
 	if (!mdev->md_io_page)
@@ -3784,14 +3784,14 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
 	mdev->sync_conf.al_extents = be32_to_cpu(buffer->al_nr_extents);
 	bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);
 
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
 	if (mdev->state.conn < C_CONNECTED) {
 		int peer;
 		peer = be32_to_cpu(buffer->la_peer_max_bio_size);
 		peer = max_t(int, peer, DRBD_MAX_BIO_SIZE_SAFE);
 		mdev->peer_max_bio_size = peer;
 	}
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 
 	if (mdev->sync_conf.al_extents < 7)
 		mdev->sync_conf.al_extents = 127;
@@ -4046,13 +4046,13 @@ void drbd_queue_bitmap_io(struct drbd_conf *mdev,
 	mdev->bm_io_work.why = why;
 	mdev->bm_io_work.flags = flags;
 
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
 	set_bit(BITMAP_IO, &mdev->flags);
 	if (atomic_read(&mdev->ap_bio_cnt) == 0) {
 		if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
 			drbd_queue_work(&mdev->tconn->data.work, &mdev->bm_io_work.w);
 	}
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 }
 
 /**
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index a9ede8fc8880..4eaf81a463b5 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -287,13 +287,13 @@ static int _try_outdate_peer_async(void *data)
 	   pdsk == D_INCONSISTENT while conn >= C_CONNECTED is valid,
 	   therefore we have to have the pre state change check here.
 	*/
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
 	ns = mdev->state;
 	if (ns.conn < C_WF_REPORT_PARAMS) {
 		ns.pdsk = nps;
 		_drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
 	}
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 
 	return 0;
 }
@@ -884,7 +884,7 @@ static void drbd_reconfig_start(struct drbd_conf *mdev)
  * wakes potential waiters */
 static void drbd_reconfig_done(struct drbd_conf *mdev)
 {
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
 	if (mdev->state.disk == D_DISKLESS &&
 	    mdev->state.conn == C_STANDALONE &&
 	    mdev->state.role == R_SECONDARY) {
@@ -892,7 +892,7 @@ static void drbd_reconfig_done(struct drbd_conf *mdev)
 		drbd_thread_stop_nowait(&mdev->tconn->worker);
 	} else
 		clear_bit(CONFIG_PENDING, &mdev->flags);
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 	wake_up(&mdev->state_wait);
 }
 
@@ -909,11 +909,11 @@ static void drbd_suspend_al(struct drbd_conf *mdev)
 		return;
 	}
 
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
 	if (mdev->state.conn < C_CONNECTED)
 		s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags);
 
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 
 	if (s)
 		dev_info(DEV, "Suspended AL updates\n");
@@ -1240,7 +1240,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 	if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev))
 		drbd_suspend_al(mdev); /* IO is still suspended here... */
 
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
 	os = mdev->state;
 	ns.i = os.i;
 	/* If MDF_CONSISTENT is not set go into inconsistent state,
@@ -1285,7 +1285,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 
 	rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
 	ns = mdev->state;
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 
 	if (rv < SS_SUCCESS)
 		goto force_diskless_dec;
@@ -1521,10 +1521,10 @@ static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
 	}
 
 	drbd_flush_workqueue(mdev);
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
 	if (mdev->tconn->net_conf != NULL) {
 		retcode = ERR_NET_CONFIGURED;
-		spin_unlock_irq(&mdev->req_lock);
+		spin_unlock_irq(&mdev->tconn->req_lock);
 		goto fail;
 	}
 	mdev->tconn->net_conf = new_conf;
@@ -1548,7 +1548,7 @@ static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
 	mdev->int_dig_in=int_dig_in;
 	mdev->int_dig_vv=int_dig_vv;
 	retcode = _drbd_set_state(_NS(mdev, conn, C_UNCONNECTED), CS_VERBOSE, NULL);
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 
 	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
 	reply->ret_code = retcode;
@@ -1582,10 +1582,10 @@ static int drbd_nl_disconnect(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
 	}
 
 	if (dc.force) {
-		spin_lock_irq(&mdev->req_lock);
+		spin_lock_irq(&mdev->tconn->req_lock);
 		if (mdev->state.conn >= C_WF_CONNECTION)
 			_drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), CS_HARD, NULL);
-		spin_unlock_irq(&mdev->req_lock);
+		spin_unlock_irq(&mdev->tconn->req_lock);
 		goto done;
 	}
 
@@ -1917,10 +1917,10 @@ static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
 	retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
 
 	while (retcode == SS_NEED_CONNECTION) {
-		spin_lock_irq(&mdev->req_lock);
+		spin_lock_irq(&mdev->tconn->req_lock);
 		if (mdev->state.conn < C_CONNECTED)
 			retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL);
-		spin_unlock_irq(&mdev->req_lock);
+		spin_unlock_irq(&mdev->tconn->req_lock);
 
 		if (retcode != SS_NEED_CONNECTION)
 			break;
@@ -2193,10 +2193,10 @@ static int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
 			drbd_send_uuids_skip_initial_sync(mdev);
 			_drbd_uuid_set(mdev, UI_BITMAP, 0);
 			drbd_print_uuids(mdev, "cleared bitmap UUID");
-			spin_lock_irq(&mdev->req_lock);
+			spin_lock_irq(&mdev->tconn->req_lock);
 			_drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
 					CS_VERBOSE, NULL);
-			spin_unlock_irq(&mdev->req_lock);
+			spin_unlock_irq(&mdev->tconn->req_lock);
 		}
 	}
 
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 27a8363510dd..af968a0bae07 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -210,9 +210,9 @@ static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
 	LIST_HEAD(reclaimed);
 	struct drbd_epoch_entry *e, *t;
 
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
 	reclaim_net_ee(mdev, &reclaimed);
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 
 	list_for_each_entry_safe(e, t, &reclaimed, w.list)
 		drbd_free_net_ee(mdev, e);
@@ -269,7 +269,7 @@ static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool
 }
 
 /* Must not be used from irq, as that may deadlock: see drbd_pp_alloc.
- * Is also used from inside an other spin_lock_irq(&mdev->req_lock);
+ * Is also used from inside an other spin_lock_irq(&mdev->tconn->req_lock);
  * Either links the page chain back to the global pool,
  * or returns all pages to the system. */
 static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net)
@@ -371,9 +371,9 @@ int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
 	int count = 0;
 	int is_net = list == &mdev->net_ee;
 
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
 	list_splice_init(list, &work_list);
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 
 	list_for_each_entry_safe(e, t, &work_list, w.list) {
 		drbd_free_some_ee(mdev, e, is_net);
@@ -399,10 +399,10 @@ static int drbd_process_done_ee(struct drbd_conf *mdev)
 	struct drbd_epoch_entry *e, *t;
 	int ok = (mdev->state.conn >= C_WF_REPORT_PARAMS);
 
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
 	reclaim_net_ee(mdev, &reclaimed);
 	list_splice_init(&mdev->done_ee, &work_list);
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 
 	list_for_each_entry_safe(e, t, &reclaimed, w.list)
 		drbd_free_net_ee(mdev, e);
@@ -429,18 +429,18 @@ void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
 	 * and calling prepare_to_wait in the fast path */
 	while (!list_empty(head)) {
 		prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
-		spin_unlock_irq(&mdev->req_lock);
+		spin_unlock_irq(&mdev->tconn->req_lock);
 		io_schedule();
 		finish_wait(&mdev->ee_wait, &wait);
-		spin_lock_irq(&mdev->req_lock);
+		spin_lock_irq(&mdev->tconn->req_lock);
 	}
 }
 
 void drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
 {
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
 	_drbd_wait_ee_list_empty(mdev, head);
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 }
 
 /* see also kernel_accept; which is only present since 2.6.18.
@@ -1452,9 +1452,9 @@ static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_si
 
 	e->w.cb = e_end_resync_block;
 
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
 	list_add(&e->w.list, &mdev->sync_ee);
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 
 	atomic_add(data_size >> 9, &mdev->rs_sect_ev);
 	if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0)
@@ -1462,9 +1462,9 @@ static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_si
 
 	/* don't care for the reason here */
 	dev_err(DEV, "submit failed, triggering re-connect\n");
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
 	list_del(&e->w.list);
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 
 	drbd_free_ee(mdev, e);
 fail:
@@ -1498,9 +1498,9 @@ static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
 
 	sector = be64_to_cpu(p->sector);
 
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
 	req = find_request(mdev, &mdev->read_requests, p->block_id, sector, false, __func__);
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 	if (unlikely(!req))
 		return false;
 
@@ -1574,11 +1574,11 @@ static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 	/* we delete from the conflict detection hash _after_ we sent out the
 	 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
 	if (mdev->tconn->net_conf->two_primaries) {
-		spin_lock_irq(&mdev->req_lock);
+		spin_lock_irq(&mdev->tconn->req_lock);
 		D_ASSERT(!drbd_interval_empty(&e->i));
 		drbd_remove_interval(&mdev->epoch_entries, &e->i);
 		drbd_clear_interval(&e->i);
-		spin_unlock_irq(&mdev->req_lock);
+		spin_unlock_irq(&mdev->tconn->req_lock);
 	} else
 		D_ASSERT(drbd_interval_empty(&e->i));
 
@@ -1595,11 +1595,11 @@ static int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int u
 	D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C);
 	ok = drbd_send_ack(mdev, P_DISCARD_ACK, e);
 
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
 	D_ASSERT(!drbd_interval_empty(&e->i));
 	drbd_remove_interval(&mdev->epoch_entries, &e->i);
 	drbd_clear_interval(&e->i);
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 
 	dec_unacked(mdev);
 
@@ -1718,7 +1718,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 
 	/* I'm the receiver, I do hold a net_cnt reference. */
 	if (!mdev->tconn->net_conf->two_primaries) {
-		spin_lock_irq(&mdev->req_lock);
+		spin_lock_irq(&mdev->tconn->req_lock);
 	} else {
 		/* don't get the req_lock yet,
 		 * we may sleep in drbd_wait_peer_seq */
@@ -1765,7 +1765,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 		if (drbd_wait_peer_seq(mdev, be32_to_cpu(p->seq_num)))
 			goto out_interrupted;
 
-		spin_lock_irq(&mdev->req_lock);
+		spin_lock_irq(&mdev->tconn->req_lock);
 
 		drbd_insert_interval(&mdev->epoch_entries, &e->i);
 
@@ -1805,7 +1805,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 			e->w.cb = e_send_discard_ack;
 			list_add_tail(&e->w.list, &mdev->done_ee);
 
-			spin_unlock_irq(&mdev->req_lock);
+			spin_unlock_irq(&mdev->tconn->req_lock);
 
 			/* we could probably send that P_DISCARD_ACK ourselves,
 			 * but I don't like the receiver using the msock */
@@ -1820,13 +1820,13 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 				drbd_remove_interval(&mdev->epoch_entries, &e->i);
 				drbd_clear_interval(&e->i);
 
-				spin_unlock_irq(&mdev->req_lock);
+				spin_unlock_irq(&mdev->tconn->req_lock);
 
 				finish_wait(&mdev->misc_wait, &wait);
 				goto out_interrupted;
 			}
 
-			spin_unlock_irq(&mdev->req_lock);
+			spin_unlock_irq(&mdev->tconn->req_lock);
 			if (first) {
 				first = 0;
 				dev_alert(DEV, "Concurrent write! [W AFTERWARDS] "
@@ -1837,13 +1837,13 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 				D_ASSERT(have_unacked == 0);
 			}
 			schedule();
-			spin_lock_irq(&mdev->req_lock);
+			spin_lock_irq(&mdev->tconn->req_lock);
 		}
 		finish_wait(&mdev->misc_wait, &wait);
 	}
 
 	list_add(&e->w.list, &mdev->active_ee);
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 
 	switch (mdev->tconn->net_conf->wire_protocol) {
 	case DRBD_PROT_C:
@@ -1874,11 +1874,11 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 
 	/* don't care for the reason here */
 	dev_err(DEV, "submit failed, triggering re-connect\n");
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
 	list_del(&e->w.list);
 	drbd_remove_interval(&mdev->epoch_entries, &e->i);
 	drbd_clear_interval(&e->i);
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 	if (e->flags & EE_CALL_AL_COMPLETE_IO)
 		drbd_al_complete_io(mdev, e->i.sector);
 
@@ -2122,18 +2122,18 @@ submit_for_resync:
 
 submit:
 	inc_unacked(mdev);
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
 	list_add_tail(&e->w.list, &mdev->read_ee);
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 
 	if (drbd_submit_ee(mdev, e, READ, fault_type) == 0)
 		return true;
 
 	/* don't care for the reason here */
 	dev_err(DEV, "submit failed, triggering re-connect\n");
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
 	list_del(&e->w.list);
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 	/* no drbd_rs_complete_io(), we are dropping the connection anyways */
 
 out_free_e:
@@ -3183,10 +3183,10 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 		dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
 	}
 
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
  retry:
 	os = ns = mdev->state;
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 
 	/* peer says his disk is uptodate, while we think it is inconsistent,
 	 * and this happens while we think we have a sync going on. */
@@ -3270,7 +3270,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 		}
 	}
 
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
 	if (mdev->state.i != os.i)
 		goto retry;
 	clear_bit(CONSIDER_RESYNC, &mdev->flags);
@@ -3284,7 +3284,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 	    test_bit(NEW_CUR_UUID, &mdev->flags)) {
 		/* Do not allow tl_restart(RESEND) for a rebooted peer. We can only allow this
 		   for temporal network outages! */
-		spin_unlock_irq(&mdev->req_lock);
+		spin_unlock_irq(&mdev->tconn->req_lock);
 		dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
 		tl_clear(mdev);
 		drbd_uuid_new_current(mdev);
@@ -3294,7 +3294,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 	}
 	rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
 	ns = mdev->state;
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 
 	if (rv < SS_SUCCESS) {
 		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
@@ -3772,11 +3772,11 @@ static void drbd_disconnect(struct drbd_conf *mdev)
 	drbd_free_sock(mdev);
 
 	/* wait for current activity to cease. */
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
 	_drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
 	_drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
 	_drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 
 	/* We do not have data structures that would allow us to
 	 * get the rs_pending_cnt down to 0 again.
@@ -3828,7 +3828,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
 	if (mdev->state.role == R_PRIMARY && fp >= FP_RESOURCE && mdev->state.pdsk >= D_UNKNOWN)
 		drbd_try_outdate_peer_async(mdev);
 
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
 	os = mdev->state;
 	if (os.conn >= C_UNCONNECTED) {
 		/* Do not restart in case we are C_DISCONNECTING */
@@ -3836,7 +3836,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
 		ns.conn = C_UNCONNECTED;
 		rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
 	}
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 
 	if (os.conn == C_DISCONNECTING) {
 		wait_event(mdev->tconn->net_cnt_wait, atomic_read(&mdev->tconn->net_cnt) == 0);
@@ -4245,14 +4245,14 @@ validate_req_change_req_state(struct drbd_conf *mdev, u64 id, sector_t sector,
 	struct drbd_request *req;
 	struct bio_and_error m;
 
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
 	req = find_request(mdev, root, id, sector, missing_ok, func);
 	if (unlikely(!req)) {
-		spin_unlock_irq(&mdev->req_lock);
+		spin_unlock_irq(&mdev->tconn->req_lock);
 		return false;
 	}
 	__req_mod(req, what, &m);
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 
 	if (m.bio)
 		complete_master_bio(mdev, &m);
@@ -4518,9 +4518,9 @@ int drbd_asender(struct drbd_thread *thi)
 				goto reconnect;
 			/* to avoid race with newly queued ACKs */
 			set_bit(SIGNAL_ASENDER, &mdev->flags);
-			spin_lock_irq(&mdev->req_lock);
+			spin_lock_irq(&mdev->tconn->req_lock);
 			empty = list_empty(&mdev->done_ee);
-			spin_unlock_irq(&mdev->req_lock);
+			spin_unlock_irq(&mdev->tconn->req_lock);
 			/* new ack may have been queued right here,
 			 * but then there is also a signal pending,
 			 * and we start over... */
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index c871ef2414fa..74179f7986e1 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -120,7 +120,7 @@ static void queue_barrier(struct drbd_conf *mdev)
 	if (test_bit(CREATE_BARRIER, &mdev->flags))
 		return;
 
-	b = mdev->newest_tle;
+	b = mdev->tconn->newest_tle;
 	b->w.cb = w_send_barrier;
 	/* inc_ap_pending done here, so we won't
 	 * get imbalanced on connection loss.
@@ -144,7 +144,7 @@ static void _about_to_complete_local_write(struct drbd_conf *mdev,
 	 */
 	if (mdev->state.conn >= C_CONNECTED &&
 	    (s & RQ_NET_SENT) != 0 &&
-	    req->epoch == mdev->newest_tle->br_number)
+	    req->epoch == mdev->tconn->newest_tle->br_number)
 		queue_barrier(mdev);
 
 	/* we need to do the conflict detection stuff,
@@ -516,10 +516,10 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		 * just after it grabs the req_lock */
 		D_ASSERT(test_bit(CREATE_BARRIER, &mdev->flags) == 0);
 
-		req->epoch = mdev->newest_tle->br_number;
+		req->epoch = mdev->tconn->newest_tle->br_number;
 
 		/* increment size of current epoch */
-		mdev->newest_tle->n_writes++;
+		mdev->tconn->newest_tle->n_writes++;
 
 		/* queue work item to send data */
 		D_ASSERT(req->rq_state & RQ_NET_PENDING);
@@ -528,7 +528,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		drbd_queue_work(&mdev->tconn->data.work, &req->w);
 
 		/* close the epoch, in case it outgrew the limit */
-		if (mdev->newest_tle->n_writes >= mdev->tconn->net_conf->max_epoch_size)
+		if (mdev->tconn->newest_tle->n_writes >= mdev->tconn->net_conf->max_epoch_size)
 			queue_barrier(mdev);
 
 		break;
@@ -693,7 +693,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 			 * this is bad, because if the connection is lost now,
 			 * we won't be able to clean them up... */
 			dev_err(DEV, "FIXME (BARRIER_ACKED but pending)\n");
-			list_move(&req->tl_requests, &mdev->out_of_sequence_requests);
+			list_move(&req->tl_requests, &mdev->tconn->out_of_sequence_requests);
 		}
 		if ((req->rq_state & RQ_NET_MASK) != 0) {
 			req->rq_state |= RQ_NET_DONE;
@@ -834,7 +834,7 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns
 	 * spinlock, and grabbing the spinlock.
 	 * if we lost that race, we retry. */
 	if (rw == WRITE && (remote || send_oos) &&
-	    mdev->unused_spare_tle == NULL &&
+	    mdev->tconn->unused_spare_tle == NULL &&
 	    test_bit(CREATE_BARRIER, &mdev->flags)) {
 allocate_barrier:
 		b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_NOIO);
@@ -846,7 +846,7 @@ allocate_barrier:
 	}
 
 	/* GOOD, everything prepared, grab the spin_lock */
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irq(&mdev->tconn->req_lock);
 
 	if (is_susp(mdev->state)) {
 		/* If we got suspended, use the retry mechanism of
@@ -854,7 +854,7 @@ allocate_barrier:
 		   bio. In the next call to drbd_make_request
 		   we sleep in inc_ap_bio() */
 		ret = 1;
-		spin_unlock_irq(&mdev->req_lock);
+		spin_unlock_irq(&mdev->tconn->req_lock);
 		goto fail_free_complete;
 	}
 
@@ -867,21 +867,21 @@ allocate_barrier:
 			dev_warn(DEV, "lost connection while grabbing the req_lock!\n");
 		if (!(local || remote)) {
 			dev_err(DEV, "IO ERROR: neither local nor remote disk\n");
-			spin_unlock_irq(&mdev->req_lock);
+			spin_unlock_irq(&mdev->tconn->req_lock);
 			goto fail_free_complete;
 		}
 	}
 
-	if (b && mdev->unused_spare_tle == NULL) {
-		mdev->unused_spare_tle = b;
+	if (b && mdev->tconn->unused_spare_tle == NULL) {
+		mdev->tconn->unused_spare_tle = b;
 		b = NULL;
 	}
 	if (rw == WRITE && (remote || send_oos) &&
-	    mdev->unused_spare_tle == NULL &&
+	    mdev->tconn->unused_spare_tle == NULL &&
 	    test_bit(CREATE_BARRIER, &mdev->flags)) {
 		/* someone closed the current epoch
 		 * while we were grabbing the spinlock */
-		spin_unlock_irq(&mdev->req_lock);
+		spin_unlock_irq(&mdev->tconn->req_lock);
 		goto allocate_barrier;
 	}
 
@@ -899,10 +899,10 @@ allocate_barrier:
 	 * barrier packet. To get the write ordering right, we only have to
 	 * make sure that, if this is a write request and it triggered a
 	 * barrier packet, this request is queued within the same spinlock. */
-	if ((remote || send_oos) && mdev->unused_spare_tle &&
+	if ((remote || send_oos) && mdev->tconn->unused_spare_tle &&
 	    test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
-		_tl_add_barrier(mdev, mdev->unused_spare_tle);
-		mdev->unused_spare_tle = NULL;
+		_tl_add_barrier(mdev, mdev->tconn->unused_spare_tle);
+		mdev->tconn->unused_spare_tle = NULL;
 	} else {
 		D_ASSERT(!(remote && rw == WRITE &&
 			   test_bit(CREATE_BARRIER, &mdev->flags)));
@@ -934,7 +934,7 @@ allocate_barrier:
 	if (rw == WRITE && _req_conflicts(req))
 		goto fail_conflicting;
 
-	list_add_tail(&req->tl_requests, &mdev->newest_tle->requests);
+	list_add_tail(&req->tl_requests, &mdev->tconn->newest_tle->requests);
 
 	/* NOTE remote first: to get the concurrent write detection right,
 	 * we must register the request before start of local IO. */
@@ -975,7 +975,7 @@ allocate_barrier:
 		}
 	}
 
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 	kfree(b); /* if someone else has beaten us to it... */
 
 	if (local) {
@@ -1008,7 +1008,7 @@ fail_conflicting:
 	 * pretend that it was successfully served right now.
 	 */
 	_drbd_end_io_acct(mdev, req);
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irq(&mdev->tconn->req_lock);
 	if (remote)
 		dec_ap_pending(mdev);
 	/* THINK: do we want to fail it (-EIO), or pretend success?
@@ -1188,10 +1188,10 @@ void request_timer_fn(unsigned long data)
 	if (!et || mdev->state.conn < C_WF_REPORT_PARAMS)
 		return; /* Recurring timer stopped */
 
-	spin_lock_irq(&mdev->req_lock);
-	le = &mdev->oldest_tle->requests;
+	spin_lock_irq(&mdev->tconn->req_lock);
+	le = &mdev->tconn->oldest_tle->requests;
 	if (list_empty(le)) {
-		spin_unlock_irq(&mdev->req_lock);
+		spin_unlock_irq(&mdev->tconn->req_lock);
 		mod_timer(&mdev->request_timer, jiffies + et);
 		return;
 	}
@@ -1210,5 +1210,5 @@ void request_timer_fn(unsigned long data)
1210 mod_timer(&mdev->request_timer, req->start_time + et); 1210 mod_timer(&mdev->request_timer, req->start_time + et);
1211 } 1211 }
1212 1212
1213 spin_unlock_irq(&mdev->req_lock); 1213 spin_unlock_irq(&mdev->tconn->req_lock);
1214} 1214}
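The drbd_req.c hunks above all follow the same conversion: paths that used to take a per-device lock now reach the shared request lock through the device's connection back-pointer, and the transfer-log heads (newest_tle, oldest_tle, unused_spare_tle) are reached the same way. A condensed, illustrative sketch of that idiom follows; the helper name is invented and is not part of the patch:

	/* Illustrative only, not in the patch: queue a request on the newest
	 * transfer-log epoch under the per-connection request lock. */
	static void example_queue_in_transfer_log(struct drbd_conf *mdev,
						  struct drbd_request *req)
	{
		spin_lock_irq(&mdev->tconn->req_lock);
		list_add_tail(&req->tl_requests,
			      &mdev->tconn->newest_tle->requests);
		spin_unlock_irq(&mdev->tconn->req_lock);
	}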
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
index 9d75647cae8f..4b0858bf2866 100644
--- a/drivers/block/drbd/drbd_req.h
+++ b/drivers/block/drbd/drbd_req.h
@@ -305,9 +305,9 @@ static inline int req_mod(struct drbd_request *req,
305 struct bio_and_error m; 305 struct bio_and_error m;
306 int rv; 306 int rv;
307 307
308 spin_lock_irqsave(&mdev->req_lock, flags); 308 spin_lock_irqsave(&mdev->tconn->req_lock, flags);
309 rv = __req_mod(req, what, &m); 309 rv = __req_mod(req, what, &m);
310 spin_unlock_irqrestore(&mdev->req_lock, flags); 310 spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
311 311
312 if (m.bio) 312 if (m.bio)
313 complete_master_bio(mdev, &m); 313 complete_master_bio(mdev, &m);
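The req_mod() hunk keeps the existing locked/unlocked split: req_mod() takes the connection's req_lock with irqsave around __req_mod() and completes any resulting master bio itself, while callers that already hold the lock use _req_mod()/__req_mod() directly, as the drbd_worker.c hunks below do. A hedged usage sketch with an invented caller name:

	/* Hypothetical caller, for illustration only: it already runs under
	 * mdev->tconn->req_lock, so it uses the lock-held variant _req_mod()
	 * rather than req_mod(), which would take the same lock again. */
	static void example_cancel_with_lock_held(struct drbd_request *req)
	{
		_req_mod(req, READ_RETRY_REMOTE_CANCELED);
	}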
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index ec26df378845..671251af6bcf 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -85,14 +85,14 @@ void drbd_endio_read_sec_final(struct drbd_epoch_entry *e) __releases(local)
85 unsigned long flags = 0; 85 unsigned long flags = 0;
86 struct drbd_conf *mdev = e->mdev; 86 struct drbd_conf *mdev = e->mdev;
87 87
88 spin_lock_irqsave(&mdev->req_lock, flags); 88 spin_lock_irqsave(&mdev->tconn->req_lock, flags);
89 mdev->read_cnt += e->i.size >> 9; 89 mdev->read_cnt += e->i.size >> 9;
90 list_del(&e->w.list); 90 list_del(&e->w.list);
91 if (list_empty(&mdev->read_ee)) 91 if (list_empty(&mdev->read_ee))
92 wake_up(&mdev->ee_wait); 92 wake_up(&mdev->ee_wait);
93 if (test_bit(__EE_WAS_ERROR, &e->flags)) 93 if (test_bit(__EE_WAS_ERROR, &e->flags))
94 __drbd_chk_io_error(mdev, false); 94 __drbd_chk_io_error(mdev, false);
95 spin_unlock_irqrestore(&mdev->req_lock, flags); 95 spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
96 96
97 drbd_queue_work(&mdev->tconn->data.work, &e->w); 97 drbd_queue_work(&mdev->tconn->data.work, &e->w);
98 put_ldev(mdev); 98 put_ldev(mdev);
@@ -117,7 +117,7 @@ static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(lo
117 do_al_complete_io = e->flags & EE_CALL_AL_COMPLETE_IO; 117 do_al_complete_io = e->flags & EE_CALL_AL_COMPLETE_IO;
118 block_id = e->block_id; 118 block_id = e->block_id;
119 119
120 spin_lock_irqsave(&mdev->req_lock, flags); 120 spin_lock_irqsave(&mdev->tconn->req_lock, flags);
121 mdev->writ_cnt += e->i.size >> 9; 121 mdev->writ_cnt += e->i.size >> 9;
122 list_del(&e->w.list); /* has been on active_ee or sync_ee */ 122 list_del(&e->w.list); /* has been on active_ee or sync_ee */
123 list_add_tail(&e->w.list, &mdev->done_ee); 123 list_add_tail(&e->w.list, &mdev->done_ee);
@@ -134,7 +134,7 @@ static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(lo
134 134
135 if (test_bit(__EE_WAS_ERROR, &e->flags)) 135 if (test_bit(__EE_WAS_ERROR, &e->flags))
136 __drbd_chk_io_error(mdev, false); 136 __drbd_chk_io_error(mdev, false);
137 spin_unlock_irqrestore(&mdev->req_lock, flags); 137 spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
138 138
139 if (block_id == ID_SYNCER) 139 if (block_id == ID_SYNCER)
140 drbd_rs_complete_io(mdev, e_sector); 140 drbd_rs_complete_io(mdev, e_sector);
@@ -220,9 +220,9 @@ void drbd_endio_pri(struct bio *bio, int error)
220 req->private_bio = ERR_PTR(error); 220 req->private_bio = ERR_PTR(error);
221 221
222 /* not req_mod(), we need irqsave here! */ 222 /* not req_mod(), we need irqsave here! */
223 spin_lock_irqsave(&mdev->req_lock, flags); 223 spin_lock_irqsave(&mdev->tconn->req_lock, flags);
224 __req_mod(req, what, &m); 224 __req_mod(req, what, &m);
225 spin_unlock_irqrestore(&mdev->req_lock, flags); 225 spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
226 226
227 if (m.bio) 227 if (m.bio)
228 complete_master_bio(mdev, &m); 228 complete_master_bio(mdev, &m);
@@ -236,13 +236,13 @@ int w_read_retry_remote(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
236 * but try to WRITE the P_DATA_REPLY to the failed location, 236 * but try to WRITE the P_DATA_REPLY to the failed location,
237 * to give the disk the chance to relocate that block */ 237 * to give the disk the chance to relocate that block */
238 238
239 spin_lock_irq(&mdev->req_lock); 239 spin_lock_irq(&mdev->tconn->req_lock);
240 if (cancel || mdev->state.pdsk != D_UP_TO_DATE) { 240 if (cancel || mdev->state.pdsk != D_UP_TO_DATE) {
241 _req_mod(req, READ_RETRY_REMOTE_CANCELED); 241 _req_mod(req, READ_RETRY_REMOTE_CANCELED);
242 spin_unlock_irq(&mdev->req_lock); 242 spin_unlock_irq(&mdev->tconn->req_lock);
243 return 1; 243 return 1;
244 } 244 }
245 spin_unlock_irq(&mdev->req_lock); 245 spin_unlock_irq(&mdev->tconn->req_lock);
246 246
247 return w_send_read_req(mdev, w, 0); 247 return w_send_read_req(mdev, w, 0);
248} 248}
@@ -359,9 +359,9 @@ static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
359 goto defer; 359 goto defer;
360 360
361 e->w.cb = w_e_send_csum; 361 e->w.cb = w_e_send_csum;
362 spin_lock_irq(&mdev->req_lock); 362 spin_lock_irq(&mdev->tconn->req_lock);
363 list_add(&e->w.list, &mdev->read_ee); 363 list_add(&e->w.list, &mdev->read_ee);
364 spin_unlock_irq(&mdev->req_lock); 364 spin_unlock_irq(&mdev->tconn->req_lock);
365 365
366 atomic_add(size >> 9, &mdev->rs_sect_ev); 366 atomic_add(size >> 9, &mdev->rs_sect_ev);
367 if (drbd_submit_ee(mdev, e, READ, DRBD_FAULT_RS_RD) == 0) 367 if (drbd_submit_ee(mdev, e, READ, DRBD_FAULT_RS_RD) == 0)
@@ -371,9 +371,9 @@ static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
371 * because bio_add_page failed (probably broken lower level driver), 371 * because bio_add_page failed (probably broken lower level driver),
372 * retry may or may not help. 372 * retry may or may not help.
373 * If it does not, you may need to force disconnect. */ 373 * If it does not, you may need to force disconnect. */
374 spin_lock_irq(&mdev->req_lock); 374 spin_lock_irq(&mdev->tconn->req_lock);
375 list_del(&e->w.list); 375 list_del(&e->w.list);
376 spin_unlock_irq(&mdev->req_lock); 376 spin_unlock_irq(&mdev->tconn->req_lock);
377 377
378 drbd_free_ee(mdev, e); 378 drbd_free_ee(mdev, e);
379defer: 379defer:
@@ -793,7 +793,7 @@ int drbd_resync_finished(struct drbd_conf *mdev)
793 793
794 ping_peer(mdev); 794 ping_peer(mdev);
795 795
796 spin_lock_irq(&mdev->req_lock); 796 spin_lock_irq(&mdev->tconn->req_lock);
797 os = mdev->state; 797 os = mdev->state;
798 798
799 verify_done = (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T); 799 verify_done = (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T);
@@ -882,7 +882,7 @@ int drbd_resync_finished(struct drbd_conf *mdev)
882 882
883 _drbd_set_state(mdev, ns, CS_VERBOSE, NULL); 883 _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
884out_unlock: 884out_unlock:
885 spin_unlock_irq(&mdev->req_lock); 885 spin_unlock_irq(&mdev->tconn->req_lock);
886 put_ldev(mdev); 886 put_ldev(mdev);
887out: 887out:
888 mdev->rs_total = 0; 888 mdev->rs_total = 0;
@@ -907,9 +907,9 @@ static void move_to_net_ee_or_free(struct drbd_conf *mdev, struct drbd_epoch_ent
907 int i = (e->i.size + PAGE_SIZE -1) >> PAGE_SHIFT; 907 int i = (e->i.size + PAGE_SIZE -1) >> PAGE_SHIFT;
908 atomic_add(i, &mdev->pp_in_use_by_net); 908 atomic_add(i, &mdev->pp_in_use_by_net);
909 atomic_sub(i, &mdev->pp_in_use); 909 atomic_sub(i, &mdev->pp_in_use);
910 spin_lock_irq(&mdev->req_lock); 910 spin_lock_irq(&mdev->tconn->req_lock);
911 list_add_tail(&e->w.list, &mdev->net_ee); 911 list_add_tail(&e->w.list, &mdev->net_ee);
912 spin_unlock_irq(&mdev->req_lock); 912 spin_unlock_irq(&mdev->tconn->req_lock);
913 wake_up(&drbd_pp_wait); 913 wake_up(&drbd_pp_wait);
914 } else 914 } else
915 drbd_free_ee(mdev, e); 915 drbd_free_ee(mdev, e);
@@ -1210,10 +1210,10 @@ int w_send_barrier(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1210 * actually, this race was harmless, since we only try to send the 1210 * actually, this race was harmless, since we only try to send the
1211 * barrier packet here, and otherwise do nothing with the object. 1211 * barrier packet here, and otherwise do nothing with the object.
1212 * but compare with the head of w_clear_epoch */ 1212 * but compare with the head of w_clear_epoch */
1213 spin_lock_irq(&mdev->req_lock); 1213 spin_lock_irq(&mdev->tconn->req_lock);
1214 if (w->cb != w_send_barrier || mdev->state.conn < C_CONNECTED) 1214 if (w->cb != w_send_barrier || mdev->state.conn < C_CONNECTED)
1215 cancel = 1; 1215 cancel = 1;
1216 spin_unlock_irq(&mdev->req_lock); 1216 spin_unlock_irq(&mdev->tconn->req_lock);
1217 if (cancel) 1217 if (cancel)
1218 return 1; 1218 return 1;
1219 1219
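One detail worth keeping in mind across the drbd_worker.c hunks: the lock that moved is the same, but the acquisition variant still depends on the calling context. Completion paths such as drbd_endio_read_sec_final(), drbd_endio_write_sec_final() and drbd_endio_pri() can run with interrupts already disabled (hence the "we need irqsave here!" comment), so they keep spin_lock_irqsave()/spin_unlock_irqrestore(); worker callbacks such as w_read_retry_remote(), read_for_csum() and w_send_barrier() run in process context and keep the plain spin_lock_irq() form. A minimal, illustrative sketch of the two patterns; the example_* functions are invented:

	/* Illustrative only. Completion/IRQ context: the interrupt state is
	 * unknown, so it is saved and restored around the critical section. */
	static void example_from_endio(struct drbd_conf *mdev)
	{
		unsigned long flags;

		spin_lock_irqsave(&mdev->tconn->req_lock, flags);
		/* ... lists and counters protected by the connection lock ... */
		spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
	}

	/* Illustrative only. Worker (process) context: interrupts are known to
	 * be enabled here, so disabling and re-enabling them is sufficient. */
	static void example_from_worker(struct drbd_conf *mdev)
	{
		spin_lock_irq(&mdev->tconn->req_lock);
		/* ... */
		spin_unlock_irq(&mdev->tconn->req_lock);
	}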