author    Andreas Gruenbacher <agruen@linbit.com>    2011-07-07 08:19:42 -0400
committer Philipp Reisner <philipp.reisner@linbit.com>    2014-02-17 10:46:46 -0500
commit    0500813fe0c9a617ace86d91344e36839050dad6 (patch)
tree      e866ddce790b671cea8dd2034a3de6f08d50f1ff
parent    3ab706fe52a5cc12b021d7861943581db766a171 (diff)
drbd: Move conf_mutex from connection to resource
Signed-off-by: Andreas Gruenbacher <agruen@linbit.com>
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
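For context: conf_update is the mutex DRBD holds on the writer side of its RCU-managed configuration objects (net_conf, disk_conf). After this patch it lives in struct drbd_resource, so one mutex serializes configuration updates across all connections and devices of a resource. A minimal sketch of that writer-side cycle, assuming the post-patch field layout; update_disk_size() and its new_size parameter are invented for this example:

	/* Writer-side sketch of the ready-copy-update cycle, now under
	 * resource->conf_update.  update_disk_size() and new_size are
	 * hypothetical; error paths are trimmed.  Mirrors the pattern in
	 * the drbd_adm_resize() hunk below. */
	static int update_disk_size(struct drbd_device *device, sector_t new_size)
	{
		struct disk_conf *new_disk_conf, *old_disk_conf;

		new_disk_conf = kmalloc(sizeof(*new_disk_conf), GFP_KERNEL);
		if (!new_disk_conf)
			return -ENOMEM;

		mutex_lock(&device->resource->conf_update);
		old_disk_conf = device->ldev->disk_conf;
		*new_disk_conf = *old_disk_conf;	/* copy ... */
		new_disk_conf->disk_size = new_size;	/* ... modify ... */
		rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
		mutex_unlock(&device->resource->conf_update);

		synchronize_rcu();	/* wait out rcu_read_lock() readers */
		kfree(old_disk_conf);	/* ... then free the old copy */
		return 0;
	}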
 drivers/block/drbd/drbd_int.h      |  15
 drivers/block/drbd/drbd_main.c     |  36
 drivers/block/drbd/drbd_nl.c       |  50
 drivers/block/drbd/drbd_receiver.c | 116
 drivers/block/drbd/drbd_req.c      |  18
 drivers/block/drbd/drbd_req.h      |   4
 drivers/block/drbd/drbd_state.c    |  48
 drivers/block/drbd/drbd_worker.c   |  34
 8 files changed, 160 insertions(+), 161 deletions(-)
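The same consolidation applies to the transfer-log spinlock: req_lock also moves from struct drbd_connection to struct drbd_resource, and every spin_lock_irq(&connection->req_lock) or spin_lock_irq(&first_peer_device(device)->connection->req_lock) in the hunks below becomes a lock on resource->req_lock, reachable from either the connection or the device. An illustrative reader under the relocated lock; the helper itself is hypothetical, while transfer_log, tl_requests and req_lock are real fields touched by this patch:

	/* Hypothetical helper: walk the transfer log under the
	 * resource-wide req_lock.  count_transfer_log() is not in the
	 * tree; it only shows the post-patch locking path. */
	static unsigned int count_transfer_log(struct drbd_connection *connection)
	{
		struct drbd_request *req;
		unsigned int n = 0;

		spin_lock_irq(&connection->resource->req_lock);
		list_for_each_entry(req, &connection->transfer_log, tl_requests)
			n++;
		spin_unlock_irq(&connection->resource->req_lock);

		return n;
	}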
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 34ba7439abe1..3f379ff779b2 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -518,7 +518,7 @@ struct drbd_backing_dev {
 	struct block_device *backing_bdev;
 	struct block_device *md_bdev;
 	struct drbd_md md;
-	struct disk_conf *disk_conf; /* RCU, for updates: first_peer_device(device)->connection->conf_update */
+	struct disk_conf *disk_conf; /* RCU, for updates: resource->conf_update */
 	sector_t known_size; /* last known size of that backing device */
 };
 
@@ -578,6 +578,8 @@ struct drbd_resource {
 	struct list_head connections;
 	struct list_head resources;
 	struct res_opts res_opts;
+	struct mutex conf_update;	/* mutex for ready-copy-update of net_conf and disk_conf */
+	spinlock_t req_lock;
 };
 
 struct drbd_connection {
@@ -594,7 +596,6 @@ struct drbd_connection {
 
 	unsigned long flags;
 	struct net_conf *net_conf;	/* content protected by rcu */
-	struct mutex conf_update;	/* mutex for ready-copy-update of net_conf and disk_conf */
 	wait_queue_head_t ping_wait;	/* Woken upon reception of a ping, and a state change */
 
 	struct sockaddr_storage my_addr;
@@ -608,8 +609,6 @@ struct drbd_connection {
 	unsigned long last_received;	/* in jiffies, either socket */
 	unsigned int ko_count;
 
-	spinlock_t req_lock;
-
 	struct list_head transfer_log;	/* all requests not yet fully processed */
 
 	struct crypto_hash *cram_hmac_tfm;
@@ -1595,9 +1594,9 @@ static inline void drbd_chk_io_error_(struct drbd_device *device,
 {
 	if (error) {
 		unsigned long flags;
-		spin_lock_irqsave(&first_peer_device(device)->connection->req_lock, flags);
+		spin_lock_irqsave(&device->resource->req_lock, flags);
 		__drbd_chk_io_error_(device, forcedetach, where);
-		spin_unlock_irqrestore(&first_peer_device(device)->connection->req_lock, flags);
+		spin_unlock_irqrestore(&device->resource->req_lock, flags);
 	}
 }
 
@@ -2069,11 +2068,11 @@ static inline bool inc_ap_bio_cond(struct drbd_device *device)
 {
 	bool rv = false;
 
-	spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_lock_irq(&device->resource->req_lock);
 	rv = may_inc_ap_bio(device);
 	if (rv)
 		atomic_inc(&device->ap_bio_cnt);
-	spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_unlock_irq(&device->resource->req_lock);
 
 	return rv;
 }
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 54df98fa2881..fc439605aa69 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -198,7 +198,7 @@ void tl_release(struct drbd_connection *connection, unsigned int barrier_nr,
 	int expect_epoch = 0;
 	int expect_size = 0;
 
-	spin_lock_irq(&connection->req_lock);
+	spin_lock_irq(&connection->resource->req_lock);
 
 	/* find oldest not yet barrier-acked write request,
 	 * count writes in its epoch. */
@@ -255,12 +255,12 @@ void tl_release(struct drbd_connection *connection, unsigned int barrier_nr,
 			break;
 		_req_mod(req, BARRIER_ACKED);
 	}
-	spin_unlock_irq(&connection->req_lock);
+	spin_unlock_irq(&connection->resource->req_lock);
 
 	return;
 
 bail:
-	spin_unlock_irq(&connection->req_lock);
+	spin_unlock_irq(&connection->resource->req_lock);
 	conn_request_state(connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
 }
 
@@ -284,9 +284,9 @@ void _tl_restart(struct drbd_connection *connection, enum drbd_req_event what)
 
 void tl_restart(struct drbd_connection *connection, enum drbd_req_event what)
 {
-	spin_lock_irq(&connection->req_lock);
+	spin_lock_irq(&connection->resource->req_lock);
 	_tl_restart(connection, what);
-	spin_unlock_irq(&connection->req_lock);
+	spin_unlock_irq(&connection->resource->req_lock);
 }
 
 /**
@@ -311,7 +311,7 @@ void tl_abort_disk_io(struct drbd_device *device)
 	struct drbd_connection *connection = first_peer_device(device)->connection;
 	struct drbd_request *req, *r;
 
-	spin_lock_irq(&connection->req_lock);
+	spin_lock_irq(&connection->resource->req_lock);
 	list_for_each_entry_safe(req, r, &connection->transfer_log, tl_requests) {
 		if (!(req->rq_state & RQ_LOCAL_PENDING))
 			continue;
@@ -319,7 +319,7 @@ void tl_abort_disk_io(struct drbd_device *device)
 			continue;
 		_req_mod(req, ABORT_DISK_IO);
 	}
-	spin_unlock_irq(&connection->req_lock);
+	spin_unlock_irq(&connection->resource->req_lock);
 }
 
 static int drbd_thread_setup(void *arg)
@@ -1836,7 +1836,7 @@ static int drbd_open(struct block_device *bdev, fmode_t mode)
 	int rv = 0;
 
 	mutex_lock(&drbd_main_mutex);
-	spin_lock_irqsave(&first_peer_device(device)->connection->req_lock, flags);
+	spin_lock_irqsave(&device->resource->req_lock, flags);
 	/* to have a stable device->state.role
 	 * and no race with updating open_cnt */
 
@@ -1849,7 +1849,7 @@ static int drbd_open(struct block_device *bdev, fmode_t mode)
 
 	if (!rv)
 		device->open_cnt++;
-	spin_unlock_irqrestore(&first_peer_device(device)->connection->req_lock, flags);
+	spin_unlock_irqrestore(&device->resource->req_lock, flags);
 	mutex_unlock(&drbd_main_mutex);
 
 	return rv;
@@ -2546,6 +2546,8 @@ struct drbd_resource *drbd_create_resource(const char *name)
 	idr_init(&resource->devices);
 	INIT_LIST_HEAD(&resource->connections);
 	list_add_tail_rcu(&resource->resources, &drbd_resources);
+	mutex_init(&resource->conf_update);
+	spin_lock_init(&resource->req_lock);
 	return resource;
 }
 
@@ -2588,8 +2590,6 @@ struct drbd_connection *conn_create(const char *name, struct res_opts *res_opts)
 
 	connection->cstate = C_STANDALONE;
 	mutex_init(&connection->cstate_mutex);
-	spin_lock_init(&connection->req_lock);
-	mutex_init(&connection->conf_update);
 	init_waitqueue_head(&connection->ping_wait);
 	idr_init(&connection->peer_devices);
 
@@ -2720,7 +2720,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_resource *resource, unsigned i
 	blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
 	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
 	blk_queue_merge_bvec(q, drbd_merge_bvec);
-	q->queue_lock = &connection->req_lock;
+	q->queue_lock = &resource->req_lock;
 
 	device->md_io_page = alloc_page(GFP_KERNEL);
 	if (!device->md_io_page)
@@ -3281,14 +3281,14 @@ int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev)
 
 	rv = NO_ERROR;
 
-	spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_lock_irq(&device->resource->req_lock);
 	if (device->state.conn < C_CONNECTED) {
 		unsigned int peer;
 		peer = be32_to_cpu(buffer->la_peer_max_bio_size);
 		peer = max(peer, DRBD_MAX_BIO_SIZE_SAFE);
 		device->peer_max_bio_size = peer;
 	}
-	spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_unlock_irq(&device->resource->req_lock);
 
  err:
 	drbd_md_put_buffer(device);
@@ -3577,13 +3577,13 @@ void drbd_queue_bitmap_io(struct drbd_device *device,
 	device->bm_io_work.why = why;
 	device->bm_io_work.flags = flags;
 
-	spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_lock_irq(&device->resource->req_lock);
 	set_bit(BITMAP_IO, &device->flags);
 	if (atomic_read(&device->ap_bio_cnt) == 0) {
 		if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
 			drbd_queue_work(&first_peer_device(device)->connection->sender_work, &device->bm_io_work.w);
 	}
-	spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_unlock_irq(&device->resource->req_lock);
 }
 
 /**
@@ -3751,10 +3751,10 @@ int drbd_wait_misc(struct drbd_device *device, struct drbd_interval *i)
 	/* Indicate to wake up device->misc_wait on progress. */
 	i->waiting = true;
 	prepare_to_wait(&device->misc_wait, &wait, TASK_INTERRUPTIBLE);
-	spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_unlock_irq(&device->resource->req_lock);
 	timeout = schedule_timeout(timeout);
 	finish_wait(&device->misc_wait, &wait);
-	spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_lock_irq(&device->resource->req_lock);
 	if (!timeout || device->state.conn < C_CONNECTED)
 		return -ETIMEDOUT;
 	if (signal_pending(current))
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 664e913cef43..684be38932e3 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -443,9 +443,9 @@ bool conn_try_outdate_peer(struct drbd_connection *connection)
 		return false;
 	}
 
-	spin_lock_irq(&connection->req_lock);
+	spin_lock_irq(&connection->resource->req_lock);
 	connect_cnt = connection->connect_cnt;
-	spin_unlock_irq(&connection->req_lock);
+	spin_unlock_irq(&connection->resource->req_lock);
 
 	fp = highest_fencing_policy(connection);
 	switch (fp) {
@@ -510,7 +510,7 @@ bool conn_try_outdate_peer(struct drbd_connection *connection)
 	   conn_request_state(connection, mask, val, CS_VERBOSE);
 	   here, because we might were able to re-establish the connection in the
 	   meantime. */
-	spin_lock_irq(&connection->req_lock);
+	spin_lock_irq(&connection->resource->req_lock);
 	if (connection->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &connection->flags)) {
 		if (connection->connect_cnt != connect_cnt)
 			/* In case the connection was established and droped
@@ -519,7 +519,7 @@ bool conn_try_outdate_peer(struct drbd_connection *connection)
 		else
 			_conn_request_state(connection, mask, val, CS_VERBOSE);
 	}
-	spin_unlock_irq(&connection->req_lock);
+	spin_unlock_irq(&connection->resource->req_lock);
 
 	return conn_highest_pdsk(connection) <= D_OUTDATED;
 }
@@ -654,11 +654,11 @@ drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force)
 			put_ldev(device);
 		}
 	} else {
-		mutex_lock(&first_peer_device(device)->connection->conf_update);
+		mutex_lock(&device->resource->conf_update);
 		nc = first_peer_device(device)->connection->net_conf;
 		if (nc)
 			nc->discard_my_data = 0; /* without copy; single bit op is atomic */
-		mutex_unlock(&first_peer_device(device)->connection->conf_update);
+		mutex_unlock(&device->resource->conf_update);
 
 		set_disk_ro(device->vdisk, false);
 		if (get_ldev(device)) {
@@ -1188,10 +1188,10 @@ static void conn_reconfig_start(struct drbd_connection *connection)
 static void conn_reconfig_done(struct drbd_connection *connection)
 {
 	bool stop_threads;
-	spin_lock_irq(&connection->req_lock);
+	spin_lock_irq(&connection->resource->req_lock);
 	stop_threads = conn_all_vols_unconf(connection) &&
 		connection->cstate == C_STANDALONE;
-	spin_unlock_irq(&connection->req_lock);
+	spin_unlock_irq(&connection->resource->req_lock);
 	if (stop_threads) {
 		/* asender is implicitly stopped by receiver
 		 * in conn_disconnect() */
@@ -1211,10 +1211,10 @@ static void drbd_suspend_al(struct drbd_device *device)
 	}
 
 	drbd_al_shrink(device);
-	spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_lock_irq(&device->resource->req_lock);
 	if (device->state.conn < C_CONNECTED)
 		s = !test_and_set_bit(AL_SUSPENDED, &device->flags);
-	spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_unlock_irq(&device->resource->req_lock);
 	lc_unlock(device->act_log);
 
 	if (s)
@@ -1285,7 +1285,7 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
 		goto fail;
 	}
 
-	mutex_lock(&first_peer_device(device)->connection->conf_update);
+	mutex_lock(&device->resource->conf_update);
 	old_disk_conf = device->ldev->disk_conf;
 	*new_disk_conf = *old_disk_conf;
 	if (should_set_defaults(info))
@@ -1348,7 +1348,7 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
 		rcu_assign_pointer(device->rs_plan_s, new_plan);
 	}
 
-	mutex_unlock(&first_peer_device(device)->connection->conf_update);
+	mutex_unlock(&device->resource->conf_update);
 
 	if (new_disk_conf->al_updates)
 		device->ldev->md.flags &= ~MDF_AL_DISABLED;
@@ -1374,7 +1374,7 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
 	goto success;
 
 fail_unlock:
-	mutex_unlock(&first_peer_device(device)->connection->conf_update);
+	mutex_unlock(&device->resource->conf_update);
  fail:
 	kfree(new_disk_conf);
 	kfree(new_plan);
@@ -1724,7 +1724,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
 	if (_drbd_bm_total_weight(device) == drbd_bm_bits(device))
 		drbd_suspend_al(device); /* IO is still suspended here... */
 
-	spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_lock_irq(&device->resource->req_lock);
 	os = drbd_read_state(device);
 	ns = os;
 	/* If MDF_CONSISTENT is not set go into inconsistent state,
@@ -1776,7 +1776,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
 	}
 
 	rv = _drbd_set_state(device, ns, CS_VERBOSE, NULL);
-	spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_unlock_irq(&device->resource->req_lock);
 
 	if (rv < SS_SUCCESS)
 		goto force_diskless_dec;
@@ -2077,7 +2077,7 @@ int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
 	conn_reconfig_start(connection);
 
 	mutex_lock(&connection->data.mutex);
-	mutex_lock(&connection->conf_update);
+	mutex_lock(&connection->resource->conf_update);
 	old_net_conf = connection->net_conf;
 
 	if (!old_net_conf) {
@@ -2141,7 +2141,7 @@ int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
 	crypto_free_hash(connection->cram_hmac_tfm);
 	connection->cram_hmac_tfm = crypto.cram_hmac_tfm;
 
-	mutex_unlock(&connection->conf_update);
+	mutex_unlock(&connection->resource->conf_update);
 	mutex_unlock(&connection->data.mutex);
 	synchronize_rcu();
 	kfree(old_net_conf);
@@ -2152,7 +2152,7 @@ int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
 	goto done;
 
  fail:
-	mutex_unlock(&connection->conf_update);
+	mutex_unlock(&connection->resource->conf_update);
 	mutex_unlock(&connection->data.mutex);
 	free_crypto(&crypto);
 	kfree(new_net_conf);
@@ -2243,11 +2243,11 @@ int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
 
 	conn_flush_workqueue(connection);
 
-	mutex_lock(&connection->conf_update);
+	mutex_lock(&adm_ctx.resource->conf_update);
 	old_net_conf = connection->net_conf;
 	if (old_net_conf) {
 		retcode = ERR_NET_CONFIGURED;
-		mutex_unlock(&connection->conf_update);
+		mutex_unlock(&adm_ctx.resource->conf_update);
 		goto fail;
 	}
 	rcu_assign_pointer(connection->net_conf, new_net_conf);
@@ -2263,7 +2263,7 @@ int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
 	connection->peer_addr_len = nla_len(adm_ctx.peer_addr);
 	memcpy(&connection->peer_addr, nla_data(adm_ctx.peer_addr), connection->peer_addr_len);
 
-	mutex_unlock(&connection->conf_update);
+	mutex_unlock(&adm_ctx.resource->conf_update);
 
 	rcu_read_lock();
 	idr_for_each_entry(&connection->peer_devices, peer_device, i) {
@@ -2486,12 +2486,12 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
 	device->ldev->known_size = drbd_get_capacity(device->ldev->backing_bdev);
 
 	if (new_disk_conf) {
-		mutex_lock(&first_peer_device(device)->connection->conf_update);
+		mutex_lock(&device->resource->conf_update);
 		old_disk_conf = device->ldev->disk_conf;
 		*new_disk_conf = *old_disk_conf;
 		new_disk_conf->disk_size = (sector_t)rs.resize_size;
 		rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
-		mutex_unlock(&first_peer_device(device)->connection->conf_update);
+		mutex_unlock(&device->resource->conf_update);
 		synchronize_rcu();
 		kfree(old_disk_conf);
 	}
@@ -3248,10 +3248,10 @@ int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
 			drbd_send_uuids_skip_initial_sync(device);
 			_drbd_uuid_set(device, UI_BITMAP, 0);
 			drbd_print_uuids(device, "cleared bitmap UUID");
-			spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+			spin_lock_irq(&device->resource->req_lock);
 			_drbd_set_state(_NS2(device, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
 					CS_VERBOSE, NULL);
-			spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+			spin_unlock_irq(&device->resource->req_lock);
 		}
 	}
 
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 761b15461cff..5d9e5cc3feeb 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -221,9 +221,9 @@ static void drbd_kick_lo_and_reclaim_net(struct drbd_device *device)
 	LIST_HEAD(reclaimed);
 	struct drbd_peer_request *peer_req, *t;
 
-	spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_lock_irq(&device->resource->req_lock);
 	reclaim_finished_net_peer_reqs(device, &reclaimed);
-	spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_unlock_irq(&device->resource->req_lock);
 
 	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
 		drbd_free_net_peer_req(device, peer_req);
@@ -288,7 +288,7 @@ struct page *drbd_alloc_pages(struct drbd_device *device, unsigned int number,
 }
 
 /* Must not be used from irq, as that may deadlock: see drbd_alloc_pages.
- * Is also used from inside an other spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+ * Is also used from inside an other spin_lock_irq(&resource->req_lock);
  * Either links the page chain back to the global pool,
  * or returns all pages to the system. */
 static void drbd_free_pages(struct drbd_device *device, struct page *page, int is_net)
@@ -396,9 +396,9 @@ int drbd_free_peer_reqs(struct drbd_device *device, struct list_head *list)
 	int count = 0;
 	int is_net = list == &device->net_ee;
 
-	spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_lock_irq(&device->resource->req_lock);
 	list_splice_init(list, &work_list);
-	spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_unlock_irq(&device->resource->req_lock);
 
 	list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
 		__drbd_free_peer_req(device, peer_req, is_net);
@@ -417,10 +417,10 @@ static int drbd_finish_peer_reqs(struct drbd_device *device)
 	struct drbd_peer_request *peer_req, *t;
 	int err = 0;
 
-	spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_lock_irq(&device->resource->req_lock);
 	reclaim_finished_net_peer_reqs(device, &reclaimed);
 	list_splice_init(&device->done_ee, &work_list);
-	spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_unlock_irq(&device->resource->req_lock);
 
 	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
 		drbd_free_net_peer_req(device, peer_req);
@@ -452,19 +452,19 @@ static void _drbd_wait_ee_list_empty(struct drbd_device *device,
 	 * and calling prepare_to_wait in the fast path */
 	while (!list_empty(head)) {
 		prepare_to_wait(&device->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
-		spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+		spin_unlock_irq(&device->resource->req_lock);
 		io_schedule();
 		finish_wait(&device->ee_wait, &wait);
-		spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+		spin_lock_irq(&device->resource->req_lock);
 	}
 }
 
 static void drbd_wait_ee_list_empty(struct drbd_device *device,
 				    struct list_head *head)
 {
-	spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_lock_irq(&device->resource->req_lock);
 	_drbd_wait_ee_list_empty(device, head);
-	spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_unlock_irq(&device->resource->req_lock);
 }
 
 static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flags)
470static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flags) 470static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flags)
@@ -1072,13 +1072,13 @@ randomize:
 
 	drbd_thread_start(&connection->asender);
 
-	mutex_lock(&connection->conf_update);
+	mutex_lock(&connection->resource->conf_update);
 	/* The discard_my_data flag is a single-shot modifier to the next
 	 * connection attempt, the handshake of which is now well underway.
 	 * No need for rcu style copying of the whole struct
 	 * just to clear a single value. */
 	connection->net_conf->discard_my_data = 0;
-	mutex_unlock(&connection->conf_update);
+	mutex_unlock(&connection->resource->conf_update);
 
 	return h;
 
@@ -1692,9 +1692,9 @@ static int recv_resync_read(struct drbd_device *device, sector_t sector, int dat
 
 	peer_req->w.cb = e_end_resync_block;
 
-	spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_lock_irq(&device->resource->req_lock);
 	list_add(&peer_req->w.list, &device->sync_ee);
-	spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_unlock_irq(&device->resource->req_lock);
 
 	atomic_add(data_size >> 9, &device->rs_sect_ev);
 	if (drbd_submit_peer_request(device, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
@@ -1702,9 +1702,9 @@ static int recv_resync_read(struct drbd_device *device, sector_t sector, int dat
 
 	/* don't care for the reason here */
 	drbd_err(device, "submit failed, triggering re-connect\n");
-	spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_lock_irq(&device->resource->req_lock);
 	list_del(&peer_req->w.list);
-	spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_unlock_irq(&device->resource->req_lock);
 
 	drbd_free_peer_req(device, peer_req);
 fail:
@@ -1743,9 +1743,9 @@ static int receive_DataReply(struct drbd_connection *connection, struct packet_i
 
 	sector = be64_to_cpu(p->sector);
 
-	spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_lock_irq(&device->resource->req_lock);
 	req = find_request(device, &device->read_requests, p->block_id, sector, false, __func__);
-	spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_unlock_irq(&device->resource->req_lock);
 	if (unlikely(!req))
 		return -EIO;
 
@@ -1844,12 +1844,12 @@ static int e_end_block(struct drbd_work *w, int cancel)
 	/* we delete from the conflict detection hash _after_ we sent out the
 	 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
 	if (peer_req->flags & EE_IN_INTERVAL_TREE) {
-		spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+		spin_lock_irq(&device->resource->req_lock);
 		D_ASSERT(device, !drbd_interval_empty(&peer_req->i));
 		drbd_remove_epoch_entry_interval(device, peer_req);
 		if (peer_req->flags & EE_RESTART_REQUESTS)
 			restart_conflicting_writes(device, sector, peer_req->i.size);
-		spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+		spin_unlock_irq(&device->resource->req_lock);
 	} else
 		D_ASSERT(device, drbd_interval_empty(&peer_req->i));
 
@@ -1925,7 +1925,7 @@ static bool overlapping_resync_write(struct drbd_device *device, struct drbd_pee
 	struct drbd_peer_request *rs_req;
 	bool rv = 0;
 
-	spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_lock_irq(&device->resource->req_lock);
 	list_for_each_entry(rs_req, &device->sync_ee, w.list) {
 		if (overlaps(peer_req->i.sector, peer_req->i.size,
 			     rs_req->i.sector, rs_req->i.size)) {
@@ -1933,7 +1933,7 @@ static bool overlapping_resync_write(struct drbd_device *device, struct drbd_pee
 			break;
 		}
 	}
-	spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_unlock_irq(&device->resource->req_lock);
 
 	return rv;
 }
@@ -2034,10 +2034,10 @@ static void fail_postponed_requests(struct drbd_device *device, sector_t sector,
 			continue;
 		req->rq_state &= ~RQ_POSTPONED;
 		__req_mod(req, NEG_ACKED, &m);
-		spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+		spin_unlock_irq(&device->resource->req_lock);
 		if (m.bio)
 			complete_master_bio(device, &m);
-		spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+		spin_lock_irq(&device->resource->req_lock);
 		goto repeat;
 	}
 }
@@ -2218,10 +2218,10 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
 		err = wait_for_and_update_peer_seq(device, peer_seq);
 		if (err)
 			goto out_interrupted;
-		spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+		spin_lock_irq(&device->resource->req_lock);
 		err = handle_write_conflicts(device, peer_req);
 		if (err) {
-			spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+			spin_unlock_irq(&device->resource->req_lock);
 			if (err == -ENOENT) {
 				put_ldev(device);
 				return 0;
@@ -2230,10 +2230,10 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
 		}
 	} else {
 		update_peer_seq(device, peer_seq);
-		spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+		spin_lock_irq(&device->resource->req_lock);
 	}
 	list_add(&peer_req->w.list, &device->active_ee);
-	spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_unlock_irq(&device->resource->req_lock);
 
 	if (device->state.conn == C_SYNC_TARGET)
 		wait_event(device->ee_wait, !overlapping_resync_write(device, peer_req));
@@ -2278,10 +2278,10 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
 
 	/* don't care for the reason here */
 	drbd_err(device, "submit failed, triggering re-connect\n");
-	spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_lock_irq(&device->resource->req_lock);
 	list_del(&peer_req->w.list);
 	drbd_remove_epoch_entry_interval(device, peer_req);
-	spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_unlock_irq(&device->resource->req_lock);
 	if (peer_req->flags & EE_CALL_AL_COMPLETE_IO)
 		drbd_al_complete_io(device, &peer_req->i);
 
@@ -2532,18 +2532,18 @@ submit_for_resync:
 
 submit:
 	inc_unacked(device);
-	spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_lock_irq(&device->resource->req_lock);
 	list_add_tail(&peer_req->w.list, &device->read_ee);
-	spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_unlock_irq(&device->resource->req_lock);
 
 	if (drbd_submit_peer_request(device, peer_req, READ, fault_type) == 0)
 		return 0;
 
 	/* don't care for the reason here */
 	drbd_err(device, "submit failed, triggering re-connect\n");
-	spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_lock_irq(&device->resource->req_lock);
 	list_del(&peer_req->w.list);
-	spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_unlock_irq(&device->resource->req_lock);
 	/* no drbd_rs_complete_io(), we are dropping the connection anyways */
 
 out_free_e:
@@ -3221,7 +3221,7 @@ static int receive_protocol(struct drbd_connection *connection, struct packet_in
 	}
 
 	mutex_lock(&connection->data.mutex);
-	mutex_lock(&connection->conf_update);
+	mutex_lock(&connection->resource->conf_update);
 	old_net_conf = connection->net_conf;
 	*new_net_conf = *old_net_conf;
 
@@ -3232,7 +3232,7 @@ static int receive_protocol(struct drbd_connection *connection, struct packet_in
 	new_net_conf->two_primaries = p_two_primaries;
 
 	rcu_assign_pointer(connection->net_conf, new_net_conf);
-	mutex_unlock(&connection->conf_update);
+	mutex_unlock(&connection->resource->conf_update);
 	mutex_unlock(&connection->data.mutex);
 
 	crypto_free_hash(connection->peer_integrity_tfm);
@@ -3372,13 +3372,13 @@ static int receive_SyncParam(struct drbd_connection *connection, struct packet_i
 	if (err)
 		return err;
 
-	mutex_lock(&first_peer_device(device)->connection->conf_update);
+	mutex_lock(&connection->resource->conf_update);
 	old_net_conf = first_peer_device(device)->connection->net_conf;
 	if (get_ldev(device)) {
 		new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
 		if (!new_disk_conf) {
 			put_ldev(device);
-			mutex_unlock(&first_peer_device(device)->connection->conf_update);
+			mutex_unlock(&connection->resource->conf_update);
 			drbd_err(device, "Allocation of new disk_conf failed\n");
 			return -ENOMEM;
 		}
@@ -3498,7 +3498,7 @@ static int receive_SyncParam(struct drbd_connection *connection, struct packet_i
 		rcu_assign_pointer(device->rs_plan_s, new_plan);
 	}
 
-	mutex_unlock(&first_peer_device(device)->connection->conf_update);
+	mutex_unlock(&connection->resource->conf_update);
 	synchronize_rcu();
 	if (new_net_conf)
 		kfree(old_net_conf);
@@ -3512,7 +3512,7 @@ reconnect:
 		put_ldev(device);
 		kfree(new_disk_conf);
 	}
-	mutex_unlock(&first_peer_device(device)->connection->conf_update);
+	mutex_unlock(&connection->resource->conf_update);
 	return -EIO;
 
 disconnect:
@@ -3521,7 +3521,7 @@ disconnect:
 		put_ldev(device);
 		kfree(new_disk_conf);
 	}
-	mutex_unlock(&first_peer_device(device)->connection->conf_update);
+	mutex_unlock(&connection->resource->conf_update);
 	/* just for completeness: actually not needed,
 	 * as this is not reached if csums_tfm was ok. */
 	crypto_free_hash(csums_tfm);
@@ -3601,13 +3601,13 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info
 		return -ENOMEM;
 	}
 
-	mutex_lock(&first_peer_device(device)->connection->conf_update);
+	mutex_lock(&connection->resource->conf_update);
 	old_disk_conf = device->ldev->disk_conf;
 	*new_disk_conf = *old_disk_conf;
 	new_disk_conf->disk_size = p_usize;
 
 	rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
-	mutex_unlock(&first_peer_device(device)->connection->conf_update);
+	mutex_unlock(&connection->resource->conf_update);
 	synchronize_rcu();
 	kfree(old_disk_conf);
 
@@ -3846,10 +3846,10 @@ static int receive_state(struct drbd_connection *connection, struct packet_info
 		drbd_info(device, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
 	}
 
-	spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_lock_irq(&device->resource->req_lock);
  retry:
 	os = ns = drbd_read_state(device);
-	spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_unlock_irq(&device->resource->req_lock);
 
 	/* If some other part of the code (asender thread, timeout)
 	 * already decided to close the connection again,
@@ -3952,7 +3952,7 @@ static int receive_state(struct drbd_connection *connection, struct packet_info
 		}
 	}
 
-	spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_lock_irq(&device->resource->req_lock);
 	if (os.i != drbd_read_state(device).i)
 		goto retry;
 	clear_bit(CONSIDER_RESYNC, &device->flags);
@@ -3966,7 +3966,7 @@ static int receive_state(struct drbd_connection *connection, struct packet_info
 	    test_bit(NEW_CUR_UUID, &device->flags)) {
 		/* Do not allow tl_restart(RESEND) for a rebooted peer. We can only allow this
 		   for temporal network outages! */
-		spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+		spin_unlock_irq(&device->resource->req_lock);
 		drbd_err(device, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
 		tl_clear(first_peer_device(device)->connection);
 		drbd_uuid_new_current(device);
@@ -3976,7 +3976,7 @@ static int receive_state(struct drbd_connection *connection, struct packet_info
 	}
 	rv = _drbd_set_state(device, ns, cs_flags, NULL);
 	ns = drbd_read_state(device);
-	spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_unlock_irq(&device->resource->req_lock);
 
 	if (rv < SS_SUCCESS) {
 		conn_request_state(first_peer_device(device)->connection, NS(conn, C_DISCONNECTING), CS_HARD);
@@ -4483,12 +4483,12 @@ static void conn_disconnect(struct drbd_connection *connection)
 	if (conn_highest_role(connection) == R_PRIMARY && conn_highest_pdsk(connection) >= D_UNKNOWN)
 		conn_try_outdate_peer_async(connection);
 
-	spin_lock_irq(&connection->req_lock);
+	spin_lock_irq(&connection->resource->req_lock);
 	oc = connection->cstate;
 	if (oc >= C_UNCONNECTED)
 		_conn_request_state(connection, NS(conn, C_UNCONNECTED), CS_VERBOSE);
 
-	spin_unlock_irq(&connection->req_lock);
+	spin_unlock_irq(&connection->resource->req_lock);
 
 	if (oc == C_DISCONNECTING)
 		conn_request_state(connection, NS(conn, C_STANDALONE), CS_VERBOSE | CS_HARD);
@@ -4499,11 +4499,11 @@ static int drbd_disconnected(struct drbd_device *device)
 	unsigned int i;
 
 	/* wait for current activity to cease. */
-	spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_lock_irq(&device->resource->req_lock);
 	_drbd_wait_ee_list_empty(device, &device->active_ee);
 	_drbd_wait_ee_list_empty(device, &device->sync_ee);
 	_drbd_wait_ee_list_empty(device, &device->read_ee);
-	spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_unlock_irq(&device->resource->req_lock);
 
 	/* We do not have data structures that would allow us to
 	 * get the rs_pending_cnt down to 0 again.
@@ -4970,14 +4970,14 @@ validate_req_change_req_state(struct drbd_device *device, u64 id, sector_t secto
 	struct drbd_request *req;
 	struct bio_and_error m;
 
-	spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_lock_irq(&device->resource->req_lock);
 	req = find_request(device, root, id, sector, missing_ok, func);
 	if (unlikely(!req)) {
-		spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+		spin_unlock_irq(&device->resource->req_lock);
 		return -EIO;
 	}
 	__req_mod(req, what, &m);
-	spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_unlock_irq(&device->resource->req_lock);
 
 	if (m.bio)
 		complete_master_bio(device, &m);
@@ -5218,14 +5218,14 @@ static int connection_finish_peer_reqs(struct drbd_connection *connection)
 		}
 		set_bit(SIGNAL_ASENDER, &connection->flags);
 
-		spin_lock_irq(&connection->req_lock);
+		spin_lock_irq(&connection->resource->req_lock);
 		idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
 			struct drbd_device *device = peer_device->device;
 			not_empty = !list_empty(&device->done_ee);
 			if (not_empty)
 				break;
 		}
-		spin_unlock_irq(&connection->req_lock);
+		spin_unlock_irq(&connection->resource->req_lock);
 		rcu_read_unlock();
 	} while (not_empty);
 
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index e772b523ebba..f74c0a244e9a 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -851,9 +851,9 @@ static void complete_conflicting_writes(struct drbd_request *req)
 			break;
 		/* Indicate to wake up device->misc_wait on progress. */
 		i->waiting = true;
-		spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+		spin_unlock_irq(&device->resource->req_lock);
 		schedule();
-		spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+		spin_lock_irq(&device->resource->req_lock);
 	}
 	finish_wait(&device->misc_wait, &wait);
 }
@@ -1078,7 +1078,7 @@ static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request
 	struct bio_and_error m = { NULL, };
 	bool no_remote = false;
 
-	spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_lock_irq(&device->resource->req_lock);
 	if (rw == WRITE) {
 		/* This may temporarily give up the req_lock,
 		 * but will re-aquire it before it returns here.
@@ -1140,9 +1140,9 @@ static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request
 		/* needs to be marked within the same spinlock */
 		_req_mod(req, TO_BE_SUBMITTED);
 		/* but we need to give up the spinlock to submit */
-		spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+		spin_unlock_irq(&device->resource->req_lock);
 		drbd_submit_req_private_bio(req);
-		spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+		spin_lock_irq(&device->resource->req_lock);
 	} else if (no_remote) {
 nodata:
 		if (__ratelimit(&drbd_ratelimit_state))
@@ -1155,7 +1155,7 @@ nodata:
 out:
 	if (drbd_req_put_completion_ref(req, &m, 1))
 		kref_put(&req->kref, drbd_req_destroy);
-	spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+	spin_unlock_irq(&device->resource->req_lock);
 
 	if (m.bio)
 		complete_master_bio(device, &m);
@@ -1360,10 +1360,10 @@ void request_timer_fn(unsigned long data)
 
 	now = jiffies;
 
-	spin_lock_irq(&connection->req_lock);
+	spin_lock_irq(&device->resource->req_lock);
 	req = find_oldest_request(connection);
 	if (!req) {
-		spin_unlock_irq(&connection->req_lock);
+		spin_unlock_irq(&device->resource->req_lock);
 		mod_timer(&device->request_timer, now + et);
 		return;
 	}
@@ -1397,6 +1397,6 @@ void request_timer_fn(unsigned long data)
 		__drbd_chk_io_error(device, DRBD_FORCE_DETACH);
 	}
 	nt = (time_after(now, req->start_time + et) ? now : req->start_time) + et;
-	spin_unlock_irq(&connection->req_lock);
+	spin_unlock_irq(&connection->resource->req_lock);
 	mod_timer(&device->request_timer, nt);
 }
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
index 27283e619a07..5ce6dc505751 100644
--- a/drivers/block/drbd/drbd_req.h
+++ b/drivers/block/drbd/drbd_req.h
@@ -318,9 +318,9 @@ static inline int req_mod(struct drbd_request *req,
 	struct bio_and_error m;
 	int rv;
 
-	spin_lock_irqsave(&first_peer_device(device)->connection->req_lock, flags);
+	spin_lock_irqsave(&device->resource->req_lock, flags);
 	rv = __req_mod(req, what, &m);
-	spin_unlock_irqrestore(&first_peer_device(device)->connection->req_lock, flags);
+	spin_unlock_irqrestore(&device->resource->req_lock, flags);
 
 	if (m.bio)
 		complete_master_bio(device, &m);
diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c
index 79d0ea26f373..10c89314ff2b 100644
--- a/drivers/block/drbd/drbd_state.c
+++ b/drivers/block/drbd/drbd_state.c
@@ -250,10 +250,10 @@ drbd_change_state(struct drbd_device *device, enum chg_state_flags f,
250 union drbd_state ns; 250 union drbd_state ns;
251 enum drbd_state_rv rv; 251 enum drbd_state_rv rv;
252 252
253 spin_lock_irqsave(&first_peer_device(device)->connection->req_lock, flags); 253 spin_lock_irqsave(&device->resource->req_lock, flags);
254 ns = apply_mask_val(drbd_read_state(device), mask, val); 254 ns = apply_mask_val(drbd_read_state(device), mask, val);
255 rv = _drbd_set_state(device, ns, f, NULL); 255 rv = _drbd_set_state(device, ns, f, NULL);
256 spin_unlock_irqrestore(&first_peer_device(device)->connection->req_lock, flags); 256 spin_unlock_irqrestore(&device->resource->req_lock, flags);
257 257
258 return rv; 258 return rv;
259} 259}
@@ -284,7 +284,7 @@ _req_st_cond(struct drbd_device *device, union drbd_state mask,
284 if (test_and_clear_bit(CL_ST_CHG_FAIL, &device->flags)) 284 if (test_and_clear_bit(CL_ST_CHG_FAIL, &device->flags))
285 return SS_CW_FAILED_BY_PEER; 285 return SS_CW_FAILED_BY_PEER;
286 286
287 spin_lock_irqsave(&first_peer_device(device)->connection->req_lock, flags); 287 spin_lock_irqsave(&device->resource->req_lock, flags);
288 os = drbd_read_state(device); 288 os = drbd_read_state(device);
289 ns = sanitize_state(device, apply_mask_val(os, mask, val), NULL); 289 ns = sanitize_state(device, apply_mask_val(os, mask, val), NULL);
290 rv = is_valid_transition(os, ns); 290 rv = is_valid_transition(os, ns);
@@ -301,7 +301,7 @@ _req_st_cond(struct drbd_device *device, union drbd_state mask,
301 rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */ 301 rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */
302 } 302 }
303 } 303 }
304 spin_unlock_irqrestore(&first_peer_device(device)->connection->req_lock, flags); 304 spin_unlock_irqrestore(&device->resource->req_lock, flags);
305 305
306 return rv; 306 return rv;
307} 307}
@@ -330,12 +330,12 @@ drbd_req_state(struct drbd_device *device, union drbd_state mask,
330 if (f & CS_SERIALIZE) 330 if (f & CS_SERIALIZE)
331 mutex_lock(device->state_mutex); 331 mutex_lock(device->state_mutex);
332 332
333 spin_lock_irqsave(&first_peer_device(device)->connection->req_lock, flags); 333 spin_lock_irqsave(&device->resource->req_lock, flags);
334 os = drbd_read_state(device); 334 os = drbd_read_state(device);
335 ns = sanitize_state(device, apply_mask_val(os, mask, val), NULL); 335 ns = sanitize_state(device, apply_mask_val(os, mask, val), NULL);
336 rv = is_valid_transition(os, ns); 336 rv = is_valid_transition(os, ns);
337 if (rv < SS_SUCCESS) { 337 if (rv < SS_SUCCESS) {
338 spin_unlock_irqrestore(&first_peer_device(device)->connection->req_lock, flags); 338 spin_unlock_irqrestore(&device->resource->req_lock, flags);
339 goto abort; 339 goto abort;
340 } 340 }
341 341
@@ -343,7 +343,7 @@ drbd_req_state(struct drbd_device *device, union drbd_state mask,
343 rv = is_valid_state(device, ns); 343 rv = is_valid_state(device, ns);
344 if (rv == SS_SUCCESS) 344 if (rv == SS_SUCCESS)
345 rv = is_valid_soft_transition(os, ns, first_peer_device(device)->connection); 345 rv = is_valid_soft_transition(os, ns, first_peer_device(device)->connection);
346 spin_unlock_irqrestore(&first_peer_device(device)->connection->req_lock, flags); 346 spin_unlock_irqrestore(&device->resource->req_lock, flags);
347 347
348 if (rv < SS_SUCCESS) { 348 if (rv < SS_SUCCESS) {
349 if (f & CS_VERBOSE) 349 if (f & CS_VERBOSE)
@@ -366,14 +366,14 @@ drbd_req_state(struct drbd_device *device, union drbd_state mask,
366 print_st_err(device, os, ns, rv); 366 print_st_err(device, os, ns, rv);
367 goto abort; 367 goto abort;
368 } 368 }
369 spin_lock_irqsave(&first_peer_device(device)->connection->req_lock, flags); 369 spin_lock_irqsave(&device->resource->req_lock, flags);
370 ns = apply_mask_val(drbd_read_state(device), mask, val); 370 ns = apply_mask_val(drbd_read_state(device), mask, val);
371 rv = _drbd_set_state(device, ns, f, &done); 371 rv = _drbd_set_state(device, ns, f, &done);
372 } else { 372 } else {
373 rv = _drbd_set_state(device, ns, f, &done); 373 rv = _drbd_set_state(device, ns, f, &done);
374 } 374 }
375 375
376 spin_unlock_irqrestore(&first_peer_device(device)->connection->req_lock, flags); 376 spin_unlock_irqrestore(&device->resource->req_lock, flags);
377 377
378 if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) { 378 if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) {
379 D_ASSERT(device, current != first_peer_device(device)->connection->worker.task); 379 D_ASSERT(device, current != first_peer_device(device)->connection->worker.task);
@@ -1245,7 +1245,7 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
1245 struct drbd_connection *connection = first_peer_device(device)->connection; 1245 struct drbd_connection *connection = first_peer_device(device)->connection;
1246 enum drbd_req_event what = NOTHING; 1246 enum drbd_req_event what = NOTHING;
1247 1247
1248 spin_lock_irq(&connection->req_lock); 1248 spin_lock_irq(&device->resource->req_lock);
1249 if (os.conn < C_CONNECTED && conn_lowest_conn(connection) >= C_CONNECTED) 1249 if (os.conn < C_CONNECTED && conn_lowest_conn(connection) >= C_CONNECTED)
1250 what = RESEND; 1250 what = RESEND;
1251 1251
@@ -1260,13 +1260,13 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
1260 (union drbd_state) { { .susp_nod = 0 } }, 1260 (union drbd_state) { { .susp_nod = 0 } },
1261 CS_VERBOSE); 1261 CS_VERBOSE);
1262 } 1262 }
1263 spin_unlock_irq(&connection->req_lock); 1263 spin_unlock_irq(&device->resource->req_lock);
1264 } 1264 }
1265 1265
1266 if (ns.susp_fen) { 1266 if (ns.susp_fen) {
1267 struct drbd_connection *connection = first_peer_device(device)->connection; 1267 struct drbd_connection *connection = first_peer_device(device)->connection;
1268 1268
1269 spin_lock_irq(&connection->req_lock); 1269 spin_lock_irq(&device->resource->req_lock);
1270 if (connection->susp_fen && conn_lowest_conn(connection) >= C_CONNECTED) { 1270 if (connection->susp_fen && conn_lowest_conn(connection) >= C_CONNECTED) {
1271 /* case2: The connection was established again: */ 1271 /* case2: The connection was established again: */
1272 struct drbd_peer_device *peer_device; 1272 struct drbd_peer_device *peer_device;
@@ -1282,7 +1282,7 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
1282 (union drbd_state) { { .susp_fen = 0 } }, 1282 (union drbd_state) { { .susp_fen = 0 } },
1283 CS_VERBOSE); 1283 CS_VERBOSE);
1284 } 1284 }
1285 spin_unlock_irq(&connection->req_lock); 1285 spin_unlock_irq(&device->resource->req_lock);
1286 } 1286 }
1287 1287
1288 /* Became sync source. With protocol >= 96, we still need to send out 1288 /* Became sync source. With protocol >= 96, we still need to send out
@@ -1555,13 +1555,13 @@ static int w_after_conn_state_ch(struct drbd_work *w, int unused)
1555 if (oc == C_DISCONNECTING && ns_max.conn == C_STANDALONE) { 1555 if (oc == C_DISCONNECTING && ns_max.conn == C_STANDALONE) {
1556 struct net_conf *old_conf; 1556 struct net_conf *old_conf;
1557 1557
1558 mutex_lock(&connection->conf_update); 1558 mutex_lock(&connection->resource->conf_update);
1559 old_conf = connection->net_conf; 1559 old_conf = connection->net_conf;
1560 connection->my_addr_len = 0; 1560 connection->my_addr_len = 0;
1561 connection->peer_addr_len = 0; 1561 connection->peer_addr_len = 0;
1562 rcu_assign_pointer(connection->net_conf, NULL); 1562 rcu_assign_pointer(connection->net_conf, NULL);
1563 conn_free_crypto(connection); 1563 conn_free_crypto(connection);
1564 mutex_unlock(&connection->conf_update); 1564 mutex_unlock(&connection->resource->conf_update);
1565 1565
1566 synchronize_rcu(); 1566 synchronize_rcu();
1567 kfree(old_conf); 1567 kfree(old_conf);
@@ -1579,13 +1579,13 @@ static int w_after_conn_state_ch(struct drbd_work *w, int unused)
1579 } 1579 }
1580 } 1580 }
1581 rcu_read_unlock(); 1581 rcu_read_unlock();
1582 spin_lock_irq(&connection->req_lock); 1582 spin_lock_irq(&connection->resource->req_lock);
1583 _tl_restart(connection, CONNECTION_LOST_WHILE_PENDING); 1583 _tl_restart(connection, CONNECTION_LOST_WHILE_PENDING);
1584 _conn_request_state(connection, 1584 _conn_request_state(connection,
1585 (union drbd_state) { { .susp_fen = 1 } }, 1585 (union drbd_state) { { .susp_fen = 1 } },
1586 (union drbd_state) { { .susp_fen = 0 } }, 1586 (union drbd_state) { { .susp_fen = 0 } },
1587 CS_VERBOSE); 1587 CS_VERBOSE);
1588 spin_unlock_irq(&connection->req_lock); 1588 spin_unlock_irq(&connection->resource->req_lock);
1589 } 1589 }
1590 } 1590 }
1591 kref_put(&connection->kref, drbd_destroy_connection); 1591 kref_put(&connection->kref, drbd_destroy_connection);
@@ -1802,7 +1802,7 @@ _conn_request_state(struct drbd_connection *connection, union drbd_state mask, u
1802 /* This will be a cluster-wide state change. 1802 /* This will be a cluster-wide state change.
1803 * Need to give up the spinlock, grab the mutex, 1803 * Need to give up the spinlock, grab the mutex,
1804 * then send the state change request, ... */ 1804 * then send the state change request, ... */
1805 spin_unlock_irq(&connection->req_lock); 1805 spin_unlock_irq(&connection->resource->req_lock);
1806 mutex_lock(&connection->cstate_mutex); 1806 mutex_lock(&connection->cstate_mutex);
1807 have_mutex = true; 1807 have_mutex = true;
1808 1808
@@ -1821,10 +1821,10 @@ _conn_request_state(struct drbd_connection *connection, union drbd_state mask, u
1821 /* ... and re-acquire the spinlock. 1821 /* ... and re-acquire the spinlock.
1822 * If _conn_rq_cond() returned >= SS_SUCCESS, we must call 1822 * If _conn_rq_cond() returned >= SS_SUCCESS, we must call
1823 * conn_set_state() within the same spinlock. */ 1823 * conn_set_state() within the same spinlock. */
1824 spin_lock_irq(&connection->req_lock); 1824 spin_lock_irq(&connection->resource->req_lock);
1825 wait_event_lock_irq(connection->ping_wait, 1825 wait_event_lock_irq(connection->ping_wait,
1826 (rv = _conn_rq_cond(connection, mask, val)), 1826 (rv = _conn_rq_cond(connection, mask, val)),
1827 connection->req_lock); 1827 connection->resource->req_lock);
1828 clear_bit(CONN_WD_ST_CHG_REQ, &connection->flags); 1828 clear_bit(CONN_WD_ST_CHG_REQ, &connection->flags);
1829 if (rv < SS_SUCCESS) 1829 if (rv < SS_SUCCESS)
1830 goto abort; 1830 goto abort;
@@ -1853,10 +1853,10 @@ _conn_request_state(struct drbd_connection *connection, union drbd_state mask, u
1853 if (have_mutex) { 1853 if (have_mutex) {
1854 /* mutex_unlock() "... must not be used in interrupt context.", 1854 /* mutex_unlock() "... must not be used in interrupt context.",
1855 * so give up the spinlock, then re-acquire it */ 1855 * so give up the spinlock, then re-acquire it */
1856 spin_unlock_irq(&connection->req_lock); 1856 spin_unlock_irq(&connection->resource->req_lock);
1857 abort_unlocked: 1857 abort_unlocked:
1858 mutex_unlock(&connection->cstate_mutex); 1858 mutex_unlock(&connection->cstate_mutex);
1859 spin_lock_irq(&connection->req_lock); 1859 spin_lock_irq(&connection->resource->req_lock);
1860 } 1860 }
1861 if (rv < SS_SUCCESS && flags & CS_VERBOSE) { 1861 if (rv < SS_SUCCESS && flags & CS_VERBOSE) {
1862 drbd_err(connection, "State change failed: %s\n", drbd_set_st_err_str(rv)); 1862 drbd_err(connection, "State change failed: %s\n", drbd_set_st_err_str(rv));
@@ -1872,9 +1872,9 @@ conn_request_state(struct drbd_connection *connection, union drbd_state mask, un
1872{ 1872{
1873 enum drbd_state_rv rv; 1873 enum drbd_state_rv rv;
1874 1874
1875 spin_lock_irq(&connection->req_lock); 1875 spin_lock_irq(&connection->resource->req_lock);
1876 rv = _conn_request_state(connection, mask, val, flags); 1876 rv = _conn_request_state(connection, mask, val, flags);
1877 spin_unlock_irq(&connection->req_lock); 1877 spin_unlock_irq(&connection->resource->req_lock);
1878 1878
1879 return rv; 1879 return rv;
1880} 1880}
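
The trickiest hunks are in _conn_request_state(): the function runs under the resource req_lock, but a cluster-wide change has to take cstate_mutex and talk to the peer, neither of which is allowed under a spinlock. Reduced to its skeleton (the function name is hypothetical; state computation, flag handling and error paths are omitted, and the packet send is elided):

    /* Skeleton of the cluster-wide path in _conn_request_state();
     * entered and left with resource->req_lock held. */
    static enum drbd_state_rv
    conn_cl_wide_pattern(struct drbd_connection *connection,
                         union drbd_state mask, union drbd_state val)
    {
            enum drbd_state_rv rv;

            /* mutex_lock() may sleep, so the spinlock goes first */
            spin_unlock_irq(&connection->resource->req_lock);
            mutex_lock(&connection->cstate_mutex);

            /* ... send the state change request to the peer ... */

            /* re-acquire the lock and wait for the peer's answer;
             * wait_event_lock_irq() releases and retakes the lock
             * around each sleep */
            spin_lock_irq(&connection->resource->req_lock);
            wait_event_lock_irq(connection->ping_wait,
                                (rv = _conn_rq_cond(connection, mask, val)),
                                connection->resource->req_lock);

            /* per the comment in the diff, mutex_unlock() must not be
             * used in interrupt context, so drop the spinlock again */
            spin_unlock_irq(&connection->resource->req_lock);
            mutex_unlock(&connection->cstate_mutex);
            spin_lock_irq(&connection->resource->req_lock);

            return rv;
    }
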
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index db63b1ff4b35..1ba8f8ec1525 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -102,14 +102,14 @@ static void drbd_endio_read_sec_final(struct drbd_peer_request *peer_req) __rele
102 unsigned long flags = 0; 102 unsigned long flags = 0;
103 struct drbd_device *device = peer_req->w.device; 103 struct drbd_device *device = peer_req->w.device;
104 104
105 spin_lock_irqsave(&first_peer_device(device)->connection->req_lock, flags); 105 spin_lock_irqsave(&device->resource->req_lock, flags);
106 device->read_cnt += peer_req->i.size >> 9; 106 device->read_cnt += peer_req->i.size >> 9;
107 list_del(&peer_req->w.list); 107 list_del(&peer_req->w.list);
108 if (list_empty(&device->read_ee)) 108 if (list_empty(&device->read_ee))
109 wake_up(&device->ee_wait); 109 wake_up(&device->ee_wait);
110 if (test_bit(__EE_WAS_ERROR, &peer_req->flags)) 110 if (test_bit(__EE_WAS_ERROR, &peer_req->flags))
111 __drbd_chk_io_error(device, DRBD_READ_ERROR); 111 __drbd_chk_io_error(device, DRBD_READ_ERROR);
112 spin_unlock_irqrestore(&first_peer_device(device)->connection->req_lock, flags); 112 spin_unlock_irqrestore(&device->resource->req_lock, flags);
113 113
114 drbd_queue_work(&first_peer_device(device)->connection->sender_work, &peer_req->w); 114 drbd_queue_work(&first_peer_device(device)->connection->sender_work, &peer_req->w);
115 put_ldev(device); 115 put_ldev(device);
@@ -134,7 +134,7 @@ static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __rel
134 do_al_complete_io = peer_req->flags & EE_CALL_AL_COMPLETE_IO; 134 do_al_complete_io = peer_req->flags & EE_CALL_AL_COMPLETE_IO;
135 block_id = peer_req->block_id; 135 block_id = peer_req->block_id;
136 136
137 spin_lock_irqsave(&first_peer_device(device)->connection->req_lock, flags); 137 spin_lock_irqsave(&device->resource->req_lock, flags);
138 device->writ_cnt += peer_req->i.size >> 9; 138 device->writ_cnt += peer_req->i.size >> 9;
139 list_move_tail(&peer_req->w.list, &device->done_ee); 139 list_move_tail(&peer_req->w.list, &device->done_ee);
140 140
@@ -150,7 +150,7 @@ static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __rel
150 150
151 if (test_bit(__EE_WAS_ERROR, &peer_req->flags)) 151 if (test_bit(__EE_WAS_ERROR, &peer_req->flags))
152 __drbd_chk_io_error(device, DRBD_WRITE_ERROR); 152 __drbd_chk_io_error(device, DRBD_WRITE_ERROR);
153 spin_unlock_irqrestore(&first_peer_device(device)->connection->req_lock, flags); 153 spin_unlock_irqrestore(&device->resource->req_lock, flags);
154 154
155 if (block_id == ID_SYNCER) 155 if (block_id == ID_SYNCER)
156 drbd_rs_complete_io(device, i.sector); 156 drbd_rs_complete_io(device, i.sector);
@@ -273,9 +273,9 @@ void drbd_request_endio(struct bio *bio, int error)
273 req->private_bio = ERR_PTR(error); 273 req->private_bio = ERR_PTR(error);
274 274
275 /* not req_mod(), we need irqsave here! */ 275 /* not req_mod(), we need irqsave here! */
276 spin_lock_irqsave(&first_peer_device(device)->connection->req_lock, flags); 276 spin_lock_irqsave(&device->resource->req_lock, flags);
277 __req_mod(req, what, &m); 277 __req_mod(req, what, &m);
278 spin_unlock_irqrestore(&first_peer_device(device)->connection->req_lock, flags); 278 spin_unlock_irqrestore(&device->resource->req_lock, flags);
279 put_ldev(device); 279 put_ldev(device);
280 280
281 if (m.bio) 281 if (m.bio)
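
The drbd_worker.c completion hunks share one shape: completion handlers can run in interrupt context, so they use the irqsave variants on the resource lock, and they queue follow-up work (or complete bios) only after dropping it. A simplified sketch under those assumptions, modeled on drbd_endio_read_sec_final() above; the function name is invented and bookkeeping is trimmed:

    /* Illustrative completion-side pattern with the resource lock. */
    static void endio_pattern(struct drbd_device *device,
                              struct drbd_peer_request *peer_req)
    {
            unsigned long flags;

            spin_lock_irqsave(&device->resource->req_lock, flags);
            list_del(&peer_req->w.list);    /* per-device list update */
            if (list_empty(&device->read_ee))
                    wake_up(&device->ee_wait);
            spin_unlock_irqrestore(&device->resource->req_lock, flags);

            /* hand the work item to the sender outside the lock */
            drbd_queue_work(&first_peer_device(device)->connection->sender_work,
                            &peer_req->w);
    }
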
@@ -397,9 +397,9 @@ static int read_for_csum(struct drbd_device *device, sector_t sector, int size)
397 goto defer; 397 goto defer;
398 398
399 peer_req->w.cb = w_e_send_csum; 399 peer_req->w.cb = w_e_send_csum;
400 spin_lock_irq(&first_peer_device(device)->connection->req_lock); 400 spin_lock_irq(&device->resource->req_lock);
401 list_add(&peer_req->w.list, &device->read_ee); 401 list_add(&peer_req->w.list, &device->read_ee);
402 spin_unlock_irq(&first_peer_device(device)->connection->req_lock); 402 spin_unlock_irq(&device->resource->req_lock);
403 403
404 atomic_add(size >> 9, &device->rs_sect_ev); 404 atomic_add(size >> 9, &device->rs_sect_ev);
405 if (drbd_submit_peer_request(device, peer_req, READ, DRBD_FAULT_RS_RD) == 0) 405 if (drbd_submit_peer_request(device, peer_req, READ, DRBD_FAULT_RS_RD) == 0)
@@ -409,9 +409,9 @@ static int read_for_csum(struct drbd_device *device, sector_t sector, int size)
409 * because bio_add_page failed (probably broken lower level driver), 409 * because bio_add_page failed (probably broken lower level driver),
410 * retry may or may not help. 410 * retry may or may not help.
411 * If it does not, you may need to force disconnect. */ 411 * If it does not, you may need to force disconnect. */
412 spin_lock_irq(&first_peer_device(device)->connection->req_lock); 412 spin_lock_irq(&device->resource->req_lock);
413 list_del(&peer_req->w.list); 413 list_del(&peer_req->w.list);
414 spin_unlock_irq(&first_peer_device(device)->connection->req_lock); 414 spin_unlock_irq(&device->resource->req_lock);
415 415
416 drbd_free_peer_req(device, peer_req); 416 drbd_free_peer_req(device, peer_req);
417defer: 417defer:
@@ -855,7 +855,7 @@ int drbd_resync_finished(struct drbd_device *device)
855 855
856 ping_peer(device); 856 ping_peer(device);
857 857
858 spin_lock_irq(&first_peer_device(device)->connection->req_lock); 858 spin_lock_irq(&device->resource->req_lock);
859 os = drbd_read_state(device); 859 os = drbd_read_state(device);
860 860
861 verify_done = (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T); 861 verify_done = (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T);
@@ -944,7 +944,7 @@ int drbd_resync_finished(struct drbd_device *device)
944 944
945 _drbd_set_state(device, ns, CS_VERBOSE, NULL); 945 _drbd_set_state(device, ns, CS_VERBOSE, NULL);
946out_unlock: 946out_unlock:
947 spin_unlock_irq(&first_peer_device(device)->connection->req_lock); 947 spin_unlock_irq(&device->resource->req_lock);
948 put_ldev(device); 948 put_ldev(device);
949out: 949out:
950 device->rs_total = 0; 950 device->rs_total = 0;
@@ -971,9 +971,9 @@ static void move_to_net_ee_or_free(struct drbd_device *device, struct drbd_peer_
971 int i = (peer_req->i.size + PAGE_SIZE -1) >> PAGE_SHIFT; 971 int i = (peer_req->i.size + PAGE_SIZE -1) >> PAGE_SHIFT;
972 atomic_add(i, &device->pp_in_use_by_net); 972 atomic_add(i, &device->pp_in_use_by_net);
973 atomic_sub(i, &device->pp_in_use); 973 atomic_sub(i, &device->pp_in_use);
974 spin_lock_irq(&first_peer_device(device)->connection->req_lock); 974 spin_lock_irq(&device->resource->req_lock);
975 list_add_tail(&peer_req->w.list, &device->net_ee); 975 list_add_tail(&peer_req->w.list, &device->net_ee);
976 spin_unlock_irq(&first_peer_device(device)->connection->req_lock); 976 spin_unlock_irq(&device->resource->req_lock);
977 wake_up(&drbd_pp_wait); 977 wake_up(&drbd_pp_wait);
978 } else 978 } else
979 drbd_free_peer_req(device, peer_req); 979 drbd_free_peer_req(device, peer_req);
@@ -1847,7 +1847,7 @@ static void wait_for_work(struct drbd_connection *connection, struct list_head *
1847 for (;;) { 1847 for (;;) {
1848 int send_barrier; 1848 int send_barrier;
1849 prepare_to_wait(&connection->sender_work.q_wait, &wait, TASK_INTERRUPTIBLE); 1849 prepare_to_wait(&connection->sender_work.q_wait, &wait, TASK_INTERRUPTIBLE);
1850 spin_lock_irq(&connection->req_lock); 1850 spin_lock_irq(&connection->resource->req_lock);
1851 spin_lock(&connection->sender_work.q_lock); /* FIXME get rid of this one? */ 1851 spin_lock(&connection->sender_work.q_lock); /* FIXME get rid of this one? */
1852 /* dequeue single item only, 1852 /* dequeue single item only,
1853 * we still use drbd_queue_work_front() in some places */ 1853 * we still use drbd_queue_work_front() in some places */
@@ -1855,11 +1855,11 @@ static void wait_for_work(struct drbd_connection *connection, struct list_head *
1855 list_move(connection->sender_work.q.next, work_list); 1855 list_move(connection->sender_work.q.next, work_list);
1856 spin_unlock(&connection->sender_work.q_lock); /* FIXME get rid of this one? */ 1856 spin_unlock(&connection->sender_work.q_lock); /* FIXME get rid of this one? */
1857 if (!list_empty(work_list) || signal_pending(current)) { 1857 if (!list_empty(work_list) || signal_pending(current)) {
1858 spin_unlock_irq(&connection->req_lock); 1858 spin_unlock_irq(&connection->resource->req_lock);
1859 break; 1859 break;
1860 } 1860 }
1861 send_barrier = need_to_send_barrier(connection); 1861 send_barrier = need_to_send_barrier(connection);
1862 spin_unlock_irq(&connection->req_lock); 1862 spin_unlock_irq(&connection->resource->req_lock);
1863 if (send_barrier) { 1863 if (send_barrier) {
1864 drbd_send_barrier(connection); 1864 drbd_send_barrier(connection);
1865 connection->send.current_epoch_nr++; 1865 connection->send.current_epoch_nr++;