Diffstat (limited to 'drivers/block/drbd/drbd_worker.c')
-rw-r--r--	drivers/block/drbd/drbd_worker.c	31
1 file changed, 25 insertions(+), 6 deletions(-)
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 4d3e6f6213ba..620c70ff2231 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -70,11 +70,29 @@ rwlock_t global_state_lock;
 void drbd_md_io_complete(struct bio *bio, int error)
 {
 	struct drbd_md_io *md_io;
+	struct drbd_conf *mdev;
 
 	md_io = (struct drbd_md_io *)bio->bi_private;
+	mdev = container_of(md_io, struct drbd_conf, md_io);
+
 	md_io->error = error;
 
-	complete(&md_io->event);
+	/* We grabbed an extra reference in _drbd_md_sync_page_io() to be able
+	 * to timeout on the lower level device, and eventually detach from it.
+	 * If this io completion runs after that timeout expired, this
+	 * drbd_md_put_buffer() may allow us to finally try and re-attach.
+	 * During normal operation, this only puts that extra reference
+	 * down to 1 again.
+	 * Make sure we first drop the reference, and only then signal
+	 * completion, or we may (in drbd_al_read_log()) cycle so fast into the
+	 * next drbd_md_sync_page_io(), that we trigger the
+	 * ASSERT(atomic_read(&mdev->md_io_in_use) == 1) there.
+	 */
+	drbd_md_put_buffer(mdev);
+	md_io->done = 1;
+	wake_up(&mdev->misc_wait);
+	bio_put(bio);
+	put_ldev(mdev);
 }
 
 /* reads on behalf of the partner,
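The new comment block in this hunk is really about ordering: the extra reference taken in _drbd_md_sync_page_io() has to be dropped before md_io->done is set and the waiter on mdev->misc_wait is woken, otherwise a fast waiter can re-enter drbd_md_sync_page_io() while the count is still elevated and trip the ASSERT it mentions. A minimal userspace sketch of that ordering, using plain pthreads and C11 atomics rather than DRBD code (all names below are illustrative stand-ins), looks like this:

/* Completion must drop its extra reference *before* signalling done,
 * so a waiter that immediately resubmits sees the count back at 1.
 * Stand-ins: io_in_use ~ mdev->md_io_in_use, done ~ md_io->done,
 * the condvar ~ mdev->misc_wait.  Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int io_in_use = 1;	/* 1 == buffer owned, idle */
static int done;
static pthread_mutex_t lock  = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wait_q = PTHREAD_COND_INITIALIZER;

static void *io_complete(void *arg)
{
	(void)arg;
	atomic_fetch_sub(&io_in_use, 1);	/* 1. drop the extra reference */

	pthread_mutex_lock(&lock);		/* 2. only then signal completion */
	done = 1;
	pthread_cond_signal(&wait_q);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	atomic_fetch_add(&io_in_use, 1);	/* submit: take the extra reference */
	pthread_create(&t, NULL, io_complete, NULL);

	pthread_mutex_lock(&lock);		/* wait for the completion side */
	while (!done)
		pthread_cond_wait(&wait_q, &lock);
	pthread_mutex_unlock(&lock);

	/* because the sub happens before the wake-up, this always prints 1 */
	printf("io_in_use after completion: %d\n", atomic_load(&io_in_use));
	pthread_join(t, NULL);
	return 0;
}

Swapping steps 1 and 2 in io_complete() reopens exactly the window the comment warns about: the waiter can be past the wait and into its next submission while the count is still 2.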
@@ -226,6 +244,7 @@ void drbd_endio_pri(struct bio *bio, int error)
 	spin_lock_irqsave(&mdev->req_lock, flags);
 	__req_mod(req, what, &m);
 	spin_unlock_irqrestore(&mdev->req_lock, flags);
+	put_ldev(mdev);
 
 	if (m.bio)
 		complete_master_bio(mdev, &m);
@@ -290,7 +309,7 @@ void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *
 	sg_init_table(&sg, 1);
 	crypto_hash_init(&desc);
 
-	__bio_for_each_segment(bvec, bio, i, 0) {
+	bio_for_each_segment(bvec, bio, i) {
 		sg_set_page(&sg, bvec->bv_page, bvec->bv_len, bvec->bv_offset);
 		crypto_hash_update(&desc, &sg, sg.length);
 	}
@@ -728,7 +747,7 @@ int w_start_resync(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 	}
 
 	drbd_start_resync(mdev, C_SYNC_SOURCE);
-	clear_bit(AHEAD_TO_SYNC_SOURCE, &mdev->current_epoch->flags);
+	clear_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags);
 	return 1;
 }
 
@@ -1519,14 +1538,14 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
 	}
 
 	drbd_state_lock(mdev);
-
+	write_lock_irq(&global_state_lock);
 	if (!get_ldev_if_state(mdev, D_NEGOTIATING)) {
+		write_unlock_irq(&global_state_lock);
 		drbd_state_unlock(mdev);
 		return;
 	}
 
-	write_lock_irq(&global_state_lock);
-	ns = mdev->state;
+	ns.i = mdev->state.i;
 
 	ns.aftr_isp = !_drbd_may_sync_now(mdev);
 
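The change in this last hunk is about lock ordering: write_lock_irq(&global_state_lock) is now taken before the get_ldev_if_state() check, so the early-return path has to unwind it as well, and the new state is then seeded with ns.i = mdev->state.i under both locks. A small userspace sketch of that shape, again with pthreads rather than the DRBD primitives (every name here is a placeholder), shows the failure path releasing both locks in reverse order:

/* Take both locks before checking the precondition; the bail-out path
 * must release both, innermost first, or the next caller blocks forever.
 * Stand-ins: state_lock ~ drbd_state_lock(), global_lock ~ global_state_lock,
 * device_attached ~ get_ldev_if_state() succeeding.  Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t state_lock   = PTHREAD_MUTEX_INITIALIZER;
static pthread_rwlock_t global_lock = PTHREAD_RWLOCK_INITIALIZER;
static bool device_attached;		/* flip to true to take the success path */

static int start_resync(void)
{
	pthread_mutex_lock(&state_lock);
	pthread_rwlock_wrlock(&global_lock);	/* taken *before* the check */

	if (!device_attached) {
		pthread_rwlock_unlock(&global_lock);	/* unwind both on failure */
		pthread_mutex_unlock(&state_lock);
		return -1;
	}

	/* ... compute and commit the new state under both locks ... */

	pthread_rwlock_unlock(&global_lock);
	pthread_mutex_unlock(&state_lock);
	return 0;
}

int main(void)
{
	printf("start_resync: %d\n", start_resync());
	return 0;
}

The design point is that once the lock acquisition moves ahead of the check, the early-return path must pick up the matching unlock, which is exactly what the added write_unlock_irq() line does.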