author     Philipp Reisner <philipp.reisner@linbit.com>  2011-01-19 08:16:30 -0500
committer  Philipp Reisner <philipp.reisner@linbit.com>  2011-08-29 05:30:15 -0400
commit     87eeee41f8740451b61a1e7d37a494333a906861 (patch)
tree       f7b328aa626ed5153ec5cf5b9dbd94c23676b6a8 /drivers/block/drbd/drbd_main.c
parent     31890f4ab299c4116cf0a104ca9ce4f9ca2c5da0 (diff)
drbd: moved req_lock and transfer log from mdev to tconn
sed -i \
    -e 's/mdev->req_lock/mdev->tconn->req_lock/g' \
    -e 's/mdev->unused_spare_tle/mdev->tconn->unused_spare_tle/g' \
    -e 's/mdev->newest_tle/mdev->tconn->newest_tle/g' \
    -e 's/mdev->oldest_tle/mdev->tconn->oldest_tle/g' \
    -e 's/mdev->out_of_sequence_requests/mdev->tconn->out_of_sequence_requests/g' \
    *.[ch]

Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
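In effect (a minimal illustrative sketch, not the kernel's full definitions): the request lock and the transfer-log anchors named in the sed expressions above leave the per-device struct drbd_conf and become members of the per-connection struct drbd_tconn, reached through the mdev->tconn back-pointer. Only the members touched by this patch are shown; everything else is omitted.

/* sketch only -- see drbd_int.h for the real definitions */
#include <linux/spinlock.h>
#include <linux/list.h>

struct drbd_tl_epoch;

struct drbd_tconn {                                     /* per-connection state */
        spinlock_t            req_lock;                 /* was mdev->req_lock */
        struct drbd_tl_epoch *unused_spare_tle;         /* was mdev->unused_spare_tle */
        struct drbd_tl_epoch *newest_tle;               /* was mdev->newest_tle */
        struct drbd_tl_epoch *oldest_tle;               /* was mdev->oldest_tle */
        struct list_head      out_of_sequence_requests; /* was mdev->out_of_sequence_requests */
        /* ... many other members omitted ... */
};

struct drbd_conf {                                      /* per-device state */
        struct drbd_tconn *tconn;   /* every former mdev->X above is now mdev->tconn->X */
        /* ... */
};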
Diffstat (limited to 'drivers/block/drbd/drbd_main.c')
-rw-r--r--  drivers/block/drbd/drbd_main.c  |  100
1 file changed, 50 insertions(+), 50 deletions(-)
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index e06ca4a0d906..c063cd513223 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -185,7 +185,7 @@ int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
  * DOC: The transfer log
  *
  * The transfer log is a single linked list of &struct drbd_tl_epoch objects.
- * mdev->newest_tle points to the head, mdev->oldest_tle points to the tail
+ * mdev->tconn->newest_tle points to the head, mdev->tconn->oldest_tle points to the tail
  * of the list. There is always at least one &struct drbd_tl_epoch object.
  *
  * Each &struct drbd_tl_epoch has a circular double linked list of requests
@@ -206,21 +206,21 @@ static int tl_init(struct drbd_conf *mdev)
         b->n_writes = 0;
         b->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */

-        mdev->oldest_tle = b;
-        mdev->newest_tle = b;
-        INIT_LIST_HEAD(&mdev->out_of_sequence_requests);
+        mdev->tconn->oldest_tle = b;
+        mdev->tconn->newest_tle = b;
+        INIT_LIST_HEAD(&mdev->tconn->out_of_sequence_requests);

         return 1;
 }

 static void tl_cleanup(struct drbd_conf *mdev)
 {
-        D_ASSERT(mdev->oldest_tle == mdev->newest_tle);
-        D_ASSERT(list_empty(&mdev->out_of_sequence_requests));
-        kfree(mdev->oldest_tle);
-        mdev->oldest_tle = NULL;
-        kfree(mdev->unused_spare_tle);
-        mdev->unused_spare_tle = NULL;
+        D_ASSERT(mdev->tconn->oldest_tle == mdev->tconn->newest_tle);
+        D_ASSERT(list_empty(&mdev->tconn->out_of_sequence_requests));
+        kfree(mdev->tconn->oldest_tle);
+        mdev->tconn->oldest_tle = NULL;
+        kfree(mdev->tconn->unused_spare_tle);
+        mdev->tconn->unused_spare_tle = NULL;
 }

 /**
@@ -240,13 +240,13 @@ void _tl_add_barrier(struct drbd_conf *mdev, struct drbd_tl_epoch *new)
         new->next = NULL;
         new->n_writes = 0;

-        newest_before = mdev->newest_tle;
+        newest_before = mdev->tconn->newest_tle;
         /* never send a barrier number == 0, because that is special-cased
          * when using TCQ for our write ordering code */
         new->br_number = (newest_before->br_number+1) ?: 1;
-        if (mdev->newest_tle != new) {
-                mdev->newest_tle->next = new;
-                mdev->newest_tle = new;
+        if (mdev->tconn->newest_tle != new) {
+                mdev->tconn->newest_tle->next = new;
+                mdev->tconn->newest_tle = new;
         }
 }

@@ -267,9 +267,9 @@ void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
         struct list_head *le, *tle;
         struct drbd_request *r;

-        spin_lock_irq(&mdev->req_lock);
+        spin_lock_irq(&mdev->tconn->req_lock);

-        b = mdev->oldest_tle;
+        b = mdev->tconn->oldest_tle;

         /* first some paranoia code */
         if (b == NULL) {
@@ -312,22 +312,22 @@ void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
         if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
                 _tl_add_barrier(mdev, b);
                 if (nob)
-                        mdev->oldest_tle = nob;
+                        mdev->tconn->oldest_tle = nob;
                 /* if nob == NULL b was the only barrier, and becomes the new
-                   barrier. Therefore mdev->oldest_tle points already to b */
+                   barrier. Therefore mdev->tconn->oldest_tle points already to b */
         } else {
                 D_ASSERT(nob != NULL);
-                mdev->oldest_tle = nob;
+                mdev->tconn->oldest_tle = nob;
                 kfree(b);
         }

-        spin_unlock_irq(&mdev->req_lock);
+        spin_unlock_irq(&mdev->tconn->req_lock);
         dec_ap_pending(mdev);

         return;

 bail:
-        spin_unlock_irq(&mdev->req_lock);
+        spin_unlock_irq(&mdev->tconn->req_lock);
         drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
 }

@@ -347,8 +347,8 @@ static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
         struct drbd_request *req;
         int rv, n_writes, n_reads;

-        b = mdev->oldest_tle;
-        pn = &mdev->oldest_tle;
+        b = mdev->tconn->oldest_tle;
+        pn = &mdev->tconn->oldest_tle;
         while (b) {
                 n_writes = 0;
                 n_reads = 0;
@@ -387,7 +387,7 @@ static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
                 if (b->w.cb != NULL)
                         dec_ap_pending(mdev);

-                if (b == mdev->newest_tle) {
+                if (b == mdev->tconn->newest_tle) {
                         /* recycle, but reinit! */
                         D_ASSERT(tmp == NULL);
                         INIT_LIST_HEAD(&b->requests);
@@ -422,15 +422,15 @@ void tl_clear(struct drbd_conf *mdev)
         struct list_head *le, *tle;
         struct drbd_request *r;

-        spin_lock_irq(&mdev->req_lock);
+        spin_lock_irq(&mdev->tconn->req_lock);

         _tl_restart(mdev, CONNECTION_LOST_WHILE_PENDING);

         /* we expect this list to be empty. */
-        D_ASSERT(list_empty(&mdev->out_of_sequence_requests));
+        D_ASSERT(list_empty(&mdev->tconn->out_of_sequence_requests));

         /* but just in case, clean it up anyways! */
-        list_for_each_safe(le, tle, &mdev->out_of_sequence_requests) {
+        list_for_each_safe(le, tle, &mdev->tconn->out_of_sequence_requests) {
                 r = list_entry(le, struct drbd_request, tl_requests);
                 /* It would be nice to complete outside of spinlock.
                  * But this is easier for now. */
@@ -440,14 +440,14 @@ void tl_clear(struct drbd_conf *mdev)
         /* ensure bit indicating barrier is required is clear */
         clear_bit(CREATE_BARRIER, &mdev->flags);

-        spin_unlock_irq(&mdev->req_lock);
+        spin_unlock_irq(&mdev->tconn->req_lock);
 }

 void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
 {
-        spin_lock_irq(&mdev->req_lock);
+        spin_lock_irq(&mdev->tconn->req_lock);
         _tl_restart(mdev, what);
-        spin_unlock_irq(&mdev->req_lock);
+        spin_unlock_irq(&mdev->tconn->req_lock);
 }

 /**
@@ -476,12 +476,12 @@ drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
         union drbd_state os, ns;
         enum drbd_state_rv rv;

-        spin_lock_irqsave(&mdev->req_lock, flags);
+        spin_lock_irqsave(&mdev->tconn->req_lock, flags);
         os = mdev->state;
         ns.i = (os.i & ~mask.i) | val.i;
         rv = _drbd_set_state(mdev, ns, f, NULL);
         ns = mdev->state;
-        spin_unlock_irqrestore(&mdev->req_lock, flags);
+        spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);

         return rv;
 }
@@ -522,7 +522,7 @@ _req_st_cond(struct drbd_conf *mdev, union drbd_state mask,
                 return SS_CW_FAILED_BY_PEER;

         rv = 0;
-        spin_lock_irqsave(&mdev->req_lock, flags);
+        spin_lock_irqsave(&mdev->tconn->req_lock, flags);
         os = mdev->state;
         ns.i = (os.i & ~mask.i) | val.i;
         ns = sanitize_state(mdev, os, ns, NULL);
@@ -537,7 +537,7 @@ _req_st_cond(struct drbd_conf *mdev, union drbd_state mask,
                         rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */
                 }
         }
-        spin_unlock_irqrestore(&mdev->req_lock, flags);
+        spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);

         return rv;
 }
@@ -566,7 +566,7 @@ drbd_req_state(struct drbd_conf *mdev, union drbd_state mask,
         if (f & CS_SERIALIZE)
                 mutex_lock(&mdev->state_mutex);

-        spin_lock_irqsave(&mdev->req_lock, flags);
+        spin_lock_irqsave(&mdev->tconn->req_lock, flags);
         os = mdev->state;
         ns.i = (os.i & ~mask.i) | val.i;
         ns = sanitize_state(mdev, os, ns, NULL);
@@ -575,7 +575,7 @@ drbd_req_state(struct drbd_conf *mdev, union drbd_state mask,
                 rv = is_valid_state(mdev, ns);
                 if (rv == SS_SUCCESS)
                         rv = is_valid_state_transition(mdev, ns, os);
-                spin_unlock_irqrestore(&mdev->req_lock, flags);
+                spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);

                 if (rv < SS_SUCCESS) {
                         if (f & CS_VERBOSE)
@@ -601,7 +601,7 @@ drbd_req_state(struct drbd_conf *mdev, union drbd_state mask,
                                 print_st_err(mdev, os, ns, rv);
                         goto abort;
                 }
-                spin_lock_irqsave(&mdev->req_lock, flags);
+                spin_lock_irqsave(&mdev->tconn->req_lock, flags);
                 os = mdev->state;
                 ns.i = (os.i & ~mask.i) | val.i;
                 rv = _drbd_set_state(mdev, ns, f, &done);
@@ -610,7 +610,7 @@ drbd_req_state(struct drbd_conf *mdev, union drbd_state mask,
                 rv = _drbd_set_state(mdev, ns, f, &done);
         }

-        spin_unlock_irqrestore(&mdev->req_lock, flags);
+        spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);

         if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) {
                 D_ASSERT(current != mdev->tconn->worker.task);
@@ -1367,9 +1367,9 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
                                 drbd_uuid_new_current(mdev);
                                 clear_bit(NEW_CUR_UUID, &mdev->flags);
                         }
-                        spin_lock_irq(&mdev->req_lock);
+                        spin_lock_irq(&mdev->tconn->req_lock);
                         _drbd_set_state(_NS(mdev, susp_fen, 0), CS_VERBOSE, NULL);
-                        spin_unlock_irq(&mdev->req_lock);
+                        spin_unlock_irq(&mdev->tconn->req_lock);
                 }
                 /* case2: The connection was established again: */
                 if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
@@ -1380,11 +1380,11 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
         }

         if (what != NOTHING) {
-                spin_lock_irq(&mdev->req_lock);
+                spin_lock_irq(&mdev->tconn->req_lock);
                 _tl_restart(mdev, what);
                 nsm.i &= mdev->state.i;
                 _drbd_set_state(mdev, nsm, CS_VERBOSE, NULL);
-                spin_unlock_irq(&mdev->req_lock);
+                spin_unlock_irq(&mdev->tconn->req_lock);
         }

         /* Became sync source. With protocol >= 96, we still need to send out
@@ -2898,7 +2898,7 @@ static int drbd_open(struct block_device *bdev, fmode_t mode)
         int rv = 0;

         mutex_lock(&drbd_main_mutex);
-        spin_lock_irqsave(&mdev->req_lock, flags);
+        spin_lock_irqsave(&mdev->tconn->req_lock, flags);
         /* to have a stable mdev->state.role
          * and no race with updating open_cnt */

@@ -2911,7 +2911,7 @@ static int drbd_open(struct block_device *bdev, fmode_t mode)

         if (!rv)
                 mdev->open_cnt++;
-        spin_unlock_irqrestore(&mdev->req_lock, flags);
+        spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
         mutex_unlock(&drbd_main_mutex);

         return rv;
@@ -2990,7 +2990,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
         spin_lock_init(&mdev->tconn->meta.work.q_lock);

         spin_lock_init(&mdev->al_lock);
-        spin_lock_init(&mdev->req_lock);
+        spin_lock_init(&mdev->tconn->req_lock);
         spin_lock_init(&mdev->peer_seq_lock);
         spin_lock_init(&mdev->epoch_lock);

@@ -3451,7 +3451,7 @@ struct drbd_conf *drbd_new_device(unsigned int minor)
         blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
         blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
         blk_queue_merge_bvec(q, drbd_merge_bvec);
-        q->queue_lock = &mdev->req_lock;
+        q->queue_lock = &mdev->tconn->req_lock; /* needed since we use */

         mdev->md_io_page = alloc_page(GFP_KERNEL);
         if (!mdev->md_io_page)
@@ -3784,14 +3784,14 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
         mdev->sync_conf.al_extents = be32_to_cpu(buffer->al_nr_extents);
         bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);

-        spin_lock_irq(&mdev->req_lock);
+        spin_lock_irq(&mdev->tconn->req_lock);
         if (mdev->state.conn < C_CONNECTED) {
                 int peer;
                 peer = be32_to_cpu(buffer->la_peer_max_bio_size);
                 peer = max_t(int, peer, DRBD_MAX_BIO_SIZE_SAFE);
                 mdev->peer_max_bio_size = peer;
         }
-        spin_unlock_irq(&mdev->req_lock);
+        spin_unlock_irq(&mdev->tconn->req_lock);

         if (mdev->sync_conf.al_extents < 7)
                 mdev->sync_conf.al_extents = 127;
@@ -4046,13 +4046,13 @@ void drbd_queue_bitmap_io(struct drbd_conf *mdev,
         mdev->bm_io_work.why = why;
         mdev->bm_io_work.flags = flags;

-        spin_lock_irq(&mdev->req_lock);
+        spin_lock_irq(&mdev->tconn->req_lock);
         set_bit(BITMAP_IO, &mdev->flags);
         if (atomic_read(&mdev->ap_bio_cnt) == 0) {
                 if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
                         drbd_queue_work(&mdev->tconn->data.work, &mdev->bm_io_work.w);
         }
-        spin_unlock_irq(&mdev->req_lock);
+        spin_unlock_irq(&mdev->tconn->req_lock);
 }

 /**
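For orientation when reading the transfer-log hunks above: the DOC comment in the first hunk (around line 185) describes the structure that oldest_tle/newest_tle anchor. A minimal sketch of that epoch object follows, with the field set inferred from the lines this patch touches (b->w.cb, b->requests, new->next, new->br_number, b->n_writes) rather than quoted from drbd_int.h:

struct drbd_tl_epoch {                  /* sketch; see drbd_int.h for the real definition */
        struct drbd_work w;             /* w.cb != NULL => dec_ap_pending() in tl_clear() */
        struct list_head requests;      /* circular double linked list of requests in this epoch */
        struct drbd_tl_epoch *next;     /* single linked list, walked from tconn->oldest_tle */
        unsigned int br_number;         /* barrier number; never 0 (see _tl_add_barrier()) */
        int n_writes;                   /* write requests attached before this barrier */
};

After this patch the anchors tconn->oldest_tle and tconn->newest_tle, together with tconn->out_of_sequence_requests, live in the per-connection object, and every access in the hunks above takes place under tconn->req_lock.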