diff options
author | Andreas Gruenbacher <agruen@linbit.com> | 2011-06-27 10:23:33 -0400 |
---|---|---|
committer | Philipp Reisner <philipp.reisner@linbit.com> | 2014-02-17 10:45:04 -0500 |
commit | 0b0ba1efc7b887bc2bd767ef822979fe2dae620e (patch) | |
tree | cb87bd0b135803615bd76eebb63ddc72bf797b20 /drivers/block/drbd/drbd_req.c | |
parent | 1ec861ebd0889263841b822ee3f3eb49caf23656 (diff) |
drbd: Add explicit device parameter to D_ASSERT
The implicit dependency on a variable inside the macro is problematic.
Signed-off-by: Andreas Gruenbacher <agruen@linbit.com>
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Diffstat (limited to 'drivers/block/drbd/drbd_req.c')
-rw-r--r-- | drivers/block/drbd/drbd_req.c | 44 |
1 file changed, 22 insertions, 22 deletions
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index 17fade0118ff..e772b523ebba 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c | |||
@@ -307,7 +307,7 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m) | |||
307 | static int drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_error *m, int put) | 307 | static int drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_error *m, int put) |
308 | { | 308 | { |
309 | struct drbd_device *device = req->w.device; | 309 | struct drbd_device *device = req->w.device; |
310 | D_ASSERT(m || (req->rq_state & RQ_POSTPONED)); | 310 | D_ASSERT(device, m || (req->rq_state & RQ_POSTPONED)); |
311 | 311 | ||
312 | if (!atomic_sub_and_test(put, &req->completion_ref)) | 312 | if (!atomic_sub_and_test(put, &req->completion_ref)) |
313 | return 0; | 313 | return 0; |
@@ -374,7 +374,7 @@ static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m, | |||
374 | ++c_put; | 374 | ++c_put; |
375 | 375 | ||
376 | if (!(s & RQ_LOCAL_ABORTED) && (set & RQ_LOCAL_ABORTED)) { | 376 | if (!(s & RQ_LOCAL_ABORTED) && (set & RQ_LOCAL_ABORTED)) { |
377 | D_ASSERT(req->rq_state & RQ_LOCAL_PENDING); | 377 | D_ASSERT(device, req->rq_state & RQ_LOCAL_PENDING); |
378 | /* local completion may still come in later, | 378 | /* local completion may still come in later, |
379 | * we need to keep the req object around. */ | 379 | * we need to keep the req object around. */ |
380 | kref_get(&req->kref); | 380 | kref_get(&req->kref); |
@@ -475,7 +475,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, | |||
475 | case TO_BE_SENT: /* via network */ | 475 | case TO_BE_SENT: /* via network */ |
476 | /* reached via __drbd_make_request | 476 | /* reached via __drbd_make_request |
477 | * and from w_read_retry_remote */ | 477 | * and from w_read_retry_remote */ |
478 | D_ASSERT(!(req->rq_state & RQ_NET_MASK)); | 478 | D_ASSERT(device, !(req->rq_state & RQ_NET_MASK)); |
479 | rcu_read_lock(); | 479 | rcu_read_lock(); |
480 | nc = rcu_dereference(first_peer_device(device)->connection->net_conf); | 480 | nc = rcu_dereference(first_peer_device(device)->connection->net_conf); |
481 | p = nc->wire_protocol; | 481 | p = nc->wire_protocol; |
@@ -488,7 +488,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, | |||
488 | 488 | ||
489 | case TO_BE_SUBMITTED: /* locally */ | 489 | case TO_BE_SUBMITTED: /* locally */ |
490 | /* reached via __drbd_make_request */ | 490 | /* reached via __drbd_make_request */ |
491 | D_ASSERT(!(req->rq_state & RQ_LOCAL_MASK)); | 491 | D_ASSERT(device, !(req->rq_state & RQ_LOCAL_MASK)); |
492 | mod_rq_state(req, m, 0, RQ_LOCAL_PENDING); | 492 | mod_rq_state(req, m, 0, RQ_LOCAL_PENDING); |
493 | break; | 493 | break; |
494 | 494 | ||
@@ -533,13 +533,13 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, | |||
533 | /* So we can verify the handle in the answer packet. | 533 | /* So we can verify the handle in the answer packet. |
534 | * Corresponding drbd_remove_request_interval is in | 534 | * Corresponding drbd_remove_request_interval is in |
535 | * drbd_req_complete() */ | 535 | * drbd_req_complete() */ |
536 | D_ASSERT(drbd_interval_empty(&req->i)); | 536 | D_ASSERT(device, drbd_interval_empty(&req->i)); |
537 | drbd_insert_interval(&device->read_requests, &req->i); | 537 | drbd_insert_interval(&device->read_requests, &req->i); |
538 | 538 | ||
539 | set_bit(UNPLUG_REMOTE, &device->flags); | 539 | set_bit(UNPLUG_REMOTE, &device->flags); |
540 | 540 | ||
541 | D_ASSERT(req->rq_state & RQ_NET_PENDING); | 541 | D_ASSERT(device, req->rq_state & RQ_NET_PENDING); |
542 | D_ASSERT((req->rq_state & RQ_LOCAL_MASK) == 0); | 542 | D_ASSERT(device, (req->rq_state & RQ_LOCAL_MASK) == 0); |
543 | mod_rq_state(req, m, 0, RQ_NET_QUEUED); | 543 | mod_rq_state(req, m, 0, RQ_NET_QUEUED); |
544 | req->w.cb = w_send_read_req; | 544 | req->w.cb = w_send_read_req; |
545 | drbd_queue_work(&first_peer_device(device)->connection->sender_work, &req->w); | 545 | drbd_queue_work(&first_peer_device(device)->connection->sender_work, &req->w); |
@@ -551,7 +551,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, | |||
551 | 551 | ||
552 | /* Corresponding drbd_remove_request_interval is in | 552 | /* Corresponding drbd_remove_request_interval is in |
553 | * drbd_req_complete() */ | 553 | * drbd_req_complete() */ |
554 | D_ASSERT(drbd_interval_empty(&req->i)); | 554 | D_ASSERT(device, drbd_interval_empty(&req->i)); |
555 | drbd_insert_interval(&device->write_requests, &req->i); | 555 | drbd_insert_interval(&device->write_requests, &req->i); |
556 | 556 | ||
557 | /* NOTE | 557 | /* NOTE |
@@ -574,7 +574,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, | |||
574 | set_bit(UNPLUG_REMOTE, &device->flags); | 574 | set_bit(UNPLUG_REMOTE, &device->flags); |
575 | 575 | ||
576 | /* queue work item to send data */ | 576 | /* queue work item to send data */ |
577 | D_ASSERT(req->rq_state & RQ_NET_PENDING); | 577 | D_ASSERT(device, req->rq_state & RQ_NET_PENDING); |
578 | mod_rq_state(req, m, 0, RQ_NET_QUEUED|RQ_EXP_BARR_ACK); | 578 | mod_rq_state(req, m, 0, RQ_NET_QUEUED|RQ_EXP_BARR_ACK); |
579 | req->w.cb = w_send_dblock; | 579 | req->w.cb = w_send_dblock; |
580 | drbd_queue_work(&first_peer_device(device)->connection->sender_work, &req->w); | 580 | drbd_queue_work(&first_peer_device(device)->connection->sender_work, &req->w); |
@@ -640,15 +640,15 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, | |||
640 | * If this request had been marked as RQ_POSTPONED before, | 640 | * If this request had been marked as RQ_POSTPONED before, |
641 | * it will actually not be completed, but "restarted", | 641 | * it will actually not be completed, but "restarted", |
642 | * resubmitted from the retry worker context. */ | 642 | * resubmitted from the retry worker context. */ |
643 | D_ASSERT(req->rq_state & RQ_NET_PENDING); | 643 | D_ASSERT(device, req->rq_state & RQ_NET_PENDING); |
644 | D_ASSERT(req->rq_state & RQ_EXP_WRITE_ACK); | 644 | D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK); |
645 | mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_DONE|RQ_NET_OK); | 645 | mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_DONE|RQ_NET_OK); |
646 | break; | 646 | break; |
647 | 647 | ||
648 | case WRITE_ACKED_BY_PEER_AND_SIS: | 648 | case WRITE_ACKED_BY_PEER_AND_SIS: |
649 | req->rq_state |= RQ_NET_SIS; | 649 | req->rq_state |= RQ_NET_SIS; |
650 | case WRITE_ACKED_BY_PEER: | 650 | case WRITE_ACKED_BY_PEER: |
651 | D_ASSERT(req->rq_state & RQ_EXP_WRITE_ACK); | 651 | D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK); |
652 | /* protocol C; successfully written on peer. | 652 | /* protocol C; successfully written on peer. |
653 | * Nothing more to do here. | 653 | * Nothing more to do here. |
654 | * We want to keep the tl in place for all protocols, to cater | 654 | * We want to keep the tl in place for all protocols, to cater |
@@ -656,22 +656,22 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, | |||
656 | 656 | ||
657 | goto ack_common; | 657 | goto ack_common; |
658 | case RECV_ACKED_BY_PEER: | 658 | case RECV_ACKED_BY_PEER: |
659 | D_ASSERT(req->rq_state & RQ_EXP_RECEIVE_ACK); | 659 | D_ASSERT(device, req->rq_state & RQ_EXP_RECEIVE_ACK); |
660 | /* protocol B; pretends to be successfully written on peer. | 660 | /* protocol B; pretends to be successfully written on peer. |
661 | * see also notes above in HANDED_OVER_TO_NETWORK about | 661 | * see also notes above in HANDED_OVER_TO_NETWORK about |
662 | * protocol != C */ | 662 | * protocol != C */ |
663 | ack_common: | 663 | ack_common: |
664 | D_ASSERT(req->rq_state & RQ_NET_PENDING); | 664 | D_ASSERT(device, req->rq_state & RQ_NET_PENDING); |
665 | mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK); | 665 | mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK); |
666 | break; | 666 | break; |
667 | 667 | ||
668 | case POSTPONE_WRITE: | 668 | case POSTPONE_WRITE: |
669 | D_ASSERT(req->rq_state & RQ_EXP_WRITE_ACK); | 669 | D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK); |
670 | /* If this node has already detected the write conflict, the | 670 | /* If this node has already detected the write conflict, the |
671 | * worker will be waiting on misc_wait. Wake it up once this | 671 | * worker will be waiting on misc_wait. Wake it up once this |
672 | * request has completed locally. | 672 | * request has completed locally. |
673 | */ | 673 | */ |
674 | D_ASSERT(req->rq_state & RQ_NET_PENDING); | 674 | D_ASSERT(device, req->rq_state & RQ_NET_PENDING); |
675 | req->rq_state |= RQ_POSTPONED; | 675 | req->rq_state |= RQ_POSTPONED; |
676 | if (req->i.waiting) | 676 | if (req->i.waiting) |
677 | wake_up(&device->misc_wait); | 677 | wake_up(&device->misc_wait); |
@@ -752,7 +752,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, | |||
752 | break; | 752 | break; |
753 | 753 | ||
754 | case DATA_RECEIVED: | 754 | case DATA_RECEIVED: |
755 | D_ASSERT(req->rq_state & RQ_NET_PENDING); | 755 | D_ASSERT(device, req->rq_state & RQ_NET_PENDING); |
756 | mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK|RQ_NET_DONE); | 756 | mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK|RQ_NET_DONE); |
757 | break; | 757 | break; |
758 | 758 | ||
@@ -783,8 +783,8 @@ static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector, | |||
783 | return false; | 783 | return false; |
784 | esector = sector + (size >> 9) - 1; | 784 | esector = sector + (size >> 9) - 1; |
785 | nr_sectors = drbd_get_capacity(device->this_bdev); | 785 | nr_sectors = drbd_get_capacity(device->this_bdev); |
786 | D_ASSERT(sector < nr_sectors); | 786 | D_ASSERT(device, sector < nr_sectors); |
787 | D_ASSERT(esector < nr_sectors); | 787 | D_ASSERT(device, esector < nr_sectors); |
788 | 788 | ||
789 | sbnr = BM_SECT_TO_BIT(sector); | 789 | sbnr = BM_SECT_TO_BIT(sector); |
790 | ebnr = BM_SECT_TO_BIT(esector); | 790 | ebnr = BM_SECT_TO_BIT(esector); |
@@ -974,7 +974,7 @@ static int drbd_process_write_request(struct drbd_request *req) | |||
974 | * replicating, in which case there is no point. */ | 974 | * replicating, in which case there is no point. */ |
975 | if (unlikely(req->i.size == 0)) { | 975 | if (unlikely(req->i.size == 0)) { |
976 | /* The only size==0 bios we expect are empty flushes. */ | 976 | /* The only size==0 bios we expect are empty flushes. */ |
977 | D_ASSERT(req->master_bio->bi_rw & REQ_FLUSH); | 977 | D_ASSERT(device, req->master_bio->bi_rw & REQ_FLUSH); |
978 | if (remote) | 978 | if (remote) |
979 | _req_mod(req, QUEUE_AS_DRBD_BARRIER); | 979 | _req_mod(req, QUEUE_AS_DRBD_BARRIER); |
980 | return remote; | 980 | return remote; |
@@ -983,7 +983,7 @@ static int drbd_process_write_request(struct drbd_request *req) | |||
983 | if (!remote && !send_oos) | 983 | if (!remote && !send_oos) |
984 | return 0; | 984 | return 0; |
985 | 985 | ||
986 | D_ASSERT(!(remote && send_oos)); | 986 | D_ASSERT(device, !(remote && send_oos)); |
987 | 987 | ||
988 | if (remote) { | 988 | if (remote) { |
989 | _req_mod(req, TO_BE_SENT); | 989 | _req_mod(req, TO_BE_SENT); |
@@ -1281,7 +1281,7 @@ void drbd_make_request(struct request_queue *q, struct bio *bio) | |||
1281 | /* | 1281 | /* |
1282 | * what we "blindly" assume: | 1282 | * what we "blindly" assume: |
1283 | */ | 1283 | */ |
1284 | D_ASSERT(IS_ALIGNED(bio->bi_iter.bi_size, 512)); | 1284 | D_ASSERT(device, IS_ALIGNED(bio->bi_iter.bi_size, 512)); |
1285 | 1285 | ||
1286 | inc_ap_bio(device); | 1286 | inc_ap_bio(device); |
1287 | __drbd_make_request(device, bio, start_time); | 1287 | __drbd_make_request(device, bio, start_time); |