-rw-r--r--	drivers/block/drbd/drbd_actlog.c	38
-rw-r--r--	drivers/block/drbd/drbd_bitmap.c	2
-rw-r--r--	drivers/block/drbd/drbd_int.h	10
-rw-r--r--	drivers/block/drbd/drbd_main.c	46
-rw-r--r--	drivers/block/drbd/drbd_nl.c	8
-rw-r--r--	drivers/block/drbd/drbd_receiver.c	54
-rw-r--r--	drivers/block/drbd/drbd_req.c	44
-rw-r--r--	drivers/block/drbd/drbd_state.c	6
-rw-r--r--	drivers/block/drbd/drbd_worker.c	8
9 files changed, 109 insertions, 107 deletions
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 4d892b118c48..081ff42602d0 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -198,7 +198,7 @@ int drbd_md_sync_page_io(struct drbd_device *device, struct drbd_backing_dev *bd
 	int err;
 	struct page *iop = device->md_io_page;
 
-	D_ASSERT(atomic_read(&device->md_io_in_use) == 1);
+	D_ASSERT(device, atomic_read(&device->md_io_in_use) == 1);
 
 	BUG_ON(!bdev->md_bdev);
 
@@ -264,8 +264,8 @@ bool drbd_al_begin_io_fastpath(struct drbd_device *device, struct drbd_interval
 	unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);
 	unsigned last = i->size == 0 ? first : (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9);
 
-	D_ASSERT((unsigned)(last - first) <= 1);
-	D_ASSERT(atomic_read(&device->local_cnt) > 0);
+	D_ASSERT(device, (unsigned)(last - first) <= 1);
+	D_ASSERT(device, atomic_read(&device->local_cnt) > 0);
 
 	/* FIXME figure out a fast path for bios crossing AL extent boundaries */
 	if (first != last)
@@ -284,8 +284,8 @@ bool drbd_al_begin_io_prepare(struct drbd_device *device, struct drbd_interval *
 	unsigned enr;
 	bool need_transaction = false;
 
-	D_ASSERT(first <= last);
-	D_ASSERT(atomic_read(&device->local_cnt) > 0);
+	D_ASSERT(device, first <= last);
+	D_ASSERT(device, atomic_read(&device->local_cnt) > 0);
 
 	for (enr = first; enr <= last; enr++) {
 		struct lc_element *al_ext;
@@ -371,7 +371,7 @@ int drbd_al_begin_io_nonblock(struct drbd_device *device, struct drbd_interval *
 	unsigned available_update_slots;
 	unsigned enr;
 
-	D_ASSERT(first <= last);
+	D_ASSERT(device, first <= last);
 
 	nr_al_extents = 1 + last - first; /* worst case: all touched extends are cold. */
 	available_update_slots = min(al->nr_elements - al->used,
@@ -419,7 +419,7 @@ void drbd_al_complete_io(struct drbd_device *device, struct drbd_interval *i)
 	struct lc_element *extent;
 	unsigned long flags;
 
-	D_ASSERT(first <= last);
+	D_ASSERT(device, first <= last);
 	spin_lock_irqsave(&device->al_lock, flags);
 
 	for (enr = first; enr <= last; enr++) {
@@ -648,7 +648,7 @@ void drbd_al_shrink(struct drbd_device *device)
 	struct lc_element *al_ext;
 	int i;
 
-	D_ASSERT(test_bit(__LC_LOCKED, &device->act_log->flags));
+	D_ASSERT(device, test_bit(__LC_LOCKED, &device->act_log->flags));
 
 	for (i = 0; i < device->act_log->nr_elements; i++) {
 		al_ext = lc_element_by_index(device->act_log, i);
@@ -729,7 +729,7 @@ static void drbd_try_clear_on_disk_bm(struct drbd_device *device, sector_t secto
 
 	unsigned int enr;
 
-	D_ASSERT(atomic_read(&device->local_cnt));
+	D_ASSERT(device, atomic_read(&device->local_cnt));
 
 	/* I simply assume that a sector/size pair never crosses
 	 * a 16 MB extent border. (Currently this is true...) */
@@ -1093,8 +1093,8 @@ int drbd_try_rs_begin_io(struct drbd_device *device, sector_t sector)
 		e = lc_find(device->resync, device->resync_wenr);
 		bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
 		if (bm_ext) {
-			D_ASSERT(!test_bit(BME_LOCKED, &bm_ext->flags));
-			D_ASSERT(test_bit(BME_NO_WRITES, &bm_ext->flags));
+			D_ASSERT(device, !test_bit(BME_LOCKED, &bm_ext->flags));
+			D_ASSERT(device, test_bit(BME_NO_WRITES, &bm_ext->flags));
 			clear_bit(BME_NO_WRITES, &bm_ext->flags);
 			device->resync_wenr = LC_FREE;
 			if (lc_put(device->resync, &bm_ext->lce) == 0)
@@ -1118,7 +1118,7 @@ int drbd_try_rs_begin_io(struct drbd_device *device, sector_t sector)
 			 * so we tried again.
 			 * drop the extra reference. */
 			bm_ext->lce.refcnt--;
-			D_ASSERT(bm_ext->lce.refcnt > 0);
+			D_ASSERT(device, bm_ext->lce.refcnt > 0);
 		}
 		goto check_al;
 	} else {
@@ -1141,10 +1141,10 @@ int drbd_try_rs_begin_io(struct drbd_device *device, sector_t sector)
 			bm_ext->rs_failed = 0;
 			lc_committed(device->resync);
 			wake_up(&device->al_wait);
-			D_ASSERT(test_bit(BME_LOCKED, &bm_ext->flags) == 0);
+			D_ASSERT(device, test_bit(BME_LOCKED, &bm_ext->flags) == 0);
 		}
 		set_bit(BME_NO_WRITES, &bm_ext->flags);
-		D_ASSERT(bm_ext->lce.refcnt == 1);
+		D_ASSERT(device, bm_ext->lce.refcnt == 1);
 		device->resync_locked++;
 		goto check_al;
 	}
@@ -1244,8 +1244,8 @@ int drbd_rs_del_all(struct drbd_device *device)
 				drbd_info(device, "dropping %u in drbd_rs_del_all, apparently"
 				     " got 'synced' by application io\n",
 				     device->resync_wenr);
-				D_ASSERT(!test_bit(BME_LOCKED, &bm_ext->flags));
-				D_ASSERT(test_bit(BME_NO_WRITES, &bm_ext->flags));
+				D_ASSERT(device, !test_bit(BME_LOCKED, &bm_ext->flags));
+				D_ASSERT(device, test_bit(BME_NO_WRITES, &bm_ext->flags));
 				clear_bit(BME_NO_WRITES, &bm_ext->flags);
 				device->resync_wenr = LC_FREE;
 				lc_put(device->resync, &bm_ext->lce);
@@ -1257,11 +1257,11 @@ int drbd_rs_del_all(struct drbd_device *device)
 				spin_unlock_irq(&device->al_lock);
 				return -EAGAIN;
 			}
-			D_ASSERT(!test_bit(BME_LOCKED, &bm_ext->flags));
-			D_ASSERT(!test_bit(BME_NO_WRITES, &bm_ext->flags));
+			D_ASSERT(device, !test_bit(BME_LOCKED, &bm_ext->flags));
+			D_ASSERT(device, !test_bit(BME_NO_WRITES, &bm_ext->flags));
 			lc_del(device->resync, &bm_ext->lce);
 		}
-		D_ASSERT(device->resync->used == 0);
+		D_ASSERT(device, device->resync->used == 0);
 		put_ldev(device);
 	}
 	spin_unlock_irq(&device->al_lock);
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index 232eeb7ca84c..08259c101183 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -692,7 +692,7 @@ int drbd_bm_resize(struct drbd_device *device, sector_t capacity, int set_new_bi
 		want = ALIGN(words*sizeof(long), PAGE_SIZE) >> PAGE_SHIFT;
 		have = b->bm_number_of_pages;
 		if (want == have) {
-			D_ASSERT(b->bm_pages != NULL);
+			D_ASSERT(device, b->bm_pages != NULL);
 			npages = b->bm_pages;
 		} else {
 			if (drbd_insert_fault(device, DRBD_FAULT_BM_ALLOC))
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 417241a14b3e..4dcad12581bd 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -147,8 +147,10 @@ void drbd_printk_with_wrong_object_type(void);
 #define dynamic_drbd_dbg(device, fmt, args...) \
 	dynamic_dev_dbg(disk_to_dev(device->vdisk), fmt, ## args)
 
-#define D_ASSERT(exp)	if (!(exp)) \
-	drbd_err(device, "ASSERT( " #exp " ) in %s:%d\n", __FILE__, __LINE__)
+#define D_ASSERT(device, exp)	do { \
+	if (!(exp)) \
+		drbd_err(device, "ASSERT( " #exp " ) in %s:%d\n", __FILE__, __LINE__); \
+	} while (0)
 
 /**
  * expect - Make an assertion
@@ -1863,7 +1865,7 @@ static inline void put_ldev(struct drbd_device *device)
 	 * so we must not sleep here. */
 
 	__release(local);
-	D_ASSERT(i >= 0);
+	D_ASSERT(device, i >= 0);
 	if (i == 0) {
 		if (device->state.disk == D_DISKLESS)
 			/* even internal references gone, safe to destroy */
@@ -2094,7 +2096,7 @@ static inline void dec_ap_bio(struct drbd_device *device)
 	int mxb = drbd_get_max_buffers(device);
 	int ap_bio = atomic_dec_return(&device->ap_bio_cnt);
 
-	D_ASSERT(ap_bio >= 0);
+	D_ASSERT(device, ap_bio >= 0);
 
 	if (ap_bio == 0 && test_bit(BITMAP_IO, &device->flags)) {
 		if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
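The reworked D_ASSERT above is wrapped in do { } while (0), the standard kernel idiom for multi-statement macros: the old bare-if form could silently capture a following "else" at the call site (the classic dangling-else). A minimal standalone sketch of the hazard the new form avoids; OLD_ASSERT, NEW_ASSERT and the printf() stand-in for drbd_err() are illustrative only and not part of the patch:

#include <stdio.h>

/* old style: a bare if-statement macro */
#define OLD_ASSERT(exp) if (!(exp)) \
	printf("ASSERT( " #exp " ) failed\n")

/* new style: do { } while (0) makes the expansion a single statement;
 * the device argument is unused in this sketch (the real macro passes
 * it to drbd_err()) */
#define NEW_ASSERT(device, exp) do { \
	if (!(exp)) \
		printf("ASSERT( " #exp " ) failed\n"); \
	} while (0)

int main(void)
{
	int have_disk = 1;

	/* Substituting OLD_ASSERT here would pair the "else" below with
	 * the macro's hidden "if", so "diskless" would print whenever
	 * the assertion passes, even though have_disk is true. */
	if (have_disk)
		NEW_ASSERT(NULL, have_disk == 1);
	else
		printf("diskless\n");
	return 0;
}

The explicit device parameter, in turn, means drbd_err() no longer relies on a variable named "device" happening to be in scope at every call site, which is what the mechanical churn in the remaining files adjusts.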
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 9e2c8f9d7a0b..358eb3445f72 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -891,7 +891,7 @@ void drbd_gen_and_send_sync_uuid(struct drbd_device *device)
 	struct p_rs_uuid *p;
 	u64 uuid;
 
-	D_ASSERT(device->state.disk == D_UP_TO_DATE);
+	D_ASSERT(device, device->state.disk == D_UP_TO_DATE);
 
 	uuid = device->ldev->md.uuid[UI_BITMAP];
 	if (uuid && uuid != UUID_JUST_CREATED)
@@ -919,7 +919,7 @@ int drbd_send_sizes(struct drbd_device *device, int trigger_reply, enum dds_flag
 	unsigned int max_bio_size;
 
 	if (get_ldev_if_state(device, D_NEGOTIATING)) {
-		D_ASSERT(device->ldev->backing_bdev);
+		D_ASSERT(device, device->ldev->backing_bdev);
 		d_size = drbd_get_max_capacity(device->ldev);
 		rcu_read_lock();
 		u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
@@ -1974,7 +1974,7 @@ void drbd_device_cleanup(struct drbd_device *device)
 		device->rs_mark_left[i] = 0;
 		device->rs_mark_time[i] = 0;
 	}
-	D_ASSERT(first_peer_device(device)->connection->net_conf == NULL);
+	D_ASSERT(device, first_peer_device(device)->connection->net_conf == NULL);
 
 	drbd_set_my_capacity(device, 0);
 	if (device->bitmap) {
@@ -1988,16 +1988,16 @@ void drbd_device_cleanup(struct drbd_device *device)
 
 	clear_bit(AL_SUSPENDED, &device->flags);
 
-	D_ASSERT(list_empty(&device->active_ee));
-	D_ASSERT(list_empty(&device->sync_ee));
-	D_ASSERT(list_empty(&device->done_ee));
-	D_ASSERT(list_empty(&device->read_ee));
-	D_ASSERT(list_empty(&device->net_ee));
-	D_ASSERT(list_empty(&device->resync_reads));
-	D_ASSERT(list_empty(&first_peer_device(device)->connection->sender_work.q));
-	D_ASSERT(list_empty(&device->resync_work.list));
-	D_ASSERT(list_empty(&device->unplug_work.list));
-	D_ASSERT(list_empty(&device->go_diskless.list));
+	D_ASSERT(device, list_empty(&device->active_ee));
+	D_ASSERT(device, list_empty(&device->sync_ee));
+	D_ASSERT(device, list_empty(&device->done_ee));
+	D_ASSERT(device, list_empty(&device->read_ee));
+	D_ASSERT(device, list_empty(&device->net_ee));
+	D_ASSERT(device, list_empty(&device->resync_reads));
+	D_ASSERT(device, list_empty(&first_peer_device(device)->connection->sender_work.q));
+	D_ASSERT(device, list_empty(&device->resync_work.list));
+	D_ASSERT(device, list_empty(&device->unplug_work.list));
+	D_ASSERT(device, list_empty(&device->go_diskless.list));
 
 	drbd_set_defaults(device);
 }
@@ -2014,7 +2014,7 @@ static void drbd_destroy_mempools(void)
 		drbd_pp_vacant--;
 	}
 
-	/* D_ASSERT(atomic_read(&drbd_pp_vacant)==0); */
+	/* D_ASSERT(device, atomic_read(&drbd_pp_vacant)==0); */
 
 	if (drbd_md_io_bio_set)
 		bioset_free(drbd_md_io_bio_set);
@@ -2169,7 +2169,7 @@ void drbd_destroy_device(struct kref *kref)
 	del_timer_sync(&device->request_timer);
 
 	/* paranoia asserts */
-	D_ASSERT(device->open_cnt == 0);
+	D_ASSERT(device, device->open_cnt == 0);
 	/* end paranoia asserts */
 
 	/* cleanup stuff that may have been allocated during
@@ -3006,7 +3006,7 @@ void drbd_md_write(struct drbd_device *device, void *b)
 	buffer->al_stripes = cpu_to_be32(device->ldev->md.al_stripes);
 	buffer->al_stripe_size_4k = cpu_to_be32(device->ldev->md.al_stripe_size_4k);
 
-	D_ASSERT(drbd_md_ss(device->ldev) == device->ldev->md.md_offset);
+	D_ASSERT(device, drbd_md_ss(device->ldev) == device->ldev->md.md_offset);
 	sector = device->ldev->md.md_offset;
 
 	if (drbd_md_sync_page_io(device, device->ldev, sector, WRITE)) {
@@ -3459,7 +3459,7 @@ static int w_bitmap_io(struct drbd_work *w, int unused)
 	struct drbd_device *device = w->device;
 	int rv = -EIO;
 
-	D_ASSERT(atomic_read(&device->ap_bio_cnt) == 0);
+	D_ASSERT(device, atomic_read(&device->ap_bio_cnt) == 0);
 
 	if (get_ldev(device)) {
 		drbd_bm_lock(device, work->why, work->flags);
@@ -3498,7 +3498,7 @@ static int w_go_diskless(struct drbd_work *w, int unused)
 {
 	struct drbd_device *device = w->device;
 
-	D_ASSERT(device->state.disk == D_FAILED);
+	D_ASSERT(device, device->state.disk == D_FAILED);
 	/* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
 	 * inc/dec it frequently. Once we are D_DISKLESS, no one will touch
 	 * the protected members anymore, though, so once put_ldev reaches zero
@@ -3552,11 +3552,11 @@ void drbd_queue_bitmap_io(struct drbd_device *device,
 			  void (*done)(struct drbd_device *, int),
 			  char *why, enum bm_flag flags)
 {
-	D_ASSERT(current == first_peer_device(device)->connection->worker.task);
+	D_ASSERT(device, current == first_peer_device(device)->connection->worker.task);
 
-	D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &device->flags));
-	D_ASSERT(!test_bit(BITMAP_IO, &device->flags));
-	D_ASSERT(list_empty(&device->bm_io_work.w.list));
+	D_ASSERT(device, !test_bit(BITMAP_IO_QUEUED, &device->flags));
+	D_ASSERT(device, !test_bit(BITMAP_IO, &device->flags));
+	D_ASSERT(device, list_empty(&device->bm_io_work.w.list));
 	if (device->bm_io_work.why)
 		drbd_err(device, "FIXME going to queue '%s' but '%s' still pending?\n",
 			why, device->bm_io_work.why);
@@ -3589,7 +3589,7 @@ int drbd_bitmap_io(struct drbd_device *device, int (*io_fn)(struct drbd_device *
 {
 	int rv;
 
-	D_ASSERT(current != first_peer_device(device)->connection->worker.task);
+	D_ASSERT(device, current != first_peer_device(device)->connection->worker.task);
 
 	if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
 		drbd_suspend_io(device);
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index af26a0b099ca..924126436a8b 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -590,7 +590,7 @@ drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force)
 
 		if (rv == SS_NO_UP_TO_DATE_DISK &&
 		    device->state.disk == D_CONSISTENT && mask.pdsk == 0) {
-			D_ASSERT(device->state.pdsk == D_UNKNOWN);
+			D_ASSERT(device, device->state.pdsk == D_UNKNOWN);
 
 			if (conn_try_outdate_peer(first_peer_device(device)->connection)) {
 				val.disk = D_UP_TO_DATE;
@@ -1644,7 +1644,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
 	 * Devices and memory are no longer released by error cleanup below.
 	 * now device takes over responsibility, and the state engine should
 	 * clean it up somewhere. */
-	D_ASSERT(device->ldev == NULL);
+	D_ASSERT(device, device->ldev == NULL);
 	device->ldev = nbc;
 	device->resync = resync_lru;
 	device->rs_plan_s = new_plan;
@@ -3011,8 +3011,8 @@ next_resource:
 		}
 
 		device = peer_device->device;
-		D_ASSERT(device->vnr == volume);
-		D_ASSERT(first_peer_device(device)->connection == connection);
+		D_ASSERT(device, device->vnr == volume);
+		D_ASSERT(device, first_peer_device(device)->connection == connection);
 
 		dh->minor = device_to_minor(device);
 		dh->ret_code = NO_ERROR;
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 1de5cac5a8dd..761b15461cff 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -384,8 +384,8 @@ void __drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *
 	if (peer_req->flags & EE_HAS_DIGEST)
 		kfree(peer_req->digest);
 	drbd_free_pages(device, peer_req->pages, is_net);
-	D_ASSERT(atomic_read(&peer_req->pending_bios) == 0);
-	D_ASSERT(drbd_interval_empty(&peer_req->i));
+	D_ASSERT(device, atomic_read(&peer_req->pending_bios) == 0);
+	D_ASSERT(device, drbd_interval_empty(&peer_req->i));
 	mempool_free(peer_req, drbd_ee_mempool);
 }
 
@@ -1369,8 +1369,8 @@ next_bio:
 		sector += len >> 9;
 		--nr_pages;
 	}
-	D_ASSERT(page == NULL);
-	D_ASSERT(ds == 0);
+	D_ASSERT(device, page == NULL);
+	D_ASSERT(device, ds == 0);
 
 	atomic_set(&peer_req->pending_bios, n_bios);
 	do {
@@ -1624,7 +1624,7 @@ static int recv_dless_read(struct drbd_device *device, struct drbd_request *req,
 	device->recv_cnt += data_size>>9;
 
 	bio = req->master_bio;
-	D_ASSERT(sector == bio->bi_iter.bi_sector);
+	D_ASSERT(device, sector == bio->bi_iter.bi_sector);
 
 	bio_for_each_segment(bvec, bio, iter) {
 		void *mapped = kmap(bvec.bv_page) + bvec.bv_offset;
@@ -1644,7 +1644,7 @@ static int recv_dless_read(struct drbd_device *device, struct drbd_request *req,
 		}
 	}
 
-	D_ASSERT(data_size == 0);
+	D_ASSERT(device, data_size == 0);
 	return 0;
 }
 
@@ -1660,7 +1660,7 @@ static int e_end_resync_block(struct drbd_work *w, int unused)
 	sector_t sector = peer_req->i.sector;
 	int err;
 
-	D_ASSERT(drbd_interval_empty(&peer_req->i));
+	D_ASSERT(device, drbd_interval_empty(&peer_req->i));
 
 	if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
 		drbd_set_in_sync(device, sector, peer_req->i.size);
@@ -1774,7 +1774,7 @@ static int receive_RSDataReply(struct drbd_connection *connection, struct packet
 		return -EIO;
 
 	sector = be64_to_cpu(p->sector);
-	D_ASSERT(p->block_id == ID_SYNCER);
+	D_ASSERT(device, p->block_id == ID_SYNCER);
 
 	if (get_ldev(device)) {
 		/* data is submitted to disk within recv_resync_read.
@@ -1845,13 +1845,13 @@ static int e_end_block(struct drbd_work *w, int cancel)
 	 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
 	if (peer_req->flags & EE_IN_INTERVAL_TREE) {
 		spin_lock_irq(&first_peer_device(device)->connection->req_lock);
-		D_ASSERT(!drbd_interval_empty(&peer_req->i));
+		D_ASSERT(device, !drbd_interval_empty(&peer_req->i));
 		drbd_remove_epoch_entry_interval(device, peer_req);
 		if (peer_req->flags & EE_RESTART_REQUESTS)
 			restart_conflicting_writes(device, sector, peer_req->i.size);
 		spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
 	} else
-		D_ASSERT(drbd_interval_empty(&peer_req->i));
+		D_ASSERT(device, drbd_interval_empty(&peer_req->i));
 
 	drbd_may_finish_epoch(first_peer_device(device)->connection, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
 
@@ -2197,8 +2197,8 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
 	dp_flags = be32_to_cpu(p->dp_flags);
 	rw |= wire_flags_to_bio(device, dp_flags);
 	if (peer_req->pages == NULL) {
-		D_ASSERT(peer_req->i.size == 0);
-		D_ASSERT(dp_flags & DP_FLUSH);
+		D_ASSERT(device, peer_req->i.size == 0);
+		D_ASSERT(device, dp_flags & DP_FLUSH);
 	}
 
 	if (dp_flags & DP_MAY_SET_IN_SYNC)
@@ -2461,7 +2461,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
 			goto out_free_e;
 
 		if (pi->cmd == P_CSUM_RS_REQUEST) {
-			D_ASSERT(first_peer_device(device)->connection->agreed_pro_version >= 89);
+			D_ASSERT(device, first_peer_device(device)->connection->agreed_pro_version >= 89);
 			peer_req->w.cb = w_e_end_csum_rs_req;
 			/* used in the sector offset progress display */
 			device->bm_resync_fo = BM_SECT_TO_BIT(sector);
@@ -3357,11 +3357,11 @@ static int receive_SyncParam(struct drbd_connection *connection, struct packet_i
 	} else if (apv <= 94) {
 		header_size = sizeof(struct p_rs_param_89);
 		data_size = pi->size - header_size;
-		D_ASSERT(data_size == 0);
+		D_ASSERT(device, data_size == 0);
 	} else {
 		header_size = sizeof(struct p_rs_param_95);
 		data_size = pi->size - header_size;
-		D_ASSERT(data_size == 0);
+		D_ASSERT(device, data_size == 0);
 	}
 
 	/* initialize verify_alg and csums_alg */
@@ -3404,14 +3404,14 @@ static int receive_SyncParam(struct drbd_connection *connection, struct packet_i
 				goto reconnect;
 			/* we expect NUL terminated string */
 			/* but just in case someone tries to be evil */
-			D_ASSERT(p->verify_alg[data_size-1] == 0);
+			D_ASSERT(device, p->verify_alg[data_size-1] == 0);
 			p->verify_alg[data_size-1] = 0;
 
 		} else /* apv >= 89 */ {
 			/* we still expect NUL terminated strings */
 			/* but just in case someone tries to be evil */
-			D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0);
-			D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0);
+			D_ASSERT(device, p->verify_alg[SHARED_SECRET_MAX-1] == 0);
+			D_ASSERT(device, p->csums_alg[SHARED_SECRET_MAX-1] == 0);
 			p->verify_alg[SHARED_SECRET_MAX-1] = 0;
 			p->csums_alg[SHARED_SECRET_MAX-1] = 0;
 		}
@@ -3945,7 +3945,7 @@ static int receive_state(struct drbd_connection *connection, struct packet_info
 		} else {
 			if (test_and_clear_bit(CONN_DRY_RUN, &first_peer_device(device)->connection->flags))
 				return -EIO;
-			D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
+			D_ASSERT(device, os.conn == C_WF_REPORT_PARAMS);
 			conn_request_state(first_peer_device(device)->connection, NS(conn, C_DISCONNECTING), CS_HARD);
 			return -EIO;
 		}
@@ -4016,7 +4016,7 @@ static int receive_sync_uuid(struct drbd_connection *connection, struct packet_i
 		   device->state.conn < C_CONNECTED ||
 		   device->state.disk < D_NEGOTIATING);
 
-	/* D_ASSERT( device->state.conn == C_WF_SYNC_UUID ); */
+	/* D_ASSERT(device, device->state.conn == C_WF_SYNC_UUID ); */
 
 	/* Here the _drbd_uuid_ functions are right, current should
 	   _not_ be rotated into the history */
@@ -4293,7 +4293,7 @@ static int receive_bitmap(struct drbd_connection *connection, struct packet_info
 			goto out;
 		/* Omit CS_ORDERED with this state transition to avoid deadlocks. */
 		rv = _drbd_request_state(device, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
-		D_ASSERT(rv == SS_SUCCESS);
+		D_ASSERT(device, rv == SS_SUCCESS);
 	} else if (device->state.conn != C_WF_BITMAP_S) {
 		/* admin may have requested C_DISCONNECTING,
 		 * other threads may have noticed network errors */
@@ -4569,10 +4569,10 @@ static int drbd_disconnected(struct drbd_device *device)
 	if (i)
 		drbd_info(device, "pp_in_use = %d, expected 0\n", i);
 
-	D_ASSERT(list_empty(&device->read_ee));
-	D_ASSERT(list_empty(&device->active_ee));
-	D_ASSERT(list_empty(&device->sync_ee));
-	D_ASSERT(list_empty(&device->done_ee));
+	D_ASSERT(device, list_empty(&device->read_ee));
+	D_ASSERT(device, list_empty(&device->active_ee));
+	D_ASSERT(device, list_empty(&device->sync_ee));
+	D_ASSERT(device, list_empty(&device->done_ee));
 
 	return 0;
 }
@@ -4902,7 +4902,7 @@ static int got_RqSReply(struct drbd_connection *connection, struct packet_info *
 		return -EIO;
 
 	if (test_bit(CONN_WD_ST_CHG_REQ, &connection->flags)) {
-		D_ASSERT(connection->agreed_pro_version < 100);
+		D_ASSERT(device, connection->agreed_pro_version < 100);
 		return got_conn_RqSReply(connection, pi);
 	}
 
@@ -4945,7 +4945,7 @@ static int got_IsInSync(struct drbd_connection *connection, struct packet_info *
 	if (!device)
 		return -EIO;
 
-	D_ASSERT(first_peer_device(device)->connection->agreed_pro_version >= 89);
+	D_ASSERT(device, first_peer_device(device)->connection->agreed_pro_version >= 89);
 
 	update_peer_seq(device, be32_to_cpu(p->seq_num));
 
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 17fade0118ff..e772b523ebba 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -307,7 +307,7 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
 static int drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_error *m, int put)
 {
 	struct drbd_device *device = req->w.device;
-	D_ASSERT(m || (req->rq_state & RQ_POSTPONED));
+	D_ASSERT(device, m || (req->rq_state & RQ_POSTPONED));
 
 	if (!atomic_sub_and_test(put, &req->completion_ref))
 		return 0;
@@ -374,7 +374,7 @@ static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
 		++c_put;
 
 	if (!(s & RQ_LOCAL_ABORTED) && (set & RQ_LOCAL_ABORTED)) {
-		D_ASSERT(req->rq_state & RQ_LOCAL_PENDING);
+		D_ASSERT(device, req->rq_state & RQ_LOCAL_PENDING);
 		/* local completion may still come in later,
 		 * we need to keep the req object around. */
 		kref_get(&req->kref);
@@ -475,7 +475,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 	case TO_BE_SENT: /* via network */
 		/* reached via __drbd_make_request
 		 * and from w_read_retry_remote */
-		D_ASSERT(!(req->rq_state & RQ_NET_MASK));
+		D_ASSERT(device, !(req->rq_state & RQ_NET_MASK));
 		rcu_read_lock();
 		nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
 		p = nc->wire_protocol;
@@ -488,7 +488,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 
 	case TO_BE_SUBMITTED: /* locally */
 		/* reached via __drbd_make_request */
-		D_ASSERT(!(req->rq_state & RQ_LOCAL_MASK));
+		D_ASSERT(device, !(req->rq_state & RQ_LOCAL_MASK));
 		mod_rq_state(req, m, 0, RQ_LOCAL_PENDING);
 		break;
 
@@ -533,13 +533,13 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		/* So we can verify the handle in the answer packet.
 		 * Corresponding drbd_remove_request_interval is in
 		 * drbd_req_complete() */
-		D_ASSERT(drbd_interval_empty(&req->i));
+		D_ASSERT(device, drbd_interval_empty(&req->i));
 		drbd_insert_interval(&device->read_requests, &req->i);
 
 		set_bit(UNPLUG_REMOTE, &device->flags);
 
-		D_ASSERT(req->rq_state & RQ_NET_PENDING);
-		D_ASSERT((req->rq_state & RQ_LOCAL_MASK) == 0);
+		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
+		D_ASSERT(device, (req->rq_state & RQ_LOCAL_MASK) == 0);
 		mod_rq_state(req, m, 0, RQ_NET_QUEUED);
 		req->w.cb = w_send_read_req;
 		drbd_queue_work(&first_peer_device(device)->connection->sender_work, &req->w);
@@ -551,7 +551,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 
 		/* Corresponding drbd_remove_request_interval is in
 		 * drbd_req_complete() */
-		D_ASSERT(drbd_interval_empty(&req->i));
+		D_ASSERT(device, drbd_interval_empty(&req->i));
 		drbd_insert_interval(&device->write_requests, &req->i);
 
 		/* NOTE
@@ -574,7 +574,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		set_bit(UNPLUG_REMOTE, &device->flags);
 
 		/* queue work item to send data */
-		D_ASSERT(req->rq_state & RQ_NET_PENDING);
+		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
 		mod_rq_state(req, m, 0, RQ_NET_QUEUED|RQ_EXP_BARR_ACK);
 		req->w.cb = w_send_dblock;
 		drbd_queue_work(&first_peer_device(device)->connection->sender_work, &req->w);
@@ -640,15 +640,15 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		 * If this request had been marked as RQ_POSTPONED before,
 		 * it will actually not be completed, but "restarted",
 		 * resubmitted from the retry worker context. */
-		D_ASSERT(req->rq_state & RQ_NET_PENDING);
-		D_ASSERT(req->rq_state & RQ_EXP_WRITE_ACK);
+		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
+		D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK);
 		mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_DONE|RQ_NET_OK);
 		break;
 
 	case WRITE_ACKED_BY_PEER_AND_SIS:
 		req->rq_state |= RQ_NET_SIS;
 	case WRITE_ACKED_BY_PEER:
-		D_ASSERT(req->rq_state & RQ_EXP_WRITE_ACK);
+		D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK);
 		/* protocol C; successfully written on peer.
 		 * Nothing more to do here.
 		 * We want to keep the tl in place for all protocols, to cater
@@ -656,22 +656,22 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 
 		goto ack_common;
 	case RECV_ACKED_BY_PEER:
-		D_ASSERT(req->rq_state & RQ_EXP_RECEIVE_ACK);
+		D_ASSERT(device, req->rq_state & RQ_EXP_RECEIVE_ACK);
 		/* protocol B; pretends to be successfully written on peer.
 		 * see also notes above in HANDED_OVER_TO_NETWORK about
 		 * protocol != C */
 	ack_common:
-		D_ASSERT(req->rq_state & RQ_NET_PENDING);
+		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
 		mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK);
 		break;
 
 	case POSTPONE_WRITE:
-		D_ASSERT(req->rq_state & RQ_EXP_WRITE_ACK);
+		D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK);
 		/* If this node has already detected the write conflict, the
 		 * worker will be waiting on misc_wait. Wake it up once this
 		 * request has completed locally.
 		 */
-		D_ASSERT(req->rq_state & RQ_NET_PENDING);
+		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
 		req->rq_state |= RQ_POSTPONED;
 		if (req->i.waiting)
 			wake_up(&device->misc_wait);
@@ -752,7 +752,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		break;
 
 	case DATA_RECEIVED:
-		D_ASSERT(req->rq_state & RQ_NET_PENDING);
+		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
 		mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK|RQ_NET_DONE);
 		break;
 
@@ -783,8 +783,8 @@ static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector,
 		return false;
 	esector = sector + (size >> 9) - 1;
 	nr_sectors = drbd_get_capacity(device->this_bdev);
-	D_ASSERT(sector < nr_sectors);
-	D_ASSERT(esector < nr_sectors);
+	D_ASSERT(device, sector < nr_sectors);
+	D_ASSERT(device, esector < nr_sectors);
 
 	sbnr = BM_SECT_TO_BIT(sector);
 	ebnr = BM_SECT_TO_BIT(esector);
@@ -974,7 +974,7 @@ static int drbd_process_write_request(struct drbd_request *req)
 	 * replicating, in which case there is no point. */
 	if (unlikely(req->i.size == 0)) {
 		/* The only size==0 bios we expect are empty flushes. */
-		D_ASSERT(req->master_bio->bi_rw & REQ_FLUSH);
+		D_ASSERT(device, req->master_bio->bi_rw & REQ_FLUSH);
 		if (remote)
 			_req_mod(req, QUEUE_AS_DRBD_BARRIER);
 		return remote;
@@ -983,7 +983,7 @@ static int drbd_process_write_request(struct drbd_request *req)
 	if (!remote && !send_oos)
 		return 0;
 
-	D_ASSERT(!(remote && send_oos));
+	D_ASSERT(device, !(remote && send_oos));
 
 	if (remote) {
 		_req_mod(req, TO_BE_SENT);
@@ -1281,7 +1281,7 @@ void drbd_make_request(struct request_queue *q, struct bio *bio)
 	/*
 	 * what we "blindly" assume:
 	 */
-	D_ASSERT(IS_ALIGNED(bio->bi_iter.bi_size, 512));
+	D_ASSERT(device, IS_ALIGNED(bio->bi_iter.bi_size, 512));
 
 	inc_ap_bio(device);
 	__drbd_make_request(device, bio, start_time);
diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c
index e66f725ff169..79d0ea26f373 100644
--- a/drivers/block/drbd/drbd_state.c
+++ b/drivers/block/drbd/drbd_state.c
@@ -376,7 +376,7 @@ drbd_req_state(struct drbd_device *device, union drbd_state mask,
 	spin_unlock_irqrestore(&first_peer_device(device)->connection->req_lock, flags);
 
 	if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) {
-		D_ASSERT(current != first_peer_device(device)->connection->worker.task);
+		D_ASSERT(device, current != first_peer_device(device)->connection->worker.task);
 		wait_for_completion(&done);
 	}
 
@@ -1163,7 +1163,7 @@ static int w_after_state_ch(struct drbd_work *w, int unused)
 
 	after_state_ch(device, ascw->os, ascw->ns, ascw->flags);
 	if (ascw->flags & CS_WAIT_COMPLETE) {
-		D_ASSERT(ascw->done != NULL);
+		D_ASSERT(device, ascw->done != NULL);
 		complete(ascw->done);
 	}
 	kfree(ascw);
@@ -1195,7 +1195,7 @@ int drbd_bitmap_io_from_worker(struct drbd_device *device,
 {
 	int rv;
 
-	D_ASSERT(current == first_peer_device(device)->connection->worker.task);
+	D_ASSERT(device, current == first_peer_device(device)->connection->worker.task);
 
 	/* open coded non-blocking drbd_suspend_io(device); */
 	set_bit(SUSPEND_IO, &device->flags);
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 53be1eaa95de..db63b1ff4b35 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -881,7 +881,7 @@ int drbd_resync_finished(struct drbd_device *device)
 			khelper_cmd = "out-of-sync";
 		}
 	} else {
-		D_ASSERT((n_oos - device->rs_failed) == 0);
+		D_ASSERT(device, (n_oos - device->rs_failed) == 0);
 
 		if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T)
 			khelper_cmd = "after-resync-target";
@@ -1099,7 +1099,7 @@ int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
 	 * introducing more locking mechanisms */
 	if (first_peer_device(device)->connection->csums_tfm) {
 		digest_size = crypto_hash_digestsize(first_peer_device(device)->connection->csums_tfm);
-		D_ASSERT(digest_size == di->digest_size);
+		D_ASSERT(device, digest_size == di->digest_size);
 		digest = kmalloc(digest_size, GFP_NOIO);
 	}
 	if (digest) {
@@ -1223,7 +1223,7 @@ int w_e_end_ov_reply(struct drbd_work *w, int cancel)
 		if (digest) {
 			drbd_csum_ee(device, first_peer_device(device)->connection->verify_tfm, peer_req, digest);
 
-			D_ASSERT(digest_size == di->digest_size);
+			D_ASSERT(device, digest_size == di->digest_size);
 			eq = !memcmp(digest, di->digest, digest_size);
 			kfree(digest);
 		}
@@ -1936,7 +1936,7 @@ int drbd_worker(struct drbd_thread *thi)
 	rcu_read_lock();
 	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
 		struct drbd_device *device = peer_device->device;
-		D_ASSERT(device->state.disk == D_DISKLESS && device->state.conn == C_STANDALONE);
+		D_ASSERT(device, device->state.disk == D_DISKLESS && device->state.conn == C_STANDALONE);
 		kref_get(&device->kref);
 		rcu_read_unlock();
 		drbd_device_cleanup(device);