about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorAndreas Gruenbacher <agruen@linbit.com>2011-07-03 11:53:52 -0400
committerPhilipp Reisner <philipp.reisner@linbit.com>2014-02-17 10:45:01 -0500
commitd01801710265cfb7bd8928ae7c3be4d9d15ceeb0 (patch)
tree37aeb2c599a8d92ba9f70346a213968544493327
parentc06ece6ba6f1bb2e01616e111303c3ae5f80fdbe (diff)
drbd: Remove the terrible DEV hack
DRBD was using dev_err() and similar all over the code; instead of having to write dev_err(disk_to_dev(device->vdisk), ...) to convert a drbd_device into a kernel device, a DEV macro was used which implicitly references the device variable. This is terrible; introduce separate drbd_err() and similar macros with an explicit device parameter instead. Signed-off-by: Andreas Gruenbacher <agruen@linbit.com> Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
-rw-r--r--drivers/block/drbd/drbd_actlog.c54
-rw-r--r--drivers/block/drbd/drbd_bitmap.c58
-rw-r--r--drivers/block/drbd/drbd_int.h39
-rw-r--r--drivers/block/drbd/drbd_main.c62
-rw-r--r--drivers/block/drbd/drbd_nl.c74
-rw-r--r--drivers/block/drbd/drbd_receiver.c180
-rw-r--r--drivers/block/drbd/drbd_req.c28
-rw-r--r--drivers/block/drbd/drbd_state.c30
-rw-r--r--drivers/block/drbd/drbd_worker.c56
9 files changed, 298 insertions(+), 283 deletions(-)
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 4af4dc166373..4d892b118c48 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -137,7 +137,7 @@ void wait_until_done_or_force_detached(struct drbd_device *device, struct drbd_b
137 dt = wait_event_timeout(device->misc_wait, 137 dt = wait_event_timeout(device->misc_wait,
138 *done || test_bit(FORCE_DETACH, &device->flags), dt); 138 *done || test_bit(FORCE_DETACH, &device->flags), dt);
139 if (dt == 0) { 139 if (dt == 0) {
140 dev_err(DEV, "meta-data IO operation timed out\n"); 140 drbd_err(device, "meta-data IO operation timed out\n");
141 drbd_chk_io_error(device, 1, DRBD_FORCE_DETACH); 141 drbd_chk_io_error(device, 1, DRBD_FORCE_DETACH);
142 } 142 }
143} 143}
@@ -172,7 +172,7 @@ static int _drbd_md_sync_page_io(struct drbd_device *device,
172 ; 172 ;
173 else if (!get_ldev_if_state(device, D_ATTACHING)) { 173 else if (!get_ldev_if_state(device, D_ATTACHING)) {
174 /* Corresponding put_ldev in drbd_md_io_complete() */ 174 /* Corresponding put_ldev in drbd_md_io_complete() */
175 dev_err(DEV, "ASSERT FAILED: get_ldev_if_state() == 1 in _drbd_md_sync_page_io()\n"); 175 drbd_err(device, "ASSERT FAILED: get_ldev_if_state() == 1 in _drbd_md_sync_page_io()\n");
176 err = -ENODEV; 176 err = -ENODEV;
177 goto out; 177 goto out;
178 } 178 }
@@ -202,21 +202,21 @@ int drbd_md_sync_page_io(struct drbd_device *device, struct drbd_backing_dev *bd
202 202
203 BUG_ON(!bdev->md_bdev); 203 BUG_ON(!bdev->md_bdev);
204 204
205 dev_dbg(DEV, "meta_data io: %s [%d]:%s(,%llus,%s) %pS\n", 205 drbd_dbg(device, "meta_data io: %s [%d]:%s(,%llus,%s) %pS\n",
206 current->comm, current->pid, __func__, 206 current->comm, current->pid, __func__,
207 (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ", 207 (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ",
208 (void*)_RET_IP_ ); 208 (void*)_RET_IP_ );
209 209
210 if (sector < drbd_md_first_sector(bdev) || 210 if (sector < drbd_md_first_sector(bdev) ||
211 sector + 7 > drbd_md_last_sector(bdev)) 211 sector + 7 > drbd_md_last_sector(bdev))
212 dev_alert(DEV, "%s [%d]:%s(,%llus,%s) out of range md access!\n", 212 drbd_alert(device, "%s [%d]:%s(,%llus,%s) out of range md access!\n",
213 current->comm, current->pid, __func__, 213 current->comm, current->pid, __func__,
214 (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ"); 214 (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ");
215 215
216 /* we do all our meta data IO in aligned 4k blocks. */ 216 /* we do all our meta data IO in aligned 4k blocks. */
217 err = _drbd_md_sync_page_io(device, bdev, iop, sector, rw, 4096); 217 err = _drbd_md_sync_page_io(device, bdev, iop, sector, rw, 4096);
218 if (err) { 218 if (err) {
219 dev_err(DEV, "drbd_md_sync_page_io(,%llus,%s) failed with error %d\n", 219 drbd_err(device, "drbd_md_sync_page_io(,%llus,%s) failed with error %d\n",
220 (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ", err); 220 (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ", err);
221 } 221 }
222 return err; 222 return err;
@@ -404,7 +404,7 @@ int drbd_al_begin_io_nonblock(struct drbd_device *device, struct drbd_interval *
404 struct lc_element *al_ext; 404 struct lc_element *al_ext;
405 al_ext = lc_get_cumulative(device->act_log, enr); 405 al_ext = lc_get_cumulative(device->act_log, enr);
406 if (!al_ext) 406 if (!al_ext)
407 dev_info(DEV, "LOGIC BUG for enr=%u\n", enr); 407 drbd_info(device, "LOGIC BUG for enr=%u\n", enr);
408 } 408 }
409 return 0; 409 return 0;
410} 410}
@@ -425,7 +425,7 @@ void drbd_al_complete_io(struct drbd_device *device, struct drbd_interval *i)
425 for (enr = first; enr <= last; enr++) { 425 for (enr = first; enr <= last; enr++) {
426 extent = lc_find(device->act_log, enr); 426 extent = lc_find(device->act_log, enr);
427 if (!extent) { 427 if (!extent) {
428 dev_err(DEV, "al_complete_io() called on inactive extent %u\n", enr); 428 drbd_err(device, "al_complete_io() called on inactive extent %u\n", enr);
429 continue; 429 continue;
430 } 430 }
431 lc_put(device->act_log, extent); 431 lc_put(device->act_log, extent);
@@ -491,14 +491,14 @@ _al_write_transaction(struct drbd_device *device)
491 int err = 0; 491 int err = 0;
492 492
493 if (!get_ldev(device)) { 493 if (!get_ldev(device)) {
494 dev_err(DEV, "disk is %s, cannot start al transaction\n", 494 drbd_err(device, "disk is %s, cannot start al transaction\n",
495 drbd_disk_str(device->state.disk)); 495 drbd_disk_str(device->state.disk));
496 return -EIO; 496 return -EIO;
497 } 497 }
498 498
499 /* The bitmap write may have failed, causing a state change. */ 499 /* The bitmap write may have failed, causing a state change. */
500 if (device->state.disk < D_INCONSISTENT) { 500 if (device->state.disk < D_INCONSISTENT) {
501 dev_err(DEV, 501 drbd_err(device,
502 "disk is %s, cannot write al transaction\n", 502 "disk is %s, cannot write al transaction\n",
503 drbd_disk_str(device->state.disk)); 503 drbd_disk_str(device->state.disk));
504 put_ldev(device); 504 put_ldev(device);
@@ -507,7 +507,7 @@ _al_write_transaction(struct drbd_device *device)
507 507
508 buffer = drbd_md_get_buffer(device); /* protects md_io_buffer, al_tr_cycle, ... */ 508 buffer = drbd_md_get_buffer(device); /* protects md_io_buffer, al_tr_cycle, ... */
509 if (!buffer) { 509 if (!buffer) {
510 dev_err(DEV, "disk failed while waiting for md_io buffer\n"); 510 drbd_err(device, "disk failed while waiting for md_io buffer\n");
511 put_ldev(device); 511 put_ldev(device);
512 return -ENODEV; 512 return -ENODEV;
513 } 513 }
@@ -689,7 +689,7 @@ static int w_update_odbm(struct drbd_work *w, int unused)
689 689
690 if (!get_ldev(device)) { 690 if (!get_ldev(device)) {
691 if (__ratelimit(&drbd_ratelimit_state)) 691 if (__ratelimit(&drbd_ratelimit_state))
692 dev_warn(DEV, "Can not update on disk bitmap, local IO disabled.\n"); 692 drbd_warn(device, "Can not update on disk bitmap, local IO disabled.\n");
693 kfree(udw); 693 kfree(udw);
694 return 0; 694 return 0;
695 } 695 }
@@ -744,7 +744,7 @@ static void drbd_try_clear_on_disk_bm(struct drbd_device *device, sector_t secto
744 else 744 else
745 ext->rs_failed += count; 745 ext->rs_failed += count;
746 if (ext->rs_left < ext->rs_failed) { 746 if (ext->rs_left < ext->rs_failed) {
747 dev_warn(DEV, "BAD! sector=%llus enr=%u rs_left=%d " 747 drbd_warn(device, "BAD! sector=%llus enr=%u rs_left=%d "
748 "rs_failed=%d count=%d cstate=%s\n", 748 "rs_failed=%d count=%d cstate=%s\n",
749 (unsigned long long)sector, 749 (unsigned long long)sector,
750 ext->lce.lc_number, ext->rs_left, 750 ext->lce.lc_number, ext->rs_left,
@@ -768,14 +768,14 @@ static void drbd_try_clear_on_disk_bm(struct drbd_device *device, sector_t secto
768 */ 768 */
769 int rs_left = drbd_bm_e_weight(device, enr); 769 int rs_left = drbd_bm_e_weight(device, enr);
770 if (ext->flags != 0) { 770 if (ext->flags != 0) {
771 dev_warn(DEV, "changing resync lce: %d[%u;%02lx]" 771 drbd_warn(device, "changing resync lce: %d[%u;%02lx]"
772 " -> %d[%u;00]\n", 772 " -> %d[%u;00]\n",
773 ext->lce.lc_number, ext->rs_left, 773 ext->lce.lc_number, ext->rs_left,
774 ext->flags, enr, rs_left); 774 ext->flags, enr, rs_left);
775 ext->flags = 0; 775 ext->flags = 0;
776 } 776 }
777 if (ext->rs_failed) { 777 if (ext->rs_failed) {
778 dev_warn(DEV, "Kicking resync_lru element enr=%u " 778 drbd_warn(device, "Kicking resync_lru element enr=%u "
779 "out with rs_failed=%d\n", 779 "out with rs_failed=%d\n",
780 ext->lce.lc_number, ext->rs_failed); 780 ext->lce.lc_number, ext->rs_failed);
781 } 781 }
@@ -798,11 +798,11 @@ static void drbd_try_clear_on_disk_bm(struct drbd_device *device, sector_t secto
798 udw->w.device = device; 798 udw->w.device = device;
799 drbd_queue_work_front(&first_peer_device(device)->connection->sender_work, &udw->w); 799 drbd_queue_work_front(&first_peer_device(device)->connection->sender_work, &udw->w);
800 } else { 800 } else {
801 dev_warn(DEV, "Could not kmalloc an udw\n"); 801 drbd_warn(device, "Could not kmalloc an udw\n");
802 } 802 }
803 } 803 }
804 } else { 804 } else {
805 dev_err(DEV, "lc_get() failed! locked=%d/%d flags=%lu\n", 805 drbd_err(device, "lc_get() failed! locked=%d/%d flags=%lu\n",
806 device->resync_locked, 806 device->resync_locked,
807 device->resync->nr_elements, 807 device->resync->nr_elements,
808 device->resync->flags); 808 device->resync->flags);
@@ -843,7 +843,7 @@ void __drbd_set_in_sync(struct drbd_device *device, sector_t sector, int size,
843 unsigned long flags; 843 unsigned long flags;
844 844
845 if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) { 845 if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
846 dev_err(DEV, "drbd_set_in_sync: sector=%llus size=%d nonsense!\n", 846 drbd_err(device, "drbd_set_in_sync: sector=%llus size=%d nonsense!\n",
847 (unsigned long long)sector, size); 847 (unsigned long long)sector, size);
848 return; 848 return;
849 } 849 }
@@ -917,7 +917,7 @@ int __drbd_set_out_of_sync(struct drbd_device *device, sector_t sector, int size
917 return 0; 917 return 0;
918 918
919 if (size < 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) { 919 if (size < 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
920 dev_err(DEV, "sector: %llus, size: %d\n", 920 drbd_err(device, "sector: %llus, size: %d\n",
921 (unsigned long long)sector, size); 921 (unsigned long long)sector, size);
922 return 0; 922 return 0;
923 } 923 }
@@ -988,7 +988,7 @@ struct bm_extent *_bme_get(struct drbd_device *device, unsigned int enr)
988 988
989 if (!bm_ext) { 989 if (!bm_ext) {
990 if (rs_flags & LC_STARVING) 990 if (rs_flags & LC_STARVING)
991 dev_warn(DEV, "Have to wait for element" 991 drbd_warn(device, "Have to wait for element"
992 " (resync LRU too small?)\n"); 992 " (resync LRU too small?)\n");
993 BUG_ON(rs_flags & LC_LOCKED); 993 BUG_ON(rs_flags & LC_LOCKED);
994 } 994 }
@@ -1049,7 +1049,7 @@ retry:
1049 if (schedule_timeout_interruptible(HZ/10)) 1049 if (schedule_timeout_interruptible(HZ/10))
1050 return -EINTR; 1050 return -EINTR;
1051 if (sa && --sa == 0) 1051 if (sa && --sa == 0)
1052 dev_warn(DEV,"drbd_rs_begin_io() stepped aside for 20sec." 1052 drbd_warn(device, "drbd_rs_begin_io() stepped aside for 20sec."
1053 "Resync stalled?\n"); 1053 "Resync stalled?\n");
1054 goto retry; 1054 goto retry;
1055 } 1055 }
@@ -1101,7 +1101,7 @@ int drbd_try_rs_begin_io(struct drbd_device *device, sector_t sector)
1101 device->resync_locked--; 1101 device->resync_locked--;
1102 wake_up(&device->al_wait); 1102 wake_up(&device->al_wait);
1103 } else { 1103 } else {
1104 dev_alert(DEV, "LOGIC BUG\n"); 1104 drbd_alert(device, "LOGIC BUG\n");
1105 } 1105 }
1106 } 1106 }
1107 /* TRY. */ 1107 /* TRY. */
@@ -1131,7 +1131,7 @@ int drbd_try_rs_begin_io(struct drbd_device *device, sector_t sector)
1131 if (!bm_ext) { 1131 if (!bm_ext) {
1132 const unsigned long rs_flags = device->resync->flags; 1132 const unsigned long rs_flags = device->resync->flags;
1133 if (rs_flags & LC_STARVING) 1133 if (rs_flags & LC_STARVING)
1134 dev_warn(DEV, "Have to wait for element" 1134 drbd_warn(device, "Have to wait for element"
1135 " (resync LRU too small?)\n"); 1135 " (resync LRU too small?)\n");
1136 BUG_ON(rs_flags & LC_LOCKED); 1136 BUG_ON(rs_flags & LC_LOCKED);
1137 goto try_again; 1137 goto try_again;
@@ -1179,13 +1179,13 @@ void drbd_rs_complete_io(struct drbd_device *device, sector_t sector)
1179 if (!bm_ext) { 1179 if (!bm_ext) {
1180 spin_unlock_irqrestore(&device->al_lock, flags); 1180 spin_unlock_irqrestore(&device->al_lock, flags);
1181 if (__ratelimit(&drbd_ratelimit_state)) 1181 if (__ratelimit(&drbd_ratelimit_state))
1182 dev_err(DEV, "drbd_rs_complete_io() called, but extent not found\n"); 1182 drbd_err(device, "drbd_rs_complete_io() called, but extent not found\n");
1183 return; 1183 return;
1184 } 1184 }
1185 1185
1186 if (bm_ext->lce.refcnt == 0) { 1186 if (bm_ext->lce.refcnt == 0) {
1187 spin_unlock_irqrestore(&device->al_lock, flags); 1187 spin_unlock_irqrestore(&device->al_lock, flags);
1188 dev_err(DEV, "drbd_rs_complete_io(,%llu [=%u]) called, " 1188 drbd_err(device, "drbd_rs_complete_io(,%llu [=%u]) called, "
1189 "but refcnt is 0!?\n", 1189 "but refcnt is 0!?\n",
1190 (unsigned long long)sector, enr); 1190 (unsigned long long)sector, enr);
1191 return; 1191 return;
@@ -1241,7 +1241,7 @@ int drbd_rs_del_all(struct drbd_device *device)
1241 if (bm_ext->lce.lc_number == LC_FREE) 1241 if (bm_ext->lce.lc_number == LC_FREE)
1242 continue; 1242 continue;
1243 if (bm_ext->lce.lc_number == device->resync_wenr) { 1243 if (bm_ext->lce.lc_number == device->resync_wenr) {
1244 dev_info(DEV, "dropping %u in drbd_rs_del_all, apparently" 1244 drbd_info(device, "dropping %u in drbd_rs_del_all, apparently"
1245 " got 'synced' by application io\n", 1245 " got 'synced' by application io\n",
1246 device->resync_wenr); 1246 device->resync_wenr);
1247 D_ASSERT(!test_bit(BME_LOCKED, &bm_ext->flags)); 1247 D_ASSERT(!test_bit(BME_LOCKED, &bm_ext->flags));
@@ -1251,7 +1251,7 @@ int drbd_rs_del_all(struct drbd_device *device)
1251 lc_put(device->resync, &bm_ext->lce); 1251 lc_put(device->resync, &bm_ext->lce);
1252 } 1252 }
1253 if (bm_ext->lce.refcnt != 0) { 1253 if (bm_ext->lce.refcnt != 0) {
1254 dev_info(DEV, "Retrying drbd_rs_del_all() later. " 1254 drbd_info(device, "Retrying drbd_rs_del_all() later. "
1255 "refcnt=%d\n", bm_ext->lce.refcnt); 1255 "refcnt=%d\n", bm_ext->lce.refcnt);
1256 put_ldev(device); 1256 put_ldev(device);
1257 spin_unlock_irq(&device->al_lock); 1257 spin_unlock_irq(&device->al_lock);
@@ -1285,7 +1285,7 @@ void drbd_rs_failed_io(struct drbd_device *device, sector_t sector, int size)
1285 int wake_up = 0; 1285 int wake_up = 0;
1286 1286
1287 if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) { 1287 if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
1288 dev_err(DEV, "drbd_rs_failed_io: sector=%llus size=%d nonsense!\n", 1288 drbd_err(device, "drbd_rs_failed_io: sector=%llus size=%d nonsense!\n",
1289 (unsigned long long)sector, size); 1289 (unsigned long long)sector, size);
1290 return; 1290 return;
1291 } 1291 }
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index cb8e64978b8e..232eeb7ca84c 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -118,7 +118,7 @@ static void __bm_print_lock_info(struct drbd_device *device, const char *func)
118 struct drbd_bitmap *b = device->bitmap; 118 struct drbd_bitmap *b = device->bitmap;
119 if (!__ratelimit(&drbd_ratelimit_state)) 119 if (!__ratelimit(&drbd_ratelimit_state))
120 return; 120 return;
121 dev_err(DEV, "FIXME %s in %s, bitmap locked for '%s' by %s\n", 121 drbd_err(device, "FIXME %s in %s, bitmap locked for '%s' by %s\n",
122 drbd_task_to_thread_name(first_peer_device(device)->connection, current), 122 drbd_task_to_thread_name(first_peer_device(device)->connection, current),
123 func, b->bm_why ?: "?", 123 func, b->bm_why ?: "?",
124 drbd_task_to_thread_name(first_peer_device(device)->connection, b->bm_task)); 124 drbd_task_to_thread_name(first_peer_device(device)->connection, b->bm_task));
@@ -130,21 +130,21 @@ void drbd_bm_lock(struct drbd_device *device, char *why, enum bm_flag flags)
130 int trylock_failed; 130 int trylock_failed;
131 131
132 if (!b) { 132 if (!b) {
133 dev_err(DEV, "FIXME no bitmap in drbd_bm_lock!?\n"); 133 drbd_err(device, "FIXME no bitmap in drbd_bm_lock!?\n");
134 return; 134 return;
135 } 135 }
136 136
137 trylock_failed = !mutex_trylock(&b->bm_change); 137 trylock_failed = !mutex_trylock(&b->bm_change);
138 138
139 if (trylock_failed) { 139 if (trylock_failed) {
140 dev_warn(DEV, "%s going to '%s' but bitmap already locked for '%s' by %s\n", 140 drbd_warn(device, "%s going to '%s' but bitmap already locked for '%s' by %s\n",
141 drbd_task_to_thread_name(first_peer_device(device)->connection, current), 141 drbd_task_to_thread_name(first_peer_device(device)->connection, current),
142 why, b->bm_why ?: "?", 142 why, b->bm_why ?: "?",
143 drbd_task_to_thread_name(first_peer_device(device)->connection, b->bm_task)); 143 drbd_task_to_thread_name(first_peer_device(device)->connection, b->bm_task));
144 mutex_lock(&b->bm_change); 144 mutex_lock(&b->bm_change);
145 } 145 }
146 if (BM_LOCKED_MASK & b->bm_flags) 146 if (BM_LOCKED_MASK & b->bm_flags)
147 dev_err(DEV, "FIXME bitmap already locked in bm_lock\n"); 147 drbd_err(device, "FIXME bitmap already locked in bm_lock\n");
148 b->bm_flags |= flags & BM_LOCKED_MASK; 148 b->bm_flags |= flags & BM_LOCKED_MASK;
149 149
150 b->bm_why = why; 150 b->bm_why = why;
@@ -155,12 +155,12 @@ void drbd_bm_unlock(struct drbd_device *device)
155{ 155{
156 struct drbd_bitmap *b = device->bitmap; 156 struct drbd_bitmap *b = device->bitmap;
157 if (!b) { 157 if (!b) {
158 dev_err(DEV, "FIXME no bitmap in drbd_bm_unlock!?\n"); 158 drbd_err(device, "FIXME no bitmap in drbd_bm_unlock!?\n");
159 return; 159 return;
160 } 160 }
161 161
162 if (!(BM_LOCKED_MASK & device->bitmap->bm_flags)) 162 if (!(BM_LOCKED_MASK & device->bitmap->bm_flags))
163 dev_err(DEV, "FIXME bitmap not locked in bm_unlock\n"); 163 drbd_err(device, "FIXME bitmap not locked in bm_unlock\n");
164 164
165 b->bm_flags &= ~BM_LOCKED_MASK; 165 b->bm_flags &= ~BM_LOCKED_MASK;
166 b->bm_why = NULL; 166 b->bm_why = NULL;
@@ -253,7 +253,7 @@ void drbd_bm_mark_for_writeout(struct drbd_device *device, int page_nr)
253{ 253{
254 struct page *page; 254 struct page *page;
255 if (page_nr >= device->bitmap->bm_number_of_pages) { 255 if (page_nr >= device->bitmap->bm_number_of_pages) {
256 dev_warn(DEV, "BAD: page_nr: %u, number_of_pages: %u\n", 256 drbd_warn(device, "BAD: page_nr: %u, number_of_pages: %u\n",
257 page_nr, (int)device->bitmap->bm_number_of_pages); 257 page_nr, (int)device->bitmap->bm_number_of_pages);
258 return; 258 return;
259 } 259 }
@@ -645,7 +645,7 @@ int drbd_bm_resize(struct drbd_device *device, sector_t capacity, int set_new_bi
645 645
646 drbd_bm_lock(device, "resize", BM_LOCKED_MASK); 646 drbd_bm_lock(device, "resize", BM_LOCKED_MASK);
647 647
648 dev_info(DEV, "drbd_bm_resize called with capacity == %llu\n", 648 drbd_info(device, "drbd_bm_resize called with capacity == %llu\n",
649 (unsigned long long)capacity); 649 (unsigned long long)capacity);
650 650
651 if (capacity == b->bm_dev_capacity) 651 if (capacity == b->bm_dev_capacity)
@@ -682,8 +682,8 @@ int drbd_bm_resize(struct drbd_device *device, sector_t capacity, int set_new_bi
682 u64 bits_on_disk = drbd_md_on_disk_bits(device->ldev); 682 u64 bits_on_disk = drbd_md_on_disk_bits(device->ldev);
683 put_ldev(device); 683 put_ldev(device);
684 if (bits > bits_on_disk) { 684 if (bits > bits_on_disk) {
685 dev_info(DEV, "bits = %lu\n", bits); 685 drbd_info(device, "bits = %lu\n", bits);
686 dev_info(DEV, "bits_on_disk = %llu\n", bits_on_disk); 686 drbd_info(device, "bits_on_disk = %llu\n", bits_on_disk);
687 err = -ENOSPC; 687 err = -ENOSPC;
688 goto out; 688 goto out;
689 } 689 }
@@ -742,7 +742,7 @@ int drbd_bm_resize(struct drbd_device *device, sector_t capacity, int set_new_bi
742 bm_vk_free(opages, opages_vmalloced); 742 bm_vk_free(opages, opages_vmalloced);
743 if (!growing) 743 if (!growing)
744 b->bm_set = bm_count_bits(b); 744 b->bm_set = bm_count_bits(b);
745 dev_info(DEV, "resync bitmap: bits=%lu words=%lu pages=%lu\n", bits, words, want); 745 drbd_info(device, "resync bitmap: bits=%lu words=%lu pages=%lu\n", bits, words, want);
746 746
747 out: 747 out:
748 drbd_bm_unlock(device); 748 drbd_bm_unlock(device);
@@ -878,7 +878,7 @@ void drbd_bm_get_lel(struct drbd_device *device, size_t offset, size_t number,
878 if ((offset >= b->bm_words) || 878 if ((offset >= b->bm_words) ||
879 (end > b->bm_words) || 879 (end > b->bm_words) ||
880 (number <= 0)) 880 (number <= 0))
881 dev_err(DEV, "offset=%lu number=%lu bm_words=%lu\n", 881 drbd_err(device, "offset=%lu number=%lu bm_words=%lu\n",
882 (unsigned long) offset, 882 (unsigned long) offset,
883 (unsigned long) number, 883 (unsigned long) number,
884 (unsigned long) b->bm_words); 884 (unsigned long) b->bm_words);
@@ -966,7 +966,7 @@ static void bm_async_io_complete(struct bio *bio, int error)
966 966
967 if ((ctx->flags & BM_AIO_COPY_PAGES) == 0 && 967 if ((ctx->flags & BM_AIO_COPY_PAGES) == 0 &&
968 !bm_test_page_unchanged(b->bm_pages[idx])) 968 !bm_test_page_unchanged(b->bm_pages[idx]))
969 dev_warn(DEV, "bitmap page idx %u changed during IO!\n", idx); 969 drbd_warn(device, "bitmap page idx %u changed during IO!\n", idx);
970 970
971 if (error) { 971 if (error) {
972 /* ctx error will hold the completed-last non-zero error code, 972 /* ctx error will hold the completed-last non-zero error code,
@@ -976,11 +976,11 @@ static void bm_async_io_complete(struct bio *bio, int error)
976 /* Not identical to on disk version of it. 976 /* Not identical to on disk version of it.
977 * Is BM_PAGE_IO_ERROR enough? */ 977 * Is BM_PAGE_IO_ERROR enough? */
978 if (__ratelimit(&drbd_ratelimit_state)) 978 if (__ratelimit(&drbd_ratelimit_state))
979 dev_err(DEV, "IO ERROR %d on bitmap page idx %u\n", 979 drbd_err(device, "IO ERROR %d on bitmap page idx %u\n",
980 error, idx); 980 error, idx);
981 } else { 981 } else {
982 bm_clear_page_io_err(b->bm_pages[idx]); 982 bm_clear_page_io_err(b->bm_pages[idx]);
983 dynamic_dev_dbg(DEV, "bitmap page idx %u completed\n", idx); 983 dynamic_drbd_dbg(device, "bitmap page idx %u completed\n", idx);
984 } 984 }
985 985
986 bm_page_unlock_io(device, idx); 986 bm_page_unlock_io(device, idx);
@@ -1081,7 +1081,7 @@ static int bm_rw(struct drbd_device *device, int rw, unsigned flags, unsigned la
1081 }; 1081 };
1082 1082
1083 if (!get_ldev_if_state(device, D_ATTACHING)) { /* put is in bm_aio_ctx_destroy() */ 1083 if (!get_ldev_if_state(device, D_ATTACHING)) { /* put is in bm_aio_ctx_destroy() */
1084 dev_err(DEV, "ASSERT FAILED: get_ldev_if_state() == 1 in bm_rw()\n"); 1084 drbd_err(device, "ASSERT FAILED: get_ldev_if_state() == 1 in bm_rw()\n");
1085 kfree(ctx); 1085 kfree(ctx);
1086 return -ENODEV; 1086 return -ENODEV;
1087 } 1087 }
@@ -1106,14 +1106,14 @@ static int bm_rw(struct drbd_device *device, int rw, unsigned flags, unsigned la
1106 1106
1107 if (!(flags & BM_WRITE_ALL_PAGES) && 1107 if (!(flags & BM_WRITE_ALL_PAGES) &&
1108 bm_test_page_unchanged(b->bm_pages[i])) { 1108 bm_test_page_unchanged(b->bm_pages[i])) {
1109 dynamic_dev_dbg(DEV, "skipped bm write for idx %u\n", i); 1109 dynamic_drbd_dbg(device, "skipped bm write for idx %u\n", i);
1110 continue; 1110 continue;
1111 } 1111 }
1112 /* during lazy writeout, 1112 /* during lazy writeout,
1113 * ignore those pages not marked for lazy writeout. */ 1113 * ignore those pages not marked for lazy writeout. */
1114 if (lazy_writeout_upper_idx && 1114 if (lazy_writeout_upper_idx &&
1115 !bm_test_page_lazy_writeout(b->bm_pages[i])) { 1115 !bm_test_page_lazy_writeout(b->bm_pages[i])) {
1116 dynamic_dev_dbg(DEV, "skipped bm lazy write for idx %u\n", i); 1116 dynamic_drbd_dbg(device, "skipped bm lazy write for idx %u\n", i);
1117 continue; 1117 continue;
1118 } 1118 }
1119 } 1119 }
@@ -1138,12 +1138,12 @@ static int bm_rw(struct drbd_device *device, int rw, unsigned flags, unsigned la
1138 1138
1139 /* summary for global bitmap IO */ 1139 /* summary for global bitmap IO */
1140 if (flags == 0) 1140 if (flags == 0)
1141 dev_info(DEV, "bitmap %s of %u pages took %lu jiffies\n", 1141 drbd_info(device, "bitmap %s of %u pages took %lu jiffies\n",
1142 rw == WRITE ? "WRITE" : "READ", 1142 rw == WRITE ? "WRITE" : "READ",
1143 count, jiffies - now); 1143 count, jiffies - now);
1144 1144
1145 if (ctx->error) { 1145 if (ctx->error) {
1146 dev_alert(DEV, "we had at least one MD IO ERROR during bitmap IO\n"); 1146 drbd_alert(device, "we had at least one MD IO ERROR during bitmap IO\n");
1147 drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR); 1147 drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR);
1148 err = -EIO; /* ctx->error ? */ 1148 err = -EIO; /* ctx->error ? */
1149 } 1149 }
@@ -1156,13 +1156,13 @@ static int bm_rw(struct drbd_device *device, int rw, unsigned flags, unsigned la
1156 drbd_md_flush(device); 1156 drbd_md_flush(device);
1157 } else /* rw == READ */ { 1157 } else /* rw == READ */ {
1158 b->bm_set = bm_count_bits(b); 1158 b->bm_set = bm_count_bits(b);
1159 dev_info(DEV, "recounting of set bits took additional %lu jiffies\n", 1159 drbd_info(device, "recounting of set bits took additional %lu jiffies\n",
1160 jiffies - now); 1160 jiffies - now);
1161 } 1161 }
1162 now = b->bm_set; 1162 now = b->bm_set;
1163 1163
1164 if (flags == 0) 1164 if (flags == 0)
1165 dev_info(DEV, "%s (%lu bits) marked out-of-sync by on disk bit-map.\n", 1165 drbd_info(device, "%s (%lu bits) marked out-of-sync by on disk bit-map.\n",
1166 ppsize(ppb, now << (BM_BLOCK_SHIFT-10)), now); 1166 ppsize(ppb, now << (BM_BLOCK_SHIFT-10)), now);
1167 1167
1168 kref_put(&ctx->kref, &bm_aio_ctx_destroy); 1168 kref_put(&ctx->kref, &bm_aio_ctx_destroy);
@@ -1243,7 +1243,7 @@ int drbd_bm_write_page(struct drbd_device *device, unsigned int idx) __must_hold
1243 int err; 1243 int err;
1244 1244
1245 if (bm_test_page_unchanged(device->bitmap->bm_pages[idx])) { 1245 if (bm_test_page_unchanged(device->bitmap->bm_pages[idx])) {
1246 dynamic_dev_dbg(DEV, "skipped bm page write for idx %u\n", idx); 1246 dynamic_drbd_dbg(device, "skipped bm page write for idx %u\n", idx);
1247 return 0; 1247 return 0;
1248 } 1248 }
1249 1249
@@ -1261,7 +1261,7 @@ int drbd_bm_write_page(struct drbd_device *device, unsigned int idx) __must_hold
1261 }; 1261 };
1262 1262
1263 if (!get_ldev_if_state(device, D_ATTACHING)) { /* put is in bm_aio_ctx_destroy() */ 1263 if (!get_ldev_if_state(device, D_ATTACHING)) { /* put is in bm_aio_ctx_destroy() */
1264 dev_err(DEV, "ASSERT FAILED: get_ldev_if_state() == 1 in drbd_bm_write_page()\n"); 1264 drbd_err(device, "ASSERT FAILED: get_ldev_if_state() == 1 in drbd_bm_write_page()\n");
1265 kfree(ctx); 1265 kfree(ctx);
1266 return -ENODEV; 1266 return -ENODEV;
1267 } 1267 }
@@ -1298,7 +1298,7 @@ static unsigned long __bm_find_next(struct drbd_device *device, unsigned long bm
1298 1298
1299 1299
1300 if (bm_fo > b->bm_bits) { 1300 if (bm_fo > b->bm_bits) {
1301 dev_err(DEV, "bm_fo=%lu bm_bits=%lu\n", bm_fo, b->bm_bits); 1301 drbd_err(device, "bm_fo=%lu bm_bits=%lu\n", bm_fo, b->bm_bits);
1302 bm_fo = DRBD_END_OF_BITMAP; 1302 bm_fo = DRBD_END_OF_BITMAP;
1303 } else { 1303 } else {
1304 while (bm_fo < b->bm_bits) { 1304 while (bm_fo < b->bm_bits) {
@@ -1393,7 +1393,7 @@ static int __bm_change_bits_to(struct drbd_device *device, const unsigned long s
1393 int changed_total = 0; 1393 int changed_total = 0;
1394 1394
1395 if (e >= b->bm_bits) { 1395 if (e >= b->bm_bits) {
1396 dev_err(DEV, "ASSERT FAILED: bit_s=%lu bit_e=%lu bm_bits=%lu\n", 1396 drbd_err(device, "ASSERT FAILED: bit_s=%lu bit_e=%lu bm_bits=%lu\n",
1397 s, e, b->bm_bits); 1397 s, e, b->bm_bits);
1398 e = b->bm_bits ? b->bm_bits -1 : 0; 1398 e = b->bm_bits ? b->bm_bits -1 : 0;
1399 } 1399 }
@@ -1596,7 +1596,7 @@ int drbd_bm_test_bit(struct drbd_device *device, const unsigned long bitnr)
1596 } else if (bitnr == b->bm_bits) { 1596 } else if (bitnr == b->bm_bits) {
1597 i = -1; 1597 i = -1;
1598 } else { /* (bitnr > b->bm_bits) */ 1598 } else { /* (bitnr > b->bm_bits) */
1599 dev_err(DEV, "bitnr=%lu > bm_bits=%lu\n", bitnr, b->bm_bits); 1599 drbd_err(device, "bitnr=%lu > bm_bits=%lu\n", bitnr, b->bm_bits);
1600 i = 0; 1600 i = 0;
1601 } 1601 }
1602 1602
@@ -1637,7 +1637,7 @@ int drbd_bm_count_bits(struct drbd_device *device, const unsigned long s, const
1637 if (expect(bitnr < b->bm_bits)) 1637 if (expect(bitnr < b->bm_bits))
1638 c += (0 != test_bit_le(bitnr - (page_nr << (PAGE_SHIFT+3)), p_addr)); 1638 c += (0 != test_bit_le(bitnr - (page_nr << (PAGE_SHIFT+3)), p_addr));
1639 else 1639 else
1640 dev_err(DEV, "bitnr=%lu bm_bits=%lu\n", bitnr, b->bm_bits); 1640 drbd_err(device, "bitnr=%lu bm_bits=%lu\n", bitnr, b->bm_bits);
1641 } 1641 }
1642 if (p_addr) 1642 if (p_addr)
1643 bm_unmap(p_addr); 1643 bm_unmap(p_addr);
@@ -1687,7 +1687,7 @@ int drbd_bm_e_weight(struct drbd_device *device, unsigned long enr)
1687 count += hweight_long(*bm++); 1687 count += hweight_long(*bm++);
1688 bm_unmap(p_addr); 1688 bm_unmap(p_addr);
1689 } else { 1689 } else {
1690 dev_err(DEV, "start offset (%d) too large in drbd_bm_e_weight\n", s); 1690 drbd_err(device, "start offset (%d) too large in drbd_bm_e_weight\n", s);
1691 } 1691 }
1692 spin_unlock_irqrestore(&b->bm_lock, flags); 1692 spin_unlock_irqrestore(&b->bm_lock, flags);
1693 return count; 1693 return count;
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 41dced195653..d393f0bc26fb 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -100,9 +100,24 @@ extern char usermode_helper[];
100struct drbd_device; 100struct drbd_device;
101struct drbd_connection; 101struct drbd_connection;
102 102
103 103#define drbd_printk(level, device, fmt, args...) \
104/* to shorten dev_warn(DEV, "msg"); and relatives statements */ 104 dev_printk(level, disk_to_dev(device->vdisk), fmt, ## args)
105#define DEV (disk_to_dev(device->vdisk)) 105
106#define drbd_dbg(device, fmt, args...) \
107 drbd_printk(KERN_DEBUG, device, fmt, ## args)
108#define drbd_alert(device, fmt, args...) \
109 drbd_printk(KERN_ALERT, device, fmt, ## args)
110#define drbd_err(device, fmt, args...) \
111 drbd_printk(KERN_ERR, device, fmt, ## args)
112#define drbd_warn(device, fmt, args...) \
113 drbd_printk(KERN_WARNING, device, fmt, ## args)
114#define drbd_info(device, fmt, args...) \
115 drbd_printk(KERN_INFO, device, fmt, ## args)
116#define drbd_emerg(device, fmt, args...) \
117 drbd_printk(KERN_EMERG, device, fmt, ## args)
118
119#define dynamic_drbd_dbg(device, fmt, args...) \
120 dynamic_dev_dbg(disk_to_dev(device->vdisk), fmt, ## args)
106 121
107#define conn_printk(LEVEL, TCONN, FMT, ARGS...) \ 122#define conn_printk(LEVEL, TCONN, FMT, ARGS...) \
108 printk(LEVEL "d-con %s: " FMT, TCONN->resource->name , ## ARGS) 123 printk(LEVEL "d-con %s: " FMT, TCONN->resource->name , ## ARGS)
@@ -115,7 +130,7 @@ struct drbd_connection;
115#define conn_dbg(TCONN, FMT, ARGS...) conn_printk(KERN_DEBUG, TCONN, FMT, ## ARGS) 130#define conn_dbg(TCONN, FMT, ARGS...) conn_printk(KERN_DEBUG, TCONN, FMT, ## ARGS)
116 131
117#define D_ASSERT(exp) if (!(exp)) \ 132#define D_ASSERT(exp) if (!(exp)) \
118 dev_err(DEV, "ASSERT( " #exp " ) in %s:%d\n", __FILE__, __LINE__) 133 drbd_err(device, "ASSERT( " #exp " ) in %s:%d\n", __FILE__, __LINE__)
119 134
120/** 135/**
121 * expect - Make an assertion 136 * expect - Make an assertion
@@ -125,7 +140,7 @@ struct drbd_connection;
125#define expect(exp) ({ \ 140#define expect(exp) ({ \
126 bool _bool = (exp); \ 141 bool _bool = (exp); \
127 if (!_bool) \ 142 if (!_bool) \
128 dev_err(DEV, "ASSERTION %s FAILED in %s\n", \ 143 drbd_err(device, "ASSERTION %s FAILED in %s\n", \
129 #exp, __func__); \ 144 #exp, __func__); \
130 _bool; \ 145 _bool; \
131 }) 146 })
@@ -1278,7 +1293,7 @@ extern void drbd_rs_controller_reset(struct drbd_device *device);
1278static inline void ov_out_of_sync_print(struct drbd_device *device) 1293static inline void ov_out_of_sync_print(struct drbd_device *device)
1279{ 1294{
1280 if (device->ov_last_oos_size) { 1295 if (device->ov_last_oos_size) {
1281 dev_err(DEV, "Out of sync: start=%llu, size=%lu (sectors)\n", 1296 drbd_err(device, "Out of sync: start=%llu, size=%lu (sectors)\n",
1282 (unsigned long long)device->ov_last_oos_start, 1297 (unsigned long long)device->ov_last_oos_start,
1283 (unsigned long)device->ov_last_oos_size); 1298 (unsigned long)device->ov_last_oos_size);
1284 } 1299 }
@@ -1504,7 +1519,7 @@ static inline void __drbd_chk_io_error_(struct drbd_device *device,
1504 case EP_PASS_ON: /* FIXME would this be better named "Ignore"? */ 1519 case EP_PASS_ON: /* FIXME would this be better named "Ignore"? */
1505 if (df == DRBD_READ_ERROR || df == DRBD_WRITE_ERROR) { 1520 if (df == DRBD_READ_ERROR || df == DRBD_WRITE_ERROR) {
1506 if (__ratelimit(&drbd_ratelimit_state)) 1521 if (__ratelimit(&drbd_ratelimit_state))
1507 dev_err(DEV, "Local IO failed in %s.\n", where); 1522 drbd_err(device, "Local IO failed in %s.\n", where);
1508 if (device->state.disk > D_INCONSISTENT) 1523 if (device->state.disk > D_INCONSISTENT)
1509 _drbd_set_state(_NS(device, disk, D_INCONSISTENT), CS_HARD, NULL); 1524 _drbd_set_state(_NS(device, disk, D_INCONSISTENT), CS_HARD, NULL);
1510 break; 1525 break;
@@ -1539,7 +1554,7 @@ static inline void __drbd_chk_io_error_(struct drbd_device *device,
1539 set_bit(FORCE_DETACH, &device->flags); 1554 set_bit(FORCE_DETACH, &device->flags);
1540 if (device->state.disk > D_FAILED) { 1555 if (device->state.disk > D_FAILED) {
1541 _drbd_set_state(_NS(device, disk, D_FAILED), CS_HARD, NULL); 1556 _drbd_set_state(_NS(device, disk, D_FAILED), CS_HARD, NULL);
1542 dev_err(DEV, 1557 drbd_err(device,
1543 "Local IO failed in %s. Detaching...\n", where); 1558 "Local IO failed in %s. Detaching...\n", where);
1544 } 1559 }
1545 break; 1560 break;
@@ -1755,7 +1770,7 @@ static inline void inc_ap_pending(struct drbd_device *device)
1755 1770
1756#define ERR_IF_CNT_IS_NEGATIVE(which, func, line) \ 1771#define ERR_IF_CNT_IS_NEGATIVE(which, func, line) \
1757 if (atomic_read(&device->which) < 0) \ 1772 if (atomic_read(&device->which) < 0) \
1758 dev_err(DEV, "in %s:%d: " #which " = %d < 0 !\n", \ 1773 drbd_err(device, "in %s:%d: " #which " = %d < 0 !\n", \
1759 func, line, \ 1774 func, line, \
1760 atomic_read(&device->which)) 1775 atomic_read(&device->which))
1761 1776
@@ -1888,7 +1903,7 @@ static inline void drbd_get_syncer_progress(struct drbd_device *device,
1888 * for now, just prevent in-kernel buffer overflow. 1903 * for now, just prevent in-kernel buffer overflow.
1889 */ 1904 */
1890 smp_rmb(); 1905 smp_rmb();
1891 dev_warn(DEV, "cs:%s rs_left=%lu > rs_total=%lu (rs_failed %lu)\n", 1906 drbd_warn(device, "cs:%s rs_left=%lu > rs_total=%lu (rs_failed %lu)\n",
1892 drbd_conn_str(device->state.conn), 1907 drbd_conn_str(device->state.conn),
1893 *bits_left, device->rs_total, device->rs_failed); 1908 *bits_left, device->rs_total, device->rs_failed);
1894 *per_mil_done = 0; 1909 *per_mil_done = 0;
@@ -2103,7 +2118,7 @@ static inline void drbd_md_flush(struct drbd_device *device)
2103 int r; 2118 int r;
2104 2119
2105 if (device->ldev == NULL) { 2120 if (device->ldev == NULL) {
2106 dev_warn(DEV, "device->ldev == NULL in drbd_md_flush\n"); 2121 drbd_warn(device, "device->ldev == NULL in drbd_md_flush\n");
2107 return; 2122 return;
2108 } 2123 }
2109 2124
@@ -2113,7 +2128,7 @@ static inline void drbd_md_flush(struct drbd_device *device)
2113 r = blkdev_issue_flush(device->ldev->md_bdev, GFP_NOIO, NULL); 2128 r = blkdev_issue_flush(device->ldev->md_bdev, GFP_NOIO, NULL);
2114 if (r) { 2129 if (r) {
2115 set_bit(MD_NO_FUA, &device->flags); 2130 set_bit(MD_NO_FUA, &device->flags);
2116 dev_err(DEV, "meta data flush failed with status %d, disabling md-flushes\n", r); 2131 drbd_err(device, "meta data flush failed with status %d, disabling md-flushes\n", r);
2117 } 2132 }
2118} 2133}
2119 2134
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 3a67f2421077..6e64e6e45e35 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -871,7 +871,7 @@ void drbd_print_uuids(struct drbd_device *device, const char *text)
871{ 871{
872 if (get_ldev_if_state(device, D_NEGOTIATING)) { 872 if (get_ldev_if_state(device, D_NEGOTIATING)) {
873 u64 *uuid = device->ldev->md.uuid; 873 u64 *uuid = device->ldev->md.uuid;
874 dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX\n", 874 drbd_info(device, "%s %016llX:%016llX:%016llX:%016llX\n",
875 text, 875 text,
876 (unsigned long long)uuid[UI_CURRENT], 876 (unsigned long long)uuid[UI_CURRENT],
877 (unsigned long long)uuid[UI_BITMAP], 877 (unsigned long long)uuid[UI_BITMAP],
@@ -879,7 +879,7 @@ void drbd_print_uuids(struct drbd_device *device, const char *text)
879 (unsigned long long)uuid[UI_HISTORY_END]); 879 (unsigned long long)uuid[UI_HISTORY_END]);
880 put_ldev(device); 880 put_ldev(device);
881 } else { 881 } else {
882 dev_info(DEV, "%s effective data uuid: %016llX\n", 882 drbd_info(device, "%s effective data uuid: %016llX\n",
883 text, 883 text,
884 (unsigned long long)device->ed_uuid); 884 (unsigned long long)device->ed_uuid);
885 } 885 }
@@ -1126,7 +1126,7 @@ static int fill_bitmap_rle_bits(struct drbd_device *device,
1126 /* paranoia: catch zero runlength. 1126 /* paranoia: catch zero runlength.
1127 * can only happen if bitmap is modified while we scan it. */ 1127 * can only happen if bitmap is modified while we scan it. */
1128 if (rl == 0) { 1128 if (rl == 0) {
1129 dev_err(DEV, "unexpected zero runlength while encoding bitmap " 1129 drbd_err(device, "unexpected zero runlength while encoding bitmap "
1130 "t:%u bo:%lu\n", toggle, c->bit_offset); 1130 "t:%u bo:%lu\n", toggle, c->bit_offset);
1131 return -1; 1131 return -1;
1132 } 1132 }
@@ -1135,7 +1135,7 @@ static int fill_bitmap_rle_bits(struct drbd_device *device,
1135 if (bits == -ENOBUFS) /* buffer full */ 1135 if (bits == -ENOBUFS) /* buffer full */
1136 break; 1136 break;
1137 if (bits <= 0) { 1137 if (bits <= 0) {
1138 dev_err(DEV, "error while encoding bitmap: %d\n", bits); 1138 drbd_err(device, "error while encoding bitmap: %d\n", bits);
1139 return 0; 1139 return 0;
1140 } 1140 }
1141 1141
@@ -1238,13 +1238,13 @@ static int _drbd_send_bitmap(struct drbd_device *device)
1238 1238
1239 if (get_ldev(device)) { 1239 if (get_ldev(device)) {
1240 if (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC)) { 1240 if (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC)) {
1241 dev_info(DEV, "Writing the whole bitmap, MDF_FullSync was set.\n"); 1241 drbd_info(device, "Writing the whole bitmap, MDF_FullSync was set.\n");
1242 drbd_bm_set_all(device); 1242 drbd_bm_set_all(device);
1243 if (drbd_bm_write(device)) { 1243 if (drbd_bm_write(device)) {
1244 /* write_bm did fail! Leave full sync flag set in Meta P_DATA 1244 /* write_bm did fail! Leave full sync flag set in Meta P_DATA
1245 * but otherwise process as per normal - need to tell other 1245 * but otherwise process as per normal - need to tell other
1246 * side that a full resync is required! */ 1246 * side that a full resync is required! */
1247 dev_err(DEV, "Failed to write bitmap to disk!\n"); 1247 drbd_err(device, "Failed to write bitmap to disk!\n");
1248 } else { 1248 } else {
1249 drbd_md_clear_flag(device, MDF_FULL_SYNC); 1249 drbd_md_clear_flag(device, MDF_FULL_SYNC);
1250 drbd_md_sync(device); 1250 drbd_md_sync(device);
@@ -1517,7 +1517,7 @@ static int _drbd_send_page(struct drbd_device *device, struct page *page,
1517 break; 1517 break;
1518 continue; 1518 continue;
1519 } 1519 }
1520 dev_warn(DEV, "%s: size=%d len=%d sent=%d\n", 1520 drbd_warn(device, "%s: size=%d len=%d sent=%d\n",
1521 __func__, (int)size, len, sent); 1521 __func__, (int)size, len, sent);
1522 if (sent < 0) 1522 if (sent < 0)
1523 err = sent; 1523 err = sent;
@@ -1663,7 +1663,7 @@ int drbd_send_dblock(struct drbd_device *device, struct drbd_request *req)
1663 unsigned char digest[64]; 1663 unsigned char digest[64];
1664 drbd_csum_bio(device, first_peer_device(device)->connection->integrity_tfm, req->master_bio, digest); 1664 drbd_csum_bio(device, first_peer_device(device)->connection->integrity_tfm, req->master_bio, digest);
1665 if (memcmp(p + 1, digest, dgs)) { 1665 if (memcmp(p + 1, digest, dgs)) {
1666 dev_warn(DEV, 1666 drbd_warn(device,
1667 "Digest mismatch, buffer modified by upper layers during write: %llus +%u\n", 1667 "Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
1668 (unsigned long long)req->i.sector, req->i.size); 1668 (unsigned long long)req->i.sector, req->i.size);
1669 } 1669 }
@@ -1955,7 +1955,7 @@ void drbd_device_cleanup(struct drbd_device *device)
1955{ 1955{
1956 int i; 1956 int i;
1957 if (first_peer_device(device)->connection->receiver.t_state != NONE) 1957 if (first_peer_device(device)->connection->receiver.t_state != NONE)
1958 dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n", 1958 drbd_err(device, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
1959 first_peer_device(device)->connection->receiver.t_state); 1959 first_peer_device(device)->connection->receiver.t_state);
1960 1960
1961 device->al_writ_cnt = 1961 device->al_writ_cnt =
@@ -2140,23 +2140,23 @@ static void drbd_release_all_peer_reqs(struct drbd_device *device)
2140 2140
2141 rr = drbd_free_peer_reqs(device, &device->active_ee); 2141 rr = drbd_free_peer_reqs(device, &device->active_ee);
2142 if (rr) 2142 if (rr)
2143 dev_err(DEV, "%d EEs in active list found!\n", rr); 2143 drbd_err(device, "%d EEs in active list found!\n", rr);
2144 2144
2145 rr = drbd_free_peer_reqs(device, &device->sync_ee); 2145 rr = drbd_free_peer_reqs(device, &device->sync_ee);
2146 if (rr) 2146 if (rr)
2147 dev_err(DEV, "%d EEs in sync list found!\n", rr); 2147 drbd_err(device, "%d EEs in sync list found!\n", rr);
2148 2148
2149 rr = drbd_free_peer_reqs(device, &device->read_ee); 2149 rr = drbd_free_peer_reqs(device, &device->read_ee);
2150 if (rr) 2150 if (rr)
2151 dev_err(DEV, "%d EEs in read list found!\n", rr); 2151 drbd_err(device, "%d EEs in read list found!\n", rr);
2152 2152
2153 rr = drbd_free_peer_reqs(device, &device->done_ee); 2153 rr = drbd_free_peer_reqs(device, &device->done_ee);
2154 if (rr) 2154 if (rr)
2155 dev_err(DEV, "%d EEs in done list found!\n", rr); 2155 drbd_err(device, "%d EEs in done list found!\n", rr);
2156 2156
2157 rr = drbd_free_peer_reqs(device, &device->net_ee); 2157 rr = drbd_free_peer_reqs(device, &device->net_ee);
2158 if (rr) 2158 if (rr)
2159 dev_err(DEV, "%d EEs in net list found!\n", rr); 2159 drbd_err(device, "%d EEs in net list found!\n", rr);
2160} 2160}
2161 2161
2162/* caution. no locking. */ 2162/* caution. no locking. */
@@ -2237,7 +2237,7 @@ static void do_retry(struct work_struct *ws)
2237 (req->rq_state & RQ_LOCAL_ABORTED) != 0); 2237 (req->rq_state & RQ_LOCAL_ABORTED) != 0);
2238 2238
2239 if (!expected) 2239 if (!expected)
2240 dev_err(DEV, "req=%p completion_ref=%d rq_state=%x\n", 2240 drbd_err(device, "req=%p completion_ref=%d rq_state=%x\n",
2241 req, atomic_read(&req->completion_ref), 2241 req, atomic_read(&req->completion_ref),
2242 req->rq_state); 2242 req->rq_state);
2243 2243
@@ -3011,7 +3011,7 @@ void drbd_md_write(struct drbd_device *device, void *b)
3011 3011
3012 if (drbd_md_sync_page_io(device, device->ldev, sector, WRITE)) { 3012 if (drbd_md_sync_page_io(device, device->ldev, sector, WRITE)) {
3013 /* this was a try anyways ... */ 3013 /* this was a try anyways ... */
3014 dev_err(DEV, "meta data update failed!\n"); 3014 drbd_err(device, "meta data update failed!\n");
3015 drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR); 3015 drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR);
3016 } 3016 }
3017} 3017}
@@ -3093,7 +3093,7 @@ static int check_activity_log_stripe_size(struct drbd_device *device,
3093 3093
3094 return 0; 3094 return 0;
3095err: 3095err:
3096 dev_err(DEV, "invalid activity log striping: al_stripes=%u, al_stripe_size_4k=%u\n", 3096 drbd_err(device, "invalid activity log striping: al_stripes=%u, al_stripe_size_4k=%u\n",
3097 al_stripes, al_stripe_size_4k); 3097 al_stripes, al_stripe_size_4k);
3098 return -EINVAL; 3098 return -EINVAL;
3099} 3099}
@@ -3165,7 +3165,7 @@ static int check_offsets_and_sizes(struct drbd_device *device, struct drbd_backi
3165 return 0; 3165 return 0;
3166 3166
3167err: 3167err:
3168 dev_err(DEV, "meta data offsets don't make sense: idx=%d " 3168 drbd_err(device, "meta data offsets don't make sense: idx=%d "
3169 "al_s=%u, al_sz4k=%u, al_offset=%d, bm_offset=%d, " 3169 "al_s=%u, al_sz4k=%u, al_offset=%d, bm_offset=%d, "
3170 "md_size_sect=%u, la_size=%llu, md_capacity=%llu\n", 3170 "md_size_sect=%u, la_size=%llu, md_capacity=%llu\n",
3171 in_core->meta_dev_idx, 3171 in_core->meta_dev_idx,
@@ -3210,7 +3210,7 @@ int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev)
3210 if (drbd_md_sync_page_io(device, bdev, bdev->md.md_offset, READ)) { 3210 if (drbd_md_sync_page_io(device, bdev, bdev->md.md_offset, READ)) {
3211 /* NOTE: can't do normal error processing here as this is 3211 /* NOTE: can't do normal error processing here as this is
3212 called BEFORE disk is attached */ 3212 called BEFORE disk is attached */
3213 dev_err(DEV, "Error while reading metadata.\n"); 3213 drbd_err(device, "Error while reading metadata.\n");
3214 rv = ERR_IO_MD_DISK; 3214 rv = ERR_IO_MD_DISK;
3215 goto err; 3215 goto err;
3216 } 3216 }
@@ -3220,7 +3220,7 @@ int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev)
3220 if (magic == DRBD_MD_MAGIC_84_UNCLEAN || 3220 if (magic == DRBD_MD_MAGIC_84_UNCLEAN ||
3221 (magic == DRBD_MD_MAGIC_08 && !(flags & MDF_AL_CLEAN))) { 3221 (magic == DRBD_MD_MAGIC_08 && !(flags & MDF_AL_CLEAN))) {
3222 /* btw: that's Activity Log clean, not "all" clean. */ 3222 /* btw: that's Activity Log clean, not "all" clean. */
3223 dev_err(DEV, "Found unclean meta data. Did you \"drbdadm apply-al\"?\n"); 3223 drbd_err(device, "Found unclean meta data. Did you \"drbdadm apply-al\"?\n");
3224 rv = ERR_MD_UNCLEAN; 3224 rv = ERR_MD_UNCLEAN;
3225 goto err; 3225 goto err;
3226 } 3226 }
@@ -3228,14 +3228,14 @@ int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev)
3228 rv = ERR_MD_INVALID; 3228 rv = ERR_MD_INVALID;
3229 if (magic != DRBD_MD_MAGIC_08) { 3229 if (magic != DRBD_MD_MAGIC_08) {
3230 if (magic == DRBD_MD_MAGIC_07) 3230 if (magic == DRBD_MD_MAGIC_07)
3231 dev_err(DEV, "Found old (0.7) meta data magic. Did you \"drbdadm create-md\"?\n"); 3231 drbd_err(device, "Found old (0.7) meta data magic. Did you \"drbdadm create-md\"?\n");
3232 else 3232 else
3233 dev_err(DEV, "Meta data magic not found. Did you \"drbdadm create-md\"?\n"); 3233 drbd_err(device, "Meta data magic not found. Did you \"drbdadm create-md\"?\n");
3234 goto err; 3234 goto err;
3235 } 3235 }
3236 3236
3237 if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) { 3237 if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
3238 dev_err(DEV, "unexpected bm_bytes_per_bit: %u (expected %u)\n", 3238 drbd_err(device, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
3239 be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE); 3239 be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
3240 goto err; 3240 goto err;
3241 } 3241 }
@@ -3258,12 +3258,12 @@ int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev)
3258 goto err; 3258 goto err;
3259 3259
3260 if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) { 3260 if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
3261 dev_err(DEV, "unexpected bm_offset: %d (expected %d)\n", 3261 drbd_err(device, "unexpected bm_offset: %d (expected %d)\n",
3262 be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset); 3262 be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
3263 goto err; 3263 goto err;
3264 } 3264 }
3265 if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) { 3265 if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
3266 dev_err(DEV, "unexpected md_size: %u (expected %u)\n", 3266 drbd_err(device, "unexpected md_size: %u (expected %u)\n",
3267 be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect); 3267 be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
3268 goto err; 3268 goto err;
3269 } 3269 }
@@ -3371,7 +3371,7 @@ void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local)
3371 bm_uuid = device->ldev->md.uuid[UI_BITMAP]; 3371 bm_uuid = device->ldev->md.uuid[UI_BITMAP];
3372 3372
3373 if (bm_uuid) 3373 if (bm_uuid)
3374 dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid); 3374 drbd_warn(device, "bm UUID was already set: %llX\n", bm_uuid);
3375 3375
3376 device->ldev->md.uuid[UI_BITMAP] = device->ldev->md.uuid[UI_CURRENT]; 3376 device->ldev->md.uuid[UI_BITMAP] = device->ldev->md.uuid[UI_CURRENT];
3377 __drbd_uuid_set(device, UI_CURRENT, val); 3377 __drbd_uuid_set(device, UI_CURRENT, val);
@@ -3396,7 +3396,7 @@ void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local)
3396 } else { 3396 } else {
3397 unsigned long long bm_uuid = device->ldev->md.uuid[UI_BITMAP]; 3397 unsigned long long bm_uuid = device->ldev->md.uuid[UI_BITMAP];
3398 if (bm_uuid) 3398 if (bm_uuid)
3399 dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid); 3399 drbd_warn(device, "bm UUID was already set: %llX\n", bm_uuid);
3400 3400
3401 device->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1); 3401 device->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1);
3402 } 3402 }
@@ -3558,7 +3558,7 @@ void drbd_queue_bitmap_io(struct drbd_device *device,
3558 D_ASSERT(!test_bit(BITMAP_IO, &device->flags)); 3558 D_ASSERT(!test_bit(BITMAP_IO, &device->flags));
3559 D_ASSERT(list_empty(&device->bm_io_work.w.list)); 3559 D_ASSERT(list_empty(&device->bm_io_work.w.list));
3560 if (device->bm_io_work.why) 3560 if (device->bm_io_work.why)
3561 dev_err(DEV, "FIXME going to queue '%s' but '%s' still pending?\n", 3561 drbd_err(device, "FIXME going to queue '%s' but '%s' still pending?\n",
3562 why, device->bm_io_work.why); 3562 why, device->bm_io_work.why);
3563 3563
3564 device->bm_io_work.io_fn = io_fn; 3564 device->bm_io_work.io_fn = io_fn;
@@ -3637,9 +3637,9 @@ static int w_md_sync(struct drbd_work *w, int unused)
3637{ 3637{
3638 struct drbd_device *device = w->device; 3638 struct drbd_device *device = w->device;
3639 3639
3640 dev_warn(DEV, "md_sync_timer expired! Worker calls drbd_md_sync().\n"); 3640 drbd_warn(device, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
3641#ifdef DEBUG 3641#ifdef DEBUG
3642 dev_warn(DEV, "last md_mark_dirty: %s:%u\n", 3642 drbd_warn(device, "last md_mark_dirty: %s:%u\n",
3643 device->last_md_mark_dirty.func, device->last_md_mark_dirty.line); 3643 device->last_md_mark_dirty.func, device->last_md_mark_dirty.line);
3644#endif 3644#endif
3645 drbd_md_sync(device); 3645 drbd_md_sync(device);
@@ -3813,7 +3813,7 @@ _drbd_insert_fault(struct drbd_device *device, unsigned int type)
3813 fault_count++; 3813 fault_count++;
3814 3814
3815 if (__ratelimit(&drbd_ratelimit_state)) 3815 if (__ratelimit(&drbd_ratelimit_state))
3816 dev_warn(DEV, "***Simulating %s failure\n", 3816 drbd_warn(device, "***Simulating %s failure\n",
3817 _drbd_fault_str(type)); 3817 _drbd_fault_str(type));
3818 } 3818 }
3819 3819
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index c352c61f74c3..157ccf871f69 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -351,17 +351,17 @@ int drbd_khelper(struct drbd_device *device, char *cmd)
351 * write out any unsynced meta data changes now */ 351 * write out any unsynced meta data changes now */
352 drbd_md_sync(device); 352 drbd_md_sync(device);
353 353
354 dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb); 354 drbd_info(device, "helper command: %s %s %s\n", usermode_helper, cmd, mb);
355 sib.sib_reason = SIB_HELPER_PRE; 355 sib.sib_reason = SIB_HELPER_PRE;
356 sib.helper_name = cmd; 356 sib.helper_name = cmd;
357 drbd_bcast_event(device, &sib); 357 drbd_bcast_event(device, &sib);
358 ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC); 358 ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC);
359 if (ret) 359 if (ret)
360 dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n", 360 drbd_warn(device, "helper command: %s %s %s exit code %u (0x%x)\n",
361 usermode_helper, cmd, mb, 361 usermode_helper, cmd, mb,
362 (ret >> 8) & 0xff, ret); 362 (ret >> 8) & 0xff, ret);
363 else 363 else
364 dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n", 364 drbd_info(device, "helper command: %s %s %s exit code %u (0x%x)\n",
365 usermode_helper, cmd, mb, 365 usermode_helper, cmd, mb,
366 (ret >> 8) & 0xff, ret); 366 (ret >> 8) & 0xff, ret);
367 sib.sib_reason = SIB_HELPER_POST; 367 sib.sib_reason = SIB_HELPER_POST;
@@ -603,7 +603,7 @@ drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force)
603 goto out; 603 goto out;
604 if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) { 604 if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
605 if (!conn_try_outdate_peer(first_peer_device(device)->connection) && force) { 605 if (!conn_try_outdate_peer(first_peer_device(device)->connection) && force) {
606 dev_warn(DEV, "Forced into split brain situation!\n"); 606 drbd_warn(device, "Forced into split brain situation!\n");
607 mask.pdsk = D_MASK; 607 mask.pdsk = D_MASK;
608 val.pdsk = D_OUTDATED; 608 val.pdsk = D_OUTDATED;
609 609
@@ -636,7 +636,7 @@ drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force)
636 goto out; 636 goto out;
637 637
638 if (forced) 638 if (forced)
639 dev_warn(DEV, "Forced to consider local data as UpToDate!\n"); 639 drbd_warn(device, "Forced to consider local data as UpToDate!\n");
640 640
641 /* Wait until nothing is on the fly :) */ 641 /* Wait until nothing is on the fly :) */
642 wait_event(device->misc_wait, atomic_read(&device->ap_pending_cnt) == 0); 642 wait_event(device->misc_wait, atomic_read(&device->ap_pending_cnt) == 0);
@@ -905,7 +905,7 @@ drbd_determine_dev_size(struct drbd_device *device, enum dds_flags flags, struct
905 if (rs && u_size == 0) { 905 if (rs && u_size == 0) {
906 /* Remove "rs &&" later. This check should always be active, but 906 /* Remove "rs &&" later. This check should always be active, but
907 right now the receiver expects the permissive behavior */ 907 right now the receiver expects the permissive behavior */
908 dev_warn(DEV, "Implicit shrink not allowed. " 908 drbd_warn(device, "Implicit shrink not allowed. "
909 "Use --size=%llus for explicit shrink.\n", 909 "Use --size=%llus for explicit shrink.\n",
910 (unsigned long long)size); 910 (unsigned long long)size);
911 rv = DS_ERROR_SHRINK; 911 rv = DS_ERROR_SHRINK;
@@ -924,10 +924,10 @@ drbd_determine_dev_size(struct drbd_device *device, enum dds_flags flags, struct
924 /* currently there is only one error: ENOMEM! */ 924 /* currently there is only one error: ENOMEM! */
925 size = drbd_bm_capacity(device)>>1; 925 size = drbd_bm_capacity(device)>>1;
926 if (size == 0) { 926 if (size == 0) {
927 dev_err(DEV, "OUT OF MEMORY! " 927 drbd_err(device, "OUT OF MEMORY! "
928 "Could not allocate bitmap!\n"); 928 "Could not allocate bitmap!\n");
929 } else { 929 } else {
930 dev_err(DEV, "BM resizing failed. " 930 drbd_err(device, "BM resizing failed. "
931 "Leaving size unchanged at size = %lu KB\n", 931 "Leaving size unchanged at size = %lu KB\n",
932 (unsigned long)size); 932 (unsigned long)size);
933 } 933 }
@@ -936,7 +936,7 @@ drbd_determine_dev_size(struct drbd_device *device, enum dds_flags flags, struct
936 /* racy, see comments above. */ 936 /* racy, see comments above. */
937 drbd_set_my_capacity(device, size); 937 drbd_set_my_capacity(device, size);
938 device->ldev->md.la_size_sect = size; 938 device->ldev->md.la_size_sect = size;
939 dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1), 939 drbd_info(device, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
940 (unsigned long long)size>>1); 940 (unsigned long long)size>>1);
941 } 941 }
942 if (rv <= DS_ERROR) 942 if (rv <= DS_ERROR)
@@ -956,7 +956,7 @@ drbd_determine_dev_size(struct drbd_device *device, enum dds_flags flags, struct
956 md->flags &= ~MDF_PRIMARY_IND; 956 md->flags &= ~MDF_PRIMARY_IND;
957 drbd_md_write(device, buffer); 957 drbd_md_write(device, buffer);
958 958
959 dev_info(DEV, "Writing the whole bitmap, %s\n", 959 drbd_info(device, "Writing the whole bitmap, %s\n",
960 la_size_changed && md_moved ? "size changed and md moved" : 960 la_size_changed && md_moved ? "size changed and md moved" :
961 la_size_changed ? "size changed" : "md moved"); 961 la_size_changed ? "size changed" : "md moved");
962 /* next line implicitly does drbd_suspend_io()+drbd_resume_io() */ 962 /* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
@@ -968,8 +968,8 @@ drbd_determine_dev_size(struct drbd_device *device, enum dds_flags flags, struct
968 drbd_md_write(device, buffer); 968 drbd_md_write(device, buffer);
969 969
970 if (rs) 970 if (rs)
971 dev_info(DEV, "Changed AL layout to al-stripes = %d, al-stripe-size-kB = %d\n", 971 drbd_info(device, "Changed AL layout to al-stripes = %d, al-stripe-size-kB = %d\n",
972 md->al_stripes, md->al_stripe_size_4k * 4); 972 md->al_stripes, md->al_stripe_size_4k * 4);
973 } 973 }
974 974
975 if (size > la_size_sect) 975 if (size > la_size_sect)
@@ -1007,7 +1007,7 @@ drbd_new_dev_size(struct drbd_device *device, struct drbd_backing_dev *bdev,
1007 m_size = drbd_get_max_capacity(bdev); 1007 m_size = drbd_get_max_capacity(bdev);
1008 1008
1009 if (device->state.conn < C_CONNECTED && assume_peer_has_space) { 1009 if (device->state.conn < C_CONNECTED && assume_peer_has_space) {
1010 dev_warn(DEV, "Resize while not connected was forced by the user!\n"); 1010 drbd_warn(device, "Resize while not connected was forced by the user!\n");
1011 p_size = m_size; 1011 p_size = m_size;
1012 } 1012 }
1013 1013
@@ -1029,11 +1029,11 @@ drbd_new_dev_size(struct drbd_device *device, struct drbd_backing_dev *bdev,
1029 } 1029 }
1030 1030
1031 if (size == 0) 1031 if (size == 0)
1032 dev_err(DEV, "Both nodes diskless!\n"); 1032 drbd_err(device, "Both nodes diskless!\n");
1033 1033
1034 if (u_size) { 1034 if (u_size) {
1035 if (u_size > size) 1035 if (u_size > size)
1036 dev_err(DEV, "Requested disk size is too big (%lu > %lu)\n", 1036 drbd_err(device, "Requested disk size is too big (%lu > %lu)\n",
1037 (unsigned long)u_size>>1, (unsigned long)size>>1); 1037 (unsigned long)u_size>>1, (unsigned long)size>>1);
1038 else 1038 else
1039 size = u_size; 1039 size = u_size;
@@ -1067,7 +1067,7 @@ static int drbd_check_al_size(struct drbd_device *device, struct disk_conf *dc)
1067 dc->al_extents, sizeof(struct lc_element), 0); 1067 dc->al_extents, sizeof(struct lc_element), 0);
1068 1068
1069 if (n == NULL) { 1069 if (n == NULL) {
1070 dev_err(DEV, "Cannot allocate act_log lru!\n"); 1070 drbd_err(device, "Cannot allocate act_log lru!\n");
1071 return -ENOMEM; 1071 return -ENOMEM;
1072 } 1072 }
1073 spin_lock_irq(&device->al_lock); 1073 spin_lock_irq(&device->al_lock);
@@ -1075,7 +1075,7 @@ static int drbd_check_al_size(struct drbd_device *device, struct disk_conf *dc)
1075 for (i = 0; i < t->nr_elements; i++) { 1075 for (i = 0; i < t->nr_elements; i++) {
1076 e = lc_element_by_index(t, i); 1076 e = lc_element_by_index(t, i);
1077 if (e->refcnt) 1077 if (e->refcnt)
1078 dev_err(DEV, "refcnt(%d)==%d\n", 1078 drbd_err(device, "refcnt(%d)==%d\n",
1079 e->lc_number, e->refcnt); 1079 e->lc_number, e->refcnt);
1080 in_use += e->refcnt; 1080 in_use += e->refcnt;
1081 } 1081 }
@@ -1084,7 +1084,7 @@ static int drbd_check_al_size(struct drbd_device *device, struct disk_conf *dc)
1084 device->act_log = n; 1084 device->act_log = n;
1085 spin_unlock_irq(&device->al_lock); 1085 spin_unlock_irq(&device->al_lock);
1086 if (in_use) { 1086 if (in_use) {
1087 dev_err(DEV, "Activity log still in use!\n"); 1087 drbd_err(device, "Activity log still in use!\n");
1088 lc_destroy(n); 1088 lc_destroy(n);
1089 return -EBUSY; 1089 return -EBUSY;
1090 } else { 1090 } else {
@@ -1123,7 +1123,7 @@ static void drbd_setup_queue_param(struct drbd_device *device, unsigned int max_
1123 blk_queue_stack_limits(q, b); 1123 blk_queue_stack_limits(q, b);
1124 1124
1125 if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) { 1125 if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
1126 dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n", 1126 drbd_info(device, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
1127 q->backing_dev_info.ra_pages, 1127 q->backing_dev_info.ra_pages,
1128 b->backing_dev_info.ra_pages); 1128 b->backing_dev_info.ra_pages);
1129 q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages; 1129 q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
@@ -1165,10 +1165,10 @@ void drbd_reconsider_max_bio_size(struct drbd_device *device)
1165 new = min(local, peer); 1165 new = min(local, peer);
1166 1166
1167 if (device->state.role == R_PRIMARY && new < now) 1167 if (device->state.role == R_PRIMARY && new < now)
1168 dev_err(DEV, "ASSERT FAILED new < now; (%u < %u)\n", new, now); 1168 drbd_err(device, "ASSERT FAILED new < now; (%u < %u)\n", new, now);
1169 1169
1170 if (new != now) 1170 if (new != now)
1171 dev_info(DEV, "max BIO size = %u\n", new); 1171 drbd_info(device, "max BIO size = %u\n", new);
1172 1172
1173 drbd_setup_queue_param(device, new); 1173 drbd_setup_queue_param(device, new);
1174} 1174}
@@ -1202,7 +1202,7 @@ static void drbd_suspend_al(struct drbd_device *device)
1202 int s = 0; 1202 int s = 0;
1203 1203
1204 if (!lc_try_lock(device->act_log)) { 1204 if (!lc_try_lock(device->act_log)) {
1205 dev_warn(DEV, "Failed to lock al in drbd_suspend_al()\n"); 1205 drbd_warn(device, "Failed to lock al in drbd_suspend_al()\n");
1206 return; 1206 return;
1207 } 1207 }
1208 1208
@@ -1214,7 +1214,7 @@ static void drbd_suspend_al(struct drbd_device *device)
1214 lc_unlock(device->act_log); 1214 lc_unlock(device->act_log);
1215 1215
1216 if (s) 1216 if (s)
1217 dev_info(DEV, "Suspended AL updates\n"); 1217 drbd_info(device, "Suspended AL updates\n");
1218} 1218}
1219 1219
1220 1220
@@ -1309,7 +1309,7 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
1309 if (fifo_size != device->rs_plan_s->size) { 1309 if (fifo_size != device->rs_plan_s->size) {
1310 new_plan = fifo_alloc(fifo_size); 1310 new_plan = fifo_alloc(fifo_size);
1311 if (!new_plan) { 1311 if (!new_plan) {
1312 dev_err(DEV, "kmalloc of fifo_buffer failed"); 1312 drbd_err(device, "kmalloc of fifo_buffer failed");
1313 retcode = ERR_NOMEM; 1313 retcode = ERR_NOMEM;
1314 goto fail_unlock; 1314 goto fail_unlock;
1315 } 1315 }
@@ -1485,7 +1485,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
1485 bdev = blkdev_get_by_path(new_disk_conf->backing_dev, 1485 bdev = blkdev_get_by_path(new_disk_conf->backing_dev,
1486 FMODE_READ | FMODE_WRITE | FMODE_EXCL, device); 1486 FMODE_READ | FMODE_WRITE | FMODE_EXCL, device);
1487 if (IS_ERR(bdev)) { 1487 if (IS_ERR(bdev)) {
1488 dev_err(DEV, "open(\"%s\") failed with %ld\n", new_disk_conf->backing_dev, 1488 drbd_err(device, "open(\"%s\") failed with %ld\n", new_disk_conf->backing_dev,
1489 PTR_ERR(bdev)); 1489 PTR_ERR(bdev));
1490 retcode = ERR_OPEN_DISK; 1490 retcode = ERR_OPEN_DISK;
1491 goto fail; 1491 goto fail;
@@ -1505,7 +1505,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
1505 (new_disk_conf->meta_dev_idx < 0) ? 1505 (new_disk_conf->meta_dev_idx < 0) ?
1506 (void *)device : (void *)drbd_m_holder); 1506 (void *)device : (void *)drbd_m_holder);
1507 if (IS_ERR(bdev)) { 1507 if (IS_ERR(bdev)) {
1508 dev_err(DEV, "open(\"%s\") failed with %ld\n", new_disk_conf->meta_dev, 1508 drbd_err(device, "open(\"%s\") failed with %ld\n", new_disk_conf->meta_dev,
1509 PTR_ERR(bdev)); 1509 PTR_ERR(bdev));
1510 retcode = ERR_OPEN_MD_DISK; 1510 retcode = ERR_OPEN_MD_DISK;
1511 goto fail; 1511 goto fail;
@@ -1539,7 +1539,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
1539 new_disk_conf->al_extents = drbd_al_extents_max(nbc); 1539 new_disk_conf->al_extents = drbd_al_extents_max(nbc);
1540 1540
1541 if (drbd_get_max_capacity(nbc) < new_disk_conf->disk_size) { 1541 if (drbd_get_max_capacity(nbc) < new_disk_conf->disk_size) {
1542 dev_err(DEV, "max capacity %llu smaller than disk size %llu\n", 1542 drbd_err(device, "max capacity %llu smaller than disk size %llu\n",
1543 (unsigned long long) drbd_get_max_capacity(nbc), 1543 (unsigned long long) drbd_get_max_capacity(nbc),
1544 (unsigned long long) new_disk_conf->disk_size); 1544 (unsigned long long) new_disk_conf->disk_size);
1545 retcode = ERR_DISK_TOO_SMALL; 1545 retcode = ERR_DISK_TOO_SMALL;
@@ -1557,7 +1557,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
1557 1557
1558 if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) { 1558 if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
1559 retcode = ERR_MD_DISK_TOO_SMALL; 1559 retcode = ERR_MD_DISK_TOO_SMALL;
1560 dev_warn(DEV, "refusing attach: md-device too small, " 1560 drbd_warn(device, "refusing attach: md-device too small, "
1561 "at least %llu sectors needed for this meta-disk type\n", 1561 "at least %llu sectors needed for this meta-disk type\n",
1562 (unsigned long long) min_md_device_sectors); 1562 (unsigned long long) min_md_device_sectors);
1563 goto fail; 1563 goto fail;
@@ -1574,11 +1574,11 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
1574 nbc->known_size = drbd_get_capacity(nbc->backing_bdev); 1574 nbc->known_size = drbd_get_capacity(nbc->backing_bdev);
1575 1575
1576 if (nbc->known_size > max_possible_sectors) { 1576 if (nbc->known_size > max_possible_sectors) {
1577 dev_warn(DEV, "==> truncating very big lower level device " 1577 drbd_warn(device, "==> truncating very big lower level device "
1578 "to currently maximum possible %llu sectors <==\n", 1578 "to currently maximum possible %llu sectors <==\n",
1579 (unsigned long long) max_possible_sectors); 1579 (unsigned long long) max_possible_sectors);
1580 if (new_disk_conf->meta_dev_idx >= 0) 1580 if (new_disk_conf->meta_dev_idx >= 0)
1581 dev_warn(DEV, "==>> using internal or flexible " 1581 drbd_warn(device, "==>> using internal or flexible "
1582 "meta data may help <<==\n"); 1582 "meta data may help <<==\n");
1583 } 1583 }
1584 1584
@@ -1613,7 +1613,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
1613 if (device->state.conn < C_CONNECTED && 1613 if (device->state.conn < C_CONNECTED &&
1614 device->state.role == R_PRIMARY && 1614 device->state.role == R_PRIMARY &&
1615 (device->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) { 1615 (device->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
1616 dev_err(DEV, "Can only attach to data with current UUID=%016llX\n", 1616 drbd_err(device, "Can only attach to data with current UUID=%016llX\n",
1617 (unsigned long long)device->ed_uuid); 1617 (unsigned long long)device->ed_uuid);
1618 retcode = ERR_DATA_NOT_CURRENT; 1618 retcode = ERR_DATA_NOT_CURRENT;
1619 goto force_diskless_dec; 1619 goto force_diskless_dec;
@@ -1628,7 +1628,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
1628 /* Prevent shrinking of consistent devices ! */ 1628 /* Prevent shrinking of consistent devices ! */
1629 if (drbd_md_test_flag(nbc, MDF_CONSISTENT) && 1629 if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
1630 drbd_new_dev_size(device, nbc, nbc->disk_conf->disk_size, 0) < nbc->md.la_size_sect) { 1630 drbd_new_dev_size(device, nbc, nbc->disk_conf->disk_size, 0) < nbc->md.la_size_sect) {
1631 dev_warn(DEV, "refusing to truncate a consistent device\n"); 1631 drbd_warn(device, "refusing to truncate a consistent device\n");
1632 retcode = ERR_DISK_TOO_SMALL; 1632 retcode = ERR_DISK_TOO_SMALL;
1633 goto force_diskless_dec; 1633 goto force_diskless_dec;
1634 } 1634 }
@@ -1702,7 +1702,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
1702 if (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC) || 1702 if (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC) ||
1703 (test_bit(CRASHED_PRIMARY, &device->flags) && 1703 (test_bit(CRASHED_PRIMARY, &device->flags) &&
1704 drbd_md_test_flag(device->ldev, MDF_AL_DISABLED))) { 1704 drbd_md_test_flag(device->ldev, MDF_AL_DISABLED))) {
1705 dev_info(DEV, "Assuming that all blocks are out of sync " 1705 drbd_info(device, "Assuming that all blocks are out of sync "
1706 "(aka FullSync)\n"); 1706 "(aka FullSync)\n");
1707 if (drbd_bitmap_io(device, &drbd_bmio_set_n_write, 1707 if (drbd_bitmap_io(device, &drbd_bmio_set_n_write,
1708 "set_n_write from attaching", BM_LOCKED_MASK)) { 1708 "set_n_write from attaching", BM_LOCKED_MASK)) {
@@ -2381,7 +2381,7 @@ void resync_after_online_grow(struct drbd_device *device)
2381{ 2381{
2382 int iass; /* I am sync source */ 2382 int iass; /* I am sync source */
2383 2383
2384 dev_info(DEV, "Resync of new storage after online grow\n"); 2384 drbd_info(device, "Resync of new storage after online grow\n");
2385 if (device->state.role != device->state.peer) 2385 if (device->state.role != device->state.peer)
2386 iass = (device->state.role == R_PRIMARY); 2386 iass = (device->state.role == R_PRIMARY);
2387 else 2387 else
@@ -3203,7 +3203,7 @@ int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
3203 if (device->state.conn == C_CONNECTED && 3203 if (device->state.conn == C_CONNECTED &&
3204 first_peer_device(device)->connection->agreed_pro_version >= 90 && 3204 first_peer_device(device)->connection->agreed_pro_version >= 90 &&
3205 device->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) { 3205 device->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
3206 dev_info(DEV, "Preparing to skip initial sync\n"); 3206 drbd_info(device, "Preparing to skip initial sync\n");
3207 skip_initial_sync = 1; 3207 skip_initial_sync = 1;
3208 } else if (device->state.conn != C_STANDALONE) { 3208 } else if (device->state.conn != C_STANDALONE) {
3209 retcode = ERR_CONNECTED; 3209 retcode = ERR_CONNECTED;
@@ -3217,7 +3217,7 @@ int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
3217 err = drbd_bitmap_io(device, &drbd_bmio_clear_n_write, 3217 err = drbd_bitmap_io(device, &drbd_bmio_clear_n_write,
3218 "clear_n_write from new_c_uuid", BM_LOCKED_MASK); 3218 "clear_n_write from new_c_uuid", BM_LOCKED_MASK);
3219 if (err) { 3219 if (err) {
3220 dev_err(DEV, "Writing bitmap failed with %d\n",err); 3220 drbd_err(device, "Writing bitmap failed with %d\n", err);
3221 retcode = ERR_IO_MD_DISK; 3221 retcode = ERR_IO_MD_DISK;
3222 } 3222 }
3223 if (skip_initial_sync) { 3223 if (skip_initial_sync) {
@@ -3513,7 +3513,7 @@ void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib)
3513nla_put_failure: 3513nla_put_failure:
3514 nlmsg_free(msg); 3514 nlmsg_free(msg);
3515failed: 3515failed:
3516 dev_err(DEV, "Error %d while broadcasting event. " 3516 drbd_err(device, "Error %d while broadcasting event. "
3517 "Event seq:%u sib_reason:%u\n", 3517 "Event seq:%u sib_reason:%u\n",
3518 err, seq, sib->sib_reason); 3518 err, seq, sib->sib_reason);
3519} 3519}
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 24907877cae4..2ee9023b1808 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -274,7 +274,7 @@ struct page *drbd_alloc_pages(struct drbd_device *device, unsigned int number,
274 break; 274 break;
275 275
276 if (signal_pending(current)) { 276 if (signal_pending(current)) {
277 dev_warn(DEV, "drbd_alloc_pages interrupted!\n"); 277 drbd_warn(device, "drbd_alloc_pages interrupted!\n");
278 break; 278 break;
279 } 279 }
280 280
@@ -311,7 +311,7 @@ static void drbd_free_pages(struct drbd_device *device, struct page *page, int i
311 } 311 }
312 i = atomic_sub_return(i, a); 312 i = atomic_sub_return(i, a);
313 if (i < 0) 313 if (i < 0)
314 dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n", 314 drbd_warn(device, "ASSERTION FAILED: %s: %d < 0\n",
315 is_net ? "pp_in_use_by_net" : "pp_in_use", i); 315 is_net ? "pp_in_use_by_net" : "pp_in_use", i);
316 wake_up(&drbd_pp_wait); 316 wake_up(&drbd_pp_wait);
317} 317}
@@ -344,7 +344,7 @@ drbd_alloc_peer_req(struct drbd_device *device, u64 id, sector_t sector,
344 peer_req = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM); 344 peer_req = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
345 if (!peer_req) { 345 if (!peer_req) {
346 if (!(gfp_mask & __GFP_NOWARN)) 346 if (!(gfp_mask & __GFP_NOWARN))
347 dev_err(DEV, "%s: allocation failed\n", __func__); 347 drbd_err(device, "%s: allocation failed\n", __func__);
348 return NULL; 348 return NULL;
349 } 349 }
350 350
@@ -1162,7 +1162,7 @@ static void drbd_flush(struct drbd_connection *connection)
1162 rv = blkdev_issue_flush(device->ldev->backing_bdev, 1162 rv = blkdev_issue_flush(device->ldev->backing_bdev,
1163 GFP_NOIO, NULL); 1163 GFP_NOIO, NULL);
1164 if (rv) { 1164 if (rv) {
1165 dev_info(DEV, "local disk flush failed with status %d\n", rv); 1165 drbd_info(device, "local disk flush failed with status %d\n", rv);
1166 /* would rather check on EOPNOTSUPP, but that is not reliable. 1166 /* would rather check on EOPNOTSUPP, but that is not reliable.
1167 * don't try again for ANY return value != 0 1167 * don't try again for ANY return value != 0
1168 * if (rv == -EOPNOTSUPP) */ 1168 * if (rv == -EOPNOTSUPP) */
@@ -1335,7 +1335,7 @@ int drbd_submit_peer_request(struct drbd_device *device,
1335next_bio: 1335next_bio:
1336 bio = bio_alloc(GFP_NOIO, nr_pages); 1336 bio = bio_alloc(GFP_NOIO, nr_pages);
1337 if (!bio) { 1337 if (!bio) {
1338 dev_err(DEV, "submit_ee: Allocation of a bio failed\n"); 1338 drbd_err(device, "submit_ee: Allocation of a bio failed\n");
1339 goto fail; 1339 goto fail;
1340 } 1340 }
1341 /* > peer_req->i.sector, unless this is the first bio */ 1341 /* > peer_req->i.sector, unless this is the first bio */
@@ -1356,7 +1356,7 @@ next_bio:
1356 * But in case it fails anyways, 1356 * But in case it fails anyways,
1357 * we deal with it, and complain (below). */ 1357 * we deal with it, and complain (below). */
1358 if (bio->bi_vcnt == 0) { 1358 if (bio->bi_vcnt == 0) {
1359 dev_err(DEV, 1359 drbd_err(device,
1360 "bio_add_page failed for len=%u, " 1360 "bio_add_page failed for len=%u, "
1361 "bi_vcnt=0 (bi_sector=%llu)\n", 1361 "bi_vcnt=0 (bi_sector=%llu)\n",
1362 len, (uint64_t)bio->bi_iter.bi_sector); 1362 len, (uint64_t)bio->bi_iter.bi_sector);
@@ -1524,7 +1524,7 @@ read_in_block(struct drbd_device *device, u64 id, sector_t sector,
1524 /* even though we trust out peer, 1524 /* even though we trust out peer,
1525 * we sometimes have to double check. */ 1525 * we sometimes have to double check. */
1526 if (sector + (data_size>>9) > capacity) { 1526 if (sector + (data_size>>9) > capacity) {
1527 dev_err(DEV, "request from peer beyond end of local disk: " 1527 drbd_err(device, "request from peer beyond end of local disk: "
1528 "capacity: %llus < sector: %llus + size: %u\n", 1528 "capacity: %llus < sector: %llus + size: %u\n",
1529 (unsigned long long)capacity, 1529 (unsigned long long)capacity,
1530 (unsigned long long)sector, data_size); 1530 (unsigned long long)sector, data_size);
@@ -1548,7 +1548,7 @@ read_in_block(struct drbd_device *device, u64 id, sector_t sector,
1548 data = kmap(page); 1548 data = kmap(page);
1549 err = drbd_recv_all_warn(first_peer_device(device)->connection, data, len); 1549 err = drbd_recv_all_warn(first_peer_device(device)->connection, data, len);
1550 if (drbd_insert_fault(device, DRBD_FAULT_RECEIVE)) { 1550 if (drbd_insert_fault(device, DRBD_FAULT_RECEIVE)) {
1551 dev_err(DEV, "Fault injection: Corrupting data on receive\n"); 1551 drbd_err(device, "Fault injection: Corrupting data on receive\n");
1552 data[0] = data[0] ^ (unsigned long)-1; 1552 data[0] = data[0] ^ (unsigned long)-1;
1553 } 1553 }
1554 kunmap(page); 1554 kunmap(page);
@@ -1562,7 +1562,7 @@ read_in_block(struct drbd_device *device, u64 id, sector_t sector,
1562 if (dgs) { 1562 if (dgs) {
1563 drbd_csum_ee(device, first_peer_device(device)->connection->peer_integrity_tfm, peer_req, dig_vv); 1563 drbd_csum_ee(device, first_peer_device(device)->connection->peer_integrity_tfm, peer_req, dig_vv);
1564 if (memcmp(dig_in, dig_vv, dgs)) { 1564 if (memcmp(dig_in, dig_vv, dgs)) {
1565 dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n", 1565 drbd_err(device, "Digest integrity check FAILED: %llus +%u\n",
1566 (unsigned long long)sector, data_size); 1566 (unsigned long long)sector, data_size);
1567 drbd_free_peer_req(device, peer_req); 1567 drbd_free_peer_req(device, peer_req);
1568 return NULL; 1568 return NULL;
@@ -1639,7 +1639,7 @@ static int recv_dless_read(struct drbd_device *device, struct drbd_request *req,
1639 if (dgs) { 1639 if (dgs) {
1640 drbd_csum_bio(device, first_peer_device(device)->connection->peer_integrity_tfm, bio, dig_vv); 1640 drbd_csum_bio(device, first_peer_device(device)->connection->peer_integrity_tfm, bio, dig_vv);
1641 if (memcmp(dig_in, dig_vv, dgs)) { 1641 if (memcmp(dig_in, dig_vv, dgs)) {
1642 dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n"); 1642 drbd_err(device, "Digest integrity check FAILED. Broken NICs?\n");
1643 return -EINVAL; 1643 return -EINVAL;
1644 } 1644 }
1645 } 1645 }
@@ -1701,7 +1701,7 @@ static int recv_resync_read(struct drbd_device *device, sector_t sector, int dat
1701 return 0; 1701 return 0;
1702 1702
1703 /* don't care for the reason here */ 1703 /* don't care for the reason here */
1704 dev_err(DEV, "submit failed, triggering re-connect\n"); 1704 drbd_err(device, "submit failed, triggering re-connect\n");
1705 spin_lock_irq(&first_peer_device(device)->connection->req_lock); 1705 spin_lock_irq(&first_peer_device(device)->connection->req_lock);
1706 list_del(&peer_req->w.list); 1706 list_del(&peer_req->w.list);
1707 spin_unlock_irq(&first_peer_device(device)->connection->req_lock); 1707 spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
@@ -1723,7 +1723,7 @@ find_request(struct drbd_device *device, struct rb_root *root, u64 id,
1723 if (drbd_contains_interval(root, sector, &req->i) && req->i.local) 1723 if (drbd_contains_interval(root, sector, &req->i) && req->i.local)
1724 return req; 1724 return req;
1725 if (!missing_ok) { 1725 if (!missing_ok) {
1726 dev_err(DEV, "%s: failed to find request 0x%lx, sector %llus\n", func, 1726 drbd_err(device, "%s: failed to find request 0x%lx, sector %llus\n", func,
1727 (unsigned long)id, (unsigned long long)sector); 1727 (unsigned long)id, (unsigned long long)sector);
1728 } 1728 }
1729 return NULL; 1729 return NULL;
@@ -1783,7 +1783,7 @@ static int receive_RSDataReply(struct drbd_connection *connection, struct packet
1783 err = recv_resync_read(device, sector, pi->size); 1783 err = recv_resync_read(device, sector, pi->size);
1784 } else { 1784 } else {
1785 if (__ratelimit(&drbd_ratelimit_state)) 1785 if (__ratelimit(&drbd_ratelimit_state))
1786 dev_err(DEV, "Can not write resync data to local disk.\n"); 1786 drbd_err(device, "Can not write resync data to local disk.\n");
1787 1787
1788 err = drbd_drain_block(device, pi->size); 1788 err = drbd_drain_block(device, pi->size);
1789 1789
@@ -1997,7 +1997,7 @@ static int wait_for_and_update_peer_seq(struct drbd_device *device, const u32 pe
1997 spin_lock(&device->peer_seq_lock); 1997 spin_lock(&device->peer_seq_lock);
1998 if (!timeout) { 1998 if (!timeout) {
1999 ret = -ETIMEDOUT; 1999 ret = -ETIMEDOUT;
2000 dev_err(DEV, "Timed out waiting for missing ack packets; disconnecting\n"); 2000 drbd_err(device, "Timed out waiting for missing ack packets; disconnecting\n");
2001 break; 2001 break;
2002 } 2002 }
2003 } 2003 }
@@ -2088,7 +2088,7 @@ static int handle_write_conflicts(struct drbd_device *device,
2088 (i->size >> 9) >= sector + (size >> 9); 2088 (i->size >> 9) >= sector + (size >> 9);
2089 2089
2090 if (!equal) 2090 if (!equal)
2091 dev_alert(DEV, "Concurrent writes detected: " 2091 drbd_alert(device, "Concurrent writes detected: "
2092 "local=%llus +%u, remote=%llus +%u, " 2092 "local=%llus +%u, remote=%llus +%u, "
2093 "assuming %s came first\n", 2093 "assuming %s came first\n",
2094 (unsigned long long)i->sector, i->size, 2094 (unsigned long long)i->sector, i->size,
@@ -2108,7 +2108,7 @@ static int handle_write_conflicts(struct drbd_device *device,
2108 container_of(i, struct drbd_request, i); 2108 container_of(i, struct drbd_request, i);
2109 2109
2110 if (!equal) 2110 if (!equal)
2111 dev_alert(DEV, "Concurrent writes detected: " 2111 drbd_alert(device, "Concurrent writes detected: "
2112 "local=%llus +%u, remote=%llus +%u\n", 2112 "local=%llus +%u, remote=%llus +%u\n",
2113 (unsigned long long)i->sector, i->size, 2113 (unsigned long long)i->sector, i->size,
2114 (unsigned long long)sector, size); 2114 (unsigned long long)sector, size);
@@ -2277,7 +2277,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
2277 return 0; 2277 return 0;
2278 2278
2279 /* don't care for the reason here */ 2279 /* don't care for the reason here */
2280 dev_err(DEV, "submit failed, triggering re-connect\n"); 2280 drbd_err(device, "submit failed, triggering re-connect\n");
2281 spin_lock_irq(&first_peer_device(device)->connection->req_lock); 2281 spin_lock_irq(&first_peer_device(device)->connection->req_lock);
2282 list_del(&peer_req->w.list); 2282 list_del(&peer_req->w.list);
2283 drbd_remove_epoch_entry_interval(device, peer_req); 2283 drbd_remove_epoch_entry_interval(device, peer_req);
@@ -2384,12 +2384,12 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
2384 size = be32_to_cpu(p->blksize); 2384 size = be32_to_cpu(p->blksize);
2385 2385
2386 if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) { 2386 if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
2387 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__, 2387 drbd_err(device, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2388 (unsigned long long)sector, size); 2388 (unsigned long long)sector, size);
2389 return -EINVAL; 2389 return -EINVAL;
2390 } 2390 }
2391 if (sector + (size>>9) > capacity) { 2391 if (sector + (size>>9) > capacity) {
2392 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__, 2392 drbd_err(device, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2393 (unsigned long long)sector, size); 2393 (unsigned long long)sector, size);
2394 return -EINVAL; 2394 return -EINVAL;
2395 } 2395 }
@@ -2414,7 +2414,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
2414 BUG(); 2414 BUG();
2415 } 2415 }
2416 if (verb && __ratelimit(&drbd_ratelimit_state)) 2416 if (verb && __ratelimit(&drbd_ratelimit_state))
2417 dev_err(DEV, "Can not satisfy peer's read request, " 2417 drbd_err(device, "Can not satisfy peer's read request, "
2418 "no local data.\n"); 2418 "no local data.\n");
2419 2419
2420 /* drain possibly payload */ 2420 /* drain possibly payload */
@@ -2489,7 +2489,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
2489 device->rs_mark_left[i] = device->ov_left; 2489 device->rs_mark_left[i] = device->ov_left;
2490 device->rs_mark_time[i] = now; 2490 device->rs_mark_time[i] = now;
2491 } 2491 }
2492 dev_info(DEV, "Online Verify start sector: %llu\n", 2492 drbd_info(device, "Online Verify start sector: %llu\n",
2493 (unsigned long long)sector); 2493 (unsigned long long)sector);
2494 } 2494 }
2495 peer_req->w.cb = w_e_end_ov_req; 2495 peer_req->w.cb = w_e_end_ov_req;
@@ -2540,7 +2540,7 @@ submit:
2540 return 0; 2540 return 0;
2541 2541
2542 /* don't care for the reason here */ 2542 /* don't care for the reason here */
2543 dev_err(DEV, "submit failed, triggering re-connect\n"); 2543 drbd_err(device, "submit failed, triggering re-connect\n");
2544 spin_lock_irq(&first_peer_device(device)->connection->req_lock); 2544 spin_lock_irq(&first_peer_device(device)->connection->req_lock);
2545 list_del(&peer_req->w.list); 2545 list_del(&peer_req->w.list);
2546 spin_unlock_irq(&first_peer_device(device)->connection->req_lock); 2546 spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
@@ -2572,7 +2572,7 @@ static int drbd_asb_recover_0p(struct drbd_device *device) __must_hold(local)
2572 case ASB_DISCARD_SECONDARY: 2572 case ASB_DISCARD_SECONDARY:
2573 case ASB_CALL_HELPER: 2573 case ASB_CALL_HELPER:
2574 case ASB_VIOLENTLY: 2574 case ASB_VIOLENTLY:
2575 dev_err(DEV, "Configuration error.\n"); 2575 drbd_err(device, "Configuration error.\n");
2576 break; 2576 break;
2577 case ASB_DISCONNECT: 2577 case ASB_DISCONNECT:
2578 break; 2578 break;
@@ -2596,7 +2596,7 @@ static int drbd_asb_recover_0p(struct drbd_device *device) __must_hold(local)
2596 break; 2596 break;
2597 } 2597 }
2598 /* Else fall through to one of the other strategies... */ 2598 /* Else fall through to one of the other strategies... */
2599 dev_warn(DEV, "Discard younger/older primary did not find a decision\n" 2599 drbd_warn(device, "Discard younger/older primary did not find a decision\n"
2600 "Using discard-least-changes instead\n"); 2600 "Using discard-least-changes instead\n");
2601 case ASB_DISCARD_ZERO_CHG: 2601 case ASB_DISCARD_ZERO_CHG:
2602 if (ch_peer == 0 && ch_self == 0) { 2602 if (ch_peer == 0 && ch_self == 0) {
@@ -2644,7 +2644,7 @@ static int drbd_asb_recover_1p(struct drbd_device *device) __must_hold(local)
2644 case ASB_DISCARD_LOCAL: 2644 case ASB_DISCARD_LOCAL:
2645 case ASB_DISCARD_REMOTE: 2645 case ASB_DISCARD_REMOTE:
2646 case ASB_DISCARD_ZERO_CHG: 2646 case ASB_DISCARD_ZERO_CHG:
2647 dev_err(DEV, "Configuration error.\n"); 2647 drbd_err(device, "Configuration error.\n");
2648 break; 2648 break;
2649 case ASB_DISCONNECT: 2649 case ASB_DISCONNECT:
2650 break; 2650 break;
@@ -2672,7 +2672,7 @@ static int drbd_asb_recover_1p(struct drbd_device *device) __must_hold(local)
2672 if (rv2 != SS_SUCCESS) { 2672 if (rv2 != SS_SUCCESS) {
2673 drbd_khelper(device, "pri-lost-after-sb"); 2673 drbd_khelper(device, "pri-lost-after-sb");
2674 } else { 2674 } else {
2675 dev_warn(DEV, "Successfully gave up primary role.\n"); 2675 drbd_warn(device, "Successfully gave up primary role.\n");
2676 rv = hg; 2676 rv = hg;
2677 } 2677 }
2678 } else 2678 } else
@@ -2699,7 +2699,7 @@ static int drbd_asb_recover_2p(struct drbd_device *device) __must_hold(local)
2699 case ASB_CONSENSUS: 2699 case ASB_CONSENSUS:
2700 case ASB_DISCARD_SECONDARY: 2700 case ASB_DISCARD_SECONDARY:
2701 case ASB_DISCARD_ZERO_CHG: 2701 case ASB_DISCARD_ZERO_CHG:
2702 dev_err(DEV, "Configuration error.\n"); 2702 drbd_err(device, "Configuration error.\n");
2703 break; 2703 break;
2704 case ASB_VIOLENTLY: 2704 case ASB_VIOLENTLY:
2705 rv = drbd_asb_recover_0p(device); 2705 rv = drbd_asb_recover_0p(device);
@@ -2718,7 +2718,7 @@ static int drbd_asb_recover_2p(struct drbd_device *device) __must_hold(local)
2718 if (rv2 != SS_SUCCESS) { 2718 if (rv2 != SS_SUCCESS) {
2719 drbd_khelper(device, "pri-lost-after-sb"); 2719 drbd_khelper(device, "pri-lost-after-sb");
2720 } else { 2720 } else {
2721 dev_warn(DEV, "Successfully gave up primary role.\n"); 2721 drbd_warn(device, "Successfully gave up primary role.\n");
2722 rv = hg; 2722 rv = hg;
2723 } 2723 }
2724 } else 2724 } else
@@ -2732,10 +2732,10 @@ static void drbd_uuid_dump(struct drbd_device *device, char *text, u64 *uuid,
2732 u64 bits, u64 flags) 2732 u64 bits, u64 flags)
2733{ 2733{
2734 if (!uuid) { 2734 if (!uuid) {
2735 dev_info(DEV, "%s uuid info vanished while I was looking!\n", text); 2735 drbd_info(device, "%s uuid info vanished while I was looking!\n", text);
2736 return; 2736 return;
2737 } 2737 }
2738 dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n", 2738 drbd_info(device, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
2739 text, 2739 text,
2740 (unsigned long long)uuid[UI_CURRENT], 2740 (unsigned long long)uuid[UI_CURRENT],
2741 (unsigned long long)uuid[UI_BITMAP], 2741 (unsigned long long)uuid[UI_BITMAP],
@@ -2789,7 +2789,7 @@ static int drbd_uuid_compare(struct drbd_device *device, int *rule_nr) __must_ho
2789 2789
2790 if ((device->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START] & ~((u64)1)) && 2790 if ((device->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
2791 (device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) { 2791 (device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
2792 dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n"); 2792 drbd_info(device, "was SyncSource, missed the resync finished event, corrected myself:\n");
2793 drbd_uuid_move_history(device); 2793 drbd_uuid_move_history(device);
2794 device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP]; 2794 device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP];
2795 device->ldev->md.uuid[UI_BITMAP] = 0; 2795 device->ldev->md.uuid[UI_BITMAP] = 0;
@@ -2798,7 +2798,7 @@ static int drbd_uuid_compare(struct drbd_device *device, int *rule_nr) __must_ho
2798 device->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(device) : 0, 0); 2798 device->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(device) : 0, 0);
2799 *rule_nr = 34; 2799 *rule_nr = 34;
2800 } else { 2800 } else {
2801 dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n"); 2801 drbd_info(device, "was SyncSource (peer failed to write sync_uuid)\n");
2802 *rule_nr = 36; 2802 *rule_nr = 36;
2803 } 2803 }
2804 2804
@@ -2812,7 +2812,7 @@ static int drbd_uuid_compare(struct drbd_device *device, int *rule_nr) __must_ho
2812 2812
2813 if ((device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (device->p_uuid[UI_BITMAP] & ~((u64)1)) && 2813 if ((device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (device->p_uuid[UI_BITMAP] & ~((u64)1)) &&
2814 (device->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START] & ~((u64)1))) { 2814 (device->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
2815 dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n"); 2815 drbd_info(device, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");
2816 2816
2817 device->p_uuid[UI_HISTORY_START + 1] = device->p_uuid[UI_HISTORY_START]; 2817 device->p_uuid[UI_HISTORY_START + 1] = device->p_uuid[UI_HISTORY_START];
2818 device->p_uuid[UI_HISTORY_START] = device->p_uuid[UI_BITMAP]; 2818 device->p_uuid[UI_HISTORY_START] = device->p_uuid[UI_BITMAP];
@@ -2821,7 +2821,7 @@ static int drbd_uuid_compare(struct drbd_device *device, int *rule_nr) __must_ho
2821 drbd_uuid_dump(device, "peer", device->p_uuid, device->p_uuid[UI_SIZE], device->p_uuid[UI_FLAGS]); 2821 drbd_uuid_dump(device, "peer", device->p_uuid, device->p_uuid[UI_SIZE], device->p_uuid[UI_FLAGS]);
2822 *rule_nr = 35; 2822 *rule_nr = 35;
2823 } else { 2823 } else {
2824 dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n"); 2824 drbd_info(device, "was SyncTarget (failed to write sync_uuid)\n");
2825 *rule_nr = 37; 2825 *rule_nr = 37;
2826 } 2826 }
2827 2827
@@ -2866,7 +2866,7 @@ static int drbd_uuid_compare(struct drbd_device *device, int *rule_nr) __must_ho
2866 device->p_uuid[UI_BITMAP] = device->p_uuid[UI_HISTORY_START]; 2866 device->p_uuid[UI_BITMAP] = device->p_uuid[UI_HISTORY_START];
2867 device->p_uuid[UI_HISTORY_START] = device->p_uuid[UI_HISTORY_START + 1]; 2867 device->p_uuid[UI_HISTORY_START] = device->p_uuid[UI_HISTORY_START + 1];
2868 2868
2869 dev_info(DEV, "Lost last syncUUID packet, corrected:\n"); 2869 drbd_info(device, "Lost last syncUUID packet, corrected:\n");
2870 drbd_uuid_dump(device, "peer", device->p_uuid, device->p_uuid[UI_SIZE], device->p_uuid[UI_FLAGS]); 2870 drbd_uuid_dump(device, "peer", device->p_uuid, device->p_uuid[UI_SIZE], device->p_uuid[UI_FLAGS]);
2871 2871
2872 return -1; 2872 return -1;
@@ -2903,7 +2903,7 @@ static int drbd_uuid_compare(struct drbd_device *device, int *rule_nr) __must_ho
2903 __drbd_uuid_set(device, UI_BITMAP, device->ldev->md.uuid[UI_HISTORY_START]); 2903 __drbd_uuid_set(device, UI_BITMAP, device->ldev->md.uuid[UI_HISTORY_START]);
2904 __drbd_uuid_set(device, UI_HISTORY_START, device->ldev->md.uuid[UI_HISTORY_START + 1]); 2904 __drbd_uuid_set(device, UI_HISTORY_START, device->ldev->md.uuid[UI_HISTORY_START + 1]);
2905 2905
2906 dev_info(DEV, "Last syncUUID did not get through, corrected:\n"); 2906 drbd_info(device, "Last syncUUID did not get through, corrected:\n");
2907 drbd_uuid_dump(device, "self", device->ldev->md.uuid, 2907 drbd_uuid_dump(device, "self", device->ldev->md.uuid,
2908 device->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(device) : 0, 0); 2908 device->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(device) : 0, 0);
2909 2909
@@ -2954,7 +2954,7 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_device *device, enum drbd
2954 if (mydisk == D_NEGOTIATING) 2954 if (mydisk == D_NEGOTIATING)
2955 mydisk = device->new_state_tmp.disk; 2955 mydisk = device->new_state_tmp.disk;
2956 2956
2957 dev_info(DEV, "drbd_sync_handshake:\n"); 2957 drbd_info(device, "drbd_sync_handshake:\n");
2958 2958
2959 spin_lock_irq(&device->ldev->md.uuid_lock); 2959 spin_lock_irq(&device->ldev->md.uuid_lock);
2960 drbd_uuid_dump(device, "self", device->ldev->md.uuid, device->comm_bm_set, 0); 2960 drbd_uuid_dump(device, "self", device->ldev->md.uuid, device->comm_bm_set, 0);
@@ -2964,14 +2964,14 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_device *device, enum drbd
2964 hg = drbd_uuid_compare(device, &rule_nr); 2964 hg = drbd_uuid_compare(device, &rule_nr);
2965 spin_unlock_irq(&device->ldev->md.uuid_lock); 2965 spin_unlock_irq(&device->ldev->md.uuid_lock);
2966 2966
2967 dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr); 2967 drbd_info(device, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
2968 2968
2969 if (hg == -1000) { 2969 if (hg == -1000) {
2970 dev_alert(DEV, "Unrelated data, aborting!\n"); 2970 drbd_alert(device, "Unrelated data, aborting!\n");
2971 return C_MASK; 2971 return C_MASK;
2972 } 2972 }
2973 if (hg < -1000) { 2973 if (hg < -1000) {
2974 dev_alert(DEV, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000); 2974 drbd_alert(device, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000);
2975 return C_MASK; 2975 return C_MASK;
2976 } 2976 }
2977 2977
@@ -2981,7 +2981,7 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_device *device, enum drbd
2981 hg = mydisk > D_INCONSISTENT ? 1 : -1; 2981 hg = mydisk > D_INCONSISTENT ? 1 : -1;
2982 if (f) 2982 if (f)
2983 hg = hg*2; 2983 hg = hg*2;
2984 dev_info(DEV, "Becoming sync %s due to disk states.\n", 2984 drbd_info(device, "Becoming sync %s due to disk states.\n",
2985 hg > 0 ? "source" : "target"); 2985 hg > 0 ? "source" : "target");
2986 } 2986 }
2987 2987
@@ -3008,11 +3008,11 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_device *device, enum drbd
3008 break; 3008 break;
3009 } 3009 }
3010 if (abs(hg) < 100) { 3010 if (abs(hg) < 100) {
3011 dev_warn(DEV, "Split-Brain detected, %d primaries, " 3011 drbd_warn(device, "Split-Brain detected, %d primaries, "
3012 "automatically solved. Sync from %s node\n", 3012 "automatically solved. Sync from %s node\n",
3013 pcount, (hg < 0) ? "peer" : "this"); 3013 pcount, (hg < 0) ? "peer" : "this");
3014 if (forced) { 3014 if (forced) {
3015 dev_warn(DEV, "Doing a full sync, since" 3015 drbd_warn(device, "Doing a full sync, since"
3016 " UUIDs where ambiguous.\n"); 3016 " UUIDs where ambiguous.\n");
3017 hg = hg*2; 3017 hg = hg*2;
3018 } 3018 }
@@ -3026,7 +3026,7 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_device *device, enum drbd
3026 hg = 1; 3026 hg = 1;
3027 3027
3028 if (abs(hg) < 100) 3028 if (abs(hg) < 100)
3029 dev_warn(DEV, "Split-Brain detected, manually solved. " 3029 drbd_warn(device, "Split-Brain detected, manually solved. "
3030 "Sync from %s node\n", 3030 "Sync from %s node\n",
3031 (hg < 0) ? "peer" : "this"); 3031 (hg < 0) ? "peer" : "this");
3032 } 3032 }
@@ -3039,13 +3039,13 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_device *device, enum drbd
3039 * after an attempted attach on a diskless node. 3039 * after an attempted attach on a diskless node.
3040 * We just refuse to attach -- well, we drop the "connection" 3040 * We just refuse to attach -- well, we drop the "connection"
3041 * to that disk, in a way... */ 3041 * to that disk, in a way... */
3042 dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n"); 3042 drbd_alert(device, "Split-Brain detected but unresolved, dropping connection!\n");
3043 drbd_khelper(device, "split-brain"); 3043 drbd_khelper(device, "split-brain");
3044 return C_MASK; 3044 return C_MASK;
3045 } 3045 }
3046 3046
3047 if (hg > 0 && mydisk <= D_INCONSISTENT) { 3047 if (hg > 0 && mydisk <= D_INCONSISTENT) {
3048 dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n"); 3048 drbd_err(device, "I shall become SyncSource, but I am inconsistent!\n");
3049 return C_MASK; 3049 return C_MASK;
3050 } 3050 }
3051 3051
@@ -3056,26 +3056,26 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_device *device, enum drbd
3056 drbd_khelper(device, "pri-lost"); 3056 drbd_khelper(device, "pri-lost");
3057 /* fall through */ 3057 /* fall through */
3058 case ASB_DISCONNECT: 3058 case ASB_DISCONNECT:
3059 dev_err(DEV, "I shall become SyncTarget, but I am primary!\n"); 3059 drbd_err(device, "I shall become SyncTarget, but I am primary!\n");
3060 return C_MASK; 3060 return C_MASK;
3061 case ASB_VIOLENTLY: 3061 case ASB_VIOLENTLY:
3062 dev_warn(DEV, "Becoming SyncTarget, violating the stable-data" 3062 drbd_warn(device, "Becoming SyncTarget, violating the stable-data"
3063 "assumption\n"); 3063 "assumption\n");
3064 } 3064 }
3065 } 3065 }
3066 3066
3067 if (tentative || test_bit(CONN_DRY_RUN, &first_peer_device(device)->connection->flags)) { 3067 if (tentative || test_bit(CONN_DRY_RUN, &first_peer_device(device)->connection->flags)) {
3068 if (hg == 0) 3068 if (hg == 0)
3069 dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n"); 3069 drbd_info(device, "dry-run connect: No resync, would become Connected immediately.\n");
3070 else 3070 else
3071 dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.", 3071 drbd_info(device, "dry-run connect: Would become %s, doing a %s resync.",
3072 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET), 3072 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
3073 abs(hg) >= 2 ? "full" : "bit-map based"); 3073 abs(hg) >= 2 ? "full" : "bit-map based");
3074 return C_MASK; 3074 return C_MASK;
3075 } 3075 }
3076 3076
3077 if (abs(hg) >= 2) { 3077 if (abs(hg) >= 2) {
3078 dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n"); 3078 drbd_info(device, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
3079 if (drbd_bitmap_io(device, &drbd_bmio_set_n_write, "set_n_write from sync_handshake", 3079 if (drbd_bitmap_io(device, &drbd_bmio_set_n_write, "set_n_write from sync_handshake",
3080 BM_LOCKED_SET_ALLOWED)) 3080 BM_LOCKED_SET_ALLOWED))
3081 return C_MASK; 3081 return C_MASK;
@@ -3088,7 +3088,7 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_device *device, enum drbd
3088 } else { 3088 } else {
3089 rv = C_CONNECTED; 3089 rv = C_CONNECTED;
3090 if (drbd_bm_total_weight(device)) { 3090 if (drbd_bm_total_weight(device)) {
3091 dev_info(DEV, "No resync, but %lu bits in bitmap!\n", 3091 drbd_info(device, "No resync, but %lu bits in bitmap!\n",
3092 drbd_bm_total_weight(device)); 3092 drbd_bm_total_weight(device));
3093 } 3093 }
3094 } 3094 }
@@ -3276,7 +3276,7 @@ struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_device *devi
3276 3276
3277 tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC); 3277 tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
3278 if (IS_ERR(tfm)) { 3278 if (IS_ERR(tfm)) {
3279 dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n", 3279 drbd_err(device, "Can not allocate \"%s\" as %s (reason: %ld)\n",
3280 alg, name, PTR_ERR(tfm)); 3280 alg, name, PTR_ERR(tfm));
3281 return tfm; 3281 return tfm;
3282 } 3282 }
@@ -3346,7 +3346,7 @@ static int receive_SyncParam(struct drbd_connection *connection, struct packet_i
3346 : /* apv >= 95 */ sizeof(struct p_rs_param_95); 3346 : /* apv >= 95 */ sizeof(struct p_rs_param_95);
3347 3347
3348 if (pi->size > exp_max_sz) { 3348 if (pi->size > exp_max_sz) {
3349 dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n", 3349 drbd_err(device, "SyncParam packet too long: received %u, expected <= %u bytes\n",
3350 pi->size, exp_max_sz); 3350 pi->size, exp_max_sz);
3351 return -EIO; 3351 return -EIO;
3352 } 3352 }
@@ -3379,7 +3379,7 @@ static int receive_SyncParam(struct drbd_connection *connection, struct packet_i
3379 if (!new_disk_conf) { 3379 if (!new_disk_conf) {
3380 put_ldev(device); 3380 put_ldev(device);
3381 mutex_unlock(&first_peer_device(device)->connection->conf_update); 3381 mutex_unlock(&first_peer_device(device)->connection->conf_update);
3382 dev_err(DEV, "Allocation of new disk_conf failed\n"); 3382 drbd_err(device, "Allocation of new disk_conf failed\n");
3383 return -ENOMEM; 3383 return -ENOMEM;
3384 } 3384 }
3385 3385
@@ -3392,7 +3392,7 @@ static int receive_SyncParam(struct drbd_connection *connection, struct packet_i
3392 if (apv >= 88) { 3392 if (apv >= 88) {
3393 if (apv == 88) { 3393 if (apv == 88) {
3394 if (data_size > SHARED_SECRET_MAX || data_size == 0) { 3394 if (data_size > SHARED_SECRET_MAX || data_size == 0) {
3395 dev_err(DEV, "verify-alg of wrong size, " 3395 drbd_err(device, "verify-alg of wrong size, "
3396 "peer wants %u, accepting only up to %u byte\n", 3396 "peer wants %u, accepting only up to %u byte\n",
3397 data_size, SHARED_SECRET_MAX); 3397 data_size, SHARED_SECRET_MAX);
3398 err = -EIO; 3398 err = -EIO;
@@ -3418,7 +3418,7 @@ static int receive_SyncParam(struct drbd_connection *connection, struct packet_i
3418 3418
3419 if (strcmp(old_net_conf->verify_alg, p->verify_alg)) { 3419 if (strcmp(old_net_conf->verify_alg, p->verify_alg)) {
3420 if (device->state.conn == C_WF_REPORT_PARAMS) { 3420 if (device->state.conn == C_WF_REPORT_PARAMS) {
3421 dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n", 3421 drbd_err(device, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
3422 old_net_conf->verify_alg, p->verify_alg); 3422 old_net_conf->verify_alg, p->verify_alg);
3423 goto disconnect; 3423 goto disconnect;
3424 } 3424 }
@@ -3432,7 +3432,7 @@ static int receive_SyncParam(struct drbd_connection *connection, struct packet_i
3432 3432
3433 if (apv >= 89 && strcmp(old_net_conf->csums_alg, p->csums_alg)) { 3433 if (apv >= 89 && strcmp(old_net_conf->csums_alg, p->csums_alg)) {
3434 if (device->state.conn == C_WF_REPORT_PARAMS) { 3434 if (device->state.conn == C_WF_REPORT_PARAMS) {
3435 dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n", 3435 drbd_err(device, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
3436 old_net_conf->csums_alg, p->csums_alg); 3436 old_net_conf->csums_alg, p->csums_alg);
3437 goto disconnect; 3437 goto disconnect;
3438 } 3438 }
@@ -3454,7 +3454,7 @@ static int receive_SyncParam(struct drbd_connection *connection, struct packet_i
3454 if (fifo_size != device->rs_plan_s->size) { 3454 if (fifo_size != device->rs_plan_s->size) {
3455 new_plan = fifo_alloc(fifo_size); 3455 new_plan = fifo_alloc(fifo_size);
3456 if (!new_plan) { 3456 if (!new_plan) {
3457 dev_err(DEV, "kmalloc of fifo_buffer failed"); 3457 drbd_err(device, "kmalloc of fifo_buffer failed");
3458 put_ldev(device); 3458 put_ldev(device);
3459 goto disconnect; 3459 goto disconnect;
3460 } 3460 }
@@ -3464,7 +3464,7 @@ static int receive_SyncParam(struct drbd_connection *connection, struct packet_i
3464 if (verify_tfm || csums_tfm) { 3464 if (verify_tfm || csums_tfm) {
3465 new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL); 3465 new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
3466 if (!new_net_conf) { 3466 if (!new_net_conf) {
3467 dev_err(DEV, "Allocation of new net_conf failed\n"); 3467 drbd_err(device, "Allocation of new net_conf failed\n");
3468 goto disconnect; 3468 goto disconnect;
3469 } 3469 }
3470 3470
@@ -3475,14 +3475,14 @@ static int receive_SyncParam(struct drbd_connection *connection, struct packet_i
3475 new_net_conf->verify_alg_len = strlen(p->verify_alg) + 1; 3475 new_net_conf->verify_alg_len = strlen(p->verify_alg) + 1;
3476 crypto_free_hash(first_peer_device(device)->connection->verify_tfm); 3476 crypto_free_hash(first_peer_device(device)->connection->verify_tfm);
3477 first_peer_device(device)->connection->verify_tfm = verify_tfm; 3477 first_peer_device(device)->connection->verify_tfm = verify_tfm;
3478 dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg); 3478 drbd_info(device, "using verify-alg: \"%s\"\n", p->verify_alg);
3479 } 3479 }
3480 if (csums_tfm) { 3480 if (csums_tfm) {
3481 strcpy(new_net_conf->csums_alg, p->csums_alg); 3481 strcpy(new_net_conf->csums_alg, p->csums_alg);
3482 new_net_conf->csums_alg_len = strlen(p->csums_alg) + 1; 3482 new_net_conf->csums_alg_len = strlen(p->csums_alg) + 1;
3483 crypto_free_hash(first_peer_device(device)->connection->csums_tfm); 3483 crypto_free_hash(first_peer_device(device)->connection->csums_tfm);
3484 first_peer_device(device)->connection->csums_tfm = csums_tfm; 3484 first_peer_device(device)->connection->csums_tfm = csums_tfm;
3485 dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg); 3485 drbd_info(device, "using csums-alg: \"%s\"\n", p->csums_alg);
3486 } 3486 }
3487 rcu_assign_pointer(connection->net_conf, new_net_conf); 3487 rcu_assign_pointer(connection->net_conf, new_net_conf);
3488 } 3488 }
@@ -3540,7 +3540,7 @@ static void warn_if_differ_considerably(struct drbd_device *device,
3540 return; 3540 return;
3541 d = (a > b) ? (a - b) : (b - a); 3541 d = (a > b) ? (a - b) : (b - a);
3542 if (d > (a>>3) || d > (b>>3)) 3542 if (d > (a>>3) || d > (b>>3))
3543 dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s, 3543 drbd_warn(device, "Considerable difference in %s: %llus vs. %llus\n", s,
3544 (unsigned long long)a, (unsigned long long)b); 3544 (unsigned long long)a, (unsigned long long)b);
3545} 3545}
3546 3546
@@ -3585,7 +3585,7 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info
3585 drbd_get_capacity(device->this_bdev) && 3585 drbd_get_capacity(device->this_bdev) &&
3586 device->state.disk >= D_OUTDATED && 3586 device->state.disk >= D_OUTDATED &&
3587 device->state.conn < C_CONNECTED) { 3587 device->state.conn < C_CONNECTED) {
3588 dev_err(DEV, "The peer's disk size is too small!\n"); 3588 drbd_err(device, "The peer's disk size is too small!\n");
3589 conn_request_state(first_peer_device(device)->connection, NS(conn, C_DISCONNECTING), CS_HARD); 3589 conn_request_state(first_peer_device(device)->connection, NS(conn, C_DISCONNECTING), CS_HARD);
3590 put_ldev(device); 3590 put_ldev(device);
3591 return -EIO; 3591 return -EIO;
@@ -3596,7 +3596,7 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info
3596 3596
3597 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL); 3597 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
3598 if (!new_disk_conf) { 3598 if (!new_disk_conf) {
3599 dev_err(DEV, "Allocation of new disk_conf failed\n"); 3599 drbd_err(device, "Allocation of new disk_conf failed\n");
3600 put_ldev(device); 3600 put_ldev(device);
3601 return -ENOMEM; 3601 return -ENOMEM;
3602 } 3602 }
@@ -3611,7 +3611,7 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info
3611 synchronize_rcu(); 3611 synchronize_rcu();
3612 kfree(old_disk_conf); 3612 kfree(old_disk_conf);
3613 3613
3614 dev_info(DEV, "Peer sets u_size to %lu sectors\n", 3614 drbd_info(device, "Peer sets u_size to %lu sectors\n",
3615 (unsigned long)my_usize); 3615 (unsigned long)my_usize);
3616 } 3616 }
3617 3617
@@ -3654,7 +3654,7 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info
3654 if (device->state.pdsk >= D_INCONSISTENT && 3654 if (device->state.pdsk >= D_INCONSISTENT &&
3655 device->state.disk >= D_INCONSISTENT) { 3655 device->state.disk >= D_INCONSISTENT) {
3656 if (ddsf & DDSF_NO_RESYNC) 3656 if (ddsf & DDSF_NO_RESYNC)
3657 dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n"); 3657 drbd_info(device, "Resync of new storage suppressed with --assume-clean\n");
3658 else 3658 else
3659 resync_after_online_grow(device); 3659 resync_after_online_grow(device);
3660 } else 3660 } else
@@ -3678,7 +3678,7 @@ static int receive_uuids(struct drbd_connection *connection, struct packet_info
3678 3678
3679 p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO); 3679 p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
3680 if (!p_uuid) { 3680 if (!p_uuid) {
3681 dev_err(DEV, "kmalloc of p_uuid failed\n"); 3681 drbd_err(device, "kmalloc of p_uuid failed\n");
3682 return false; 3682 return false;
3683 } 3683 }
3684 3684
@@ -3692,7 +3692,7 @@ static int receive_uuids(struct drbd_connection *connection, struct packet_info
3692 device->state.disk < D_INCONSISTENT && 3692 device->state.disk < D_INCONSISTENT &&
3693 device->state.role == R_PRIMARY && 3693 device->state.role == R_PRIMARY &&
3694 (device->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) { 3694 (device->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
3695 dev_err(DEV, "Can only connect to data with current UUID=%016llX\n", 3695 drbd_err(device, "Can only connect to data with current UUID=%016llX\n",
3696 (unsigned long long)device->ed_uuid); 3696 (unsigned long long)device->ed_uuid);
3697 conn_request_state(first_peer_device(device)->connection, NS(conn, C_DISCONNECTING), CS_HARD); 3697 conn_request_state(first_peer_device(device)->connection, NS(conn, C_DISCONNECTING), CS_HARD);
3698 return -EIO; 3698 return -EIO;
@@ -3705,7 +3705,7 @@ static int receive_uuids(struct drbd_connection *connection, struct packet_info
3705 device->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && 3705 device->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
3706 (p_uuid[UI_FLAGS] & 8); 3706 (p_uuid[UI_FLAGS] & 8);
3707 if (skip_initial_sync) { 3707 if (skip_initial_sync) {
3708 dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n"); 3708 drbd_info(device, "Accepted new current UUID, preparing to skip initial sync\n");
3709 drbd_bitmap_io(device, &drbd_bmio_clear_n_write, 3709 drbd_bitmap_io(device, &drbd_bmio_clear_n_write,
3710 "clear_n_write from receive_uuids", 3710 "clear_n_write from receive_uuids",
3711 BM_LOCKED_TEST_ALLOWED); 3711 BM_LOCKED_TEST_ALLOWED);
@@ -3843,7 +3843,7 @@ static int receive_state(struct drbd_connection *connection, struct packet_info
3843 real_peer_disk = peer_state.disk; 3843 real_peer_disk = peer_state.disk;
3844 if (peer_state.disk == D_NEGOTIATING) { 3844 if (peer_state.disk == D_NEGOTIATING) {
3845 real_peer_disk = device->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT; 3845 real_peer_disk = device->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
3846 dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk)); 3846 drbd_info(device, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
3847 } 3847 }
3848 3848
3849 spin_lock_irq(&first_peer_device(device)->connection->req_lock); 3849 spin_lock_irq(&first_peer_device(device)->connection->req_lock);
@@ -3939,7 +3939,7 @@ static int receive_state(struct drbd_connection *connection, struct packet_info
3939 if (device->state.disk == D_NEGOTIATING) { 3939 if (device->state.disk == D_NEGOTIATING) {
3940 drbd_force_state(device, NS(disk, D_FAILED)); 3940 drbd_force_state(device, NS(disk, D_FAILED));
3941 } else if (peer_state.disk == D_NEGOTIATING) { 3941 } else if (peer_state.disk == D_NEGOTIATING) {
3942 dev_err(DEV, "Disk attach process on the peer node was aborted.\n"); 3942 drbd_err(device, "Disk attach process on the peer node was aborted.\n");
3943 peer_state.disk = D_DISKLESS; 3943 peer_state.disk = D_DISKLESS;
3944 real_peer_disk = D_DISKLESS; 3944 real_peer_disk = D_DISKLESS;
3945 } else { 3945 } else {
@@ -3967,7 +3967,7 @@ static int receive_state(struct drbd_connection *connection, struct packet_info
3967 /* Do not allow tl_restart(RESEND) for a rebooted peer. We can only allow this 3967 /* Do not allow tl_restart(RESEND) for a rebooted peer. We can only allow this
3968 for temporal network outages! */ 3968 for temporal network outages! */
3969 spin_unlock_irq(&first_peer_device(device)->connection->req_lock); 3969 spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
3970 dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n"); 3970 drbd_err(device, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
3971 tl_clear(first_peer_device(device)->connection); 3971 tl_clear(first_peer_device(device)->connection);
3972 drbd_uuid_new_current(device); 3972 drbd_uuid_new_current(device);
3973 clear_bit(NEW_CUR_UUID, &device->flags); 3973 clear_bit(NEW_CUR_UUID, &device->flags);
@@ -4029,7 +4029,7 @@ static int receive_sync_uuid(struct drbd_connection *connection, struct packet_i
4029 4029
4030 put_ldev(device); 4030 put_ldev(device);
4031 } else 4031 } else
4032 dev_err(DEV, "Ignoring SyncUUID packet!\n"); 4032 drbd_err(device, "Ignoring SyncUUID packet!\n");
4033 4033
4034 return 0; 4034 return 0;
4035} 4035}
@@ -4052,7 +4052,7 @@ receive_bitmap_plain(struct drbd_device *device, unsigned int size,
4052 int err; 4052 int err;
4053 4053
4054 if (want != size) { 4054 if (want != size) {
4055 dev_err(DEV, "%s:want (%u) != size (%u)\n", __func__, want, size); 4055 drbd_err(device, "%s:want (%u) != size (%u)\n", __func__, want, size);
4056 return -EIO; 4056 return -EIO;
4057 } 4057 }
4058 if (want == 0) 4058 if (want == 0)
@@ -4122,14 +4122,14 @@ recv_bm_rle_bits(struct drbd_device *device,
4122 if (toggle) { 4122 if (toggle) {
4123 e = s + rl -1; 4123 e = s + rl -1;
4124 if (e >= c->bm_bits) { 4124 if (e >= c->bm_bits) {
4125 dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e); 4125 drbd_err(device, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
4126 return -EIO; 4126 return -EIO;
4127 } 4127 }
4128 _drbd_bm_set_bits(device, s, e); 4128 _drbd_bm_set_bits(device, s, e);
4129 } 4129 }
4130 4130
4131 if (have < bits) { 4131 if (have < bits) {
4132 dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n", 4132 drbd_err(device, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
4133 have, bits, look_ahead, 4133 have, bits, look_ahead,
4134 (unsigned int)(bs.cur.b - p->code), 4134 (unsigned int)(bs.cur.b - p->code),
4135 (unsigned int)bs.buf_len); 4135 (unsigned int)bs.buf_len);
@@ -4174,7 +4174,7 @@ decode_bitmap_c(struct drbd_device *device,
4174 * but have been dropped as this one turned out to be "best" 4174 * but have been dropped as this one turned out to be "best"
4175 * during all our tests. */ 4175 * during all our tests. */
4176 4176
4177 dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding); 4177 drbd_err(device, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
4178 conn_request_state(first_peer_device(device)->connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD); 4178 conn_request_state(first_peer_device(device)->connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
4179 return -EIO; 4179 return -EIO;
4180} 4180}
@@ -4207,7 +4207,7 @@ void INFO_bm_xfer_stats(struct drbd_device *device,
4207 r = 1000; 4207 r = 1000;
4208 4208
4209 r = 1000 - r; 4209 r = 1000 - r;
4210 dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), " 4210 drbd_info(device, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
4211 "total %u; compression: %u.%u%%\n", 4211 "total %u; compression: %u.%u%%\n",
4212 direction, 4212 direction,
4213 c->bytes[1], c->packets[1], 4213 c->bytes[1], c->packets[1],
@@ -4251,12 +4251,12 @@ static int receive_bitmap(struct drbd_connection *connection, struct packet_info
4251 struct p_compressed_bm *p = pi->data; 4251 struct p_compressed_bm *p = pi->data;
4252 4252
4253 if (pi->size > DRBD_SOCKET_BUFFER_SIZE - drbd_header_size(connection)) { 4253 if (pi->size > DRBD_SOCKET_BUFFER_SIZE - drbd_header_size(connection)) {
4254 dev_err(DEV, "ReportCBitmap packet too large\n"); 4254 drbd_err(device, "ReportCBitmap packet too large\n");
4255 err = -EIO; 4255 err = -EIO;
4256 goto out; 4256 goto out;
4257 } 4257 }
4258 if (pi->size <= sizeof(*p)) { 4258 if (pi->size <= sizeof(*p)) {
4259 dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", pi->size); 4259 drbd_err(device, "ReportCBitmap packet too small (l:%u)\n", pi->size);
4260 err = -EIO; 4260 err = -EIO;
4261 goto out; 4261 goto out;
4262 } 4262 }
@@ -4265,7 +4265,7 @@ static int receive_bitmap(struct drbd_connection *connection, struct packet_info
4265 goto out; 4265 goto out;
4266 err = decode_bitmap_c(device, p, &c, pi->size); 4266 err = decode_bitmap_c(device, p, &c, pi->size);
4267 } else { 4267 } else {
4268 dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", pi->cmd); 4268 drbd_warn(device, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", pi->cmd);
4269 err = -EIO; 4269 err = -EIO;
4270 goto out; 4270 goto out;
4271 } 4271 }
@@ -4297,7 +4297,7 @@ static int receive_bitmap(struct drbd_connection *connection, struct packet_info
4297 } else if (device->state.conn != C_WF_BITMAP_S) { 4297 } else if (device->state.conn != C_WF_BITMAP_S) {
4298 /* admin may have requested C_DISCONNECTING, 4298 /* admin may have requested C_DISCONNECTING,
4299 * other threads may have noticed network errors */ 4299 * other threads may have noticed network errors */
4300 dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n", 4300 drbd_info(device, "unexpected cstate (%s) in receive_bitmap\n",
4301 drbd_conn_str(device->state.conn)); 4301 drbd_conn_str(device->state.conn));
4302 } 4302 }
4303 err = 0; 4303 err = 0;
@@ -4341,7 +4341,7 @@ static int receive_out_of_sync(struct drbd_connection *connection, struct packet
4341 case C_BEHIND: 4341 case C_BEHIND:
4342 break; 4342 break;
4343 default: 4343 default:
4344 dev_err(DEV, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n", 4344 drbd_err(device, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n",
4345 drbd_conn_str(device->state.conn)); 4345 drbd_conn_str(device->state.conn));
4346 } 4346 }
4347 4347
@@ -4561,13 +4561,13 @@ static int drbd_disconnected(struct drbd_device *device)
4561 */ 4561 */
4562 i = drbd_free_peer_reqs(device, &device->net_ee); 4562 i = drbd_free_peer_reqs(device, &device->net_ee);
4563 if (i) 4563 if (i)
4564 dev_info(DEV, "net_ee not empty, killed %u entries\n", i); 4564 drbd_info(device, "net_ee not empty, killed %u entries\n", i);
4565 i = atomic_read(&device->pp_in_use_by_net); 4565 i = atomic_read(&device->pp_in_use_by_net);
4566 if (i) 4566 if (i)
4567 dev_info(DEV, "pp_in_use_by_net = %d, expected 0\n", i); 4567 drbd_info(device, "pp_in_use_by_net = %d, expected 0\n", i);
4568 i = atomic_read(&device->pp_in_use); 4568 i = atomic_read(&device->pp_in_use);
4569 if (i) 4569 if (i)
4570 dev_info(DEV, "pp_in_use = %d, expected 0\n", i); 4570 drbd_info(device, "pp_in_use = %d, expected 0\n", i);
4571 4571
4572 D_ASSERT(list_empty(&device->read_ee)); 4572 D_ASSERT(list_empty(&device->read_ee));
4573 D_ASSERT(list_empty(&device->active_ee)); 4573 D_ASSERT(list_empty(&device->active_ee));
@@ -4910,7 +4910,7 @@ static int got_RqSReply(struct drbd_connection *connection, struct packet_info *
4910 set_bit(CL_ST_CHG_SUCCESS, &device->flags); 4910 set_bit(CL_ST_CHG_SUCCESS, &device->flags);
4911 } else { 4911 } else {
4912 set_bit(CL_ST_CHG_FAIL, &device->flags); 4912 set_bit(CL_ST_CHG_FAIL, &device->flags);
4913 dev_err(DEV, "Requested state change failed by peer: %s (%d)\n", 4913 drbd_err(device, "Requested state change failed by peer: %s (%d)\n",
4914 drbd_set_st_err_str(retcode), retcode); 4914 drbd_set_st_err_str(retcode), retcode);
4915 } 4915 }
4916 wake_up(&device->state_wait); 4916 wake_up(&device->state_wait);
@@ -5074,7 +5074,7 @@ static int got_NegDReply(struct drbd_connection *connection, struct packet_info
5074 5074
5075 update_peer_seq(device, be32_to_cpu(p->seq_num)); 5075 update_peer_seq(device, be32_to_cpu(p->seq_num));
5076 5076
5077 dev_err(DEV, "Got NegDReply; Sector %llus, len %u.\n", 5077 drbd_err(device, "Got NegDReply; Sector %llus, len %u.\n",
5078 (unsigned long long)sector, be32_to_cpu(p->blksize)); 5078 (unsigned long long)sector, be32_to_cpu(p->blksize));
5079 5079
5080 return validate_req_change_req_state(device, p->block_id, sector, 5080 return validate_req_change_req_state(device, p->block_id, sector,
@@ -5181,7 +5181,7 @@ static int got_OVResult(struct drbd_connection *connection, struct packet_info *
5181 w->device = device; 5181 w->device = device;
5182 drbd_queue_work(&first_peer_device(device)->connection->sender_work, w); 5182 drbd_queue_work(&first_peer_device(device)->connection->sender_work, w);
5183 } else { 5183 } else {
5184 dev_err(DEV, "kmalloc(w) failed."); 5184 drbd_err(device, "kmalloc(w) failed.");
5185 ov_out_of_sync_print(device); 5185 ov_out_of_sync_print(device);
5186 drbd_resync_finished(device); 5186 drbd_resync_finished(device);
5187 } 5187 }
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index dd1033472763..17fade0118ff 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -102,7 +102,7 @@ void drbd_req_destroy(struct kref *kref)
102 atomic_read(&req->completion_ref) || 102 atomic_read(&req->completion_ref) ||
103 (s & RQ_LOCAL_PENDING) || 103 (s & RQ_LOCAL_PENDING) ||
104 ((s & RQ_NET_MASK) && !(s & RQ_NET_DONE))) { 104 ((s & RQ_NET_MASK) && !(s & RQ_NET_DONE))) {
105 dev_err(DEV, "drbd_req_destroy: Logic BUG rq_state = 0x%x, completion_ref = %d\n", 105 drbd_err(device, "drbd_req_destroy: Logic BUG rq_state = 0x%x, completion_ref = %d\n",
106 s, atomic_read(&req->completion_ref)); 106 s, atomic_read(&req->completion_ref));
107 return; 107 return;
108 } 108 }
@@ -153,7 +153,7 @@ void drbd_req_destroy(struct kref *kref)
153 drbd_al_complete_io(device, &req->i); 153 drbd_al_complete_io(device, &req->i);
154 put_ldev(device); 154 put_ldev(device);
155 } else if (__ratelimit(&drbd_ratelimit_state)) { 155 } else if (__ratelimit(&drbd_ratelimit_state)) {
156 dev_warn(DEV, "Should have called drbd_al_complete_io(, %llu, %u), " 156 drbd_warn(device, "Should have called drbd_al_complete_io(, %llu, %u), "
157 "but my Disk seems to have failed :(\n", 157 "but my Disk seems to have failed :(\n",
158 (unsigned long long) req->i.sector, req->i.size); 158 (unsigned long long) req->i.sector, req->i.size);
159 } 159 }
@@ -227,12 +227,12 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
227 if ((s & RQ_LOCAL_PENDING && !(s & RQ_LOCAL_ABORTED)) || 227 if ((s & RQ_LOCAL_PENDING && !(s & RQ_LOCAL_ABORTED)) ||
228 (s & RQ_NET_QUEUED) || (s & RQ_NET_PENDING) || 228 (s & RQ_NET_QUEUED) || (s & RQ_NET_PENDING) ||
229 (s & RQ_COMPLETION_SUSP)) { 229 (s & RQ_COMPLETION_SUSP)) {
230 dev_err(DEV, "drbd_req_complete: Logic BUG rq_state = 0x%x\n", s); 230 drbd_err(device, "drbd_req_complete: Logic BUG rq_state = 0x%x\n", s);
231 return; 231 return;
232 } 232 }
233 233
234 if (!req->master_bio) { 234 if (!req->master_bio) {
235 dev_err(DEV, "drbd_req_complete: Logic BUG, master_bio == NULL!\n"); 235 drbd_err(device, "drbd_req_complete: Logic BUG, master_bio == NULL!\n");
236 return; 236 return;
237 } 237 }
238 238
@@ -410,7 +410,7 @@ static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
410 int at_least = k_put + !!c_put; 410 int at_least = k_put + !!c_put;
411 int refcount = atomic_read(&req->kref.refcount); 411 int refcount = atomic_read(&req->kref.refcount);
412 if (refcount < at_least) 412 if (refcount < at_least)
413 dev_err(DEV, 413 drbd_err(device,
414 "mod_rq_state: Logic BUG: %x -> %x: refcount = %d, should be >= %d\n", 414 "mod_rq_state: Logic BUG: %x -> %x: refcount = %d, should be >= %d\n",
415 s, req->rq_state, refcount, at_least); 415 s, req->rq_state, refcount, at_least);
416 } 416 }
@@ -432,7 +432,7 @@ static void drbd_report_io_error(struct drbd_device *device, struct drbd_request
432 if (!__ratelimit(&drbd_ratelimit_state)) 432 if (!__ratelimit(&drbd_ratelimit_state))
433 return; 433 return;
434 434
435 dev_warn(DEV, "local %s IO error sector %llu+%u on %s\n", 435 drbd_warn(device, "local %s IO error sector %llu+%u on %s\n",
436 (req->rq_state & RQ_WRITE) ? "WRITE" : "READ", 436 (req->rq_state & RQ_WRITE) ? "WRITE" : "READ",
437 (unsigned long long)req->i.sector, 437 (unsigned long long)req->i.sector,
438 req->i.size >> 9, 438 req->i.size >> 9,
@@ -463,7 +463,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
463 463
464 switch (what) { 464 switch (what) {
465 default: 465 default:
466 dev_err(DEV, "LOGIC BUG in %s:%u\n", __FILE__ , __LINE__); 466 drbd_err(device, "LOGIC BUG in %s:%u\n", __FILE__ , __LINE__);
467 break; 467 break;
468 468
469 /* does not happen... 469 /* does not happen...
@@ -741,7 +741,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
741 /* barrier came in before all requests were acked. 741 /* barrier came in before all requests were acked.
742 * this is bad, because if the connection is lost now, 742 * this is bad, because if the connection is lost now,
743 * we won't be able to clean them up... */ 743 * we won't be able to clean them up... */
744 dev_err(DEV, "FIXME (BARRIER_ACKED but pending)\n"); 744 drbd_err(device, "FIXME (BARRIER_ACKED but pending)\n");
745 } 745 }
746 /* Allowed to complete requests, even while suspended. 746 /* Allowed to complete requests, even while suspended.
747 * As this is called for all requests within a matching epoch, 747 * As this is called for all requests within a matching epoch,
@@ -883,12 +883,12 @@ static void maybe_pull_ahead(struct drbd_device *device)
883 883
884 if (nc->cong_fill && 884 if (nc->cong_fill &&
885 atomic_read(&device->ap_in_flight) >= nc->cong_fill) { 885 atomic_read(&device->ap_in_flight) >= nc->cong_fill) {
886 dev_info(DEV, "Congestion-fill threshold reached\n"); 886 drbd_info(device, "Congestion-fill threshold reached\n");
887 congested = true; 887 congested = true;
888 } 888 }
889 889
890 if (device->act_log->used >= nc->cong_extents) { 890 if (device->act_log->used >= nc->cong_extents) {
891 dev_info(DEV, "Congestion-extents threshold reached\n"); 891 drbd_info(device, "Congestion-extents threshold reached\n");
892 congested = true; 892 congested = true;
893 } 893 }
894 894
@@ -1046,7 +1046,7 @@ drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long
1046 dec_ap_bio(device); 1046 dec_ap_bio(device);
1047 /* only pass the error to the upper layers. 1047 /* only pass the error to the upper layers.
1048 * if user cannot handle io errors, that's not our business. */ 1048 * if user cannot handle io errors, that's not our business. */
1049 dev_err(DEV, "could not kmalloc() req\n"); 1049 drbd_err(device, "could not kmalloc() req\n");
1050 bio_endio(bio, -ENOMEM); 1050 bio_endio(bio, -ENOMEM);
1051 return ERR_PTR(-ENOMEM); 1051 return ERR_PTR(-ENOMEM);
1052 } 1052 }
@@ -1146,7 +1146,7 @@ static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request
1146 } else if (no_remote) { 1146 } else if (no_remote) {
1147nodata: 1147nodata:
1148 if (__ratelimit(&drbd_ratelimit_state)) 1148 if (__ratelimit(&drbd_ratelimit_state))
1149 dev_err(DEV, "IO ERROR: neither local nor remote data, sector %llu+%u\n", 1149 drbd_err(device, "IO ERROR: neither local nor remote data, sector %llu+%u\n",
1150 (unsigned long long)req->i.sector, req->i.size >> 9); 1150 (unsigned long long)req->i.sector, req->i.size >> 9);
1151 /* A write may have been queued for send_oos, however. 1151 /* A write may have been queued for send_oos, however.
1152 * So we can not simply free it, we must go through drbd_req_put_completion_ref() */ 1152 * So we can not simply free it, we must go through drbd_req_put_completion_ref() */
@@ -1387,13 +1387,13 @@ void request_timer_fn(unsigned long data)
1387 if (ent && req->rq_state & RQ_NET_PENDING && 1387 if (ent && req->rq_state & RQ_NET_PENDING &&
1388 time_after(now, req->start_time + ent) && 1388 time_after(now, req->start_time + ent) &&
1389 !time_in_range(now, connection->last_reconnect_jif, connection->last_reconnect_jif + ent)) { 1389 !time_in_range(now, connection->last_reconnect_jif, connection->last_reconnect_jif + ent)) {
1390 dev_warn(DEV, "Remote failed to finish a request within ko-count * timeout\n"); 1390 drbd_warn(device, "Remote failed to finish a request within ko-count * timeout\n");
1391 _drbd_set_state(_NS(device, conn, C_TIMEOUT), CS_VERBOSE | CS_HARD, NULL); 1391 _drbd_set_state(_NS(device, conn, C_TIMEOUT), CS_VERBOSE | CS_HARD, NULL);
1392 } 1392 }
1393 if (dt && req->rq_state & RQ_LOCAL_PENDING && req->w.device == device && 1393 if (dt && req->rq_state & RQ_LOCAL_PENDING && req->w.device == device &&
1394 time_after(now, req->start_time + dt) && 1394 time_after(now, req->start_time + dt) &&
1395 !time_in_range(now, device->last_reattach_jif, device->last_reattach_jif + dt)) { 1395 !time_in_range(now, device->last_reattach_jif, device->last_reattach_jif + dt)) {
1396 dev_warn(DEV, "Local backing device failed to meet the disk-timeout\n"); 1396 drbd_warn(device, "Local backing device failed to meet the disk-timeout\n");
1397 __drbd_chk_io_error(device, DRBD_FORCE_DETACH); 1397 __drbd_chk_io_error(device, DRBD_FORCE_DETACH);
1398 } 1398 }
1399 nt = (time_after(now, req->start_time + et) ? now : req->start_time) + et; 1399 nt = (time_after(now, req->start_time + et) ? now : req->start_time) + et;
diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c
index 6435797903b1..df917c2067ca 100644
--- a/drivers/block/drbd/drbd_state.c
+++ b/drivers/block/drbd/drbd_state.c
@@ -411,7 +411,7 @@ _drbd_request_state(struct drbd_device *device, union drbd_state mask,
411 411
412static void print_st(struct drbd_device *device, char *name, union drbd_state ns) 412static void print_st(struct drbd_device *device, char *name, union drbd_state ns)
413{ 413{
414 dev_err(DEV, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c%c%c }\n", 414 drbd_err(device, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c%c%c }\n",
415 name, 415 name,
416 drbd_conn_str(ns.conn), 416 drbd_conn_str(ns.conn),
417 drbd_role_str(ns.role), 417 drbd_role_str(ns.role),
@@ -432,7 +432,7 @@ void print_st_err(struct drbd_device *device, union drbd_state os,
432{ 432{
433 if (err == SS_IN_TRANSIENT_STATE) 433 if (err == SS_IN_TRANSIENT_STATE)
434 return; 434 return;
435 dev_err(DEV, "State change failed: %s\n", drbd_set_st_err_str(err)); 435 drbd_err(device, "State change failed: %s\n", drbd_set_st_err_str(err));
436 print_st(device, " state", os); 436 print_st(device, " state", os);
437 print_st(device, "wanted", ns); 437 print_st(device, "wanted", ns);
438} 438}
@@ -490,7 +490,7 @@ static void drbd_pr_state_change(struct drbd_device *device, union drbd_state os
490 ns.user_isp); 490 ns.user_isp);
491 491
492 if (pbp != pb) 492 if (pbp != pb)
493 dev_info(DEV, "%s\n", pb); 493 drbd_info(device, "%s\n", pb);
494} 494}
495 495
496static void conn_pr_state_change(struct drbd_connection *connection, union drbd_state os, union drbd_state ns, 496static void conn_pr_state_change(struct drbd_connection *connection, union drbd_state os, union drbd_state ns,
@@ -726,7 +726,7 @@ static void print_sanitize_warnings(struct drbd_device *device, enum sanitize_st
726 }; 726 };
727 727
728 if (warn != NO_WARNING) 728 if (warn != NO_WARNING)
729 dev_warn(DEV, "%s\n", msg_table[warn]); 729 drbd_warn(device, "%s\n", msg_table[warn]);
730} 730}
731 731
732/** 732/**
@@ -906,7 +906,7 @@ static union drbd_state sanitize_state(struct drbd_device *device, union drbd_st
906void drbd_resume_al(struct drbd_device *device) 906void drbd_resume_al(struct drbd_device *device)
907{ 907{
908 if (test_and_clear_bit(AL_SUSPENDED, &device->flags)) 908 if (test_and_clear_bit(AL_SUSPENDED, &device->flags))
909 dev_info(DEV, "Resumed AL updates\n"); 909 drbd_info(device, "Resumed AL updates\n");
910} 910}
911 911
912/* helper for __drbd_set_state */ 912/* helper for __drbd_set_state */
@@ -1035,13 +1035,13 @@ __drbd_set_state(struct drbd_device *device, union drbd_state ns,
1035 device->ov_start_sector = 1035 device->ov_start_sector =
1036 BM_BIT_TO_SECT(drbd_bm_bits(device) - device->ov_left); 1036 BM_BIT_TO_SECT(drbd_bm_bits(device) - device->ov_left);
1037 if (device->ov_left) 1037 if (device->ov_left)
1038 dev_info(DEV, "Online Verify reached sector %llu\n", 1038 drbd_info(device, "Online Verify reached sector %llu\n",
1039 (unsigned long long)device->ov_start_sector); 1039 (unsigned long long)device->ov_start_sector);
1040 } 1040 }
1041 1041
1042 if ((os.conn == C_PAUSED_SYNC_T || os.conn == C_PAUSED_SYNC_S) && 1042 if ((os.conn == C_PAUSED_SYNC_T || os.conn == C_PAUSED_SYNC_S) &&
1043 (ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)) { 1043 (ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)) {
1044 dev_info(DEV, "Syncer continues.\n"); 1044 drbd_info(device, "Syncer continues.\n");
1045 device->rs_paused += (long)jiffies 1045 device->rs_paused += (long)jiffies
1046 -(long)device->rs_mark_time[device->rs_last_mark]; 1046 -(long)device->rs_mark_time[device->rs_last_mark];
1047 if (ns.conn == C_SYNC_TARGET) 1047 if (ns.conn == C_SYNC_TARGET)
@@ -1050,7 +1050,7 @@ __drbd_set_state(struct drbd_device *device, union drbd_state ns,
1050 1050
1051 if ((os.conn == C_SYNC_TARGET || os.conn == C_SYNC_SOURCE) && 1051 if ((os.conn == C_SYNC_TARGET || os.conn == C_SYNC_SOURCE) &&
1052 (ns.conn == C_PAUSED_SYNC_T || ns.conn == C_PAUSED_SYNC_S)) { 1052 (ns.conn == C_PAUSED_SYNC_T || ns.conn == C_PAUSED_SYNC_S)) {
1053 dev_info(DEV, "Resync suspended\n"); 1053 drbd_info(device, "Resync suspended\n");
1054 device->rs_mark_time[device->rs_last_mark] = jiffies; 1054 device->rs_mark_time[device->rs_last_mark] = jiffies;
1055 } 1055 }
1056 1056
@@ -1074,7 +1074,7 @@ __drbd_set_state(struct drbd_device *device, union drbd_state ns,
1074 drbd_rs_controller_reset(device); 1074 drbd_rs_controller_reset(device);
1075 1075
1076 if (ns.conn == C_VERIFY_S) { 1076 if (ns.conn == C_VERIFY_S) {
1077 dev_info(DEV, "Starting Online Verify from sector %llu\n", 1077 drbd_info(device, "Starting Online Verify from sector %llu\n",
1078 (unsigned long long)device->ov_position); 1078 (unsigned long long)device->ov_position);
1079 mod_timer(&device->resync_timer, jiffies); 1079 mod_timer(&device->resync_timer, jiffies);
1080 } 1080 }
@@ -1149,7 +1149,7 @@ __drbd_set_state(struct drbd_device *device, union drbd_state ns,
1149 ascw->done = done; 1149 ascw->done = done;
1150 drbd_queue_work(&first_peer_device(device)->connection->sender_work, &ascw->w); 1150 drbd_queue_work(&first_peer_device(device)->connection->sender_work, &ascw->w);
1151 } else { 1151 } else {
1152 dev_err(DEV, "Could not kmalloc an ascw\n"); 1152 drbd_err(device, "Could not kmalloc an ascw\n");
1153 } 1153 }
1154 1154
1155 return rv; 1155 return rv;
@@ -1174,7 +1174,7 @@ static int w_after_state_ch(struct drbd_work *w, int unused)
1174static void abw_start_sync(struct drbd_device *device, int rv) 1174static void abw_start_sync(struct drbd_device *device, int rv)
1175{ 1175{
1176 if (rv) { 1176 if (rv) {
1177 dev_err(DEV, "Writing the bitmap failed not starting resync.\n"); 1177 drbd_err(device, "Writing the bitmap failed not starting resync.\n");
1178 _drbd_request_state(device, NS(conn, C_CONNECTED), CS_VERBOSE); 1178 _drbd_request_state(device, NS(conn, C_CONNECTED), CS_VERBOSE);
1179 return; 1179 return;
1180 } 1180 }
@@ -1441,7 +1441,7 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
1441 * there is only one way out: to D_DISKLESS, 1441 * there is only one way out: to D_DISKLESS,
1442 * and that may only happen after our put_ldev below. */ 1442 * and that may only happen after our put_ldev below. */
1443 if (device->state.disk != D_FAILED) 1443 if (device->state.disk != D_FAILED)
1444 dev_err(DEV, 1444 drbd_err(device,
1445 "ASSERT FAILED: disk is %s during detach\n", 1445 "ASSERT FAILED: disk is %s during detach\n",
1446 drbd_disk_str(device->state.disk)); 1446 drbd_disk_str(device->state.disk));
1447 1447
@@ -1465,9 +1465,9 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
1465 /* We must still be diskless, 1465 /* We must still be diskless,
1466 * re-attach has to be serialized with this! */ 1466 * re-attach has to be serialized with this! */
1467 if (device->state.disk != D_DISKLESS) 1467 if (device->state.disk != D_DISKLESS)
1468 dev_err(DEV, 1468 drbd_err(device,
1469 "ASSERT FAILED: disk is %s while going diskless\n", 1469 "ASSERT FAILED: disk is %s while going diskless\n",
1470 drbd_disk_str(device->state.disk)); 1470 drbd_disk_str(device->state.disk));
1471 1471
1472 if (ns.conn >= C_CONNECTED) 1472 if (ns.conn >= C_CONNECTED)
1473 drbd_send_state(device, ns); 1473 drbd_send_state(device, ns);
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 7aa10568ac59..040e8c7ab1db 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -176,12 +176,12 @@ void drbd_peer_request_endio(struct bio *bio, int error)
176 int is_write = bio_data_dir(bio) == WRITE; 176 int is_write = bio_data_dir(bio) == WRITE;
177 177
178 if (error && __ratelimit(&drbd_ratelimit_state)) 178 if (error && __ratelimit(&drbd_ratelimit_state))
179 dev_warn(DEV, "%s: error=%d s=%llus\n", 179 drbd_warn(device, "%s: error=%d s=%llus\n",
180 is_write ? "write" : "read", error, 180 is_write ? "write" : "read", error,
181 (unsigned long long)peer_req->i.sector); 181 (unsigned long long)peer_req->i.sector);
182 if (!error && !uptodate) { 182 if (!error && !uptodate) {
183 if (__ratelimit(&drbd_ratelimit_state)) 183 if (__ratelimit(&drbd_ratelimit_state))
184 dev_warn(DEV, "%s: setting error to -EIO s=%llus\n", 184 drbd_warn(device, "%s: setting error to -EIO s=%llus\n",
185 is_write ? "write" : "read", 185 is_write ? "write" : "read",
186 (unsigned long long)peer_req->i.sector); 186 (unsigned long long)peer_req->i.sector);
187 /* strange behavior of some lower level drivers... 187 /* strange behavior of some lower level drivers...
@@ -214,7 +214,7 @@ void drbd_request_endio(struct bio *bio, int error)
214 int uptodate = bio_flagged(bio, BIO_UPTODATE); 214 int uptodate = bio_flagged(bio, BIO_UPTODATE);
215 215
216 if (!error && !uptodate) { 216 if (!error && !uptodate) {
217 dev_warn(DEV, "p %s: setting error to -EIO\n", 217 drbd_warn(device, "p %s: setting error to -EIO\n",
218 bio_data_dir(bio) == WRITE ? "write" : "read"); 218 bio_data_dir(bio) == WRITE ? "write" : "read");
219 /* strange behavior of some lower level drivers... 219 /* strange behavior of some lower level drivers...
220 * fail the request by clearing the uptodate flag, 220 * fail the request by clearing the uptodate flag,
@@ -253,7 +253,7 @@ void drbd_request_endio(struct bio *bio, int error)
253 */ 253 */
254 if (unlikely(req->rq_state & RQ_LOCAL_ABORTED)) { 254 if (unlikely(req->rq_state & RQ_LOCAL_ABORTED)) {
255 if (__ratelimit(&drbd_ratelimit_state)) 255 if (__ratelimit(&drbd_ratelimit_state))
256 dev_emerg(DEV, "delayed completion of aborted local request; disk-timeout may be too aggressive\n"); 256 drbd_emerg(device, "delayed completion of aborted local request; disk-timeout may be too aggressive\n");
257 257
258 if (!error) 258 if (!error)
259 panic("possible random memory corruption caused by delayed completion of aborted local request\n"); 259 panic("possible random memory corruption caused by delayed completion of aborted local request\n");
@@ -364,7 +364,7 @@ static int w_e_send_csum(struct drbd_work *w, int cancel)
364 P_CSUM_RS_REQUEST); 364 P_CSUM_RS_REQUEST);
365 kfree(digest); 365 kfree(digest);
366 } else { 366 } else {
367 dev_err(DEV, "kmalloc() of digest failed.\n"); 367 drbd_err(device, "kmalloc() of digest failed.\n");
368 err = -ENOMEM; 368 err = -ENOMEM;
369 } 369 }
370 370
@@ -373,7 +373,7 @@ out:
373 drbd_free_peer_req(device, peer_req); 373 drbd_free_peer_req(device, peer_req);
374 374
375 if (unlikely(err)) 375 if (unlikely(err))
376 dev_err(DEV, "drbd_send_drequest(..., csum) failed\n"); 376 drbd_err(device, "drbd_send_drequest(..., csum) failed\n");
377 return err; 377 return err;
378} 378}
379 379
@@ -534,7 +534,7 @@ static int drbd_rs_controller(struct drbd_device *device)
534 req_sect = max_sect; 534 req_sect = max_sect;
535 535
536 /* 536 /*
537 dev_warn(DEV, "si=%u if=%d wa=%u co=%d st=%d cps=%d pl=%d cc=%d rs=%d\n", 537 drbd_warn(device, "si=%u if=%d wa=%u co=%d st=%d cps=%d pl=%d cc=%d rs=%d\n",
538 sect_in, device->rs_in_flight, want, correction, 538 sect_in, device->rs_in_flight, want, correction,
539 steps, cps, device->rs_planed, curr_corr, req_sect); 539 steps, cps, device->rs_planed, curr_corr, req_sect);
540 */ 540 */
@@ -586,7 +586,7 @@ int w_make_resync_request(struct drbd_work *w, int cancel)
586 get_ldev_if_state(device,D_FAILED) would be sufficient, but 586 get_ldev_if_state(device,D_FAILED) would be sufficient, but
587 to continue resync with a broken disk makes no sense at 587 to continue resync with a broken disk makes no sense at
588 all */ 588 all */
589 dev_err(DEV, "Disk broke down during resync!\n"); 589 drbd_err(device, "Disk broke down during resync!\n");
590 return 0; 590 return 0;
591 } 591 }
592 592
@@ -699,7 +699,7 @@ next_sector:
699 err = drbd_send_drequest(device, P_RS_DATA_REQUEST, 699 err = drbd_send_drequest(device, P_RS_DATA_REQUEST,
700 sector, size, ID_SYNCER); 700 sector, size, ID_SYNCER);
701 if (err) { 701 if (err) {
702 dev_err(DEV, "drbd_send_drequest() failed, aborting...\n"); 702 drbd_err(device, "drbd_send_drequest() failed, aborting...\n");
703 dec_rs_pending(device); 703 dec_rs_pending(device);
704 put_ldev(device); 704 put_ldev(device);
705 return err; 705 return err;
@@ -835,7 +835,7 @@ int drbd_resync_finished(struct drbd_device *device)
835 drbd_queue_work(&first_peer_device(device)->connection->sender_work, w); 835 drbd_queue_work(&first_peer_device(device)->connection->sender_work, w);
836 return 1; 836 return 1;
837 } 837 }
838 dev_err(DEV, "Warn failed to drbd_rs_del_all() and to kmalloc(w).\n"); 838 drbd_err(device, "Warn failed to drbd_rs_del_all() and to kmalloc(w).\n");
839 } 839 }
840 840
841 dt = (jiffies - device->rs_start - device->rs_paused) / HZ; 841 dt = (jiffies - device->rs_start - device->rs_paused) / HZ;
@@ -868,7 +868,7 @@ int drbd_resync_finished(struct drbd_device *device)
868 ns = os; 868 ns = os;
869 ns.conn = C_CONNECTED; 869 ns.conn = C_CONNECTED;
870 870
871 dev_info(DEV, "%s done (total %lu sec; paused %lu sec; %lu K/sec)\n", 871 drbd_info(device, "%s done (total %lu sec; paused %lu sec; %lu K/sec)\n",
872 verify_done ? "Online verify" : "Resync", 872 verify_done ? "Online verify" : "Resync",
873 dt + device->rs_paused, device->rs_paused, dbdt); 873 dt + device->rs_paused, device->rs_paused, dbdt);
874 874
@@ -876,7 +876,7 @@ int drbd_resync_finished(struct drbd_device *device)
876 876
877 if (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) { 877 if (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) {
878 if (n_oos) { 878 if (n_oos) {
879 dev_alert(DEV, "Online verify found %lu %dk block out of sync!\n", 879 drbd_alert(device, "Online verify found %lu %dk block out of sync!\n",
880 n_oos, Bit2KB(1)); 880 n_oos, Bit2KB(1));
881 khelper_cmd = "out-of-sync"; 881 khelper_cmd = "out-of-sync";
882 } 882 }
@@ -892,7 +892,7 @@ int drbd_resync_finished(struct drbd_device *device)
892 const int ratio = 892 const int ratio =
893 (t == 0) ? 0 : 893 (t == 0) ? 0 :
894 (t < 100000) ? ((s*100)/t) : (s/(t/100)); 894 (t < 100000) ? ((s*100)/t) : (s/(t/100));
895 dev_info(DEV, "%u %% had equal checksums, eliminated: %luK; " 895 drbd_info(device, "%u %% had equal checksums, eliminated: %luK; "
896 "transferred %luK total %luK\n", 896 "transferred %luK total %luK\n",
897 ratio, 897 ratio,
898 Bit2KB(device->rs_same_csum), 898 Bit2KB(device->rs_same_csum),
@@ -902,7 +902,7 @@ int drbd_resync_finished(struct drbd_device *device)
902 } 902 }
903 903
904 if (device->rs_failed) { 904 if (device->rs_failed) {
905 dev_info(DEV, " %lu failed blocks\n", device->rs_failed); 905 drbd_info(device, " %lu failed blocks\n", device->rs_failed);
906 906
907 if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T) { 907 if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T) {
908 ns.disk = D_INCONSISTENT; 908 ns.disk = D_INCONSISTENT;
@@ -923,7 +923,7 @@ int drbd_resync_finished(struct drbd_device *device)
923 drbd_uuid_set(device, UI_BITMAP, device->ldev->md.uuid[UI_CURRENT]); 923 drbd_uuid_set(device, UI_BITMAP, device->ldev->md.uuid[UI_CURRENT]);
924 _drbd_uuid_set(device, UI_CURRENT, device->p_uuid[UI_CURRENT]); 924 _drbd_uuid_set(device, UI_CURRENT, device->p_uuid[UI_CURRENT]);
925 } else { 925 } else {
926 dev_err(DEV, "device->p_uuid is NULL! BUG\n"); 926 drbd_err(device, "device->p_uuid is NULL! BUG\n");
927 } 927 }
928 } 928 }
929 929
@@ -1001,7 +1001,7 @@ int w_e_end_data_req(struct drbd_work *w, int cancel)
1001 err = drbd_send_block(device, P_DATA_REPLY, peer_req); 1001 err = drbd_send_block(device, P_DATA_REPLY, peer_req);
1002 } else { 1002 } else {
1003 if (__ratelimit(&drbd_ratelimit_state)) 1003 if (__ratelimit(&drbd_ratelimit_state))
1004 dev_err(DEV, "Sending NegDReply. sector=%llus.\n", 1004 drbd_err(device, "Sending NegDReply. sector=%llus.\n",
1005 (unsigned long long)peer_req->i.sector); 1005 (unsigned long long)peer_req->i.sector);
1006 1006
1007 err = drbd_send_ack(device, P_NEG_DREPLY, peer_req); 1007 err = drbd_send_ack(device, P_NEG_DREPLY, peer_req);
@@ -1012,7 +1012,7 @@ int w_e_end_data_req(struct drbd_work *w, int cancel)
1012 move_to_net_ee_or_free(device, peer_req); 1012 move_to_net_ee_or_free(device, peer_req);
1013 1013
1014 if (unlikely(err)) 1014 if (unlikely(err))
1015 dev_err(DEV, "drbd_send_block() failed\n"); 1015 drbd_err(device, "drbd_send_block() failed\n");
1016 return err; 1016 return err;
1017} 1017}
1018 1018
@@ -1047,13 +1047,13 @@ int w_e_end_rsdata_req(struct drbd_work *w, int cancel)
1047 err = drbd_send_block(device, P_RS_DATA_REPLY, peer_req); 1047 err = drbd_send_block(device, P_RS_DATA_REPLY, peer_req);
1048 } else { 1048 } else {
1049 if (__ratelimit(&drbd_ratelimit_state)) 1049 if (__ratelimit(&drbd_ratelimit_state))
1050 dev_err(DEV, "Not sending RSDataReply, " 1050 drbd_err(device, "Not sending RSDataReply, "
1051 "partner DISKLESS!\n"); 1051 "partner DISKLESS!\n");
1052 err = 0; 1052 err = 0;
1053 } 1053 }
1054 } else { 1054 } else {
1055 if (__ratelimit(&drbd_ratelimit_state)) 1055 if (__ratelimit(&drbd_ratelimit_state))
1056 dev_err(DEV, "Sending NegRSDReply. sector %llus.\n", 1056 drbd_err(device, "Sending NegRSDReply. sector %llus.\n",
1057 (unsigned long long)peer_req->i.sector); 1057 (unsigned long long)peer_req->i.sector);
1058 1058
1059 err = drbd_send_ack(device, P_NEG_RS_DREPLY, peer_req); 1059 err = drbd_send_ack(device, P_NEG_RS_DREPLY, peer_req);
@@ -1067,7 +1067,7 @@ int w_e_end_rsdata_req(struct drbd_work *w, int cancel)
1067 move_to_net_ee_or_free(device, peer_req); 1067 move_to_net_ee_or_free(device, peer_req);
1068 1068
1069 if (unlikely(err)) 1069 if (unlikely(err))
1070 dev_err(DEV, "drbd_send_block() failed\n"); 1070 drbd_err(device, "drbd_send_block() failed\n");
1071 return err; 1071 return err;
1072} 1072}
1073 1073
@@ -1123,14 +1123,14 @@ int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
1123 } else { 1123 } else {
1124 err = drbd_send_ack(device, P_NEG_RS_DREPLY, peer_req); 1124 err = drbd_send_ack(device, P_NEG_RS_DREPLY, peer_req);
1125 if (__ratelimit(&drbd_ratelimit_state)) 1125 if (__ratelimit(&drbd_ratelimit_state))
1126 dev_err(DEV, "Sending NegDReply. I guess it gets messy.\n"); 1126 drbd_err(device, "Sending NegDReply. I guess it gets messy.\n");
1127 } 1127 }
1128 1128
1129 dec_unacked(device); 1129 dec_unacked(device);
1130 move_to_net_ee_or_free(device, peer_req); 1130 move_to_net_ee_or_free(device, peer_req);
1131 1131
1132 if (unlikely(err)) 1132 if (unlikely(err))
1133 dev_err(DEV, "drbd_send_block/ack() failed\n"); 1133 drbd_err(device, "drbd_send_block/ack() failed\n");
1134 return err; 1134 return err;
1135} 1135}
1136 1136
@@ -1590,7 +1590,7 @@ int w_start_resync(struct drbd_work *w, int cancel)
1590 struct drbd_device *device = w->device; 1590 struct drbd_device *device = w->device;
1591 1591
1592 if (atomic_read(&device->unacked_cnt) || atomic_read(&device->rs_pending_cnt)) { 1592 if (atomic_read(&device->unacked_cnt) || atomic_read(&device->rs_pending_cnt)) {
1593 dev_warn(DEV, "w_start_resync later...\n"); 1593 drbd_warn(device, "w_start_resync later...\n");
1594 device->start_resync_timer.expires = jiffies + HZ/10; 1594 device->start_resync_timer.expires = jiffies + HZ/10;
1595 add_timer(&device->start_resync_timer); 1595 add_timer(&device->start_resync_timer);
1596 return 0; 1596 return 0;
@@ -1615,7 +1615,7 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side)
1615 int r; 1615 int r;
1616 1616
1617 if (device->state.conn >= C_SYNC_SOURCE && device->state.conn < C_AHEAD) { 1617 if (device->state.conn >= C_SYNC_SOURCE && device->state.conn < C_AHEAD) {
1618 dev_err(DEV, "Resync already running!\n"); 1618 drbd_err(device, "Resync already running!\n");
1619 return; 1619 return;
1620 } 1620 }
1621 1621
@@ -1627,7 +1627,7 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side)
1627 r = drbd_khelper(device, "before-resync-target"); 1627 r = drbd_khelper(device, "before-resync-target");
1628 r = (r >> 8) & 0xff; 1628 r = (r >> 8) & 0xff;
1629 if (r > 0) { 1629 if (r > 0) {
1630 dev_info(DEV, "before-resync-target handler returned %d, " 1630 drbd_info(device, "before-resync-target handler returned %d, "
1631 "dropping connection.\n", r); 1631 "dropping connection.\n", r);
1632 conn_request_state(first_peer_device(device)->connection, NS(conn, C_DISCONNECTING), CS_HARD); 1632 conn_request_state(first_peer_device(device)->connection, NS(conn, C_DISCONNECTING), CS_HARD);
1633 return; 1633 return;
@@ -1637,10 +1637,10 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side)
1637 r = (r >> 8) & 0xff; 1637 r = (r >> 8) & 0xff;
1638 if (r > 0) { 1638 if (r > 0) {
1639 if (r == 3) { 1639 if (r == 3) {
1640 dev_info(DEV, "before-resync-source handler returned %d, " 1640 drbd_info(device, "before-resync-source handler returned %d, "
1641 "ignoring. Old userland tools?", r); 1641 "ignoring. Old userland tools?", r);
1642 } else { 1642 } else {
1643 dev_info(DEV, "before-resync-source handler returned %d, " 1643 drbd_info(device, "before-resync-source handler returned %d, "
1644 "dropping connection.\n", r); 1644 "dropping connection.\n", r);
1645 conn_request_state(first_peer_device(device)->connection, 1645 conn_request_state(first_peer_device(device)->connection,
1646 NS(conn, C_DISCONNECTING), CS_HARD); 1646 NS(conn, C_DISCONNECTING), CS_HARD);
@@ -1715,7 +1715,7 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side)
1715 * to deal with potential jiffies wrap. */ 1715 * to deal with potential jiffies wrap. */
1716 device->rs_last_bcast = jiffies - HZ; 1716 device->rs_last_bcast = jiffies - HZ;
1717 1717
1718 dev_info(DEV, "Began resync as %s (will sync %lu KB [%lu bits set]).\n", 1718 drbd_info(device, "Began resync as %s (will sync %lu KB [%lu bits set]).\n",
1719 drbd_conn_str(ns.conn), 1719 drbd_conn_str(ns.conn),
1720 (unsigned long) device->rs_total << (BM_BLOCK_SHIFT-10), 1720 (unsigned long) device->rs_total << (BM_BLOCK_SHIFT-10),
1721 (unsigned long) device->rs_total); 1721 (unsigned long) device->rs_total);