Diffstat (limited to 'drivers/block/drbd/drbd_actlog.c')
-rw-r--r--	drivers/block/drbd/drbd_actlog.c	62
1 file changed, 1 insertion(+), 61 deletions(-)
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 74b4835d3107..17956ff6a08d 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -26,7 +26,6 @@
 #include <linux/slab.h>
 #include <linux/drbd.h>
 #include "drbd_int.h"
-#include "drbd_tracing.h"
 #include "drbd_wrappers.h"
 
 /* We maintain a trivial check sum in our on disk activity log.
@@ -66,17 +65,6 @@ struct drbd_atodb_wait {
 
 int w_al_write_transaction(struct drbd_conf *, struct drbd_work *, int);
 
-/* The actual tracepoint needs to have constant number of known arguments...
- */
-void trace_drbd_resync(struct drbd_conf *mdev, int level, const char *fmt, ...)
-{
-	va_list ap;
-
-	va_start(ap, fmt);
-	trace__drbd_resync(mdev, level, fmt, ap);
-	va_end(ap);
-}
-
 static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
 				 struct drbd_backing_dev *bdev,
 				 struct page *page, sector_t sector,
@@ -105,8 +93,6 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
 	bio->bi_end_io = drbd_md_io_complete;
 	bio->bi_rw = rw;
 
-	trace_drbd_bio(mdev, "Md", bio, 0, NULL);
-
 	if (FAULT_ACTIVE(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD))
 		bio_endio(bio, -EIO);
 	else
@@ -236,8 +222,6 @@ void drbd_al_begin_io(struct drbd_conf *mdev, sector_t sector)
 
 	D_ASSERT(atomic_read(&mdev->local_cnt) > 0);
 
-	trace_drbd_actlog(mdev, sector, "al_begin_io");
-
 	wait_event(mdev->al_wait, (al_ext = _al_get(mdev, enr)));
 
 	if (al_ext->lc_number != enr) {
@@ -270,8 +254,6 @@ void drbd_al_complete_io(struct drbd_conf *mdev, sector_t sector)
 	struct lc_element *extent;
 	unsigned long flags;
 
-	trace_drbd_actlog(mdev, sector, "al_complete_io");
-
 	spin_lock_irqsave(&mdev->al_lock, flags);
 
 	extent = lc_find(mdev->act_log, enr);
@@ -967,10 +949,6 @@ void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size,
 		ebnr = BM_SECT_TO_BIT(esector - (BM_SECT_PER_BIT-1));
 	sbnr = BM_SECT_TO_BIT(sector + BM_SECT_PER_BIT-1);
 
-	trace_drbd_resync(mdev, TRACE_LVL_METRICS,
-			  "drbd_set_in_sync: sector=%llus size=%u sbnr=%lu ebnr=%lu\n",
-			  (unsigned long long)sector, size, sbnr, ebnr);
-
 	if (sbnr > ebnr)
 		return;
 
@@ -1045,10 +1023,6 @@ void __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size,
 	sbnr = BM_SECT_TO_BIT(sector);
 	ebnr = BM_SECT_TO_BIT(esector);
 
-	trace_drbd_resync(mdev, TRACE_LVL_METRICS,
-			  "drbd_set_out_of_sync: sector=%llus size=%u sbnr=%lu ebnr=%lu\n",
-			  (unsigned long long)sector, size, sbnr, ebnr);
-
 	/* ok, (capacity & 7) != 0 sometimes, but who cares...
 	 * we count rs_{total,left} in bits, not sectors. */
 	spin_lock_irqsave(&mdev->al_lock, flags);
@@ -1143,10 +1117,6 @@ int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
 	struct bm_extent *bm_ext;
 	int i, sig;
 
-	trace_drbd_resync(mdev, TRACE_LVL_ALL,
-			  "drbd_rs_begin_io: sector=%llus (rs_end=%d)\n",
-			  (unsigned long long)sector, enr);
-
 	sig = wait_event_interruptible(mdev->al_wait,
 			(bm_ext = _bme_get(mdev, enr)));
 	if (sig)
@@ -1192,9 +1162,6 @@ int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
 	struct bm_extent *bm_ext;
 	int i;
 
-	trace_drbd_resync(mdev, TRACE_LVL_ALL, "drbd_try_rs_begin_io: sector=%llus\n",
-			  (unsigned long long)sector);
-
 	spin_lock_irq(&mdev->al_lock);
 	if (mdev->resync_wenr != LC_FREE && mdev->resync_wenr != enr) {
 		/* in case you have very heavy scattered io, it may
@@ -1210,11 +1177,6 @@ int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
 		 * the lc_put here...
 		 * we also have to wake_up
 		 */
-
-		trace_drbd_resync(mdev, TRACE_LVL_ALL,
-			"dropping %u, apparently got 'synced' by application io\n",
-			mdev->resync_wenr);
-
 		e = lc_find(mdev->resync, mdev->resync_wenr);
 		bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
 		if (bm_ext) {
@@ -1242,21 +1204,14 @@ int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
 			 * but then could not set BME_LOCKED,
 			 * so we tried again.
 			 * drop the extra reference. */
-			trace_drbd_resync(mdev, TRACE_LVL_ALL,
-				"dropping extra reference on %u\n", enr);
-
 			bm_ext->lce.refcnt--;
 			D_ASSERT(bm_ext->lce.refcnt > 0);
 		}
 		goto check_al;
 	} else {
 		/* do we rather want to try later? */
-		if (mdev->resync_locked > mdev->resync->nr_elements-3) {
-			trace_drbd_resync(mdev, TRACE_LVL_ALL,
-				"resync_locked = %u!\n", mdev->resync_locked);
-
+		if (mdev->resync_locked > mdev->resync->nr_elements-3)
 			goto try_again;
-		}
 		/* Do or do not. There is no try. -- Yoda */
 		e = lc_get(mdev->resync, enr);
 		bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
@@ -1281,8 +1236,6 @@ int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
 		goto check_al;
 	}
 check_al:
-	trace_drbd_resync(mdev, TRACE_LVL_ALL, "checking al for %u\n", enr);
-
 	for (i = 0; i < AL_EXT_PER_BM_SECT; i++) {
 		if (unlikely(al_enr+i == mdev->act_log->new_number))
 			goto try_again;
@@ -1296,7 +1249,6 @@ proceed:
 	return 0;
 
 try_again:
-	trace_drbd_resync(mdev, TRACE_LVL_ALL, "need to try again for %u\n", enr);
 	if (bm_ext)
 		mdev->resync_wenr = enr;
 	spin_unlock_irq(&mdev->al_lock);
@@ -1310,10 +1262,6 @@ void drbd_rs_complete_io(struct drbd_conf *mdev, sector_t sector)
 	struct bm_extent *bm_ext;
 	unsigned long flags;
 
-	trace_drbd_resync(mdev, TRACE_LVL_ALL,
-			  "drbd_rs_complete_io: sector=%llus (rs_enr=%d)\n",
-			  (long long)sector, enr);
-
 	spin_lock_irqsave(&mdev->al_lock, flags);
 	e = lc_find(mdev->resync, enr);
 	bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
@@ -1348,8 +1296,6 @@ void drbd_rs_complete_io(struct drbd_conf *mdev, sector_t sector)
  */
 void drbd_rs_cancel_all(struct drbd_conf *mdev)
 {
-	trace_drbd_resync(mdev, TRACE_LVL_METRICS, "drbd_rs_cancel_all\n");
-
 	spin_lock_irq(&mdev->al_lock);
 
 	if (get_ldev_if_state(mdev, D_FAILED)) { /* Makes sure ->resync is there. */
@@ -1375,8 +1321,6 @@ int drbd_rs_del_all(struct drbd_conf *mdev)
 	struct bm_extent *bm_ext;
 	int i;
 
-	trace_drbd_resync(mdev, TRACE_LVL_METRICS, "drbd_rs_del_all\n");
-
 	spin_lock_irq(&mdev->al_lock);
 
 	if (get_ldev_if_state(mdev, D_FAILED)) {
@@ -1429,10 +1373,6 @@ void drbd_rs_failed_io(struct drbd_conf *mdev, sector_t sector, int size)
 	sector_t esector, nr_sectors;
 	int wake_up = 0;
 
-	trace_drbd_resync(mdev, TRACE_LVL_SUMMARY,
-			  "drbd_rs_failed_io: sector=%llus, size=%u\n",
-			  (unsigned long long)sector, size);
-
 	if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) {
 		dev_err(DEV, "drbd_rs_failed_io: sector=%llus size=%d nonsense!\n",
 				(unsigned long long)sector, size);