author    Jens Axboe <jens.axboe@oracle.com>    2009-10-01 03:04:14 -0400
committer Jens Axboe <jens.axboe@oracle.com>    2009-10-01 15:17:58 -0400
commit    6a0afdf58d40200abd0c717261d1bc4c49195c2f (patch)
tree      9ff7ca32d16e6ddb774105528fe051bd04695b3d /drivers/block/drbd
parent    ab8fafc2e1ecc0090f2c78902d3b992eec8b11f8 (diff)

drbd: remove tracing bits

They should be reimplemented in the current scheme.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
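For context, the "current scheme" refers to the kernel's TRACE_EVENT/ftrace tracepoint
infrastructure, which superseded the ad-hoc DECLARE_TRACE/DEFINE_TRACE probe module this
patch removes. As a rough sketch only (not part of this patch; the event name, fields,
and header path below are hypothetical), a reimplemented DRBD activity-log tracepoint
would be declared in a trace header along these lines:

/* Illustrative sketch: include/trace/events/drbd.h (hypothetical) */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM drbd

#if !defined(_TRACE_DRBD_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_DRBD_H

#include <linux/tracepoint.h>

/* Fires when an activity-log extent is touched; the fields are examples only. */
TRACE_EVENT(drbd_actlog,
	TP_PROTO(unsigned int minor, sector_t sector, const char *msg),
	TP_ARGS(minor, sector, msg),
	TP_STRUCT__entry(
		__field(unsigned int, minor)
		__field(sector_t, sector)
		__string(msg, msg)
	),
	TP_fast_assign(
		__entry->minor  = minor;
		__entry->sector = sector;
		__assign_str(msg, msg);
	),
	TP_printk("drbd%u: %s sector=%llu",
		  __entry->minor, __get_str(msg),
		  (unsigned long long)__entry->sector)
);

#endif /* _TRACE_DRBD_H */

/* This part must be outside the multi-read protection. */
#include <trace/define_trace.h>

One drbd .c file would then define CREATE_TRACE_POINTS before including the header to
instantiate the tracepoint, and the call sites removed here would become plain
trace_drbd_actlog(...) calls; the events appear under
/sys/kernel/debug/tracing/events/drbd/ with no extra module or Kconfig knob.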
Diffstat (limited to 'drivers/block/drbd')
 -rw-r--r--  drivers/block/drbd/Kconfig          |   11
 -rw-r--r--  drivers/block/drbd/Makefile         |    3
 -rw-r--r--  drivers/block/drbd/drbd_actlog.c    |   62
 -rw-r--r--  drivers/block/drbd/drbd_int.h       |    7
 -rw-r--r--  drivers/block/drbd/drbd_main.c      |   36
 -rw-r--r--  drivers/block/drbd/drbd_nl.c        |    9
 -rw-r--r--  drivers/block/drbd/drbd_receiver.c  |   30
 -rw-r--r--  drivers/block/drbd/drbd_req.c       |   11
 -rw-r--r--  drivers/block/drbd/drbd_tracing.c   |  752
 -rw-r--r--  drivers/block/drbd/drbd_tracing.h   |   87
 -rw-r--r--  drivers/block/drbd/drbd_worker.c    |   16
 11 files changed, 3 insertions(+), 1021 deletions(-)
diff --git a/drivers/block/drbd/Kconfig b/drivers/block/drbd/Kconfig
index 4e6f90f487c2..f4acd04ebeef 100644
--- a/drivers/block/drbd/Kconfig
+++ b/drivers/block/drbd/Kconfig
@@ -38,17 +38,6 @@ config BLK_DEV_DRBD
 
 	  If unsure, say N.
 
-config DRBD_TRACE
-	tristate "DRBD tracing"
-	depends on BLK_DEV_DRBD
-	select TRACEPOINTS
-	default n
-	help
-
-	  Say Y here if you want to be able to trace various events in DRBD.
-
-	  If unsure, say N.
-
 config DRBD_FAULT_INJECTION
 	bool "DRBD fault injection"
 	depends on BLK_DEV_DRBD
diff --git a/drivers/block/drbd/Makefile b/drivers/block/drbd/Makefile
index 7d86ef8a8b40..0d3f337ff5ff 100644
--- a/drivers/block/drbd/Makefile
+++ b/drivers/block/drbd/Makefile
@@ -2,7 +2,4 @@ drbd-y := drbd_bitmap.o drbd_proc.o
 drbd-y += drbd_worker.o drbd_receiver.o drbd_req.o drbd_actlog.o
 drbd-y += drbd_main.o drbd_strings.o drbd_nl.o
 
-drbd_trace-y := drbd_tracing.o
-
 obj-$(CONFIG_BLK_DEV_DRBD) += drbd.o
-obj-$(CONFIG_DRBD_TRACE) += drbd_trace.o
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 74b4835d3107..17956ff6a08d 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -26,7 +26,6 @@
26#include <linux/slab.h> 26#include <linux/slab.h>
27#include <linux/drbd.h> 27#include <linux/drbd.h>
28#include "drbd_int.h" 28#include "drbd_int.h"
29#include "drbd_tracing.h"
30#include "drbd_wrappers.h" 29#include "drbd_wrappers.h"
31 30
32/* We maintain a trivial check sum in our on disk activity log. 31/* We maintain a trivial check sum in our on disk activity log.
@@ -66,17 +65,6 @@ struct drbd_atodb_wait {
66 65
67int w_al_write_transaction(struct drbd_conf *, struct drbd_work *, int); 66int w_al_write_transaction(struct drbd_conf *, struct drbd_work *, int);
68 67
69/* The actual tracepoint needs to have constant number of known arguments...
70 */
71void trace_drbd_resync(struct drbd_conf *mdev, int level, const char *fmt, ...)
72{
73 va_list ap;
74
75 va_start(ap, fmt);
76 trace__drbd_resync(mdev, level, fmt, ap);
77 va_end(ap);
78}
79
80static int _drbd_md_sync_page_io(struct drbd_conf *mdev, 68static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
81 struct drbd_backing_dev *bdev, 69 struct drbd_backing_dev *bdev,
82 struct page *page, sector_t sector, 70 struct page *page, sector_t sector,
@@ -105,8 +93,6 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
105 bio->bi_end_io = drbd_md_io_complete; 93 bio->bi_end_io = drbd_md_io_complete;
106 bio->bi_rw = rw; 94 bio->bi_rw = rw;
107 95
108 trace_drbd_bio(mdev, "Md", bio, 0, NULL);
109
110 if (FAULT_ACTIVE(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) 96 if (FAULT_ACTIVE(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD))
111 bio_endio(bio, -EIO); 97 bio_endio(bio, -EIO);
112 else 98 else
@@ -236,8 +222,6 @@ void drbd_al_begin_io(struct drbd_conf *mdev, sector_t sector)
236 222
237 D_ASSERT(atomic_read(&mdev->local_cnt) > 0); 223 D_ASSERT(atomic_read(&mdev->local_cnt) > 0);
238 224
239 trace_drbd_actlog(mdev, sector, "al_begin_io");
240
241 wait_event(mdev->al_wait, (al_ext = _al_get(mdev, enr))); 225 wait_event(mdev->al_wait, (al_ext = _al_get(mdev, enr)));
242 226
243 if (al_ext->lc_number != enr) { 227 if (al_ext->lc_number != enr) {
@@ -270,8 +254,6 @@ void drbd_al_complete_io(struct drbd_conf *mdev, sector_t sector)
270 struct lc_element *extent; 254 struct lc_element *extent;
271 unsigned long flags; 255 unsigned long flags;
272 256
273 trace_drbd_actlog(mdev, sector, "al_complete_io");
274
275 spin_lock_irqsave(&mdev->al_lock, flags); 257 spin_lock_irqsave(&mdev->al_lock, flags);
276 258
277 extent = lc_find(mdev->act_log, enr); 259 extent = lc_find(mdev->act_log, enr);
@@ -967,10 +949,6 @@ void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size,
967 ebnr = BM_SECT_TO_BIT(esector - (BM_SECT_PER_BIT-1)); 949 ebnr = BM_SECT_TO_BIT(esector - (BM_SECT_PER_BIT-1));
968 sbnr = BM_SECT_TO_BIT(sector + BM_SECT_PER_BIT-1); 950 sbnr = BM_SECT_TO_BIT(sector + BM_SECT_PER_BIT-1);
969 951
970 trace_drbd_resync(mdev, TRACE_LVL_METRICS,
971 "drbd_set_in_sync: sector=%llus size=%u sbnr=%lu ebnr=%lu\n",
972 (unsigned long long)sector, size, sbnr, ebnr);
973
974 if (sbnr > ebnr) 952 if (sbnr > ebnr)
975 return; 953 return;
976 954
@@ -1045,10 +1023,6 @@ void __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size,
1045 sbnr = BM_SECT_TO_BIT(sector); 1023 sbnr = BM_SECT_TO_BIT(sector);
1046 ebnr = BM_SECT_TO_BIT(esector); 1024 ebnr = BM_SECT_TO_BIT(esector);
1047 1025
1048 trace_drbd_resync(mdev, TRACE_LVL_METRICS,
1049 "drbd_set_out_of_sync: sector=%llus size=%u sbnr=%lu ebnr=%lu\n",
1050 (unsigned long long)sector, size, sbnr, ebnr);
1051
1052 /* ok, (capacity & 7) != 0 sometimes, but who cares... 1026 /* ok, (capacity & 7) != 0 sometimes, but who cares...
1053 * we count rs_{total,left} in bits, not sectors. */ 1027 * we count rs_{total,left} in bits, not sectors. */
1054 spin_lock_irqsave(&mdev->al_lock, flags); 1028 spin_lock_irqsave(&mdev->al_lock, flags);
@@ -1143,10 +1117,6 @@ int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
1143 struct bm_extent *bm_ext; 1117 struct bm_extent *bm_ext;
1144 int i, sig; 1118 int i, sig;
1145 1119
1146 trace_drbd_resync(mdev, TRACE_LVL_ALL,
1147 "drbd_rs_begin_io: sector=%llus (rs_end=%d)\n",
1148 (unsigned long long)sector, enr);
1149
1150 sig = wait_event_interruptible(mdev->al_wait, 1120 sig = wait_event_interruptible(mdev->al_wait,
1151 (bm_ext = _bme_get(mdev, enr))); 1121 (bm_ext = _bme_get(mdev, enr)));
1152 if (sig) 1122 if (sig)
@@ -1192,9 +1162,6 @@ int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
1192 struct bm_extent *bm_ext; 1162 struct bm_extent *bm_ext;
1193 int i; 1163 int i;
1194 1164
1195 trace_drbd_resync(mdev, TRACE_LVL_ALL, "drbd_try_rs_begin_io: sector=%llus\n",
1196 (unsigned long long)sector);
1197
1198 spin_lock_irq(&mdev->al_lock); 1165 spin_lock_irq(&mdev->al_lock);
1199 if (mdev->resync_wenr != LC_FREE && mdev->resync_wenr != enr) { 1166 if (mdev->resync_wenr != LC_FREE && mdev->resync_wenr != enr) {
1200 /* in case you have very heavy scattered io, it may 1167 /* in case you have very heavy scattered io, it may
@@ -1210,11 +1177,6 @@ int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
1210 * the lc_put here... 1177 * the lc_put here...
1211 * we also have to wake_up 1178 * we also have to wake_up
1212 */ 1179 */
1213
1214 trace_drbd_resync(mdev, TRACE_LVL_ALL,
1215 "dropping %u, apparently got 'synced' by application io\n",
1216 mdev->resync_wenr);
1217
1218 e = lc_find(mdev->resync, mdev->resync_wenr); 1180 e = lc_find(mdev->resync, mdev->resync_wenr);
1219 bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL; 1181 bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
1220 if (bm_ext) { 1182 if (bm_ext) {
@@ -1242,21 +1204,14 @@ int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
1242 * but then could not set BME_LOCKED, 1204 * but then could not set BME_LOCKED,
1243 * so we tried again. 1205 * so we tried again.
1244 * drop the extra reference. */ 1206 * drop the extra reference. */
1245 trace_drbd_resync(mdev, TRACE_LVL_ALL,
1246 "dropping extra reference on %u\n", enr);
1247
1248 bm_ext->lce.refcnt--; 1207 bm_ext->lce.refcnt--;
1249 D_ASSERT(bm_ext->lce.refcnt > 0); 1208 D_ASSERT(bm_ext->lce.refcnt > 0);
1250 } 1209 }
1251 goto check_al; 1210 goto check_al;
1252 } else { 1211 } else {
1253 /* do we rather want to try later? */ 1212 /* do we rather want to try later? */
1254 if (mdev->resync_locked > mdev->resync->nr_elements-3) { 1213 if (mdev->resync_locked > mdev->resync->nr_elements-3)
1255 trace_drbd_resync(mdev, TRACE_LVL_ALL,
1256 "resync_locked = %u!\n", mdev->resync_locked);
1257
1258 goto try_again; 1214 goto try_again;
1259 }
1260 /* Do or do not. There is no try. -- Yoda */ 1215 /* Do or do not. There is no try. -- Yoda */
1261 e = lc_get(mdev->resync, enr); 1216 e = lc_get(mdev->resync, enr);
1262 bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL; 1217 bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
@@ -1281,8 +1236,6 @@ int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
1281 goto check_al; 1236 goto check_al;
1282 } 1237 }
1283check_al: 1238check_al:
1284 trace_drbd_resync(mdev, TRACE_LVL_ALL, "checking al for %u\n", enr);
1285
1286 for (i = 0; i < AL_EXT_PER_BM_SECT; i++) { 1239 for (i = 0; i < AL_EXT_PER_BM_SECT; i++) {
1287 if (unlikely(al_enr+i == mdev->act_log->new_number)) 1240 if (unlikely(al_enr+i == mdev->act_log->new_number))
1288 goto try_again; 1241 goto try_again;
@@ -1296,7 +1249,6 @@ proceed:
1296 return 0; 1249 return 0;
1297 1250
1298try_again: 1251try_again:
1299 trace_drbd_resync(mdev, TRACE_LVL_ALL, "need to try again for %u\n", enr);
1300 if (bm_ext) 1252 if (bm_ext)
1301 mdev->resync_wenr = enr; 1253 mdev->resync_wenr = enr;
1302 spin_unlock_irq(&mdev->al_lock); 1254 spin_unlock_irq(&mdev->al_lock);
@@ -1310,10 +1262,6 @@ void drbd_rs_complete_io(struct drbd_conf *mdev, sector_t sector)
1310 struct bm_extent *bm_ext; 1262 struct bm_extent *bm_ext;
1311 unsigned long flags; 1263 unsigned long flags;
1312 1264
1313 trace_drbd_resync(mdev, TRACE_LVL_ALL,
1314 "drbd_rs_complete_io: sector=%llus (rs_enr=%d)\n",
1315 (long long)sector, enr);
1316
1317 spin_lock_irqsave(&mdev->al_lock, flags); 1265 spin_lock_irqsave(&mdev->al_lock, flags);
1318 e = lc_find(mdev->resync, enr); 1266 e = lc_find(mdev->resync, enr);
1319 bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL; 1267 bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
@@ -1348,8 +1296,6 @@ void drbd_rs_complete_io(struct drbd_conf *mdev, sector_t sector)
1348 */ 1296 */
1349void drbd_rs_cancel_all(struct drbd_conf *mdev) 1297void drbd_rs_cancel_all(struct drbd_conf *mdev)
1350{ 1298{
1351 trace_drbd_resync(mdev, TRACE_LVL_METRICS, "drbd_rs_cancel_all\n");
1352
1353 spin_lock_irq(&mdev->al_lock); 1299 spin_lock_irq(&mdev->al_lock);
1354 1300
1355 if (get_ldev_if_state(mdev, D_FAILED)) { /* Makes sure ->resync is there. */ 1301 if (get_ldev_if_state(mdev, D_FAILED)) { /* Makes sure ->resync is there. */
@@ -1375,8 +1321,6 @@ int drbd_rs_del_all(struct drbd_conf *mdev)
1375 struct bm_extent *bm_ext; 1321 struct bm_extent *bm_ext;
1376 int i; 1322 int i;
1377 1323
1378 trace_drbd_resync(mdev, TRACE_LVL_METRICS, "drbd_rs_del_all\n");
1379
1380 spin_lock_irq(&mdev->al_lock); 1324 spin_lock_irq(&mdev->al_lock);
1381 1325
1382 if (get_ldev_if_state(mdev, D_FAILED)) { 1326 if (get_ldev_if_state(mdev, D_FAILED)) {
@@ -1429,10 +1373,6 @@ void drbd_rs_failed_io(struct drbd_conf *mdev, sector_t sector, int size)
1429 sector_t esector, nr_sectors; 1373 sector_t esector, nr_sectors;
1430 int wake_up = 0; 1374 int wake_up = 0;
1431 1375
1432 trace_drbd_resync(mdev, TRACE_LVL_SUMMARY,
1433 "drbd_rs_failed_io: sector=%llus, size=%u\n",
1434 (unsigned long long)sector, size);
1435
1436 if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) { 1376 if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) {
1437 dev_err(DEV, "drbd_rs_failed_io: sector=%llus size=%d nonsense!\n", 1377 dev_err(DEV, "drbd_rs_failed_io: sector=%llus size=%d nonsense!\n",
1438 (unsigned long long)sector, size); 1378 (unsigned long long)sector, size);
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 8da602e010bb..4e6255991e5b 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -135,8 +135,6 @@ enum {
 	DRBD_FAULT_MAX,
 };
 
-extern void trace_drbd_resync(struct drbd_conf *mdev, int level, const char *fmt, ...);
-
 #ifdef CONFIG_DRBD_FAULT_INJECTION
 extern unsigned int
 _drbd_insert_fault(struct drbd_conf *mdev, unsigned int type);
@@ -712,11 +710,6 @@ enum epoch_event {
 	EV_GOT_BARRIER_NR,
 	EV_BARRIER_DONE,
 	EV_BECAME_LAST,
-	EV_TRACE_FLUSH,		/* TRACE_ are not real events, only used for tracing */
-	EV_TRACE_ADD_BARRIER,	/* Doing the first write as a barrier write */
-	EV_TRACE_SETTING_BI,	/* Barrier is expressed with the first write of the next epoch */
-	EV_TRACE_ALLOC,
-	EV_TRACE_FREE,
 	EV_CLEANUP = 32,	/* used as flag */
 };
 
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 80273f21a4aa..11d8ff6016ac 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -53,7 +53,6 @@
53 53
54#include <linux/drbd_limits.h> 54#include <linux/drbd_limits.h>
55#include "drbd_int.h" 55#include "drbd_int.h"
56#include "drbd_tracing.h"
57#include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */ 56#include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */
58 57
59#include "drbd_vli.h" 58#include "drbd_vli.h"
@@ -80,18 +79,6 @@ static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused);
80static void md_sync_timer_fn(unsigned long data); 79static void md_sync_timer_fn(unsigned long data);
81static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused); 80static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused);
82 81
83DEFINE_TRACE(drbd_unplug);
84DEFINE_TRACE(drbd_uuid);
85DEFINE_TRACE(drbd_ee);
86DEFINE_TRACE(drbd_packet);
87DEFINE_TRACE(drbd_md_io);
88DEFINE_TRACE(drbd_epoch);
89DEFINE_TRACE(drbd_netlink);
90DEFINE_TRACE(drbd_actlog);
91DEFINE_TRACE(drbd_bio);
92DEFINE_TRACE(_drbd_resync);
93DEFINE_TRACE(drbd_req);
94
95MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, " 82MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
96 "Lars Ellenberg <lars@linbit.com>"); 83 "Lars Ellenberg <lars@linbit.com>");
97MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION); 84MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
@@ -1576,7 +1563,6 @@ int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
1576 h->command = cpu_to_be16(cmd); 1563 h->command = cpu_to_be16(cmd);
1577 h->length = cpu_to_be16(size-sizeof(struct p_header)); 1564 h->length = cpu_to_be16(size-sizeof(struct p_header));
1578 1565
1579 trace_drbd_packet(mdev, sock, 0, (void *)h, __FILE__, __LINE__);
1580 sent = drbd_send(mdev, sock, h, size, msg_flags); 1566 sent = drbd_send(mdev, sock, h, size, msg_flags);
1581 1567
1582 ok = (sent == size); 1568 ok = (sent == size);
@@ -1628,8 +1614,6 @@ int drbd_send_cmd2(struct drbd_conf *mdev, enum drbd_packets cmd, char *data,
1628 if (!drbd_get_data_sock(mdev)) 1614 if (!drbd_get_data_sock(mdev))
1629 return 0; 1615 return 0;
1630 1616
1631 trace_drbd_packet(mdev, mdev->data.socket, 0, (void *)&h, __FILE__, __LINE__);
1632
1633 ok = (sizeof(h) == 1617 ok = (sizeof(h) ==
1634 drbd_send(mdev, mdev->data.socket, &h, sizeof(h), 0)); 1618 drbd_send(mdev, mdev->data.socket, &h, sizeof(h), 0));
1635 ok = ok && (size == 1619 ok = ok && (size ==
@@ -2359,7 +2343,6 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
2359 dp_flags |= DP_MAY_SET_IN_SYNC; 2343 dp_flags |= DP_MAY_SET_IN_SYNC;
2360 2344
2361 p.dp_flags = cpu_to_be32(dp_flags); 2345 p.dp_flags = cpu_to_be32(dp_flags);
2362 trace_drbd_packet(mdev, mdev->data.socket, 0, (void *)&p, __FILE__, __LINE__);
2363 set_bit(UNPLUG_REMOTE, &mdev->flags); 2346 set_bit(UNPLUG_REMOTE, &mdev->flags);
2364 ok = (sizeof(p) == 2347 ok = (sizeof(p) ==
2365 drbd_send(mdev, mdev->data.socket, &p, sizeof(p), MSG_MORE)); 2348 drbd_send(mdev, mdev->data.socket, &p, sizeof(p), MSG_MORE));
@@ -2410,7 +2393,6 @@ int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd,
2410 if (!drbd_get_data_sock(mdev)) 2393 if (!drbd_get_data_sock(mdev))
2411 return 0; 2394 return 0;
2412 2395
2413 trace_drbd_packet(mdev, mdev->data.socket, 0, (void *)&p, __FILE__, __LINE__);
2414 ok = sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, 2396 ok = sizeof(p) == drbd_send(mdev, mdev->data.socket, &p,
2415 sizeof(p), MSG_MORE); 2397 sizeof(p), MSG_MORE);
2416 if (ok && dgs) { 2398 if (ok && dgs) {
@@ -2546,8 +2528,6 @@ static void drbd_unplug_fn(struct request_queue *q)
2546{ 2528{
2547 struct drbd_conf *mdev = q->queuedata; 2529 struct drbd_conf *mdev = q->queuedata;
2548 2530
2549 trace_drbd_unplug(mdev, "got unplugged");
2550
2551 /* unplug FIRST */ 2531 /* unplug FIRST */
2552 spin_lock_irq(q->queue_lock); 2532 spin_lock_irq(q->queue_lock);
2553 blk_remove_plug(q); 2533 blk_remove_plug(q);
@@ -3252,8 +3232,6 @@ void drbd_md_sync(struct drbd_conf *mdev)
3252 if (!get_ldev_if_state(mdev, D_FAILED)) 3232 if (!get_ldev_if_state(mdev, D_FAILED))
3253 return; 3233 return;
3254 3234
3255 trace_drbd_md_io(mdev, WRITE, mdev->ldev);
3256
3257 mutex_lock(&mdev->md_io_mutex); 3235 mutex_lock(&mdev->md_io_mutex);
3258 buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page); 3236 buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
3259 memset(buffer, 0, 512); 3237 memset(buffer, 0, 512);
@@ -3308,8 +3286,6 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
3308 if (!get_ldev_if_state(mdev, D_ATTACHING)) 3286 if (!get_ldev_if_state(mdev, D_ATTACHING))
3309 return ERR_IO_MD_DISK; 3287 return ERR_IO_MD_DISK;
3310 3288
3311 trace_drbd_md_io(mdev, READ, bdev);
3312
3313 mutex_lock(&mdev->md_io_mutex); 3289 mutex_lock(&mdev->md_io_mutex);
3314 buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page); 3290 buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
3315 3291
@@ -3388,11 +3364,8 @@ static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
3388{ 3364{
3389 int i; 3365 int i;
3390 3366
3391 for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++) { 3367 for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++)
3392 mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i]; 3368 mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i];
3393
3394 trace_drbd_uuid(mdev, i+1);
3395 }
3396} 3369}
3397 3370
3398void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local) 3371void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
@@ -3407,7 +3380,6 @@ void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
3407 } 3380 }
3408 3381
3409 mdev->ldev->md.uuid[idx] = val; 3382 mdev->ldev->md.uuid[idx] = val;
3410 trace_drbd_uuid(mdev, idx);
3411 drbd_md_mark_dirty(mdev); 3383 drbd_md_mark_dirty(mdev);
3412} 3384}
3413 3385
@@ -3417,7 +3389,6 @@ void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
3417 if (mdev->ldev->md.uuid[idx]) { 3389 if (mdev->ldev->md.uuid[idx]) {
3418 drbd_uuid_move_history(mdev); 3390 drbd_uuid_move_history(mdev);
3419 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx]; 3391 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx];
3420 trace_drbd_uuid(mdev, UI_HISTORY_START);
3421 } 3392 }
3422 _drbd_uuid_set(mdev, idx, val); 3393 _drbd_uuid_set(mdev, idx, val);
3423} 3394}
@@ -3436,7 +3407,6 @@ void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
3436 dev_info(DEV, "Creating new current UUID\n"); 3407 dev_info(DEV, "Creating new current UUID\n");
3437 D_ASSERT(mdev->ldev->md.uuid[UI_BITMAP] == 0); 3408 D_ASSERT(mdev->ldev->md.uuid[UI_BITMAP] == 0);
3438 mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT]; 3409 mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT];
3439 trace_drbd_uuid(mdev, UI_BITMAP);
3440 3410
3441 get_random_bytes(&val, sizeof(u64)); 3411 get_random_bytes(&val, sizeof(u64));
3442 _drbd_uuid_set(mdev, UI_CURRENT, val); 3412 _drbd_uuid_set(mdev, UI_CURRENT, val);
@@ -3451,8 +3421,6 @@ void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
3451 drbd_uuid_move_history(mdev); 3421 drbd_uuid_move_history(mdev);
3452 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP]; 3422 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
3453 mdev->ldev->md.uuid[UI_BITMAP] = 0; 3423 mdev->ldev->md.uuid[UI_BITMAP] = 0;
3454 trace_drbd_uuid(mdev, UI_HISTORY_START);
3455 trace_drbd_uuid(mdev, UI_BITMAP);
3456 } else { 3424 } else {
3457 if (mdev->ldev->md.uuid[UI_BITMAP]) 3425 if (mdev->ldev->md.uuid[UI_BITMAP])
3458 dev_warn(DEV, "bm UUID already set"); 3426 dev_warn(DEV, "bm UUID already set");
@@ -3460,7 +3428,6 @@ void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
3460 mdev->ldev->md.uuid[UI_BITMAP] = val; 3428 mdev->ldev->md.uuid[UI_BITMAP] = val;
3461 mdev->ldev->md.uuid[UI_BITMAP] &= ~((u64)1); 3429 mdev->ldev->md.uuid[UI_BITMAP] &= ~((u64)1);
3462 3430
3463 trace_drbd_uuid(mdev, UI_BITMAP);
3464 } 3431 }
3465 drbd_md_mark_dirty(mdev); 3432 drbd_md_mark_dirty(mdev);
3466} 3433}
@@ -3727,7 +3694,6 @@ const char *drbd_buildtag(void)
3727module_init(drbd_init) 3694module_init(drbd_init)
3728module_exit(drbd_cleanup) 3695module_exit(drbd_cleanup)
3729 3696
3730/* For drbd_tracing: */
3731EXPORT_SYMBOL(drbd_conn_str); 3697EXPORT_SYMBOL(drbd_conn_str);
3732EXPORT_SYMBOL(drbd_role_str); 3698EXPORT_SYMBOL(drbd_role_str);
3733EXPORT_SYMBOL(drbd_disk_str); 3699EXPORT_SYMBOL(drbd_disk_str);
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index cfde31002dff..73c55ccb629a 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -33,7 +33,6 @@
33#include <linux/blkpg.h> 33#include <linux/blkpg.h>
34#include <linux/cpumask.h> 34#include <linux/cpumask.h>
35#include "drbd_int.h" 35#include "drbd_int.h"
36#include "drbd_tracing.h"
37#include "drbd_wrappers.h" 36#include "drbd_wrappers.h"
38#include <asm/unaligned.h> 37#include <asm/unaligned.h>
39#include <linux/drbd_tag_magic.h> 38#include <linux/drbd_tag_magic.h>
@@ -2024,8 +2023,6 @@ static void drbd_connector_callback(struct cn_msg *req)
2024 goto fail; 2023 goto fail;
2025 } 2024 }
2026 2025
2027 trace_drbd_netlink(req, 1);
2028
2029 if (nlp->packet_type >= P_nl_after_last_packet) { 2026 if (nlp->packet_type >= P_nl_after_last_packet) {
2030 retcode = ERR_PACKET_NR; 2027 retcode = ERR_PACKET_NR;
2031 goto fail; 2028 goto fail;
@@ -2063,7 +2060,6 @@ static void drbd_connector_callback(struct cn_msg *req)
2063 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + rr; 2060 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + rr;
2064 cn_reply->flags = 0; 2061 cn_reply->flags = 0;
2065 2062
2066 trace_drbd_netlink(cn_reply, 0);
2067 rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_KERNEL); 2063 rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_KERNEL);
2068 if (rr && rr != -ESRCH) 2064 if (rr && rr != -ESRCH)
2069 printk(KERN_INFO "drbd: cn_netlink_send()=%d\n", rr); 2065 printk(KERN_INFO "drbd: cn_netlink_send()=%d\n", rr);
@@ -2157,7 +2153,6 @@ void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
2157 reply->minor = mdev_to_minor(mdev); 2153 reply->minor = mdev_to_minor(mdev);
2158 reply->ret_code = NO_ERROR; 2154 reply->ret_code = NO_ERROR;
2159 2155
2160 trace_drbd_netlink(cn_reply, 0);
2161 cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO); 2156 cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
2162} 2157}
2163 2158
@@ -2190,7 +2185,6 @@ void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
2190 reply->minor = mdev_to_minor(mdev); 2185 reply->minor = mdev_to_minor(mdev);
2191 reply->ret_code = NO_ERROR; 2186 reply->ret_code = NO_ERROR;
2192 2187
2193 trace_drbd_netlink(cn_reply, 0);
2194 cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO); 2188 cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
2195} 2189}
2196 2190
@@ -2262,7 +2256,6 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
2262 reply->minor = mdev_to_minor(mdev); 2256 reply->minor = mdev_to_minor(mdev);
2263 reply->ret_code = NO_ERROR; 2257 reply->ret_code = NO_ERROR;
2264 2258
2265 trace_drbd_netlink(cn_reply, 0);
2266 cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO); 2259 cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
2267 kfree(cn_reply); 2260 kfree(cn_reply);
2268} 2261}
@@ -2302,7 +2295,6 @@ void drbd_bcast_sync_progress(struct drbd_conf *mdev)
2302 reply->minor = mdev_to_minor(mdev); 2295 reply->minor = mdev_to_minor(mdev);
2303 reply->ret_code = NO_ERROR; 2296 reply->ret_code = NO_ERROR;
2304 2297
2305 trace_drbd_netlink(cn_reply, 0);
2306 cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO); 2298 cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
2307} 2299}
2308 2300
@@ -2356,7 +2348,6 @@ void drbd_nl_send_reply(struct cn_msg *req, int ret_code)
2356 reply->minor = ((struct drbd_nl_cfg_req *)req->data)->drbd_minor; 2348 reply->minor = ((struct drbd_nl_cfg_req *)req->data)->drbd_minor;
2357 reply->ret_code = ret_code; 2349 reply->ret_code = ret_code;
2358 2350
2359 trace_drbd_netlink(cn_reply, 0);
2360 rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO); 2351 rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
2361 if (rr && rr != -ESRCH) 2352 if (rr && rr != -ESRCH)
2362 printk(KERN_INFO "drbd: cn_netlink_send()=%d\n", rr); 2353 printk(KERN_INFO "drbd: cn_netlink_send()=%d\n", rr);
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 2f81821c2e06..360baf60f574 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -47,7 +47,6 @@
47#include <linux/string.h> 47#include <linux/string.h>
48#include <linux/scatterlist.h> 48#include <linux/scatterlist.h>
49#include "drbd_int.h" 49#include "drbd_int.h"
50#include "drbd_tracing.h"
51#include "drbd_req.h" 50#include "drbd_req.h"
52 51
53#include "drbd_vli.h" 52#include "drbd_vli.h"
@@ -350,8 +349,6 @@ struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
350 e->epoch = NULL; 349 e->epoch = NULL;
351 e->flags = 0; 350 e->flags = 0;
352 351
353 trace_drbd_ee(mdev, e, "allocated");
354
355 return e; 352 return e;
356 353
357 fail2: 354 fail2:
@@ -366,7 +363,6 @@ struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
366void drbd_free_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e) 363void drbd_free_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
367{ 364{
368 struct bio *bio = e->private_bio; 365 struct bio *bio = e->private_bio;
369 trace_drbd_ee(mdev, e, "freed");
370 drbd_pp_free_bio_pages(mdev, bio); 366 drbd_pp_free_bio_pages(mdev, bio);
371 bio_put(bio); 367 bio_put(bio);
372 D_ASSERT(hlist_unhashed(&e->colision)); 368 D_ASSERT(hlist_unhashed(&e->colision));
@@ -420,7 +416,6 @@ static int drbd_process_done_ee(struct drbd_conf *mdev)
420 * all ignore the last argument. 416 * all ignore the last argument.
421 */ 417 */
422 list_for_each_entry_safe(e, t, &work_list, w.list) { 418 list_for_each_entry_safe(e, t, &work_list, w.list) {
423 trace_drbd_ee(mdev, e, "process_done_ee");
424 /* list_del not necessary, next/prev members not touched */ 419 /* list_del not necessary, next/prev members not touched */
425 ok = e->w.cb(mdev, &e->w, !ok) && ok; 420 ok = e->w.cb(mdev, &e->w, !ok) && ok;
426 drbd_free_ee(mdev, e); 421 drbd_free_ee(mdev, e);
@@ -1021,8 +1016,6 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
1021 break; 1016 break;
1022 } 1017 }
1023 1018
1024 trace_drbd_epoch(mdev, epoch, ev);
1025
1026 if (epoch_size != 0 && 1019 if (epoch_size != 0 &&
1027 atomic_read(&epoch->active) == 0 && 1020 atomic_read(&epoch->active) == 0 &&
1028 test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) && 1021 test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) &&
@@ -1054,7 +1047,6 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
1054 list_del(&epoch->list); 1047 list_del(&epoch->list);
1055 ev = EV_BECAME_LAST | (ev & EV_CLEANUP); 1048 ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
1056 mdev->epochs--; 1049 mdev->epochs--;
1057 trace_drbd_epoch(mdev, epoch, EV_TRACE_FREE);
1058 kfree(epoch); 1050 kfree(epoch);
1059 1051
1060 if (rv == FE_STILL_LIVE) 1052 if (rv == FE_STILL_LIVE)
@@ -1080,7 +1072,6 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
1080 struct flush_work *fw; 1072 struct flush_work *fw;
1081 fw = kmalloc(sizeof(*fw), GFP_ATOMIC); 1073 fw = kmalloc(sizeof(*fw), GFP_ATOMIC);
1082 if (fw) { 1074 if (fw) {
1083 trace_drbd_epoch(mdev, epoch, EV_TRACE_FLUSH);
1084 fw->w.cb = w_flush; 1075 fw->w.cb = w_flush;
1085 fw->epoch = epoch; 1076 fw->epoch = epoch;
1086 drbd_queue_work(&mdev->data.work, &fw->w); 1077 drbd_queue_work(&mdev->data.work, &fw->w);
@@ -1251,7 +1242,6 @@ static int receive_Barrier(struct drbd_conf *mdev, struct p_header *h)
1251 list_add(&epoch->list, &mdev->current_epoch->list); 1242 list_add(&epoch->list, &mdev->current_epoch->list);
1252 mdev->current_epoch = epoch; 1243 mdev->current_epoch = epoch;
1253 mdev->epochs++; 1244 mdev->epochs++;
1254 trace_drbd_epoch(mdev, epoch, EV_TRACE_ALLOC);
1255 } else { 1245 } else {
1256 /* The current_epoch got recycled while we allocated this one... */ 1246 /* The current_epoch got recycled while we allocated this one... */
1257 kfree(epoch); 1247 kfree(epoch);
@@ -1458,8 +1448,6 @@ static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_si
1458 list_add(&e->w.list, &mdev->sync_ee); 1448 list_add(&e->w.list, &mdev->sync_ee);
1459 spin_unlock_irq(&mdev->req_lock); 1449 spin_unlock_irq(&mdev->req_lock);
1460 1450
1461 trace_drbd_ee(mdev, e, "submitting for (rs)write");
1462 trace_drbd_bio(mdev, "Sec", e->private_bio, 0, NULL);
1463 drbd_generic_make_request(mdev, DRBD_FAULT_RS_WR, e->private_bio); 1451 drbd_generic_make_request(mdev, DRBD_FAULT_RS_WR, e->private_bio);
1464 /* accounting done in endio */ 1452 /* accounting done in endio */
1465 1453
@@ -1721,16 +1709,13 @@ static int receive_Data(struct drbd_conf *mdev, struct p_header *h)
1721 epoch = list_entry(e->epoch->list.prev, struct drbd_epoch, list); 1709 epoch = list_entry(e->epoch->list.prev, struct drbd_epoch, list);
1722 if (epoch == e->epoch) { 1710 if (epoch == e->epoch) {
1723 set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags); 1711 set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
1724 trace_drbd_epoch(mdev, e->epoch, EV_TRACE_ADD_BARRIER);
1725 rw |= (1<<BIO_RW_BARRIER); 1712 rw |= (1<<BIO_RW_BARRIER);
1726 e->flags |= EE_IS_BARRIER; 1713 e->flags |= EE_IS_BARRIER;
1727 } else { 1714 } else {
1728 if (atomic_read(&epoch->epoch_size) > 1 || 1715 if (atomic_read(&epoch->epoch_size) > 1 ||
1729 !test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) { 1716 !test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) {
1730 set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags); 1717 set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
1731 trace_drbd_epoch(mdev, epoch, EV_TRACE_SETTING_BI);
1732 set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags); 1718 set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
1733 trace_drbd_epoch(mdev, e->epoch, EV_TRACE_ADD_BARRIER);
1734 rw |= (1<<BIO_RW_BARRIER); 1719 rw |= (1<<BIO_RW_BARRIER);
1735 e->flags |= EE_IS_BARRIER; 1720 e->flags |= EE_IS_BARRIER;
1736 } 1721 }
@@ -1905,8 +1890,6 @@ static int receive_Data(struct drbd_conf *mdev, struct p_header *h)
1905 } 1890 }
1906 1891
1907 e->private_bio->bi_rw = rw; 1892 e->private_bio->bi_rw = rw;
1908 trace_drbd_ee(mdev, e, "submitting for (data)write");
1909 trace_drbd_bio(mdev, "Sec", e->private_bio, 0, NULL);
1910 drbd_generic_make_request(mdev, DRBD_FAULT_DT_WR, e->private_bio); 1893 drbd_generic_make_request(mdev, DRBD_FAULT_DT_WR, e->private_bio);
1911 /* accounting done in endio */ 1894 /* accounting done in endio */
1912 1895
@@ -2065,8 +2048,6 @@ static int receive_DataRequest(struct drbd_conf *mdev, struct p_header *h)
2065 2048
2066 inc_unacked(mdev); 2049 inc_unacked(mdev);
2067 2050
2068 trace_drbd_ee(mdev, e, "submitting for read");
2069 trace_drbd_bio(mdev, "Sec", e->private_bio, 0, NULL);
2070 drbd_generic_make_request(mdev, fault_type, e->private_bio); 2051 drbd_generic_make_request(mdev, fault_type, e->private_bio);
2071 maybe_kick_lo(mdev); 2052 maybe_kick_lo(mdev);
2072 2053
@@ -3543,9 +3524,6 @@ static void drbdd(struct drbd_conf *mdev)
3543 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR)); 3524 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
3544 break; 3525 break;
3545 } 3526 }
3546
3547 trace_drbd_packet(mdev, mdev->data.socket, 2, &mdev->data.rbuf,
3548 __FILE__, __LINE__);
3549 } 3527 }
3550} 3528}
3551 3529
@@ -3825,9 +3803,6 @@ static int drbd_do_handshake(struct drbd_conf *mdev)
3825 return 0; 3803 return 0;
3826 } 3804 }
3827 3805
3828 trace_drbd_packet(mdev, mdev->data.socket, 2, &mdev->data.rbuf,
3829 __FILE__, __LINE__);
3830
3831 p->protocol_min = be32_to_cpu(p->protocol_min); 3806 p->protocol_min = be32_to_cpu(p->protocol_min);
3832 p->protocol_max = be32_to_cpu(p->protocol_max); 3807 p->protocol_max = be32_to_cpu(p->protocol_max);
3833 if (p->protocol_max == 0) 3808 if (p->protocol_max == 0)
@@ -4420,14 +4395,11 @@ int drbd_asender(struct drbd_thread *thi)
4420 goto disconnect; 4395 goto disconnect;
4421 } 4396 }
4422 expect = cmd->pkt_size; 4397 expect = cmd->pkt_size;
4423 ERR_IF(len != expect-sizeof(struct p_header)) { 4398 ERR_IF(len != expect-sizeof(struct p_header))
4424 trace_drbd_packet(mdev, mdev->meta.socket, 1, (void *)h, __FILE__, __LINE__);
4425 goto reconnect; 4399 goto reconnect;
4426 }
4427 } 4400 }
4428 if (received == expect) { 4401 if (received == expect) {
4429 D_ASSERT(cmd != NULL); 4402 D_ASSERT(cmd != NULL);
4430 trace_drbd_packet(mdev, mdev->meta.socket, 1, (void *)h, __FILE__, __LINE__);
4431 if (!cmd->process(mdev, h)) 4403 if (!cmd->process(mdev, h))
4432 goto reconnect; 4404 goto reconnect;
4433 4405
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 1aaa397669a8..3678d3d66c6c 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -28,7 +28,6 @@
28#include <linux/slab.h> 28#include <linux/slab.h>
29#include <linux/drbd.h> 29#include <linux/drbd.h>
30#include "drbd_int.h" 30#include "drbd_int.h"
31#include "drbd_tracing.h"
32#include "drbd_req.h" 31#include "drbd_req.h"
33 32
34 33
@@ -218,7 +217,6 @@ static void _about_to_complete_local_write(struct drbd_conf *mdev,
218void complete_master_bio(struct drbd_conf *mdev, 217void complete_master_bio(struct drbd_conf *mdev,
219 struct bio_and_error *m) 218 struct bio_and_error *m)
220{ 219{
221 trace_drbd_bio(mdev, "Rq", m->bio, 1, NULL);
222 bio_endio(m->bio, m->error); 220 bio_endio(m->bio, m->error);
223 dec_ap_bio(mdev); 221 dec_ap_bio(mdev);
224} 222}
@@ -236,8 +234,6 @@ void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m)
236 /* only WRITES may end up here without a master bio (on barrier ack) */ 234 /* only WRITES may end up here without a master bio (on barrier ack) */
237 int rw = req->master_bio ? bio_data_dir(req->master_bio) : WRITE; 235 int rw = req->master_bio ? bio_data_dir(req->master_bio) : WRITE;
238 236
239 trace_drbd_req(req, nothing, "_req_may_be_done");
240
241 /* we must not complete the master bio, while it is 237 /* we must not complete the master bio, while it is
242 * still being processed by _drbd_send_zc_bio (drbd_send_dblock) 238 * still being processed by _drbd_send_zc_bio (drbd_send_dblock)
243 * not yet acknowledged by the peer 239 * not yet acknowledged by the peer
@@ -415,8 +411,6 @@ void __req_mod(struct drbd_request *req, enum drbd_req_event what,
415 struct drbd_conf *mdev = req->mdev; 411 struct drbd_conf *mdev = req->mdev;
416 m->bio = NULL; 412 m->bio = NULL;
417 413
418 trace_drbd_req(req, what, NULL);
419
420 switch (what) { 414 switch (what) {
421 default: 415 default:
422 dev_err(DEV, "LOGIC BUG in %s:%u\n", __FILE__ , __LINE__); 416 dev_err(DEV, "LOGIC BUG in %s:%u\n", __FILE__ , __LINE__);
@@ -666,7 +660,6 @@ void __req_mod(struct drbd_request *req, enum drbd_req_event what,
666 * this is bad, because if the connection is lost now, 660 * this is bad, because if the connection is lost now,
667 * we won't be able to clean them up... */ 661 * we won't be able to clean them up... */
668 dev_err(DEV, "FIXME (barrier_acked but pending)\n"); 662 dev_err(DEV, "FIXME (barrier_acked but pending)\n");
669 trace_drbd_req(req, nothing, "FIXME (barrier_acked but pending)");
670 list_move(&req->tl_requests, &mdev->out_of_sequence_requests); 663 list_move(&req->tl_requests, &mdev->out_of_sequence_requests);
671 } 664 }
672 D_ASSERT(req->rq_state & RQ_NET_SENT); 665 D_ASSERT(req->rq_state & RQ_NET_SENT);
@@ -736,8 +729,6 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio)
736 return 0; 729 return 0;
737 } 730 }
738 731
739 trace_drbd_bio(mdev, "Rq", bio, 0, req);
740
741 local = get_ldev(mdev); 732 local = get_ldev(mdev);
742 if (!local) { 733 if (!local) {
743 bio_put(req->private_bio); /* or we get a bio leak */ 734 bio_put(req->private_bio); /* or we get a bio leak */
@@ -928,8 +919,6 @@ allocate_barrier:
928 if (local) { 919 if (local) {
929 req->private_bio->bi_bdev = mdev->ldev->backing_bdev; 920 req->private_bio->bi_bdev = mdev->ldev->backing_bdev;
930 921
931 trace_drbd_bio(mdev, "Pri", req->private_bio, 0, NULL);
932
933 if (FAULT_ACTIVE(mdev, rw == WRITE ? DRBD_FAULT_DT_WR 922 if (FAULT_ACTIVE(mdev, rw == WRITE ? DRBD_FAULT_DT_WR
934 : rw == READ ? DRBD_FAULT_DT_RD 923 : rw == READ ? DRBD_FAULT_DT_RD
935 : DRBD_FAULT_DT_RA)) 924 : DRBD_FAULT_DT_RA))
diff --git a/drivers/block/drbd/drbd_tracing.c b/drivers/block/drbd/drbd_tracing.c
deleted file mode 100644
index d18d4f7b4bef..000000000000
--- a/drivers/block/drbd/drbd_tracing.c
+++ /dev/null
@@ -1,752 +0,0 @@
1/*
2 drbd_tracing.c
3
4 This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5
6 Copyright (C) 2003-2008, LINBIT Information Technologies GmbH.
7 Copyright (C) 2003-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8 Copyright (C) 2003-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9
10 drbd is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 2, or (at your option)
13 any later version.
14
15 drbd is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with drbd; see the file COPYING. If not, write to
22 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23
24 */
25
26#include <linux/module.h>
27#include <linux/drbd.h>
28#include <linux/ctype.h>
29#include "drbd_int.h"
30#include "drbd_tracing.h"
31#include <linux/drbd_tag_magic.h>
32
33MODULE_LICENSE("GPL");
34MODULE_AUTHOR("Philipp Reisner, Lars Ellenberg");
35MODULE_DESCRIPTION("DRBD tracepoint probes");
36MODULE_PARM_DESC(trace_mask, "Bitmap of events to trace see drbd_tracing.c");
37MODULE_PARM_DESC(trace_level, "Current tracing level (changeable in /sys)");
38MODULE_PARM_DESC(trace_devs, "Bitmap of devices to trace (changeable in /sys)");
39
40unsigned int trace_mask = 0; /* Bitmap of events to trace */
41int trace_level; /* Current trace level */
42int trace_devs; /* Bitmap of devices to trace */
43
44module_param(trace_mask, uint, 0444);
45module_param(trace_level, int, 0644);
46module_param(trace_devs, int, 0644);
47
48enum {
49 TRACE_PACKET = 0x0001,
50 TRACE_RQ = 0x0002,
51 TRACE_UUID = 0x0004,
52 TRACE_RESYNC = 0x0008,
53 TRACE_EE = 0x0010,
54 TRACE_UNPLUG = 0x0020,
55 TRACE_NL = 0x0040,
56 TRACE_AL_EXT = 0x0080,
57 TRACE_INT_RQ = 0x0100,
58 TRACE_MD_IO = 0x0200,
59 TRACE_EPOCH = 0x0400,
60};
61
62/* Buffer printing support
63 * dbg_print_flags: used for Flags arg to drbd_print_buffer
64 * - DBGPRINT_BUFFADDR; if set, each line starts with the
65 * virtual address of the line being output. If clear,
66 * each line starts with the offset from the beginning
67 * of the buffer. */
68enum dbg_print_flags {
69 DBGPRINT_BUFFADDR = 0x0001,
70};
71
72/* Macro stuff */
73static char *nl_packet_name(int packet_type)
74{
75/* Generate packet type strings */
76#define NL_PACKET(name, number, fields) \
77 [P_ ## name] = # name,
78#define NL_INTEGER Argh!
79#define NL_BIT Argh!
80#define NL_INT64 Argh!
81#define NL_STRING Argh!
82
83 static char *nl_tag_name[P_nl_after_last_packet] = {
84#include "linux/drbd_nl.h"
85 };
86
87 return (packet_type < sizeof(nl_tag_name)/sizeof(nl_tag_name[0])) ?
88 nl_tag_name[packet_type] : "*Unknown*";
89}
90/* /Macro stuff */
91
92static inline int is_mdev_trace(struct drbd_conf *mdev, unsigned int level)
93{
94 return trace_level >= level && ((1 << mdev_to_minor(mdev)) & trace_devs);
95}
96
97static void probe_drbd_unplug(struct drbd_conf *mdev, char *msg)
98{
99 if (!is_mdev_trace(mdev, TRACE_LVL_ALWAYS))
100 return;
101
102 dev_info(DEV, "%s, ap_bio_count=%d\n", msg, atomic_read(&mdev->ap_bio_cnt));
103}
104
105static void probe_drbd_uuid(struct drbd_conf *mdev, enum drbd_uuid_index index)
106{
107 static char *uuid_str[UI_EXTENDED_SIZE] = {
108 [UI_CURRENT] = "CURRENT",
109 [UI_BITMAP] = "BITMAP",
110 [UI_HISTORY_START] = "HISTORY_START",
111 [UI_HISTORY_END] = "HISTORY_END",
112 [UI_SIZE] = "SIZE",
113 [UI_FLAGS] = "FLAGS",
114 };
115
116 if (!is_mdev_trace(mdev, TRACE_LVL_ALWAYS))
117 return;
118
119 if (index >= UI_EXTENDED_SIZE) {
120 dev_warn(DEV, " uuid_index >= EXTENDED_SIZE\n");
121 return;
122 }
123
124 dev_info(DEV, " uuid[%s] now %016llX\n",
125 uuid_str[index],
126 (unsigned long long)mdev->ldev->md.uuid[index]);
127}
128
129static void probe_drbd_md_io(struct drbd_conf *mdev, int rw,
130 struct drbd_backing_dev *bdev)
131{
132 if (!is_mdev_trace(mdev, TRACE_LVL_ALWAYS))
133 return;
134
135 dev_info(DEV, " %s metadata superblock now\n",
136 rw == READ ? "Reading" : "Writing");
137}
138
139static void probe_drbd_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, char* msg)
140{
141 if (!is_mdev_trace(mdev, TRACE_LVL_ALWAYS))
142 return;
143
144 dev_info(DEV, "EE %s sec=%llus size=%u e=%p\n",
145 msg, (unsigned long long)e->sector, e->size, e);
146}
147
148static void probe_drbd_epoch(struct drbd_conf *mdev, struct drbd_epoch *epoch,
149 enum epoch_event ev)
150{
151 static char *epoch_event_str[] = {
152 [EV_PUT] = "put",
153 [EV_GOT_BARRIER_NR] = "got_barrier_nr",
154 [EV_BARRIER_DONE] = "barrier_done",
155 [EV_BECAME_LAST] = "became_last",
156 [EV_TRACE_FLUSH] = "issuing_flush",
157 [EV_TRACE_ADD_BARRIER] = "added_barrier",
158 [EV_TRACE_SETTING_BI] = "just set barrier_in_next_epoch",
159 };
160
161 if (!is_mdev_trace(mdev, TRACE_LVL_ALWAYS))
162 return;
163
164 ev &= ~EV_CLEANUP;
165
166 switch (ev) {
167 case EV_TRACE_ALLOC:
168 dev_info(DEV, "Allocate epoch %p/xxxx { } nr_epochs=%d\n", epoch, mdev->epochs);
169 break;
170 case EV_TRACE_FREE:
171 dev_info(DEV, "Freeing epoch %p/%d { size=%d } nr_epochs=%d\n",
172 epoch, epoch->barrier_nr, atomic_read(&epoch->epoch_size),
173 mdev->epochs);
174 break;
175 default:
176 dev_info(DEV, "Update epoch %p/%d { size=%d active=%d %c%c n%c%c } ev=%s\n",
177 epoch, epoch->barrier_nr, atomic_read(&epoch->epoch_size),
178 atomic_read(&epoch->active),
179 test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) ? 'n' : '-',
180 test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags) ? 'b' : '-',
181 test_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags) ? 'i' : '-',
182 test_bit(DE_BARRIER_IN_NEXT_EPOCH_DONE, &epoch->flags) ? 'd' : '-',
183 epoch_event_str[ev]);
184 }
185}
186
187static void probe_drbd_netlink(void *data, int is_req)
188{
189 struct cn_msg *msg = data;
190
191 if (is_req) {
192 struct drbd_nl_cfg_req *nlp = (struct drbd_nl_cfg_req *)msg->data;
193
194 printk(KERN_INFO "drbd%d: "
195 "Netlink: << %s (%d) - seq: %x, ack: %x, len: %x\n",
196 nlp->drbd_minor,
197 nl_packet_name(nlp->packet_type),
198 nlp->packet_type,
199 msg->seq, msg->ack, msg->len);
200 } else {
201 struct drbd_nl_cfg_reply *nlp = (struct drbd_nl_cfg_reply *)msg->data;
202
203 printk(KERN_INFO "drbd%d: "
204 "Netlink: >> %s (%d) - seq: %x, ack: %x, len: %x\n",
205 nlp->minor,
206 nlp->packet_type == P_nl_after_last_packet ?
207 "Empty-Reply" : nl_packet_name(nlp->packet_type),
208 nlp->packet_type,
209 msg->seq, msg->ack, msg->len);
210 }
211}
212
213static void probe_drbd_actlog(struct drbd_conf *mdev, sector_t sector, char* msg)
214{
215 unsigned int enr = (sector >> (AL_EXTENT_SHIFT-9));
216
217 if (!is_mdev_trace(mdev, TRACE_LVL_ALWAYS))
218 return;
219
220 dev_info(DEV, "%s (sec=%llus, al_enr=%u, rs_enr=%d)\n",
221 msg, (unsigned long long) sector, enr,
222 (int)BM_SECT_TO_EXT(sector));
223}
224
225/**
226 * drbd_print_buffer() - Hexdump arbitrary binary data into a buffer
227 * @prefix: String is output at the beginning of each line output.
228 * @flags: Currently only defined flag: DBGPRINT_BUFFADDR; if set, each
229 * line starts with the virtual address of the line being
230 * output. If clear, each line starts with the offset from the
231 * beginning of the buffer.
232 * @size: Indicates the size of each entry in the buffer. Supported
233 * values are sizeof(char), sizeof(short) and sizeof(int)
234 * @buffer: Start address of buffer
235 * @buffer_va: Virtual address of start of buffer (normally the same
236 * as Buffer, but having it separate allows it to hold
237 * file address for example)
238 * @length: length of buffer
239 */
240static void drbd_print_buffer(const char *prefix, unsigned int flags, int size,
241 const void *buffer, const void *buffer_va,
242 unsigned int length)
243
244#define LINE_SIZE 16
245#define LINE_ENTRIES (int)(LINE_SIZE/size)
246{
247 const unsigned char *pstart;
248 const unsigned char *pstart_va;
249 const unsigned char *pend;
250 char bytes_str[LINE_SIZE*3+8], ascii_str[LINE_SIZE+8];
251 char *pbytes = bytes_str, *pascii = ascii_str;
252 int offset = 0;
253 long sizemask;
254 int field_width;
255 int index;
256 const unsigned char *pend_str;
257 const unsigned char *p;
258 int count;
259
260 /* verify size parameter */
261 if (size != sizeof(char) &&
262 size != sizeof(short) &&
263 size != sizeof(int)) {
264 printk(KERN_DEBUG "drbd_print_buffer: "
265 "ERROR invalid size %d\n", size);
266 return;
267 }
268
269 sizemask = size-1;
270 field_width = size*2;
271
272 /* Adjust start/end to be on appropriate boundary for size */
273 buffer = (const char *)((long)buffer & ~sizemask);
274 pend = (const unsigned char *)
275 (((long)buffer + length + sizemask) & ~sizemask);
276
277 if (flags & DBGPRINT_BUFFADDR) {
278 /* Move start back to nearest multiple of line size,
279 * if printing address. This results in nicely formatted output
280 * with addresses being on line size (16) byte boundaries */
281 pstart = (const unsigned char *)((long)buffer & ~(LINE_SIZE-1));
282 } else {
283 pstart = (const unsigned char *)buffer;
284 }
285
286 /* Set value of start VA to print if addresses asked for */
287 pstart_va = (const unsigned char *)buffer_va
288 - ((const unsigned char *)buffer-pstart);
289
290 /* Calculate end position to nicely align right hand side */
291 pend_str = pstart + (((pend-pstart) + LINE_SIZE-1) & ~(LINE_SIZE-1));
292
293 /* Init strings */
294 *pbytes = *pascii = '\0';
295
296 /* Start at beginning of first line */
297 p = pstart;
298 count = 0;
299
300 while (p < pend_str) {
301 if (p < (const unsigned char *)buffer || p >= pend) {
302 /* Before start of buffer or after end- print spaces */
303 pbytes += sprintf(pbytes, "%*c ", field_width, ' ');
304 pascii += sprintf(pascii, "%*c", size, ' ');
305 p += size;
306 } else {
307 /* Add hex and ascii to strings */
308 int val;
309 switch (size) {
310 default:
311 case 1:
312 val = *(unsigned char *)p;
313 break;
314 case 2:
315 val = *(unsigned short *)p;
316 break;
317 case 4:
318 val = *(unsigned int *)p;
319 break;
320 }
321
322 pbytes += sprintf(pbytes, "%0*x ", field_width, val);
323
324 for (index = size; index; index--) {
325 *pascii++ = isprint(*p) ? *p : '.';
326 p++;
327 }
328 }
329
330 count++;
331
332 if (count == LINE_ENTRIES || p >= pend_str) {
333 /* Null terminate and print record */
334 *pascii = '\0';
335 printk(KERN_DEBUG "%s%8.8lx: %*s|%*s|\n",
336 prefix,
337 (flags & DBGPRINT_BUFFADDR)
338 ? (long)pstart_va:(long)offset,
339 LINE_ENTRIES*(field_width+1), bytes_str,
340 LINE_SIZE, ascii_str);
341
342 /* Move onto next line */
343 pstart_va += (p-pstart);
344 pstart = p;
345 count = 0;
346 offset += LINE_SIZE;
347
348 /* Re-init strings */
349 pbytes = bytes_str;
350 pascii = ascii_str;
351 *pbytes = *pascii = '\0';
352 }
353 }
354}
355
356static void probe_drbd_resync(struct drbd_conf *mdev, int level, const char *fmt, va_list args)
357{
358 char str[256];
359
360 if (!is_mdev_trace(mdev, level))
361 return;
362
363 if (vsnprintf(str, 256, fmt, args) >= 256)
364 str[255] = 0;
365
366 printk(KERN_INFO "%s %s: %s", dev_driver_string(disk_to_dev(mdev->vdisk)),
367 dev_name(disk_to_dev(mdev->vdisk)), str);
368}
369
370static void probe_drbd_bio(struct drbd_conf *mdev, const char *pfx, struct bio *bio, int complete,
371 struct drbd_request *r)
372{
373#if defined(CONFIG_LBDAF) || defined(CONFIG_LBD)
374#define SECTOR_FORMAT "%Lx"
375#else
376#define SECTOR_FORMAT "%lx"
377#endif
378#define SECTOR_SHIFT 9
379
380 unsigned long lowaddr = (unsigned long)(bio->bi_sector << SECTOR_SHIFT);
381 char *faddr = (char *)(lowaddr);
382 char rb[sizeof(void *)*2+6] = { 0, };
383 struct bio_vec *bvec;
384 int segno;
385
386 const int rw = bio->bi_rw;
387 const int biorw = (rw & (RW_MASK|RWA_MASK));
388 const int biobarrier = (rw & (1<<BIO_RW_BARRIER));
389 const int biosync = (rw & ((1<<BIO_RW_UNPLUG) | (1<<BIO_RW_SYNCIO)));
390
391 if (!is_mdev_trace(mdev, TRACE_LVL_ALWAYS))
392 return;
393
394 if (r)
395 sprintf(rb, "Req:%p ", r);
396
397 dev_info(DEV, "%s %s:%s%s%s Bio:%p %s- %soffset " SECTOR_FORMAT ", size %x\n",
398 complete ? "<<<" : ">>>",
399 pfx,
400 biorw == WRITE ? "Write" : "Read",
401 biobarrier ? " : B" : "",
402 biosync ? " : S" : "",
403 bio,
404 rb,
405 complete ? (bio_flagged(bio, BIO_UPTODATE) ? "Success, " : "Failed, ") : "",
406 bio->bi_sector << SECTOR_SHIFT,
407 bio->bi_size);
408
409 if (trace_level >= TRACE_LVL_METRICS &&
410 ((biorw == WRITE) ^ complete)) {
411 printk(KERN_DEBUG " ind page offset length\n");
412 __bio_for_each_segment(bvec, bio, segno, 0) {
413 printk(KERN_DEBUG " [%d] %p %8.8x %8.8x\n", segno,
414 bvec->bv_page, bvec->bv_offset, bvec->bv_len);
415
416 if (trace_level >= TRACE_LVL_ALL) {
417 char *bvec_buf;
418 unsigned long flags;
419
420 bvec_buf = bvec_kmap_irq(bvec, &flags);
421
422 drbd_print_buffer(" ", DBGPRINT_BUFFADDR, 1,
423 bvec_buf,
424 faddr,
425 (bvec->bv_len <= 0x80)
426 ? bvec->bv_len : 0x80);
427
428 bvec_kunmap_irq(bvec_buf, &flags);
429
430 if (bvec->bv_len > 0x40)
431 printk(KERN_DEBUG " ....\n");
432
433 faddr += bvec->bv_len;
434 }
435 }
436 }
437}
438
439static void probe_drbd_req(struct drbd_request *req, enum drbd_req_event what, char *msg)
440{
441 static const char *rq_event_names[] = {
442 [created] = "created",
443 [to_be_send] = "to_be_send",
444 [to_be_submitted] = "to_be_submitted",
445 [queue_for_net_write] = "queue_for_net_write",
446 [queue_for_net_read] = "queue_for_net_read",
447 [send_canceled] = "send_canceled",
448 [send_failed] = "send_failed",
449 [handed_over_to_network] = "handed_over_to_network",
450 [connection_lost_while_pending] =
451 "connection_lost_while_pending",
452 [recv_acked_by_peer] = "recv_acked_by_peer",
453 [write_acked_by_peer] = "write_acked_by_peer",
454 [neg_acked] = "neg_acked",
455 [conflict_discarded_by_peer] = "conflict_discarded_by_peer",
456 [barrier_acked] = "barrier_acked",
457 [data_received] = "data_received",
458 [read_completed_with_error] = "read_completed_with_error",
459 [read_ahead_completed_with_error] = "reada_completed_with_error",
460 [write_completed_with_error] = "write_completed_with_error",
461 [completed_ok] = "completed_ok",
462 };
463
464 struct drbd_conf *mdev = req->mdev;
465
466 const int rw = (req->master_bio == NULL ||
467 bio_data_dir(req->master_bio) == WRITE) ?
468 'W' : 'R';
469 const unsigned long s = req->rq_state;
470
471 if (what != nothing) {
472 dev_info(DEV, "__req_mod(%p %c ,%s)\n", req, rw, rq_event_names[what]);
473 } else {
474 dev_info(DEV, "%s %p %c L%c%c%cN%c%c%c%c%c %u (%llus +%u) %s\n",
475 msg, req, rw,
476 s & RQ_LOCAL_PENDING ? 'p' : '-',
477 s & RQ_LOCAL_COMPLETED ? 'c' : '-',
478 s & RQ_LOCAL_OK ? 'o' : '-',
479 s & RQ_NET_PENDING ? 'p' : '-',
480 s & RQ_NET_QUEUED ? 'q' : '-',
481 s & RQ_NET_SENT ? 's' : '-',
482 s & RQ_NET_DONE ? 'd' : '-',
483 s & RQ_NET_OK ? 'o' : '-',
484 req->epoch,
485 (unsigned long long)req->sector,
486 req->size,
487 drbd_conn_str(mdev->state.conn));
488 }
489}
490
491
492#define drbd_peer_str drbd_role_str
493#define drbd_pdsk_str drbd_disk_str
494
495#define PSM(A) \
496do { \
497 if (mask.A) { \
498 int i = snprintf(p, len, " " #A "( %s )", \
499 drbd_##A##_str(val.A)); \
500 if (i >= len) \
501 return op; \
502 p += i; \
503 len -= i; \
504 } \
505} while (0)
506
507static char *dump_st(char *p, int len, union drbd_state mask, union drbd_state val)
508{
509 char *op = p;
510 *p = '\0';
511 PSM(role);
512 PSM(peer);
513 PSM(conn);
514 PSM(disk);
515 PSM(pdsk);
516
517 return op;
518}
519
520#define INFOP(fmt, args...) \
521do { \
522 if (trace_level >= TRACE_LVL_ALL) { \
523 dev_info(DEV, "%s:%d: %s [%d] %s %s " fmt , \
524 file, line, current->comm, current->pid, \
525 sockname, recv ? "<<<" : ">>>" , \
526 ## args); \
527 } else { \
528 dev_info(DEV, "%s %s " fmt, sockname, \
529 recv ? "<<<" : ">>>" , \
530 ## args); \
531 } \
532} while (0)
533
534static char *_dump_block_id(u64 block_id, char *buff)
535{
536 if (is_syncer_block_id(block_id))
537 strcpy(buff, "SyncerId");
538 else
539 sprintf(buff, "%llx", (unsigned long long)block_id);
540
541 return buff;
542}
543
544static void probe_drbd_packet(struct drbd_conf *mdev, struct socket *sock,
545 int recv, union p_polymorph *p, char *file, int line)
546{
547 char *sockname = sock == mdev->meta.socket ? "meta" : "data";
548 int cmd = (recv == 2) ? p->header.command : be16_to_cpu(p->header.command);
549 char tmp[300];
550 union drbd_state m, v;
551
552 switch (cmd) {
553 case P_HAND_SHAKE:
554 INFOP("%s (protocol %u-%u)\n", cmdname(cmd),
555 be32_to_cpu(p->handshake.protocol_min),
556 be32_to_cpu(p->handshake.protocol_max));
557 break;
558
559 case P_BITMAP: /* don't report this */
560 case P_COMPRESSED_BITMAP: /* don't report this */
561 break;
562
563 case P_DATA:
564 INFOP("%s (sector %llus, id %s, seq %u, f %x)\n", cmdname(cmd),
565 (unsigned long long)be64_to_cpu(p->data.sector),
566 _dump_block_id(p->data.block_id, tmp),
567 be32_to_cpu(p->data.seq_num),
568 be32_to_cpu(p->data.dp_flags)
569 );
570 break;
571
572 case P_DATA_REPLY:
573 case P_RS_DATA_REPLY:
574 INFOP("%s (sector %llus, id %s)\n", cmdname(cmd),
575 (unsigned long long)be64_to_cpu(p->data.sector),
576 _dump_block_id(p->data.block_id, tmp)
577 );
578 break;
579
580 case P_RECV_ACK:
581 case P_WRITE_ACK:
582 case P_RS_WRITE_ACK:
583 case P_DISCARD_ACK:
584 case P_NEG_ACK:
585 case P_NEG_RS_DREPLY:
586 INFOP("%s (sector %llus, size %u, id %s, seq %u)\n",
587 cmdname(cmd),
588 (long long)be64_to_cpu(p->block_ack.sector),
589 be32_to_cpu(p->block_ack.blksize),
590 _dump_block_id(p->block_ack.block_id, tmp),
591 be32_to_cpu(p->block_ack.seq_num)
592 );
593 break;
594
595 case P_DATA_REQUEST:
596 case P_RS_DATA_REQUEST:
597 INFOP("%s (sector %llus, size %u, id %s)\n", cmdname(cmd),
598 (long long)be64_to_cpu(p->block_req.sector),
599 be32_to_cpu(p->block_req.blksize),
600 _dump_block_id(p->block_req.block_id, tmp)
601 );
602 break;
603
604 case P_BARRIER:
605 case P_BARRIER_ACK:
606 INFOP("%s (barrier %u)\n", cmdname(cmd), p->barrier.barrier);
607 break;
608
609 case P_SYNC_PARAM:
610 case P_SYNC_PARAM89:
611 INFOP("%s (rate %u, verify-alg \"%.64s\", csums-alg \"%.64s\")\n",
612 cmdname(cmd), be32_to_cpu(p->rs_param_89.rate),
613 p->rs_param_89.verify_alg, p->rs_param_89.csums_alg);
614 break;
615
616 case P_UUIDS:
617 INFOP("%s Curr:%016llX, Bitmap:%016llX, "
618 "HisSt:%016llX, HisEnd:%016llX\n",
619 cmdname(cmd),
620 (unsigned long long)be64_to_cpu(p->uuids.uuid[UI_CURRENT]),
621 (unsigned long long)be64_to_cpu(p->uuids.uuid[UI_BITMAP]),
622 (unsigned long long)be64_to_cpu(p->uuids.uuid[UI_HISTORY_START]),
623 (unsigned long long)be64_to_cpu(p->uuids.uuid[UI_HISTORY_END]));
624 break;
625
626 case P_SIZES:
627 INFOP("%s (d %lluMiB, u %lluMiB, c %lldMiB, "
628 "max bio %x, q order %x)\n",
629 cmdname(cmd),
630 (long long)(be64_to_cpu(p->sizes.d_size)>>(20-9)),
631 (long long)(be64_to_cpu(p->sizes.u_size)>>(20-9)),
632 (long long)(be64_to_cpu(p->sizes.c_size)>>(20-9)),
633 be32_to_cpu(p->sizes.max_segment_size),
634 be32_to_cpu(p->sizes.queue_order_type));
635 break;
636
637 case P_STATE:
638 v.i = be32_to_cpu(p->state.state);
639 m.i = 0xffffffff;
640 dump_st(tmp, sizeof(tmp), m, v);
641 INFOP("%s (s %x {%s})\n", cmdname(cmd), v.i, tmp);
642 break;
643
644 case P_STATE_CHG_REQ:
645 m.i = be32_to_cpu(p->req_state.mask);
646 v.i = be32_to_cpu(p->req_state.val);
647 dump_st(tmp, sizeof(tmp), m, v);
648 INFOP("%s (m %x v %x {%s})\n", cmdname(cmd), m.i, v.i, tmp);
649 break;
650
651 case P_STATE_CHG_REPLY:
652 INFOP("%s (ret %x)\n", cmdname(cmd),
653 be32_to_cpu(p->req_state_reply.retcode));
654 break;
655
656 case P_PING:
657 case P_PING_ACK:
658 /*
659	 * Don't trace pings at summary level
660 */
661 if (trace_level < TRACE_LVL_ALL)
662 break;
663 /* fall through... */
664 default:
665 INFOP("%s (%u)\n", cmdname(cmd), cmd);
666 break;
667 }
668}
669
670
671static int __init drbd_trace_init(void)
672{
673 int ret;
674
675 if (trace_mask & TRACE_UNPLUG) {
676 ret = register_trace_drbd_unplug(probe_drbd_unplug);
677 WARN_ON(ret);
678 }
679 if (trace_mask & TRACE_UUID) {
680 ret = register_trace_drbd_uuid(probe_drbd_uuid);
681 WARN_ON(ret);
682 }
683 if (trace_mask & TRACE_EE) {
684 ret = register_trace_drbd_ee(probe_drbd_ee);
685 WARN_ON(ret);
686 }
687 if (trace_mask & TRACE_PACKET) {
688 ret = register_trace_drbd_packet(probe_drbd_packet);
689 WARN_ON(ret);
690 }
691 if (trace_mask & TRACE_MD_IO) {
692 ret = register_trace_drbd_md_io(probe_drbd_md_io);
693 WARN_ON(ret);
694 }
695 if (trace_mask & TRACE_EPOCH) {
696 ret = register_trace_drbd_epoch(probe_drbd_epoch);
697 WARN_ON(ret);
698 }
699 if (trace_mask & TRACE_NL) {
700 ret = register_trace_drbd_netlink(probe_drbd_netlink);
701 WARN_ON(ret);
702 }
703 if (trace_mask & TRACE_AL_EXT) {
704 ret = register_trace_drbd_actlog(probe_drbd_actlog);
705 WARN_ON(ret);
706 }
707 if (trace_mask & TRACE_RQ) {
708 ret = register_trace_drbd_bio(probe_drbd_bio);
709 WARN_ON(ret);
710 }
711 if (trace_mask & TRACE_INT_RQ) {
712 ret = register_trace_drbd_req(probe_drbd_req);
713 WARN_ON(ret);
714 }
715 if (trace_mask & TRACE_RESYNC) {
716 ret = register_trace__drbd_resync(probe_drbd_resync);
717 WARN_ON(ret);
718 }
719 return 0;
720}
721
722module_init(drbd_trace_init);
723
724static void __exit drbd_trace_exit(void)
725{
726 if (trace_mask & TRACE_UNPLUG)
727 unregister_trace_drbd_unplug(probe_drbd_unplug);
728 if (trace_mask & TRACE_UUID)
729 unregister_trace_drbd_uuid(probe_drbd_uuid);
730 if (trace_mask & TRACE_EE)
731 unregister_trace_drbd_ee(probe_drbd_ee);
732 if (trace_mask & TRACE_PACKET)
733 unregister_trace_drbd_packet(probe_drbd_packet);
734 if (trace_mask & TRACE_MD_IO)
735 unregister_trace_drbd_md_io(probe_drbd_md_io);
736 if (trace_mask & TRACE_EPOCH)
737 unregister_trace_drbd_epoch(probe_drbd_epoch);
738 if (trace_mask & TRACE_NL)
739 unregister_trace_drbd_netlink(probe_drbd_netlink);
740 if (trace_mask & TRACE_AL_EXT)
741 unregister_trace_drbd_actlog(probe_drbd_actlog);
742 if (trace_mask & TRACE_RQ)
743 unregister_trace_drbd_bio(probe_drbd_bio);
744 if (trace_mask & TRACE_INT_RQ)
745 unregister_trace_drbd_req(probe_drbd_req);
746 if (trace_mask & TRACE_RESYNC)
747 unregister_trace__drbd_resync(probe_drbd_resync);
748
749 tracepoint_synchronize_unregister();
750}
751
752module_exit(drbd_trace_exit);
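
[Editor's note, not part of the commit: the removed drbd_trace module above follows the generic tracepoint-probe pattern of this kernel generation — one probe function per tracepoint, attached with register_trace_<name>() at module init and detached at module exit, followed by tracepoint_synchronize_unregister() so no probe call is still in flight when the module text is freed. A minimal, hedged sketch of that pattern is below; the "subsys_event" tracepoint, its header, and the probe are hypothetical illustrations, not DRBD code.]

/* Sketch only: assumes a tracepoint declared elsewhere as
 *   DECLARE_TRACE(subsys_event, TP_PROTO(unsigned int nr), TP_ARGS(nr));
 * and instantiated once with DEFINE_TRACE(subsys_event). */
#include <linux/module.h>
#include <linux/tracepoint.h>
#include "subsys_trace.h"	/* hypothetical header carrying the DECLARE_TRACE() */

static void probe_subsys_event(unsigned int nr)
{
	/* runs in the caller's context each time trace_subsys_event(nr) fires */
	pr_info("subsys_event: %u\n", nr);
}

static int __init subsys_trace_init(void)
{
	/* attach the probe; returns 0 on success, nonzero on failure */
	return register_trace_subsys_event(probe_subsys_event);
}

static void __exit subsys_trace_exit(void)
{
	unregister_trace_subsys_event(probe_subsys_event);
	/* wait for in-flight probe invocations before the module goes away */
	tracepoint_synchronize_unregister();
}

module_init(subsys_trace_init);
module_exit(subsys_trace_exit);
MODULE_LICENSE("GPL");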
diff --git a/drivers/block/drbd/drbd_tracing.h b/drivers/block/drbd/drbd_tracing.h
deleted file mode 100644
index c4531a137f65..000000000000
--- a/drivers/block/drbd/drbd_tracing.h
+++ /dev/null
@@ -1,87 +0,0 @@
1/*
2 drbd_tracing.h
3
4 This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5
6 Copyright (C) 2003-2008, LINBIT Information Technologies GmbH.
7 Copyright (C) 2003-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8 Copyright (C) 2003-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9
10 drbd is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 2, or (at your option)
13 any later version.
14
15 drbd is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with drbd; see the file COPYING. If not, write to
22 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23
24 */
25
26#ifndef DRBD_TRACING_H
27#define DRBD_TRACING_H
28
29#include <linux/tracepoint.h>
30#include "drbd_int.h"
31#include "drbd_req.h"
32
33enum {
34 TRACE_LVL_ALWAYS = 0,
35 TRACE_LVL_SUMMARY,
36 TRACE_LVL_METRICS,
37 TRACE_LVL_ALL,
38 TRACE_LVL_MAX
39};
40
41DECLARE_TRACE(drbd_unplug,
42 TP_PROTO(struct drbd_conf *mdev, char* msg),
43 TP_ARGS(mdev, msg));
44
45DECLARE_TRACE(drbd_uuid,
46 TP_PROTO(struct drbd_conf *mdev, enum drbd_uuid_index index),
47 TP_ARGS(mdev, index));
48
49DECLARE_TRACE(drbd_ee,
50 TP_PROTO(struct drbd_conf *mdev, struct drbd_epoch_entry *e, char* msg),
51 TP_ARGS(mdev, e, msg));
52
53DECLARE_TRACE(drbd_md_io,
54 TP_PROTO(struct drbd_conf *mdev, int rw, struct drbd_backing_dev *bdev),
55 TP_ARGS(mdev, rw, bdev));
56
57DECLARE_TRACE(drbd_epoch,
58 TP_PROTO(struct drbd_conf *mdev, struct drbd_epoch *epoch, enum epoch_event ev),
59 TP_ARGS(mdev, epoch, ev));
60
61DECLARE_TRACE(drbd_netlink,
62 TP_PROTO(void *data, int is_req),
63 TP_ARGS(data, is_req));
64
65DECLARE_TRACE(drbd_actlog,
66 TP_PROTO(struct drbd_conf *mdev, sector_t sector, char* msg),
67 TP_ARGS(mdev, sector, msg));
68
69DECLARE_TRACE(drbd_bio,
70 TP_PROTO(struct drbd_conf *mdev, const char *pfx, struct bio *bio, int complete,
71 struct drbd_request *r),
72 TP_ARGS(mdev, pfx, bio, complete, r));
73
74DECLARE_TRACE(drbd_req,
75 TP_PROTO(struct drbd_request *req, enum drbd_req_event what, char *msg),
76 TP_ARGS(req, what, msg));
77
78DECLARE_TRACE(drbd_packet,
79 TP_PROTO(struct drbd_conf *mdev, struct socket *sock,
80 int recv, union p_polymorph *p, char *file, int line),
81 TP_ARGS(mdev, sock, recv, p, file, line));
82
83DECLARE_TRACE(_drbd_resync,
84 TP_PROTO(struct drbd_conf *mdev, int level, const char *fmt, va_list args),
85 TP_ARGS(mdev, level, fmt, args));
86
87#endif
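
[Editor's note, not part of the commit: a header like the removed drbd_tracing.h only declares the tracepoints. Each DECLARE_TRACE() needs exactly one matching DEFINE_TRACE() in a compilation unit of the traced code, and call sites fire the event through the generated trace_<name>() helper, which is effectively a no-op while no probe is registered. A compressed, hypothetical sketch of that pairing, collapsed into one file for illustration:]

/* Hypothetical names; not DRBD code. */
#include <linux/tracepoint.h>

/* normally kept in a shared header, cf. drbd_tracing.h above */
DECLARE_TRACE(my_event,
	TP_PROTO(unsigned int nr),
	TP_ARGS(nr));

/* exactly one .c file of the traced code instantiates the tracepoint */
DEFINE_TRACE(my_event);

/* any caller that sees the declaration can fire the event */
void do_work(unsigned int nr)
{
	trace_my_event(nr);	/* no-op unless a probe module has registered one */
}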
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 34a4b3ef6c0e..ed8796f1112d 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -40,7 +40,6 @@
40 40
41#include "drbd_int.h" 41#include "drbd_int.h"
42#include "drbd_req.h" 42#include "drbd_req.h"
43#include "drbd_tracing.h"
44 43
45#define SLEEP_TIME (HZ/10) 44#define SLEEP_TIME (HZ/10)
46 45
@@ -82,8 +81,6 @@ void drbd_md_io_complete(struct bio *bio, int error)
82 md_io = (struct drbd_md_io *)bio->bi_private; 81 md_io = (struct drbd_md_io *)bio->bi_private;
83 md_io->error = error; 82 md_io->error = error;
84 83
85 trace_drbd_bio(md_io->mdev, "Md", bio, 1, NULL);
86
87 complete(&md_io->event); 84 complete(&md_io->event);
88} 85}
89 86
@@ -114,8 +111,6 @@ void drbd_endio_read_sec(struct bio *bio, int error) __releases(local)
114 111
115 D_ASSERT(e->block_id != ID_VACANT); 112 D_ASSERT(e->block_id != ID_VACANT);
116 113
117 trace_drbd_bio(mdev, "Sec", bio, 1, NULL);
118
119 spin_lock_irqsave(&mdev->req_lock, flags); 114 spin_lock_irqsave(&mdev->req_lock, flags);
120 mdev->read_cnt += e->size >> 9; 115 mdev->read_cnt += e->size >> 9;
121 list_del(&e->w.list); 116 list_del(&e->w.list);
@@ -126,8 +121,6 @@ void drbd_endio_read_sec(struct bio *bio, int error) __releases(local)
126 drbd_chk_io_error(mdev, error, FALSE); 121 drbd_chk_io_error(mdev, error, FALSE);
127 drbd_queue_work(&mdev->data.work, &e->w); 122 drbd_queue_work(&mdev->data.work, &e->w);
128 put_ldev(mdev); 123 put_ldev(mdev);
129
130 trace_drbd_ee(mdev, e, "read completed");
131} 124}
132 125
133/* writes on behalf of the partner, or resync writes, 126/* writes on behalf of the partner, or resync writes,
@@ -176,8 +169,6 @@ void drbd_endio_write_sec(struct bio *bio, int error) __releases(local)
176 169
177 D_ASSERT(e->block_id != ID_VACANT); 170 D_ASSERT(e->block_id != ID_VACANT);
178 171
179 trace_drbd_bio(mdev, "Sec", bio, 1, NULL);
180
181 spin_lock_irqsave(&mdev->req_lock, flags); 172 spin_lock_irqsave(&mdev->req_lock, flags);
182 mdev->writ_cnt += e->size >> 9; 173 mdev->writ_cnt += e->size >> 9;
183 is_syncer_req = is_syncer_block_id(e->block_id); 174 is_syncer_req = is_syncer_block_id(e->block_id);
@@ -192,8 +183,6 @@ void drbd_endio_write_sec(struct bio *bio, int error) __releases(local)
192 list_del(&e->w.list); /* has been on active_ee or sync_ee */ 183 list_del(&e->w.list); /* has been on active_ee or sync_ee */
193 list_add_tail(&e->w.list, &mdev->done_ee); 184 list_add_tail(&e->w.list, &mdev->done_ee);
194 185
195 trace_drbd_ee(mdev, e, "write completed");
196
197 /* No hlist_del_init(&e->colision) here, we did not send the Ack yet, 186 /* No hlist_del_init(&e->colision) here, we did not send the Ack yet,
198 * neither did we wake possibly waiting conflicting requests. 187 * neither did we wake possibly waiting conflicting requests.
199 * done from "drbd_process_done_ee" within the appropriate w.cb 188 * done from "drbd_process_done_ee" within the appropriate w.cb
@@ -244,8 +233,6 @@ void drbd_endio_pri(struct bio *bio, int error)
244 error = -EIO; 233 error = -EIO;
245 } 234 }
246 235
247 trace_drbd_bio(mdev, "Pri", bio, 1, NULL);
248
249 /* to avoid recursion in __req_mod */ 236 /* to avoid recursion in __req_mod */
250 if (unlikely(error)) { 237 if (unlikely(error)) {
251 what = (bio_data_dir(bio) == WRITE) 238 what = (bio_data_dir(bio) == WRITE)
@@ -1321,9 +1308,6 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
1321 return; 1308 return;
1322 } 1309 }
1323 1310
1324 trace_drbd_resync(mdev, TRACE_LVL_SUMMARY, "Resync starting: side=%s\n",
1325 side == C_SYNC_TARGET ? "SyncTarget" : "SyncSource");
1326
1327 /* In case a previous resync run was aborted by an IO error/detach on the peer. */ 1311 /* In case a previous resync run was aborted by an IO error/detach on the peer. */
1328 drbd_rs_cancel_all(mdev); 1312 drbd_rs_cancel_all(mdev);
1329 1313