Diffstat (limited to 'fs')
-rw-r--r--  fs/gfs2/Makefile         1
-rw-r--r--  fs/gfs2/bmap.c           3
-rw-r--r--  fs/gfs2/glock.c         12
-rw-r--r--  fs/gfs2/log.c            9
-rw-r--r--  fs/gfs2/lops.c           3
-rw-r--r--  fs/gfs2/ops_fstype.c     2
-rw-r--r--  fs/gfs2/rgrp.c          11
-rw-r--r--  fs/gfs2/super.c          4
-rw-r--r--  fs/gfs2/trace_gfs2.h   407
9 files changed, 442 insertions, 10 deletions
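
The new tracepoints are grouped under a single ftrace TRACE_SYSTEM named gfs2 (defined in the new trace_gfs2.h below), so on a kernel carrying this change with tracing enabled they can be switched on at runtime like any other event group, e.g. by writing 1 to /sys/kernel/debug/tracing/events/gfs2/enable and reading /sys/kernel/debug/tracing/trace_pipe (illustrative usage, not part of the diff; it assumes debugfs is mounted at the conventional /sys/kernel/debug path).
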
diff --git a/fs/gfs2/Makefile b/fs/gfs2/Makefile
index d53a9bea1c2f..3da2f1f4f738 100644
--- a/fs/gfs2/Makefile
+++ b/fs/gfs2/Makefile
@@ -1,3 +1,4 @@
+EXTRA_CFLAGS := -I$(src)
 obj-$(CONFIG_GFS2_FS) += gfs2.o
 gfs2-y := acl.o bmap.o dir.o eaops.o eattr.o glock.o \
 	glops.o inode.o log.o lops.o main.o meta_io.o \
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 329763530dc0..6d47379e794b 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -25,6 +25,7 @@
 #include "trans.h"
 #include "dir.h"
 #include "util.h"
+#include "trace_gfs2.h"
 
 /* This doesn't need to be that large as max 64 bit pointers in a 4k
  * block is 512, so __u16 is fine for that. It saves stack space to
@@ -589,6 +590,7 @@ int gfs2_block_map(struct inode *inode, sector_t lblock,
 	clear_buffer_mapped(bh_map);
 	clear_buffer_new(bh_map);
 	clear_buffer_boundary(bh_map);
+	trace_gfs2_bmap(ip, bh_map, lblock, create, 1);
 	if (gfs2_is_dir(ip)) {
 		bsize = sdp->sd_jbsize;
 		arr = sdp->sd_jheightsize;
@@ -623,6 +625,7 @@ int gfs2_block_map(struct inode *inode, sector_t lblock,
 	ret = 0;
 out:
 	release_metapath(&mp);
+	trace_gfs2_bmap(ip, bh_map, lblock, create, ret);
 	bmap_unlock(ip, create);
 	return ret;
 
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 2bf62bcc5181..297421c0427a 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -39,6 +39,8 @@
 #include "super.h"
 #include "util.h"
 #include "bmap.h"
+#define CREATE_TRACE_POINTS
+#include "trace_gfs2.h"
 
 struct gfs2_gl_hash_bucket {
 	struct hlist_head hb_list;
@@ -155,7 +157,7 @@ static void glock_free(struct gfs2_glock *gl)
 
 	if (aspace)
 		gfs2_aspace_put(aspace);
-
+	trace_gfs2_glock_put(gl);
 	sdp->sd_lockstruct.ls_ops->lm_put_lock(gfs2_glock_cachep, gl);
 }
 
@@ -317,14 +319,17 @@ restart:
 						return 2;
 					gh->gh_error = ret;
 					list_del_init(&gh->gh_list);
+					trace_gfs2_glock_queue(gh, 0);
 					gfs2_holder_wake(gh);
 					goto restart;
 				}
 				set_bit(HIF_HOLDER, &gh->gh_iflags);
+				trace_gfs2_promote(gh, 1);
 				gfs2_holder_wake(gh);
 				goto restart;
 			}
 			set_bit(HIF_HOLDER, &gh->gh_iflags);
+			trace_gfs2_promote(gh, 0);
 			gfs2_holder_wake(gh);
 			continue;
 		}
@@ -354,6 +359,7 @@ static inline void do_error(struct gfs2_glock *gl, const int ret)
 		else
 			continue;
 		list_del_init(&gh->gh_list);
+		trace_gfs2_glock_queue(gh, 0);
 		gfs2_holder_wake(gh);
 	}
 }
@@ -422,6 +428,7 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
 	int rv;
 
 	spin_lock(&gl->gl_spin);
+	trace_gfs2_glock_state_change(gl, state);
 	state_change(gl, state);
 	gh = find_first_waiter(gl);
 
@@ -851,6 +858,7 @@ static void handle_callback(struct gfs2_glock *gl, unsigned int state,
 			gl->gl_demote_state != state) {
 		gl->gl_demote_state = LM_ST_UNLOCKED;
 	}
+	trace_gfs2_demote_rq(gl);
 }
 
 /**
@@ -936,6 +944,7 @@ fail:
 			goto do_cancel;
 		return;
 	}
+	trace_gfs2_glock_queue(gh, 1);
 	list_add_tail(&gh->gh_list, insert_pt);
 do_cancel:
 	gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
@@ -1032,6 +1041,7 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
 		    !test_bit(GLF_DEMOTE, &gl->gl_flags))
 			fast_path = 1;
 	}
+	trace_gfs2_glock_queue(gh, 0);
 	spin_unlock(&gl->gl_spin);
 	if (likely(fast_path))
 		return;
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index f2e449c595b4..13c6237c5f67 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -28,6 +28,7 @@
 #include "meta_io.h"
 #include "util.h"
 #include "dir.h"
+#include "trace_gfs2.h"
 
 #define PULL 1
 
@@ -313,6 +314,7 @@ int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
 		gfs2_log_lock(sdp);
 	}
 	atomic_sub(blks, &sdp->sd_log_blks_free);
+	trace_gfs2_log_blocks(sdp, -blks);
 	gfs2_log_unlock(sdp);
 	mutex_unlock(&sdp->sd_log_reserve_mutex);
 
@@ -333,6 +335,7 @@ void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks)
 
 	gfs2_log_lock(sdp);
 	atomic_add(blks, &sdp->sd_log_blks_free);
+	trace_gfs2_log_blocks(sdp, blks);
 	gfs2_assert_withdraw(sdp,
 			     atomic_read(&sdp->sd_log_blks_free) <= sdp->sd_jdesc->jd_blocks);
 	gfs2_log_unlock(sdp);
@@ -558,6 +561,7 @@ static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail)
 
 	gfs2_log_lock(sdp);
 	atomic_add(dist, &sdp->sd_log_blks_free);
+	trace_gfs2_log_blocks(sdp, dist);
 	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <= sdp->sd_jdesc->jd_blocks);
 	gfs2_log_unlock(sdp);
 
@@ -715,6 +719,7 @@ void __gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl)
 		up_write(&sdp->sd_log_flush_lock);
 		return;
 	}
+	trace_gfs2_log_flush(sdp, 1);
 
 	ai = kzalloc(sizeof(struct gfs2_ail), GFP_NOFS | __GFP_NOFAIL);
 	INIT_LIST_HEAD(&ai->ai_ail1_list);
@@ -746,6 +751,7 @@ void __gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl)
 	else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle){
 		gfs2_log_lock(sdp);
 		atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
+		trace_gfs2_log_blocks(sdp, -1);
 		gfs2_log_unlock(sdp);
 		log_write_header(sdp, 0, PULL);
 	}
@@ -763,7 +769,7 @@ void __gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl)
 		ai = NULL;
 	}
 	gfs2_log_unlock(sdp);
-
+	trace_gfs2_log_flush(sdp, 0);
 	up_write(&sdp->sd_log_flush_lock);
 
 	kfree(ai);
@@ -787,6 +793,7 @@ static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
 	gfs2_assert_withdraw(sdp, sdp->sd_log_blks_reserved + tr->tr_reserved >= reserved);
 	unused = sdp->sd_log_blks_reserved - reserved + tr->tr_reserved;
 	atomic_add(unused, &sdp->sd_log_blks_free);
+	trace_gfs2_log_blocks(sdp, unused);
 	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
 			     sdp->sd_jdesc->jd_blocks);
 	sdp->sd_log_blks_reserved = reserved;
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index 00315f50fa46..9969ff062c5b 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -27,6 +27,7 @@
 #include "rgrp.h"
 #include "trans.h"
 #include "util.h"
+#include "trace_gfs2.h"
 
 /**
  * gfs2_pin - Pin a buffer in memory
@@ -53,6 +54,7 @@ static void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
 	if (bd->bd_ail)
 		list_move(&bd->bd_ail_st_list, &bd->bd_ail->ai_ail2_list);
 	get_bh(bh);
+	trace_gfs2_pin(bd, 1);
 }
 
 /**
@@ -89,6 +91,7 @@ static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
 	bd->bd_ail = ai;
 	list_add(&bd->bd_ail_st_list, &ai->ai_ail1_list);
 	clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
+	trace_gfs2_pin(bd, 0);
 	gfs2_log_unlock(sdp);
 	unlock_buffer(bh);
 }
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index cc34f271b3e7..7bc3c45cd676 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -33,6 +33,7 @@
 #include "log.h"
 #include "quota.h"
 #include "dir.h"
+#include "trace_gfs2.h"
 
 #define DO 0
 #define UNDO 1
@@ -775,6 +776,7 @@ static int init_journal(struct gfs2_sbd *sdp, int undo)
 		/* Map the extents for this journal's blocks */
 		map_journal_extents(sdp);
 	}
+	trace_gfs2_log_blocks(sdp, atomic_read(&sdp->sd_log_blks_free));
 
 	if (sdp->sd_lockstruct.ls_first) {
 		unsigned int x;
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index de3239731db8..daa4ae341a29 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -29,6 +29,7 @@
 #include "util.h"
 #include "log.h"
 #include "inode.h"
+#include "trace_gfs2.h"
 
 #define BFITNOENT ((u32)~0)
 #define NO_BLOCK ((u64)~0)
@@ -1519,7 +1520,7 @@ int gfs2_alloc_block(struct gfs2_inode *ip, u64 *bn, unsigned int *n)
 	spin_lock(&sdp->sd_rindex_spin);
 	rgd->rd_free_clone -= *n;
 	spin_unlock(&sdp->sd_rindex_spin);
-
+	trace_gfs2_block_alloc(ip, block, *n, GFS2_BLKST_USED);
 	*bn = block;
 	return 0;
 
@@ -1571,7 +1572,7 @@ u64 gfs2_alloc_di(struct gfs2_inode *dip, u64 *generation)
 	spin_lock(&sdp->sd_rindex_spin);
 	rgd->rd_free_clone--;
 	spin_unlock(&sdp->sd_rindex_spin);
-
+	trace_gfs2_block_alloc(dip, block, 1, GFS2_BLKST_DINODE);
 	return block;
 }
 
@@ -1591,7 +1592,7 @@ void gfs2_free_data(struct gfs2_inode *ip, u64 bstart, u32 blen)
 	rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE);
 	if (!rgd)
 		return;
-
+	trace_gfs2_block_alloc(ip, bstart, blen, GFS2_BLKST_FREE);
 	rgd->rd_free += blen;
 
 	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
@@ -1619,7 +1620,7 @@ void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen)
 	rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE);
 	if (!rgd)
 		return;
-
+	trace_gfs2_block_alloc(ip, bstart, blen, GFS2_BLKST_FREE);
 	rgd->rd_free += blen;
 
 	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
@@ -1642,6 +1643,7 @@ void gfs2_unlink_di(struct inode *inode)
 	rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_UNLINKED);
 	if (!rgd)
 		return;
+	trace_gfs2_block_alloc(ip, blkno, 1, GFS2_BLKST_UNLINKED);
 	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
 	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
 	gfs2_trans_add_rg(rgd);
@@ -1673,6 +1675,7 @@ static void gfs2_free_uninit_di(struct gfs2_rgrpd *rgd, u64 blkno)
 void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
 {
 	gfs2_free_uninit_di(rgd, ip->i_no_addr);
+	trace_gfs2_block_alloc(ip, ip->i_no_addr, 1, GFS2_BLKST_FREE);
 	gfs2_quota_change(ip, -1, ip->i_inode.i_uid, ip->i_inode.i_gid);
 	gfs2_meta_wipe(ip, ip->i_no_addr, 1);
 }
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index c8930b31cdf0..0a6801336470 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -719,8 +719,6 @@ static void gfs2_put_super(struct super_block *sb)
 	int error;
 	struct gfs2_jdesc *jd;
 
-	lock_kernel();
-
 	/* Unfreeze the filesystem, if we need to */
 
 	mutex_lock(&sdp->sd_freeze_lock);
@@ -787,8 +785,6 @@ restart:
 
 	/* At this point, we're through participating in the lockspace */
 	gfs2_sys_fs_del(sdp);
-
-	unlock_kernel();
 }
 
 /**
diff --git a/fs/gfs2/trace_gfs2.h b/fs/gfs2/trace_gfs2.h
new file mode 100644
index 000000000000..98d6ef1c1dc0
--- /dev/null
+++ b/fs/gfs2/trace_gfs2.h
@@ -0,0 +1,407 @@
+#if !defined(_TRACE_GFS2_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_GFS2_H
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM gfs2
+#define TRACE_INCLUDE_FILE trace_gfs2
+
+#include <linux/fs.h>
+#include <linux/buffer_head.h>
+#include <linux/dlmconstants.h>
+#include <linux/gfs2_ondisk.h>
+#include "incore.h"
+#include "glock.h"
+
+#define dlm_state_name(nn) { DLM_LOCK_##nn, #nn }
+#define glock_trace_name(x) __print_symbolic(x,		\
+			    dlm_state_name(IV),		\
+			    dlm_state_name(NL),		\
+			    dlm_state_name(CR),		\
+			    dlm_state_name(CW),		\
+			    dlm_state_name(PR),		\
+			    dlm_state_name(PW),		\
+			    dlm_state_name(EX))
+
+#define block_state_name(x) __print_symbolic(x,			\
+			    { GFS2_BLKST_FREE, "free" },	\
+			    { GFS2_BLKST_USED, "used" },	\
+			    { GFS2_BLKST_DINODE, "dinode" },	\
+			    { GFS2_BLKST_UNLINKED, "unlinked" })
+
+#define show_glock_flags(flags) __print_flags(flags, "",	\
+	{(1UL << GLF_LOCK),			"l" },		\
+	{(1UL << GLF_DEMOTE),			"D" },		\
+	{(1UL << GLF_PENDING_DEMOTE),		"d" },		\
+	{(1UL << GLF_DEMOTE_IN_PROGRESS),	"p" },		\
+	{(1UL << GLF_DIRTY),			"y" },		\
+	{(1UL << GLF_LFLUSH),			"f" },		\
+	{(1UL << GLF_INVALIDATE_IN_PROGRESS),	"i" },		\
+	{(1UL << GLF_REPLY_PENDING),		"r" },		\
+	{(1UL << GLF_INITIAL),			"I" },		\
+	{(1UL << GLF_FROZEN),			"F" })
+
+#ifndef NUMPTY
+#define NUMPTY
+static inline u8 glock_trace_state(unsigned int state)
+{
+	switch(state) {
+	case LM_ST_SHARED:
+		return DLM_LOCK_PR;
+	case LM_ST_DEFERRED:
+		return DLM_LOCK_CW;
+	case LM_ST_EXCLUSIVE:
+		return DLM_LOCK_EX;
+	}
+	return DLM_LOCK_NL;
+}
+#endif
+
+/* Section 1 - Locking
+ *
+ * Objectives:
+ * Latency: Remote demote request to state change
+ * Latency: Local lock request to state change
+ * Latency: State change to lock grant
+ * Correctness: Ordering of local lock state vs. I/O requests
+ * Correctness: Responses to remote demote requests
+ */
+
+/* General glock state change (DLM lock request completes) */
+TRACE_EVENT(gfs2_glock_state_change,
+
+	TP_PROTO(const struct gfs2_glock *gl, unsigned int new_state),
+
+	TP_ARGS(gl, new_state),
+
+	TP_STRUCT__entry(
+		__field( dev_t, dev )
+		__field( u64, glnum )
+		__field( u32, gltype )
+		__field( u8, cur_state )
+		__field( u8, new_state )
+		__field( u8, dmt_state )
+		__field( u8, tgt_state )
+		__field( unsigned long, flags )
+	),
+
+	TP_fast_assign(
+		__entry->dev = gl->gl_sbd->sd_vfs->s_dev;
+		__entry->glnum = gl->gl_name.ln_number;
+		__entry->gltype = gl->gl_name.ln_type;
+		__entry->cur_state = glock_trace_state(gl->gl_state);
+		__entry->new_state = glock_trace_state(new_state);
+		__entry->tgt_state = glock_trace_state(gl->gl_target);
+		__entry->dmt_state = glock_trace_state(gl->gl_demote_state);
+		__entry->flags = gl->gl_flags;
+	),
+
+	TP_printk("%u,%u glock %d:%lld state %s to %s tgt:%s dmt:%s flags:%s",
+		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->gltype,
+		  (unsigned long long)__entry->glnum,
+		  glock_trace_name(__entry->cur_state),
+		  glock_trace_name(__entry->new_state),
+		  glock_trace_name(__entry->tgt_state),
+		  glock_trace_name(__entry->dmt_state),
+		  show_glock_flags(__entry->flags))
+);
+
+/* State change -> unlocked, glock is being deallocated */
+TRACE_EVENT(gfs2_glock_put,
+
+	TP_PROTO(const struct gfs2_glock *gl),
+
+	TP_ARGS(gl),
+
+	TP_STRUCT__entry(
+		__field( dev_t, dev )
+		__field( u64, glnum )
+		__field( u32, gltype )
+		__field( u8, cur_state )
+		__field( unsigned long, flags )
+	),
+
+	TP_fast_assign(
+		__entry->dev = gl->gl_sbd->sd_vfs->s_dev;
+		__entry->gltype = gl->gl_name.ln_type;
+		__entry->glnum = gl->gl_name.ln_number;
+		__entry->cur_state = glock_trace_state(gl->gl_state);
+		__entry->flags = gl->gl_flags;
+	),
+
+	TP_printk("%u,%u glock %d:%lld state %s => %s flags:%s",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->gltype, (unsigned long long)__entry->glnum,
+		  glock_trace_name(__entry->cur_state),
+		  glock_trace_name(DLM_LOCK_IV),
+		  show_glock_flags(__entry->flags))
+
+);
+
+/* Callback (local or remote) requesting lock demotion */
+TRACE_EVENT(gfs2_demote_rq,
+
+	TP_PROTO(const struct gfs2_glock *gl),
+
+	TP_ARGS(gl),
+
+	TP_STRUCT__entry(
+		__field( dev_t, dev )
+		__field( u64, glnum )
+		__field( u32, gltype )
+		__field( u8, cur_state )
+		__field( u8, dmt_state )
+		__field( unsigned long, flags )
+	),
+
+	TP_fast_assign(
+		__entry->dev = gl->gl_sbd->sd_vfs->s_dev;
+		__entry->gltype = gl->gl_name.ln_type;
+		__entry->glnum = gl->gl_name.ln_number;
+		__entry->cur_state = glock_trace_state(gl->gl_state);
+		__entry->dmt_state = glock_trace_state(gl->gl_demote_state);
+		__entry->flags = gl->gl_flags;
+	),
+
+	TP_printk("%u,%u glock %d:%lld demote %s to %s flags:%s",
+		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->gltype,
+		  (unsigned long long)__entry->glnum,
+		  glock_trace_name(__entry->cur_state),
+		  glock_trace_name(__entry->dmt_state),
+		  show_glock_flags(__entry->flags))
+
+);
+
+/* Promotion/grant of a glock */
+TRACE_EVENT(gfs2_promote,
+
+	TP_PROTO(const struct gfs2_holder *gh, int first),
+
+	TP_ARGS(gh, first),
+
+	TP_STRUCT__entry(
+		__field( dev_t, dev )
+		__field( u64, glnum )
+		__field( u32, gltype )
+		__field( int, first )
+		__field( u8, state )
+	),
+
+	TP_fast_assign(
+		__entry->dev = gh->gh_gl->gl_sbd->sd_vfs->s_dev;
+		__entry->glnum = gh->gh_gl->gl_name.ln_number;
+		__entry->gltype = gh->gh_gl->gl_name.ln_type;
+		__entry->first = first;
+		__entry->state = glock_trace_state(gh->gh_state);
+	),
+
+	TP_printk("%u,%u glock %u:%llu promote %s %s",
+		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->gltype,
+		  (unsigned long long)__entry->glnum,
+		  __entry->first ? "first": "other",
+		  glock_trace_name(__entry->state))
+);
+
+/* Queue/dequeue a lock request */
+TRACE_EVENT(gfs2_glock_queue,
+
+	TP_PROTO(const struct gfs2_holder *gh, int queue),
+
+	TP_ARGS(gh, queue),
+
+	TP_STRUCT__entry(
+		__field( dev_t, dev )
+		__field( u64, glnum )
+		__field( u32, gltype )
+		__field( int, queue )
+		__field( u8, state )
+	),
+
+	TP_fast_assign(
+		__entry->dev = gh->gh_gl->gl_sbd->sd_vfs->s_dev;
+		__entry->glnum = gh->gh_gl->gl_name.ln_number;
+		__entry->gltype = gh->gh_gl->gl_name.ln_type;
+		__entry->queue = queue;
+		__entry->state = glock_trace_state(gh->gh_state);
+	),
+
+	TP_printk("%u,%u glock %u:%llu %squeue %s",
+		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->gltype,
+		  (unsigned long long)__entry->glnum,
+		  __entry->queue ? "" : "de",
+		  glock_trace_name(__entry->state))
+);
+
+/* Section 2 - Log/journal
+ *
+ * Objectives:
+ * Latency: Log flush time
+ * Correctness: pin/unpin vs. disk I/O ordering
+ * Performance: Log usage stats
+ */
+
+/* Pin/unpin a block in the log */
+TRACE_EVENT(gfs2_pin,
+
+	TP_PROTO(const struct gfs2_bufdata *bd, int pin),
+
+	TP_ARGS(bd, pin),
+
+	TP_STRUCT__entry(
+		__field( dev_t, dev )
+		__field( int, pin )
+		__field( u32, len )
+		__field( sector_t, block )
+		__field( u64, ino )
+	),
+
+	TP_fast_assign(
+		__entry->dev = bd->bd_gl->gl_sbd->sd_vfs->s_dev;
+		__entry->pin = pin;
+		__entry->len = bd->bd_bh->b_size;
+		__entry->block = bd->bd_bh->b_blocknr;
+		__entry->ino = bd->bd_gl->gl_name.ln_number;
+	),
+
+	TP_printk("%u,%u log %s %llu/%lu inode %llu",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->pin ? "pin" : "unpin",
+		  (unsigned long long)__entry->block,
+		  (unsigned long)__entry->len,
+		  (unsigned long long)__entry->ino)
+);
+
+/* Flushing the log */
+TRACE_EVENT(gfs2_log_flush,
+
+	TP_PROTO(const struct gfs2_sbd *sdp, int start),
+
+	TP_ARGS(sdp, start),
+
+	TP_STRUCT__entry(
+		__field( dev_t, dev )
+		__field( int, start )
+		__field( u64, log_seq )
+	),
+
+	TP_fast_assign(
+		__entry->dev = sdp->sd_vfs->s_dev;
+		__entry->start = start;
+		__entry->log_seq = sdp->sd_log_sequence;
+	),
+
+	TP_printk("%u,%u log flush %s %llu",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->start ? "start" : "end",
+		  (unsigned long long)__entry->log_seq)
+);
+
+/* Reserving/releasing blocks in the log */
+TRACE_EVENT(gfs2_log_blocks,
+
+	TP_PROTO(const struct gfs2_sbd *sdp, int blocks),
+
+	TP_ARGS(sdp, blocks),
+
+	TP_STRUCT__entry(
+		__field( dev_t, dev )
+		__field( int, blocks )
+	),
+
+	TP_fast_assign(
+		__entry->dev = sdp->sd_vfs->s_dev;
+		__entry->blocks = blocks;
+	),
+
+	TP_printk("%u,%u log reserve %d", MAJOR(__entry->dev),
+		  MINOR(__entry->dev), __entry->blocks)
+);
+
+/* Section 3 - bmap
+ *
+ * Objectives:
+ * Latency: Bmap request time
+ * Performance: Block allocator tracing
+ * Correctness: Test of disard generation vs. blocks allocated
+ */
+
+/* Map an extent of blocks, possibly a new allocation */
+TRACE_EVENT(gfs2_bmap,
+
+	TP_PROTO(const struct gfs2_inode *ip, const struct buffer_head *bh,
+		 sector_t lblock, int create, int errno),
+
+	TP_ARGS(ip, bh, lblock, create, errno),
+
+	TP_STRUCT__entry(
+		__field( dev_t, dev )
+		__field( sector_t, lblock )
+		__field( sector_t, pblock )
+		__field( u64, inum )
+		__field( unsigned long, state )
+		__field( u32, len )
+		__field( int, create )
+		__field( int, errno )
+	),
+
+	TP_fast_assign(
+		__entry->dev = ip->i_gl->gl_sbd->sd_vfs->s_dev;
+		__entry->lblock = lblock;
+		__entry->pblock = buffer_mapped(bh) ? bh->b_blocknr : 0;
+		__entry->inum = ip->i_no_addr;
+		__entry->state = bh->b_state;
+		__entry->len = bh->b_size;
+		__entry->create = create;
+		__entry->errno = errno;
+	),
+
+	TP_printk("%u,%u bmap %llu map %llu/%lu to %llu flags:%08lx %s %d",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  (unsigned long long)__entry->inum,
+		  (unsigned long long)__entry->lblock,
+		  (unsigned long)__entry->len,
+		  (unsigned long long)__entry->pblock,
+		  __entry->state, __entry->create ? "create " : "nocreate",
+		  __entry->errno)
+);
+
+/* Keep track of blocks as they are allocated/freed */
+TRACE_EVENT(gfs2_block_alloc,
+
+	TP_PROTO(const struct gfs2_inode *ip, u64 block, unsigned len,
+		 u8 block_state),
+
+	TP_ARGS(ip, block, len, block_state),
+
+	TP_STRUCT__entry(
+		__field( dev_t, dev )
+		__field( u64, start )
+		__field( u64, inum )
+		__field( u32, len )
+		__field( u8, block_state )
+	),
+
+	TP_fast_assign(
+		__entry->dev = ip->i_gl->gl_sbd->sd_vfs->s_dev;
+		__entry->start = block;
+		__entry->inum = ip->i_no_addr;
+		__entry->len = len;
+		__entry->block_state = block_state;
+	),
+
+	TP_printk("%u,%u bmap %llu alloc %llu/%lu %s",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  (unsigned long long)__entry->inum,
+		  (unsigned long long)__entry->start,
+		  (unsigned long)__entry->len,
+		  block_state_name(__entry->block_state))
+);
+
+#endif /* _TRACE_GFS2_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
+
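
Note on the include pattern above (explanatory, not part of the patch): TRACE_EVENT() headers such as trace_gfs2.h are designed to be included twice. Every GFS2 source file that fires a trace_gfs2_*() hook includes the header normally and only picks up the generated static inline wrappers, while exactly one translation unit, glock.c in this series, defines CREATE_TRACE_POINTS first so that <trace/define_trace.h> re-reads the header (via TRACE_INCLUDE_PATH "." and TRACE_INCLUDE_FILE) and emits the real tracepoint definitions. A minimal sketch of that convention, using lines taken from the hunks above:

	/* glock.c - the one file that instantiates the gfs2 tracepoints */
	#define CREATE_TRACE_POINTS
	#include "trace_gfs2.h"

	/* bmap.c, log.c, lops.c, ops_fstype.c, rgrp.c - ordinary users */
	#include "trace_gfs2.h"

	/* call sites then simply invoke the generated inline hooks, e.g. */
	trace_gfs2_log_blocks(sdp, -blks);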