path: root/fs/xfs
author    Ingo Molnar <mingo@elte.hu>    2009-12-16 12:33:49 -0500
committer Ingo Molnar <mingo@elte.hu>    2009-12-16 12:33:49 -0500
commit    ee1156c11a1121e118b0a7f2dec240f0d421b1fd (patch)
tree      b8771cc5a9758af9d7410fc519227c036c222130 /fs/xfs
parent    b9f8fcd55bbdb037e5332dbdb7b494f0b70861ac (diff)
parent    8bea8672edfca7ec5f661cafb218f1205863b343 (diff)
Merge branch 'linus' into sched/urgent
Conflicts:
	kernel/sched_idletask.c

Merge reason: resolve the conflicts, pick up latest changes.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'fs/xfs')
-rw-r--r-- fs/xfs/Makefile | 8
-rw-r--r-- fs/xfs/linux-2.6/xfs_acl.c | 1
-rw-r--r-- fs/xfs/linux-2.6/xfs_aops.c | 52
-rw-r--r-- fs/xfs/linux-2.6/xfs_aops.h | 2
-rw-r--r-- fs/xfs/linux-2.6/xfs_buf.c | 117
-rw-r--r-- fs/xfs/linux-2.6/xfs_buf.h | 33
-rw-r--r-- fs/xfs/linux-2.6/xfs_fs_subr.c | 3
-rw-r--r-- fs/xfs/linux-2.6/xfs_ioctl.c | 1
-rw-r--r-- fs/xfs/linux-2.6/xfs_ioctl32.c | 1
-rw-r--r-- fs/xfs/linux-2.6/xfs_iops.c | 1
-rw-r--r-- fs/xfs/linux-2.6/xfs_linux.h | 1
-rw-r--r-- fs/xfs/linux-2.6/xfs_lrw.c | 87
-rw-r--r-- fs/xfs/linux-2.6/xfs_lrw.h | 45
-rw-r--r-- fs/xfs/linux-2.6/xfs_super.c | 104
-rw-r--r-- fs/xfs/linux-2.6/xfs_super.h | 7
-rw-r--r-- fs/xfs/linux-2.6/xfs_sync.c | 1
-rw-r--r-- fs/xfs/linux-2.6/xfs_trace.c | 75
-rw-r--r-- fs/xfs/linux-2.6/xfs_trace.h | 1369
-rw-r--r-- fs/xfs/linux-2.6/xfs_vnode.h | 4
-rw-r--r-- fs/xfs/quota/xfs_dquot.c | 110
-rw-r--r-- fs/xfs/quota/xfs_dquot.h | 21
-rw-r--r-- fs/xfs/quota/xfs_qm.c | 40
-rw-r--r-- fs/xfs/quota/xfs_qm_syscalls.c | 4
-rw-r--r-- fs/xfs/support/ktrace.c | 323
-rw-r--r-- fs/xfs/support/ktrace.h | 85
-rw-r--r-- fs/xfs/xfs.h | 16
-rw-r--r-- fs/xfs/xfs_ag.h | 14
-rw-r--r-- fs/xfs/xfs_alloc.c | 230
-rw-r--r-- fs/xfs/xfs_alloc.h | 27
-rw-r--r-- fs/xfs/xfs_alloc_btree.c | 1
-rw-r--r-- fs/xfs/xfs_attr.c | 107
-rw-r--r-- fs/xfs/xfs_attr.h | 10
-rw-r--r-- fs/xfs/xfs_attr_leaf.c | 14
-rw-r--r-- fs/xfs/xfs_attr_sf.h | 40
-rw-r--r-- fs/xfs/xfs_bmap.c | 942
-rw-r--r-- fs/xfs/xfs_bmap.h | 58
-rw-r--r-- fs/xfs/xfs_bmap_btree.c | 6
-rw-r--r-- fs/xfs/xfs_btree.c | 5
-rw-r--r-- fs/xfs/xfs_btree_trace.h | 17
-rw-r--r-- fs/xfs/xfs_buf_item.c | 87
-rw-r--r-- fs/xfs/xfs_buf_item.h | 20
-rw-r--r-- fs/xfs/xfs_da_btree.c | 3
-rw-r--r-- fs/xfs/xfs_da_btree.h | 7
-rw-r--r-- fs/xfs/xfs_dfrag.c | 2
-rw-r--r-- fs/xfs/xfs_dir2.c | 8
-rw-r--r-- fs/xfs/xfs_dir2_block.c | 20
-rw-r--r-- fs/xfs/xfs_dir2_leaf.c | 21
-rw-r--r-- fs/xfs/xfs_dir2_node.c | 27
-rw-r--r-- fs/xfs/xfs_dir2_sf.c | 26
-rw-r--r-- fs/xfs/xfs_dir2_trace.c | 216
-rw-r--r-- fs/xfs/xfs_dir2_trace.h | 72
-rw-r--r-- fs/xfs/xfs_filestream.c | 8
-rw-r--r-- fs/xfs/xfs_fsops.c | 2
-rw-r--r-- fs/xfs/xfs_iget.c | 111
-rw-r--r-- fs/xfs/xfs_inode.c | 79
-rw-r--r-- fs/xfs/xfs_inode.h | 82
-rw-r--r-- fs/xfs/xfs_inode_item.c | 5
-rw-r--r-- fs/xfs/xfs_iomap.c | 85
-rw-r--r-- fs/xfs/xfs_iomap.h | 8
-rw-r--r-- fs/xfs/xfs_log.c | 181
-rw-r--r-- fs/xfs/xfs_log_priv.h | 20
-rw-r--r-- fs/xfs/xfs_log_recover.c | 15
-rw-r--r-- fs/xfs/xfs_mount.c | 14
-rw-r--r-- fs/xfs/xfs_quota.h | 8
-rw-r--r-- fs/xfs/xfs_rename.c | 1
-rw-r--r-- fs/xfs/xfs_rtalloc.c | 1
-rw-r--r-- fs/xfs/xfs_rw.c | 3
-rw-r--r-- fs/xfs/xfs_trans.h | 47
-rw-r--r-- fs/xfs/xfs_trans_buf.c | 62
-rw-r--r-- fs/xfs/xfs_vnodeops.c | 8
70 files changed, 2379 insertions, 2852 deletions
diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile
index 7a59daed1782..56641fe52a23 100644
--- a/fs/xfs/Makefile
+++ b/fs/xfs/Makefile
@@ -26,6 +26,8 @@ endif
26 26
27obj-$(CONFIG_XFS_FS) += xfs.o 27obj-$(CONFIG_XFS_FS) += xfs.o
28 28
29xfs-y += linux-2.6/xfs_trace.o
30
29xfs-$(CONFIG_XFS_QUOTA) += $(addprefix quota/, \ 31xfs-$(CONFIG_XFS_QUOTA) += $(addprefix quota/, \
30 xfs_dquot.o \ 32 xfs_dquot.o \
31 xfs_dquot_item.o \ 33 xfs_dquot_item.o \
@@ -90,8 +92,7 @@ xfs-y += xfs_alloc.o \
90 xfs_rw.o \ 92 xfs_rw.o \
91 xfs_dmops.o 93 xfs_dmops.o
92 94
93xfs-$(CONFIG_XFS_TRACE) += xfs_btree_trace.o \ 95xfs-$(CONFIG_XFS_TRACE) += xfs_btree_trace.o
94 xfs_dir2_trace.o
95 96
96# Objects in linux/ 97# Objects in linux/
97xfs-y += $(addprefix $(XFS_LINUX)/, \ 98xfs-y += $(addprefix $(XFS_LINUX)/, \
@@ -113,6 +114,3 @@ xfs-y += $(addprefix $(XFS_LINUX)/, \
113xfs-y += $(addprefix support/, \ 114xfs-y += $(addprefix support/, \
114 debug.o \ 115 debug.o \
115 uuid.o) 116 uuid.o)
116
117xfs-$(CONFIG_XFS_TRACE) += support/ktrace.o
118
diff --git a/fs/xfs/linux-2.6/xfs_acl.c b/fs/xfs/linux-2.6/xfs_acl.c
index b23a54506446..69e598b6986f 100644
--- a/fs/xfs/linux-2.6/xfs_acl.c
+++ b/fs/xfs/linux-2.6/xfs_acl.c
@@ -21,6 +21,7 @@
21#include "xfs_bmap_btree.h" 21#include "xfs_bmap_btree.h"
22#include "xfs_inode.h" 22#include "xfs_inode.h"
23#include "xfs_vnodeops.h" 23#include "xfs_vnodeops.h"
24#include "xfs_trace.h"
24#include <linux/xattr.h> 25#include <linux/xattr.h>
25#include <linux/posix_acl_xattr.h> 26#include <linux/posix_acl_xattr.h>
26 27
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 87813e405cef..d798c54296eb 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -38,6 +38,7 @@
38#include "xfs_rw.h" 38#include "xfs_rw.h"
39#include "xfs_iomap.h" 39#include "xfs_iomap.h"
40#include "xfs_vnodeops.h" 40#include "xfs_vnodeops.h"
41#include "xfs_trace.h"
41#include <linux/mpage.h> 42#include <linux/mpage.h>
42#include <linux/pagevec.h> 43#include <linux/pagevec.h>
43#include <linux/writeback.h> 44#include <linux/writeback.h>
@@ -76,7 +77,7 @@ xfs_ioend_wake(
76 wake_up(to_ioend_wq(ip)); 77 wake_up(to_ioend_wq(ip));
77} 78}
78 79
79STATIC void 80void
80xfs_count_page_state( 81xfs_count_page_state(
81 struct page *page, 82 struct page *page,
82 int *delalloc, 83 int *delalloc,
@@ -98,48 +99,6 @@ xfs_count_page_state(
98 } while ((bh = bh->b_this_page) != head); 99 } while ((bh = bh->b_this_page) != head);
99} 100}
100 101
101#if defined(XFS_RW_TRACE)
102void
103xfs_page_trace(
104 int tag,
105 struct inode *inode,
106 struct page *page,
107 unsigned long pgoff)
108{
109 xfs_inode_t *ip;
110 loff_t isize = i_size_read(inode);
111 loff_t offset = page_offset(page);
112 int delalloc = -1, unmapped = -1, unwritten = -1;
113
114 if (page_has_buffers(page))
115 xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
116
117 ip = XFS_I(inode);
118 if (!ip->i_rwtrace)
119 return;
120
121 ktrace_enter(ip->i_rwtrace,
122 (void *)((unsigned long)tag),
123 (void *)ip,
124 (void *)inode,
125 (void *)page,
126 (void *)pgoff,
127 (void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
128 (void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
129 (void *)((unsigned long)((isize >> 32) & 0xffffffff)),
130 (void *)((unsigned long)(isize & 0xffffffff)),
131 (void *)((unsigned long)((offset >> 32) & 0xffffffff)),
132 (void *)((unsigned long)(offset & 0xffffffff)),
133 (void *)((unsigned long)delalloc),
134 (void *)((unsigned long)unmapped),
135 (void *)((unsigned long)unwritten),
136 (void *)((unsigned long)current_pid()),
137 (void *)NULL);
138}
139#else
140#define xfs_page_trace(tag, inode, page, pgoff)
141#endif
142
143STATIC struct block_device * 102STATIC struct block_device *
144xfs_find_bdev_for_inode( 103xfs_find_bdev_for_inode(
145 struct xfs_inode *ip) 104 struct xfs_inode *ip)
@@ -1202,7 +1161,7 @@ xfs_vm_writepage(
1202 int delalloc, unmapped, unwritten; 1161 int delalloc, unmapped, unwritten;
1203 struct inode *inode = page->mapping->host; 1162 struct inode *inode = page->mapping->host;
1204 1163
1205 xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0); 1164 trace_xfs_writepage(inode, page, 0);
1206 1165
1207 /* 1166 /*
1208 * We need a transaction if: 1167 * We need a transaction if:
@@ -1307,7 +1266,7 @@ xfs_vm_releasepage(
1307 .nr_to_write = 1, 1266 .nr_to_write = 1,
1308 }; 1267 };
1309 1268
1310 xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, 0); 1269 trace_xfs_releasepage(inode, page, 0);
1311 1270
1312 if (!page_has_buffers(page)) 1271 if (!page_has_buffers(page))
1313 return 0; 1272 return 0;
@@ -1587,8 +1546,7 @@ xfs_vm_invalidatepage(
1587 struct page *page, 1546 struct page *page,
1588 unsigned long offset) 1547 unsigned long offset)
1589{ 1548{
1590 xfs_page_trace(XFS_INVALIDPAGE_ENTER, 1549 trace_xfs_invalidatepage(page->mapping->host, page, offset);
1591 page->mapping->host, page, offset);
1592 block_invalidatepage(page, offset); 1550 block_invalidatepage(page, offset);
1593} 1551}
1594 1552
diff --git a/fs/xfs/linux-2.6/xfs_aops.h b/fs/xfs/linux-2.6/xfs_aops.h
index 221b3e66ceef..4cfc6ea87df8 100644
--- a/fs/xfs/linux-2.6/xfs_aops.h
+++ b/fs/xfs/linux-2.6/xfs_aops.h
@@ -45,4 +45,6 @@ extern int xfs_get_blocks(struct inode *, sector_t, struct buffer_head *, int);
45extern void xfs_ioend_init(void); 45extern void xfs_ioend_init(void);
46extern void xfs_ioend_wait(struct xfs_inode *); 46extern void xfs_ioend_wait(struct xfs_inode *);
47 47
48extern void xfs_count_page_state(struct page *, int *, int *, int *);
49
48#endif /* __XFS_AOPS_H__ */ 50#endif /* __XFS_AOPS_H__ */
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 4ddc973aea7a..b4c7d4248aac 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -39,6 +39,7 @@
39#include "xfs_ag.h" 39#include "xfs_ag.h"
40#include "xfs_dmapi.h" 40#include "xfs_dmapi.h"
41#include "xfs_mount.h" 41#include "xfs_mount.h"
42#include "xfs_trace.h"
42 43
43static kmem_zone_t *xfs_buf_zone; 44static kmem_zone_t *xfs_buf_zone;
44STATIC int xfsbufd(void *); 45STATIC int xfsbufd(void *);
@@ -53,34 +54,6 @@ static struct workqueue_struct *xfslogd_workqueue;
53struct workqueue_struct *xfsdatad_workqueue; 54struct workqueue_struct *xfsdatad_workqueue;
54struct workqueue_struct *xfsconvertd_workqueue; 55struct workqueue_struct *xfsconvertd_workqueue;
55 56
56#ifdef XFS_BUF_TRACE
57void
58xfs_buf_trace(
59 xfs_buf_t *bp,
60 char *id,
61 void *data,
62 void *ra)
63{
64 ktrace_enter(xfs_buf_trace_buf,
65 bp, id,
66 (void *)(unsigned long)bp->b_flags,
67 (void *)(unsigned long)bp->b_hold.counter,
68 (void *)(unsigned long)bp->b_sema.count,
69 (void *)current,
70 data, ra,
71 (void *)(unsigned long)((bp->b_file_offset>>32) & 0xffffffff),
72 (void *)(unsigned long)(bp->b_file_offset & 0xffffffff),
73 (void *)(unsigned long)bp->b_buffer_length,
74 NULL, NULL, NULL, NULL, NULL);
75}
76ktrace_t *xfs_buf_trace_buf;
77#define XFS_BUF_TRACE_SIZE 4096
78#define XB_TRACE(bp, id, data) \
79 xfs_buf_trace(bp, id, (void *)data, (void *)__builtin_return_address(0))
80#else
81#define XB_TRACE(bp, id, data) do { } while (0)
82#endif
83
84#ifdef XFS_BUF_LOCK_TRACKING 57#ifdef XFS_BUF_LOCK_TRACKING
85# define XB_SET_OWNER(bp) ((bp)->b_last_holder = current->pid) 58# define XB_SET_OWNER(bp) ((bp)->b_last_holder = current->pid)
86# define XB_CLEAR_OWNER(bp) ((bp)->b_last_holder = -1) 59# define XB_CLEAR_OWNER(bp) ((bp)->b_last_holder = -1)
@@ -279,7 +252,8 @@ _xfs_buf_initialize(
279 init_waitqueue_head(&bp->b_waiters); 252 init_waitqueue_head(&bp->b_waiters);
280 253
281 XFS_STATS_INC(xb_create); 254 XFS_STATS_INC(xb_create);
282 XB_TRACE(bp, "initialize", target); 255
256 trace_xfs_buf_init(bp, _RET_IP_);
283} 257}
284 258
285/* 259/*
@@ -332,7 +306,7 @@ void
332xfs_buf_free( 306xfs_buf_free(
333 xfs_buf_t *bp) 307 xfs_buf_t *bp)
334{ 308{
335 XB_TRACE(bp, "free", 0); 309 trace_xfs_buf_free(bp, _RET_IP_);
336 310
337 ASSERT(list_empty(&bp->b_hash_list)); 311 ASSERT(list_empty(&bp->b_hash_list));
338 312
@@ -445,7 +419,6 @@ _xfs_buf_lookup_pages(
445 if (page_count == bp->b_page_count) 419 if (page_count == bp->b_page_count)
446 bp->b_flags |= XBF_DONE; 420 bp->b_flags |= XBF_DONE;
447 421
448 XB_TRACE(bp, "lookup_pages", (long)page_count);
449 return error; 422 return error;
450} 423}
451 424
@@ -548,7 +521,6 @@ found:
548 if (down_trylock(&bp->b_sema)) { 521 if (down_trylock(&bp->b_sema)) {
549 if (!(flags & XBF_TRYLOCK)) { 522 if (!(flags & XBF_TRYLOCK)) {
550 /* wait for buffer ownership */ 523 /* wait for buffer ownership */
551 XB_TRACE(bp, "get_lock", 0);
552 xfs_buf_lock(bp); 524 xfs_buf_lock(bp);
553 XFS_STATS_INC(xb_get_locked_waited); 525 XFS_STATS_INC(xb_get_locked_waited);
554 } else { 526 } else {
@@ -571,7 +543,8 @@ found:
571 ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0); 543 ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
572 bp->b_flags &= XBF_MAPPED; 544 bp->b_flags &= XBF_MAPPED;
573 } 545 }
574 XB_TRACE(bp, "got_lock", 0); 546
547 trace_xfs_buf_find(bp, flags, _RET_IP_);
575 XFS_STATS_INC(xb_get_locked); 548 XFS_STATS_INC(xb_get_locked);
576 return bp; 549 return bp;
577} 550}
@@ -627,7 +600,7 @@ xfs_buf_get(
627 bp->b_bn = ioff; 600 bp->b_bn = ioff;
628 bp->b_count_desired = bp->b_buffer_length; 601 bp->b_count_desired = bp->b_buffer_length;
629 602
630 XB_TRACE(bp, "get", (unsigned long)flags); 603 trace_xfs_buf_get(bp, flags, _RET_IP_);
631 return bp; 604 return bp;
632 605
633 no_buffer: 606 no_buffer:
@@ -644,8 +617,6 @@ _xfs_buf_read(
644{ 617{
645 int status; 618 int status;
646 619
647 XB_TRACE(bp, "_xfs_buf_read", (unsigned long)flags);
648
649 ASSERT(!(flags & (XBF_DELWRI|XBF_WRITE))); 620 ASSERT(!(flags & (XBF_DELWRI|XBF_WRITE)));
650 ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL); 621 ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL);
651 622
@@ -673,19 +644,18 @@ xfs_buf_read(
673 644
674 bp = xfs_buf_get(target, ioff, isize, flags); 645 bp = xfs_buf_get(target, ioff, isize, flags);
675 if (bp) { 646 if (bp) {
647 trace_xfs_buf_read(bp, flags, _RET_IP_);
648
676 if (!XFS_BUF_ISDONE(bp)) { 649 if (!XFS_BUF_ISDONE(bp)) {
677 XB_TRACE(bp, "read", (unsigned long)flags);
678 XFS_STATS_INC(xb_get_read); 650 XFS_STATS_INC(xb_get_read);
679 _xfs_buf_read(bp, flags); 651 _xfs_buf_read(bp, flags);
680 } else if (flags & XBF_ASYNC) { 652 } else if (flags & XBF_ASYNC) {
681 XB_TRACE(bp, "read_async", (unsigned long)flags);
682 /* 653 /*
683 * Read ahead call which is already satisfied, 654 * Read ahead call which is already satisfied,
684 * drop the buffer 655 * drop the buffer
685 */ 656 */
686 goto no_buffer; 657 goto no_buffer;
687 } else { 658 } else {
688 XB_TRACE(bp, "read_done", (unsigned long)flags);
689 /* We do not want read in the flags */ 659 /* We do not want read in the flags */
690 bp->b_flags &= ~XBF_READ; 660 bp->b_flags &= ~XBF_READ;
691 } 661 }
@@ -823,7 +793,7 @@ xfs_buf_get_noaddr(
823 793
824 xfs_buf_unlock(bp); 794 xfs_buf_unlock(bp);
825 795
826 XB_TRACE(bp, "no_daddr", len); 796 trace_xfs_buf_get_noaddr(bp, _RET_IP_);
827 return bp; 797 return bp;
828 798
829 fail_free_mem: 799 fail_free_mem:
@@ -845,8 +815,8 @@ void
845xfs_buf_hold( 815xfs_buf_hold(
846 xfs_buf_t *bp) 816 xfs_buf_t *bp)
847{ 817{
818 trace_xfs_buf_hold(bp, _RET_IP_);
848 atomic_inc(&bp->b_hold); 819 atomic_inc(&bp->b_hold);
849 XB_TRACE(bp, "hold", 0);
850} 820}
851 821
852/* 822/*
@@ -859,7 +829,7 @@ xfs_buf_rele(
859{ 829{
860 xfs_bufhash_t *hash = bp->b_hash; 830 xfs_bufhash_t *hash = bp->b_hash;
861 831
862 XB_TRACE(bp, "rele", bp->b_relse); 832 trace_xfs_buf_rele(bp, _RET_IP_);
863 833
864 if (unlikely(!hash)) { 834 if (unlikely(!hash)) {
865 ASSERT(!bp->b_relse); 835 ASSERT(!bp->b_relse);
@@ -909,21 +879,19 @@ xfs_buf_cond_lock(
909 int locked; 879 int locked;
910 880
911 locked = down_trylock(&bp->b_sema) == 0; 881 locked = down_trylock(&bp->b_sema) == 0;
912 if (locked) { 882 if (locked)
913 XB_SET_OWNER(bp); 883 XB_SET_OWNER(bp);
914 } 884
915 XB_TRACE(bp, "cond_lock", (long)locked); 885 trace_xfs_buf_cond_lock(bp, _RET_IP_);
916 return locked ? 0 : -EBUSY; 886 return locked ? 0 : -EBUSY;
917} 887}
918 888
919#if defined(DEBUG) || defined(XFS_BLI_TRACE)
920int 889int
921xfs_buf_lock_value( 890xfs_buf_lock_value(
922 xfs_buf_t *bp) 891 xfs_buf_t *bp)
923{ 892{
924 return bp->b_sema.count; 893 return bp->b_sema.count;
925} 894}
926#endif
927 895
928/* 896/*
929 * Locks a buffer object. 897 * Locks a buffer object.
@@ -935,12 +903,14 @@ void
935xfs_buf_lock( 903xfs_buf_lock(
936 xfs_buf_t *bp) 904 xfs_buf_t *bp)
937{ 905{
938 XB_TRACE(bp, "lock", 0); 906 trace_xfs_buf_lock(bp, _RET_IP_);
907
939 if (atomic_read(&bp->b_io_remaining)) 908 if (atomic_read(&bp->b_io_remaining))
940 blk_run_address_space(bp->b_target->bt_mapping); 909 blk_run_address_space(bp->b_target->bt_mapping);
941 down(&bp->b_sema); 910 down(&bp->b_sema);
942 XB_SET_OWNER(bp); 911 XB_SET_OWNER(bp);
943 XB_TRACE(bp, "locked", 0); 912
913 trace_xfs_buf_lock_done(bp, _RET_IP_);
944} 914}
945 915
946/* 916/*
@@ -962,7 +932,8 @@ xfs_buf_unlock(
962 932
963 XB_CLEAR_OWNER(bp); 933 XB_CLEAR_OWNER(bp);
964 up(&bp->b_sema); 934 up(&bp->b_sema);
965 XB_TRACE(bp, "unlock", 0); 935
936 trace_xfs_buf_unlock(bp, _RET_IP_);
966} 937}
967 938
968 939
@@ -974,17 +945,18 @@ void
974xfs_buf_pin( 945xfs_buf_pin(
975 xfs_buf_t *bp) 946 xfs_buf_t *bp)
976{ 947{
948 trace_xfs_buf_pin(bp, _RET_IP_);
977 atomic_inc(&bp->b_pin_count); 949 atomic_inc(&bp->b_pin_count);
978 XB_TRACE(bp, "pin", (long)bp->b_pin_count.counter);
979} 950}
980 951
981void 952void
982xfs_buf_unpin( 953xfs_buf_unpin(
983 xfs_buf_t *bp) 954 xfs_buf_t *bp)
984{ 955{
956 trace_xfs_buf_unpin(bp, _RET_IP_);
957
985 if (atomic_dec_and_test(&bp->b_pin_count)) 958 if (atomic_dec_and_test(&bp->b_pin_count))
986 wake_up_all(&bp->b_waiters); 959 wake_up_all(&bp->b_waiters);
987 XB_TRACE(bp, "unpin", (long)bp->b_pin_count.counter);
988} 960}
989 961
990int 962int
@@ -1035,7 +1007,7 @@ xfs_buf_iodone_work(
1035 */ 1007 */
1036 if ((bp->b_error == EOPNOTSUPP) && 1008 if ((bp->b_error == EOPNOTSUPP) &&
1037 (bp->b_flags & (XBF_ORDERED|XBF_ASYNC)) == (XBF_ORDERED|XBF_ASYNC)) { 1009 (bp->b_flags & (XBF_ORDERED|XBF_ASYNC)) == (XBF_ORDERED|XBF_ASYNC)) {
1038 XB_TRACE(bp, "ordered_retry", bp->b_iodone); 1010 trace_xfs_buf_ordered_retry(bp, _RET_IP_);
1039 bp->b_flags &= ~XBF_ORDERED; 1011 bp->b_flags &= ~XBF_ORDERED;
1040 bp->b_flags |= _XFS_BARRIER_FAILED; 1012 bp->b_flags |= _XFS_BARRIER_FAILED;
1041 xfs_buf_iorequest(bp); 1013 xfs_buf_iorequest(bp);
@@ -1050,12 +1022,12 @@ xfs_buf_ioend(
1050 xfs_buf_t *bp, 1022 xfs_buf_t *bp,
1051 int schedule) 1023 int schedule)
1052{ 1024{
1025 trace_xfs_buf_iodone(bp, _RET_IP_);
1026
1053 bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD); 1027 bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
1054 if (bp->b_error == 0) 1028 if (bp->b_error == 0)
1055 bp->b_flags |= XBF_DONE; 1029 bp->b_flags |= XBF_DONE;
1056 1030
1057 XB_TRACE(bp, "iodone", bp->b_iodone);
1058
1059 if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) { 1031 if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
1060 if (schedule) { 1032 if (schedule) {
1061 INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work); 1033 INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
@@ -1075,7 +1047,7 @@ xfs_buf_ioerror(
1075{ 1047{
1076 ASSERT(error >= 0 && error <= 0xffff); 1048 ASSERT(error >= 0 && error <= 0xffff);
1077 bp->b_error = (unsigned short)error; 1049 bp->b_error = (unsigned short)error;
1078 XB_TRACE(bp, "ioerror", (unsigned long)error); 1050 trace_xfs_buf_ioerror(bp, error, _RET_IP_);
1079} 1051}
1080 1052
1081int 1053int
@@ -1083,7 +1055,7 @@ xfs_bawrite(
1083 void *mp, 1055 void *mp,
1084 struct xfs_buf *bp) 1056 struct xfs_buf *bp)
1085{ 1057{
1086 XB_TRACE(bp, "bawrite", 0); 1058 trace_xfs_buf_bawrite(bp, _RET_IP_);
1087 1059
1088 ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL); 1060 ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL);
1089 1061
@@ -1102,7 +1074,7 @@ xfs_bdwrite(
1102 void *mp, 1074 void *mp,
1103 struct xfs_buf *bp) 1075 struct xfs_buf *bp)
1104{ 1076{
1105 XB_TRACE(bp, "bdwrite", 0); 1077 trace_xfs_buf_bdwrite(bp, _RET_IP_);
1106 1078
1107 bp->b_strat = xfs_bdstrat_cb; 1079 bp->b_strat = xfs_bdstrat_cb;
1108 bp->b_mount = mp; 1080 bp->b_mount = mp;
@@ -1253,7 +1225,7 @@ int
1253xfs_buf_iorequest( 1225xfs_buf_iorequest(
1254 xfs_buf_t *bp) 1226 xfs_buf_t *bp)
1255{ 1227{
1256 XB_TRACE(bp, "iorequest", 0); 1228 trace_xfs_buf_iorequest(bp, _RET_IP_);
1257 1229
1258 if (bp->b_flags & XBF_DELWRI) { 1230 if (bp->b_flags & XBF_DELWRI) {
1259 xfs_buf_delwri_queue(bp, 1); 1231 xfs_buf_delwri_queue(bp, 1);
@@ -1287,11 +1259,13 @@ int
1287xfs_buf_iowait( 1259xfs_buf_iowait(
1288 xfs_buf_t *bp) 1260 xfs_buf_t *bp)
1289{ 1261{
1290 XB_TRACE(bp, "iowait", 0); 1262 trace_xfs_buf_iowait(bp, _RET_IP_);
1263
1291 if (atomic_read(&bp->b_io_remaining)) 1264 if (atomic_read(&bp->b_io_remaining))
1292 blk_run_address_space(bp->b_target->bt_mapping); 1265 blk_run_address_space(bp->b_target->bt_mapping);
1293 wait_for_completion(&bp->b_iowait); 1266 wait_for_completion(&bp->b_iowait);
1294 XB_TRACE(bp, "iowaited", (long)bp->b_error); 1267
1268 trace_xfs_buf_iowait_done(bp, _RET_IP_);
1295 return bp->b_error; 1269 return bp->b_error;
1296} 1270}
1297 1271
@@ -1604,7 +1578,8 @@ xfs_buf_delwri_queue(
1604 struct list_head *dwq = &bp->b_target->bt_delwrite_queue; 1578 struct list_head *dwq = &bp->b_target->bt_delwrite_queue;
1605 spinlock_t *dwlk = &bp->b_target->bt_delwrite_lock; 1579 spinlock_t *dwlk = &bp->b_target->bt_delwrite_lock;
1606 1580
1607 XB_TRACE(bp, "delwri_q", (long)unlock); 1581 trace_xfs_buf_delwri_queue(bp, _RET_IP_);
1582
1608 ASSERT((bp->b_flags&(XBF_DELWRI|XBF_ASYNC)) == (XBF_DELWRI|XBF_ASYNC)); 1583 ASSERT((bp->b_flags&(XBF_DELWRI|XBF_ASYNC)) == (XBF_DELWRI|XBF_ASYNC));
1609 1584
1610 spin_lock(dwlk); 1585 spin_lock(dwlk);
@@ -1644,7 +1619,7 @@ xfs_buf_delwri_dequeue(
1644 if (dequeued) 1619 if (dequeued)
1645 xfs_buf_rele(bp); 1620 xfs_buf_rele(bp);
1646 1621
1647 XB_TRACE(bp, "delwri_dq", (long)dequeued); 1622 trace_xfs_buf_delwri_dequeue(bp, _RET_IP_);
1648} 1623}
1649 1624
1650STATIC void 1625STATIC void
@@ -1692,7 +1667,7 @@ xfs_buf_delwri_split(
1692 INIT_LIST_HEAD(list); 1667 INIT_LIST_HEAD(list);
1693 spin_lock(dwlk); 1668 spin_lock(dwlk);
1694 list_for_each_entry_safe(bp, n, dwq, b_list) { 1669 list_for_each_entry_safe(bp, n, dwq, b_list) {
1695 XB_TRACE(bp, "walkq1", (long)xfs_buf_ispin(bp)); 1670 trace_xfs_buf_delwri_split(bp, _RET_IP_);
1696 ASSERT(bp->b_flags & XBF_DELWRI); 1671 ASSERT(bp->b_flags & XBF_DELWRI);
1697 1672
1698 if (!xfs_buf_ispin(bp) && !xfs_buf_cond_lock(bp)) { 1673 if (!xfs_buf_ispin(bp) && !xfs_buf_cond_lock(bp)) {
@@ -1816,14 +1791,10 @@ xfs_flush_buftarg(
1816int __init 1791int __init
1817xfs_buf_init(void) 1792xfs_buf_init(void)
1818{ 1793{
1819#ifdef XFS_BUF_TRACE
1820 xfs_buf_trace_buf = ktrace_alloc(XFS_BUF_TRACE_SIZE, KM_NOFS);
1821#endif
1822
1823 xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf", 1794 xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
1824 KM_ZONE_HWALIGN, NULL); 1795 KM_ZONE_HWALIGN, NULL);
1825 if (!xfs_buf_zone) 1796 if (!xfs_buf_zone)
1826 goto out_free_trace_buf; 1797 goto out;
1827 1798
1828 xfslogd_workqueue = create_workqueue("xfslogd"); 1799 xfslogd_workqueue = create_workqueue("xfslogd");
1829 if (!xfslogd_workqueue) 1800 if (!xfslogd_workqueue)
@@ -1846,10 +1817,7 @@ xfs_buf_init(void)
1846 destroy_workqueue(xfslogd_workqueue); 1817 destroy_workqueue(xfslogd_workqueue);
1847 out_free_buf_zone: 1818 out_free_buf_zone:
1848 kmem_zone_destroy(xfs_buf_zone); 1819 kmem_zone_destroy(xfs_buf_zone);
1849 out_free_trace_buf: 1820 out:
1850#ifdef XFS_BUF_TRACE
1851 ktrace_free(xfs_buf_trace_buf);
1852#endif
1853 return -ENOMEM; 1821 return -ENOMEM;
1854} 1822}
1855 1823
@@ -1861,9 +1829,6 @@ xfs_buf_terminate(void)
1861 destroy_workqueue(xfsdatad_workqueue); 1829 destroy_workqueue(xfsdatad_workqueue);
1862 destroy_workqueue(xfslogd_workqueue); 1830 destroy_workqueue(xfslogd_workqueue);
1863 kmem_zone_destroy(xfs_buf_zone); 1831 kmem_zone_destroy(xfs_buf_zone);
1864#ifdef XFS_BUF_TRACE
1865 ktrace_free(xfs_buf_trace_buf);
1866#endif
1867} 1832}
1868 1833
1869#ifdef CONFIG_KDB_MODULES 1834#ifdef CONFIG_KDB_MODULES
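The xfs_buf.c hunks above replace the old XB_TRACE()/ktrace bookkeeping with trace_xfs_buf_*() tracepoints that take the buffer plus _RET_IP_, so each event records the address of its call site (the buffer event classes added in xfs_trace.h print it with %pf). As a minimal standalone sketch of that caller-address idea, assuming only that _RET_IP_ is the caller address obtained via __builtin_return_address(0); this is a plain userspace program with invented names, not kernel code:

/* caller_ip_demo.c: userspace sketch of the _RET_IP_ idea used by the new
 * buffer tracepoints.  Build with: gcc -O2 -o caller_ip_demo caller_ip_demo.c
 */
#include <stdio.h>

static void __attribute__((noinline)) traced_helper(const char *what)
{
	/* Return address of this call, i.e. the call site in the caller. */
	void *caller_ip = __builtin_return_address(0);

	printf("%-12s emitted from %p\n", what, caller_ip);
}

int main(void)
{
	traced_helper("buf_hold");	/* two different call sites ...          */
	traced_helper("buf_rele");	/* ... report two different addresses    */
	return 0;
}

In the kernel the recorded address is resolved back to a symbol when the event is printed, which is what makes a single event definition useful across many call sites.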
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h
index 5f07dd91c5fa..a509f4addc2a 100644
--- a/fs/xfs/linux-2.6/xfs_buf.h
+++ b/fs/xfs/linux-2.6/xfs_buf.h
@@ -95,6 +95,28 @@ typedef enum {
95 _XFS_BARRIER_FAILED = (1 << 23), 95 _XFS_BARRIER_FAILED = (1 << 23),
96} xfs_buf_flags_t; 96} xfs_buf_flags_t;
97 97
98#define XFS_BUF_FLAGS \
99 { XBF_READ, "READ" }, \
100 { XBF_WRITE, "WRITE" }, \
101 { XBF_MAPPED, "MAPPED" }, \
102 { XBF_ASYNC, "ASYNC" }, \
103 { XBF_DONE, "DONE" }, \
104 { XBF_DELWRI, "DELWRI" }, \
105 { XBF_STALE, "STALE" }, \
106 { XBF_FS_MANAGED, "FS_MANAGED" }, \
107 { XBF_ORDERED, "ORDERED" }, \
108 { XBF_READ_AHEAD, "READ_AHEAD" }, \
109 { XBF_LOCK, "LOCK" }, /* should never be set */\
110 { XBF_TRYLOCK, "TRYLOCK" }, /* ditto */\
111 { XBF_DONT_BLOCK, "DONT_BLOCK" }, /* ditto */\
112 { _XBF_PAGE_CACHE, "PAGE_CACHE" }, \
113 { _XBF_PAGES, "PAGES" }, \
114 { _XBF_RUN_QUEUES, "RUN_QUEUES" }, \
115 { _XBF_DELWRI_Q, "DELWRI_Q" }, \
116 { _XBF_PAGE_LOCKED, "PAGE_LOCKED" }, \
117 { _XFS_BARRIER_FAILED, "BARRIER_FAILED" }
118
119
98typedef enum { 120typedef enum {
99 XBT_FORCE_SLEEP = 0, 121 XBT_FORCE_SLEEP = 0,
100 XBT_FORCE_FLUSH = 1, 122 XBT_FORCE_FLUSH = 1,
@@ -243,13 +265,6 @@ extern void xfs_buf_delwri_dequeue(xfs_buf_t *);
243extern int xfs_buf_init(void); 265extern int xfs_buf_init(void);
244extern void xfs_buf_terminate(void); 266extern void xfs_buf_terminate(void);
245 267
246#ifdef XFS_BUF_TRACE
247extern ktrace_t *xfs_buf_trace_buf;
248extern void xfs_buf_trace(xfs_buf_t *, char *, void *, void *);
249#else
250#define xfs_buf_trace(bp,id,ptr,ra) do { } while (0)
251#endif
252
253#define xfs_buf_target_name(target) \ 268#define xfs_buf_target_name(target) \
254 ({ char __b[BDEVNAME_SIZE]; bdevname((target)->bt_bdev, __b); __b; }) 269 ({ char __b[BDEVNAME_SIZE]; bdevname((target)->bt_bdev, __b); __b; })
255 270
@@ -365,10 +380,6 @@ static inline void xfs_buf_relse(xfs_buf_t *bp)
365 380
366#define xfs_bpin(bp) xfs_buf_pin(bp) 381#define xfs_bpin(bp) xfs_buf_pin(bp)
367#define xfs_bunpin(bp) xfs_buf_unpin(bp) 382#define xfs_bunpin(bp) xfs_buf_unpin(bp)
368
369#define xfs_buftrace(id, bp) \
370 xfs_buf_trace(bp, id, NULL, (void *)__builtin_return_address(0))
371
372#define xfs_biodone(bp) xfs_buf_ioend(bp, 0) 383#define xfs_biodone(bp) xfs_buf_ioend(bp, 0)
373 384
374#define xfs_biomove(bp, off, len, data, rw) \ 385#define xfs_biomove(bp, off, len, data, rw) \
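The XFS_BUF_FLAGS table added above maps each b_flags bit to a printable name; the buffer trace events in xfs_trace.h hand it to __print_flags() so the flags word shows up in trace output as a "|"-separated list. Below is a rough userspace sketch of how such a { mask, "NAME" } table is consumed; the flag values are invented for illustration and are not the real XBF_* bits:

/* flag_table_demo.c: decode a flags word using a { mask, name } table,
 * loosely mimicking what the kernel's __print_flags() does for trace output.
 */
#include <stdio.h>
#include <stddef.h>

struct flag_name {
	unsigned long mask;
	const char *name;
};

#define DEMO_FLAGS \
	{ 0x0001, "READ" },  \
	{ 0x0002, "WRITE" }, \
	{ 0x0010, "ASYNC" }, \
	{ 0x0400, "DELWRI" }

static void print_flags(unsigned long flags, const char *delim,
			const struct flag_name *table, size_t n)
{
	int first = 1;

	for (size_t i = 0; i < n; i++) {
		if (!(flags & table[i].mask))
			continue;
		printf("%s%s", first ? "" : delim, table[i].name);
		first = 0;
	}
	if (first)
		printf("0x%lx", flags);	/* no known bits set */
	printf("\n");
}

int main(void)
{
	static const struct flag_name table[] = { DEMO_FLAGS };

	/* 0x0011 decodes to READ|ASYNC with the table above. */
	print_flags(0x0011, "|", table, sizeof(table) / sizeof(table[0]));
	return 0;
}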
diff --git a/fs/xfs/linux-2.6/xfs_fs_subr.c b/fs/xfs/linux-2.6/xfs_fs_subr.c
index 08be36d7326c..7501b85fd860 100644
--- a/fs/xfs/linux-2.6/xfs_fs_subr.c
+++ b/fs/xfs/linux-2.6/xfs_fs_subr.c
@@ -19,6 +19,7 @@
19#include "xfs_vnodeops.h" 19#include "xfs_vnodeops.h"
20#include "xfs_bmap_btree.h" 20#include "xfs_bmap_btree.h"
21#include "xfs_inode.h" 21#include "xfs_inode.h"
22#include "xfs_trace.h"
22 23
23int fs_noerr(void) { return 0; } 24int fs_noerr(void) { return 0; }
24int fs_nosys(void) { return ENOSYS; } 25int fs_nosys(void) { return ENOSYS; }
@@ -51,6 +52,8 @@ xfs_flushinval_pages(
51 struct address_space *mapping = VFS_I(ip)->i_mapping; 52 struct address_space *mapping = VFS_I(ip)->i_mapping;
52 int ret = 0; 53 int ret = 0;
53 54
55 trace_xfs_pagecache_inval(ip, first, last);
56
54 if (mapping->nrpages) { 57 if (mapping->nrpages) {
55 xfs_iflags_clear(ip, XFS_ITRUNCATED); 58 xfs_iflags_clear(ip, XFS_ITRUNCATED);
56 ret = filemap_write_and_wait(mapping); 59 ret = filemap_write_and_wait(mapping);
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
index 5bb523d7f37e..a034cf624437 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -51,6 +51,7 @@
51#include "xfs_quota.h" 51#include "xfs_quota.h"
52#include "xfs_inode_item.h" 52#include "xfs_inode_item.h"
53#include "xfs_export.h" 53#include "xfs_export.h"
54#include "xfs_trace.h"
54 55
55#include <linux/capability.h> 56#include <linux/capability.h>
56#include <linux/dcache.h> 57#include <linux/dcache.h>
diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c
index eafcc7c18706..be1527b1670c 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl32.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl32.c
@@ -46,6 +46,7 @@
46#include "xfs_attr.h" 46#include "xfs_attr.h"
47#include "xfs_ioctl.h" 47#include "xfs_ioctl.h"
48#include "xfs_ioctl32.h" 48#include "xfs_ioctl32.h"
49#include "xfs_trace.h"
49 50
50#define _NATIVE_IOC(cmd, type) \ 51#define _NATIVE_IOC(cmd, type) \
51 _IOC(_IOC_DIR(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd), sizeof(type)) 52 _IOC(_IOC_DIR(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd), sizeof(type))
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index 1f3b4b8f7dd4..1d5b298ba8b2 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -47,6 +47,7 @@
47#include "xfs_buf_item.h" 47#include "xfs_buf_item.h"
48#include "xfs_utils.h" 48#include "xfs_utils.h"
49#include "xfs_vnodeops.h" 49#include "xfs_vnodeops.h"
50#include "xfs_trace.h"
50 51
51#include <linux/capability.h> 52#include <linux/capability.h>
52#include <linux/xattr.h> 53#include <linux/xattr.h>
diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h
index 6127e24062d0..5af0c81ca1ae 100644
--- a/fs/xfs/linux-2.6/xfs_linux.h
+++ b/fs/xfs/linux-2.6/xfs_linux.h
@@ -40,7 +40,6 @@
40#include <sv.h> 40#include <sv.h>
41#include <time.h> 41#include <time.h>
42 42
43#include <support/ktrace.h>
44#include <support/debug.h> 43#include <support/debug.h>
45#include <support/uuid.h> 44#include <support/uuid.h>
46 45
diff --git a/fs/xfs/linux-2.6/xfs_lrw.c b/fs/xfs/linux-2.6/xfs_lrw.c
index 1bf47f219c97..0d32457abef1 100644
--- a/fs/xfs/linux-2.6/xfs_lrw.c
+++ b/fs/xfs/linux-2.6/xfs_lrw.c
@@ -48,73 +48,12 @@
48#include "xfs_utils.h" 48#include "xfs_utils.h"
49#include "xfs_iomap.h" 49#include "xfs_iomap.h"
50#include "xfs_vnodeops.h" 50#include "xfs_vnodeops.h"
51#include "xfs_trace.h"
51 52
52#include <linux/capability.h> 53#include <linux/capability.h>
53#include <linux/writeback.h> 54#include <linux/writeback.h>
54 55
55 56
56#if defined(XFS_RW_TRACE)
57void
58xfs_rw_enter_trace(
59 int tag,
60 xfs_inode_t *ip,
61 void *data,
62 size_t segs,
63 loff_t offset,
64 int ioflags)
65{
66 if (ip->i_rwtrace == NULL)
67 return;
68 ktrace_enter(ip->i_rwtrace,
69 (void *)(unsigned long)tag,
70 (void *)ip,
71 (void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
72 (void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
73 (void *)data,
74 (void *)((unsigned long)segs),
75 (void *)((unsigned long)((offset >> 32) & 0xffffffff)),
76 (void *)((unsigned long)(offset & 0xffffffff)),
77 (void *)((unsigned long)ioflags),
78 (void *)((unsigned long)((ip->i_new_size >> 32) & 0xffffffff)),
79 (void *)((unsigned long)(ip->i_new_size & 0xffffffff)),
80 (void *)((unsigned long)current_pid()),
81 (void *)NULL,
82 (void *)NULL,
83 (void *)NULL,
84 (void *)NULL);
85}
86
87void
88xfs_inval_cached_trace(
89 xfs_inode_t *ip,
90 xfs_off_t offset,
91 xfs_off_t len,
92 xfs_off_t first,
93 xfs_off_t last)
94{
95
96 if (ip->i_rwtrace == NULL)
97 return;
98 ktrace_enter(ip->i_rwtrace,
99 (void *)(__psint_t)XFS_INVAL_CACHED,
100 (void *)ip,
101 (void *)((unsigned long)((offset >> 32) & 0xffffffff)),
102 (void *)((unsigned long)(offset & 0xffffffff)),
103 (void *)((unsigned long)((len >> 32) & 0xffffffff)),
104 (void *)((unsigned long)(len & 0xffffffff)),
105 (void *)((unsigned long)((first >> 32) & 0xffffffff)),
106 (void *)((unsigned long)(first & 0xffffffff)),
107 (void *)((unsigned long)((last >> 32) & 0xffffffff)),
108 (void *)((unsigned long)(last & 0xffffffff)),
109 (void *)((unsigned long)current_pid()),
110 (void *)NULL,
111 (void *)NULL,
112 (void *)NULL,
113 (void *)NULL,
114 (void *)NULL);
115}
116#endif
117
118/* 57/*
119 * xfs_iozero 58 * xfs_iozero
120 * 59 *
@@ -250,8 +189,7 @@ xfs_read(
250 } 189 }
251 } 190 }
252 191
253 xfs_rw_enter_trace(XFS_READ_ENTER, ip, 192 trace_xfs_file_read(ip, size, *offset, ioflags);
254 (void *)iovp, segs, *offset, ioflags);
255 193
256 iocb->ki_pos = *offset; 194 iocb->ki_pos = *offset;
257 ret = generic_file_aio_read(iocb, iovp, segs, *offset); 195 ret = generic_file_aio_read(iocb, iovp, segs, *offset);
@@ -292,8 +230,9 @@ xfs_splice_read(
292 return -error; 230 return -error;
293 } 231 }
294 } 232 }
295 xfs_rw_enter_trace(XFS_SPLICE_READ_ENTER, ip, 233
296 pipe, count, *ppos, ioflags); 234 trace_xfs_file_splice_read(ip, count, *ppos, ioflags);
235
297 ret = generic_file_splice_read(infilp, ppos, pipe, count, flags); 236 ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
298 if (ret > 0) 237 if (ret > 0)
299 XFS_STATS_ADD(xs_read_bytes, ret); 238 XFS_STATS_ADD(xs_read_bytes, ret);
@@ -342,8 +281,8 @@ xfs_splice_write(
342 ip->i_new_size = new_size; 281 ip->i_new_size = new_size;
343 xfs_iunlock(ip, XFS_ILOCK_EXCL); 282 xfs_iunlock(ip, XFS_ILOCK_EXCL);
344 283
345 xfs_rw_enter_trace(XFS_SPLICE_WRITE_ENTER, ip, 284 trace_xfs_file_splice_write(ip, count, *ppos, ioflags);
346 pipe, count, *ppos, ioflags); 285
347 ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags); 286 ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags);
348 if (ret > 0) 287 if (ret > 0)
349 XFS_STATS_ADD(xs_write_bytes, ret); 288 XFS_STATS_ADD(xs_write_bytes, ret);
@@ -710,8 +649,6 @@ start:
710 if ((ioflags & IO_ISDIRECT)) { 649 if ((ioflags & IO_ISDIRECT)) {
711 if (mapping->nrpages) { 650 if (mapping->nrpages) {
712 WARN_ON(need_i_mutex == 0); 651 WARN_ON(need_i_mutex == 0);
713 xfs_inval_cached_trace(xip, pos, -1,
714 (pos & PAGE_CACHE_MASK), -1);
715 error = xfs_flushinval_pages(xip, 652 error = xfs_flushinval_pages(xip,
716 (pos & PAGE_CACHE_MASK), 653 (pos & PAGE_CACHE_MASK),
717 -1, FI_REMAPF_LOCKED); 654 -1, FI_REMAPF_LOCKED);
@@ -728,8 +665,7 @@ start:
728 need_i_mutex = 0; 665 need_i_mutex = 0;
729 } 666 }
730 667
731 xfs_rw_enter_trace(XFS_DIOWR_ENTER, xip, (void *)iovp, segs, 668 trace_xfs_file_direct_write(xip, count, *offset, ioflags);
732 *offset, ioflags);
733 ret = generic_file_direct_write(iocb, iovp, 669 ret = generic_file_direct_write(iocb, iovp,
734 &segs, pos, offset, count, ocount); 670 &segs, pos, offset, count, ocount);
735 671
@@ -752,8 +688,7 @@ start:
752 ssize_t ret2 = 0; 688 ssize_t ret2 = 0;
753 689
754write_retry: 690write_retry:
755 xfs_rw_enter_trace(XFS_WRITE_ENTER, xip, (void *)iovp, segs, 691 trace_xfs_file_buffered_write(xip, count, *offset, ioflags);
756 *offset, ioflags);
757 ret2 = generic_file_buffered_write(iocb, iovp, segs, 692 ret2 = generic_file_buffered_write(iocb, iovp, segs,
758 pos, offset, count, ret); 693 pos, offset, count, ret);
759 /* 694 /*
@@ -858,7 +793,7 @@ int
858xfs_bdstrat_cb(struct xfs_buf *bp) 793xfs_bdstrat_cb(struct xfs_buf *bp)
859{ 794{
860 if (XFS_FORCED_SHUTDOWN(bp->b_mount)) { 795 if (XFS_FORCED_SHUTDOWN(bp->b_mount)) {
861 xfs_buftrace("XFS__BDSTRAT IOERROR", bp); 796 trace_xfs_bdstrat_shut(bp, _RET_IP_);
862 /* 797 /*
863 * Metadata write that didn't get logged but 798 * Metadata write that didn't get logged but
864 * written delayed anyway. These aren't associated 799 * written delayed anyway. These aren't associated
@@ -891,7 +826,7 @@ xfsbdstrat(
891 return; 826 return;
892 } 827 }
893 828
894 xfs_buftrace("XFSBDSTRAT IOERROR", bp); 829 trace_xfs_bdstrat_shut(bp, _RET_IP_);
895 xfs_bioerror_relse(bp); 830 xfs_bioerror_relse(bp);
896} 831}
897 832
diff --git a/fs/xfs/linux-2.6/xfs_lrw.h b/fs/xfs/linux-2.6/xfs_lrw.h
index e6be37dbd0e9..d1f7789c7ffb 100644
--- a/fs/xfs/linux-2.6/xfs_lrw.h
+++ b/fs/xfs/linux-2.6/xfs_lrw.h
@@ -20,52 +20,7 @@
20 20
21struct xfs_mount; 21struct xfs_mount;
22struct xfs_inode; 22struct xfs_inode;
23struct xfs_bmbt_irec;
24struct xfs_buf; 23struct xfs_buf;
25struct xfs_iomap;
26
27#if defined(XFS_RW_TRACE)
28/*
29 * Defines for the trace mechanisms in xfs_lrw.c.
30 */
31#define XFS_RW_KTRACE_SIZE 128
32
33#define XFS_READ_ENTER 1
34#define XFS_WRITE_ENTER 2
35#define XFS_IOMAP_READ_ENTER 3
36#define XFS_IOMAP_WRITE_ENTER 4
37#define XFS_IOMAP_READ_MAP 5
38#define XFS_IOMAP_WRITE_MAP 6
39#define XFS_IOMAP_WRITE_NOSPACE 7
40#define XFS_ITRUNC_START 8
41#define XFS_ITRUNC_FINISH1 9
42#define XFS_ITRUNC_FINISH2 10
43#define XFS_CTRUNC1 11
44#define XFS_CTRUNC2 12
45#define XFS_CTRUNC3 13
46#define XFS_CTRUNC4 14
47#define XFS_CTRUNC5 15
48#define XFS_CTRUNC6 16
49#define XFS_BUNMAP 17
50#define XFS_INVAL_CACHED 18
51#define XFS_DIORD_ENTER 19
52#define XFS_DIOWR_ENTER 20
53#define XFS_WRITEPAGE_ENTER 22
54#define XFS_RELEASEPAGE_ENTER 23
55#define XFS_INVALIDPAGE_ENTER 24
56#define XFS_IOMAP_ALLOC_ENTER 25
57#define XFS_IOMAP_ALLOC_MAP 26
58#define XFS_IOMAP_UNWRITTEN 27
59#define XFS_SPLICE_READ_ENTER 28
60#define XFS_SPLICE_WRITE_ENTER 29
61extern void xfs_rw_enter_trace(int, struct xfs_inode *,
62 void *, size_t, loff_t, int);
63extern void xfs_inval_cached_trace(struct xfs_inode *,
64 xfs_off_t, xfs_off_t, xfs_off_t, xfs_off_t);
65#else
66#define xfs_rw_enter_trace(tag, ip, data, size, offset, ioflags)
67#define xfs_inval_cached_trace(ip, offset, len, first, last)
68#endif
69 24
70/* errors from xfsbdstrat() must be extracted from the buffer */ 25/* errors from xfsbdstrat() must be extracted from the buffer */
71extern void xfsbdstrat(struct xfs_mount *, struct xfs_buf *); 26extern void xfsbdstrat(struct xfs_mount *, struct xfs_buf *);
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 1bfb0e980193..09783cc444ac 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -15,6 +15,7 @@
15 * along with this program; if not, write the Free Software Foundation, 15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */ 17 */
18
18#include "xfs.h" 19#include "xfs.h"
19#include "xfs_bit.h" 20#include "xfs_bit.h"
20#include "xfs_log.h" 21#include "xfs_log.h"
@@ -52,11 +53,11 @@
52#include "xfs_trans_priv.h" 53#include "xfs_trans_priv.h"
53#include "xfs_filestream.h" 54#include "xfs_filestream.h"
54#include "xfs_da_btree.h" 55#include "xfs_da_btree.h"
55#include "xfs_dir2_trace.h"
56#include "xfs_extfree_item.h" 56#include "xfs_extfree_item.h"
57#include "xfs_mru_cache.h" 57#include "xfs_mru_cache.h"
58#include "xfs_inode_item.h" 58#include "xfs_inode_item.h"
59#include "xfs_sync.h" 59#include "xfs_sync.h"
60#include "xfs_trace.h"
60 61
61#include <linux/namei.h> 62#include <linux/namei.h>
62#include <linux/init.h> 63#include <linux/init.h>
@@ -1525,8 +1526,6 @@ xfs_fs_fill_super(
1525 goto fail_vnrele; 1526 goto fail_vnrele;
1526 1527
1527 kfree(mtpt); 1528 kfree(mtpt);
1528
1529 xfs_itrace_exit(XFS_I(sb->s_root->d_inode));
1530 return 0; 1529 return 0;
1531 1530
1532 out_filestream_unmount: 1531 out_filestream_unmount:
@@ -1602,94 +1601,6 @@ static struct file_system_type xfs_fs_type = {
1602}; 1601};
1603 1602
1604STATIC int __init 1603STATIC int __init
1605xfs_alloc_trace_bufs(void)
1606{
1607#ifdef XFS_ALLOC_TRACE
1608 xfs_alloc_trace_buf = ktrace_alloc(XFS_ALLOC_TRACE_SIZE, KM_MAYFAIL);
1609 if (!xfs_alloc_trace_buf)
1610 goto out;
1611#endif
1612#ifdef XFS_BMAP_TRACE
1613 xfs_bmap_trace_buf = ktrace_alloc(XFS_BMAP_TRACE_SIZE, KM_MAYFAIL);
1614 if (!xfs_bmap_trace_buf)
1615 goto out_free_alloc_trace;
1616#endif
1617#ifdef XFS_BTREE_TRACE
1618 xfs_allocbt_trace_buf = ktrace_alloc(XFS_ALLOCBT_TRACE_SIZE,
1619 KM_MAYFAIL);
1620 if (!xfs_allocbt_trace_buf)
1621 goto out_free_bmap_trace;
1622
1623 xfs_inobt_trace_buf = ktrace_alloc(XFS_INOBT_TRACE_SIZE, KM_MAYFAIL);
1624 if (!xfs_inobt_trace_buf)
1625 goto out_free_allocbt_trace;
1626
1627 xfs_bmbt_trace_buf = ktrace_alloc(XFS_BMBT_TRACE_SIZE, KM_MAYFAIL);
1628 if (!xfs_bmbt_trace_buf)
1629 goto out_free_inobt_trace;
1630#endif
1631#ifdef XFS_ATTR_TRACE
1632 xfs_attr_trace_buf = ktrace_alloc(XFS_ATTR_TRACE_SIZE, KM_MAYFAIL);
1633 if (!xfs_attr_trace_buf)
1634 goto out_free_bmbt_trace;
1635#endif
1636#ifdef XFS_DIR2_TRACE
1637 xfs_dir2_trace_buf = ktrace_alloc(XFS_DIR2_GTRACE_SIZE, KM_MAYFAIL);
1638 if (!xfs_dir2_trace_buf)
1639 goto out_free_attr_trace;
1640#endif
1641
1642 return 0;
1643
1644#ifdef XFS_DIR2_TRACE
1645 out_free_attr_trace:
1646#endif
1647#ifdef XFS_ATTR_TRACE
1648 ktrace_free(xfs_attr_trace_buf);
1649 out_free_bmbt_trace:
1650#endif
1651#ifdef XFS_BTREE_TRACE
1652 ktrace_free(xfs_bmbt_trace_buf);
1653 out_free_inobt_trace:
1654 ktrace_free(xfs_inobt_trace_buf);
1655 out_free_allocbt_trace:
1656 ktrace_free(xfs_allocbt_trace_buf);
1657 out_free_bmap_trace:
1658#endif
1659#ifdef XFS_BMAP_TRACE
1660 ktrace_free(xfs_bmap_trace_buf);
1661 out_free_alloc_trace:
1662#endif
1663#ifdef XFS_ALLOC_TRACE
1664 ktrace_free(xfs_alloc_trace_buf);
1665 out:
1666#endif
1667 return -ENOMEM;
1668}
1669
1670STATIC void
1671xfs_free_trace_bufs(void)
1672{
1673#ifdef XFS_DIR2_TRACE
1674 ktrace_free(xfs_dir2_trace_buf);
1675#endif
1676#ifdef XFS_ATTR_TRACE
1677 ktrace_free(xfs_attr_trace_buf);
1678#endif
1679#ifdef XFS_BTREE_TRACE
1680 ktrace_free(xfs_bmbt_trace_buf);
1681 ktrace_free(xfs_inobt_trace_buf);
1682 ktrace_free(xfs_allocbt_trace_buf);
1683#endif
1684#ifdef XFS_BMAP_TRACE
1685 ktrace_free(xfs_bmap_trace_buf);
1686#endif
1687#ifdef XFS_ALLOC_TRACE
1688 ktrace_free(xfs_alloc_trace_buf);
1689#endif
1690}
1691
1692STATIC int __init
1693xfs_init_zones(void) 1604xfs_init_zones(void)
1694{ 1605{
1695 1606
@@ -1830,7 +1741,6 @@ init_xfs_fs(void)
1830 printk(KERN_INFO XFS_VERSION_STRING " with " 1741 printk(KERN_INFO XFS_VERSION_STRING " with "
1831 XFS_BUILD_OPTIONS " enabled\n"); 1742 XFS_BUILD_OPTIONS " enabled\n");
1832 1743
1833 ktrace_init(64);
1834 xfs_ioend_init(); 1744 xfs_ioend_init();
1835 xfs_dir_startup(); 1745 xfs_dir_startup();
1836 1746
@@ -1838,13 +1748,9 @@ init_xfs_fs(void)
1838 if (error) 1748 if (error)
1839 goto out; 1749 goto out;
1840 1750
1841 error = xfs_alloc_trace_bufs();
1842 if (error)
1843 goto out_destroy_zones;
1844
1845 error = xfs_mru_cache_init(); 1751 error = xfs_mru_cache_init();
1846 if (error) 1752 if (error)
1847 goto out_free_trace_buffers; 1753 goto out_destroy_zones;
1848 1754
1849 error = xfs_filestream_init(); 1755 error = xfs_filestream_init();
1850 if (error) 1756 if (error)
@@ -1879,8 +1785,6 @@ init_xfs_fs(void)
1879 xfs_filestream_uninit(); 1785 xfs_filestream_uninit();
1880 out_mru_cache_uninit: 1786 out_mru_cache_uninit:
1881 xfs_mru_cache_uninit(); 1787 xfs_mru_cache_uninit();
1882 out_free_trace_buffers:
1883 xfs_free_trace_bufs();
1884 out_destroy_zones: 1788 out_destroy_zones:
1885 xfs_destroy_zones(); 1789 xfs_destroy_zones();
1886 out: 1790 out:
@@ -1897,9 +1801,7 @@ exit_xfs_fs(void)
1897 xfs_buf_terminate(); 1801 xfs_buf_terminate();
1898 xfs_filestream_uninit(); 1802 xfs_filestream_uninit();
1899 xfs_mru_cache_uninit(); 1803 xfs_mru_cache_uninit();
1900 xfs_free_trace_bufs();
1901 xfs_destroy_zones(); 1804 xfs_destroy_zones();
1902 ktrace_uninit();
1903} 1805}
1904 1806
1905module_init(init_xfs_fs); 1807module_init(init_xfs_fs);
diff --git a/fs/xfs/linux-2.6/xfs_super.h b/fs/xfs/linux-2.6/xfs_super.h
index 18175ebd58ed..233d4b9881b1 100644
--- a/fs/xfs/linux-2.6/xfs_super.h
+++ b/fs/xfs/linux-2.6/xfs_super.h
@@ -56,12 +56,6 @@ extern void xfs_qm_exit(void);
56# define XFS_BIGFS_STRING 56# define XFS_BIGFS_STRING
57#endif 57#endif
58 58
59#ifdef CONFIG_XFS_TRACE
60# define XFS_TRACE_STRING "tracing, "
61#else
62# define XFS_TRACE_STRING
63#endif
64
65#ifdef CONFIG_XFS_DMAPI 59#ifdef CONFIG_XFS_DMAPI
66# define XFS_DMAPI_STRING "dmapi support, " 60# define XFS_DMAPI_STRING "dmapi support, "
67#else 61#else
@@ -78,7 +72,6 @@ extern void xfs_qm_exit(void);
78 XFS_SECURITY_STRING \ 72 XFS_SECURITY_STRING \
79 XFS_REALTIME_STRING \ 73 XFS_REALTIME_STRING \
80 XFS_BIGFS_STRING \ 74 XFS_BIGFS_STRING \
81 XFS_TRACE_STRING \
82 XFS_DMAPI_STRING \ 75 XFS_DMAPI_STRING \
83 XFS_DBG_STRING /* DBG must be last */ 76 XFS_DBG_STRING /* DBG must be last */
84 77
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index d895a3a960f5..6fed97a8cd3e 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -44,6 +44,7 @@
44#include "xfs_inode_item.h" 44#include "xfs_inode_item.h"
45#include "xfs_rw.h" 45#include "xfs_rw.h"
46#include "xfs_quota.h" 46#include "xfs_quota.h"
47#include "xfs_trace.h"
47 48
48#include <linux/kthread.h> 49#include <linux/kthread.h>
49#include <linux/freezer.h> 50#include <linux/freezer.h>
diff --git a/fs/xfs/linux-2.6/xfs_trace.c b/fs/xfs/linux-2.6/xfs_trace.c
new file mode 100644
index 000000000000..856eb3c8d605
--- /dev/null
+++ b/fs/xfs/linux-2.6/xfs_trace.c
@@ -0,0 +1,75 @@
1/*
2 * Copyright (c) 2009, Christoph Hellwig
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18#include "xfs.h"
19#include "xfs_fs.h"
20#include "xfs_types.h"
21#include "xfs_bit.h"
22#include "xfs_log.h"
23#include "xfs_inum.h"
24#include "xfs_trans.h"
25#include "xfs_sb.h"
26#include "xfs_ag.h"
27#include "xfs_dir2.h"
28#include "xfs_da_btree.h"
29#include "xfs_bmap_btree.h"
30#include "xfs_alloc_btree.h"
31#include "xfs_ialloc_btree.h"
32#include "xfs_dir2_sf.h"
33#include "xfs_attr_sf.h"
34#include "xfs_dinode.h"
35#include "xfs_inode.h"
36#include "xfs_btree.h"
37#include "xfs_dmapi.h"
38#include "xfs_mount.h"
39#include "xfs_ialloc.h"
40#include "xfs_itable.h"
41#include "xfs_alloc.h"
42#include "xfs_bmap.h"
43#include "xfs_attr.h"
44#include "xfs_attr_sf.h"
45#include "xfs_attr_leaf.h"
46#include "xfs_log_priv.h"
47#include "xfs_buf_item.h"
48#include "xfs_quota.h"
49#include "xfs_iomap.h"
50#include "xfs_aops.h"
51#include "quota/xfs_dquot_item.h"
52#include "quota/xfs_dquot.h"
53
54/*
55 * Format fsblock number into a static buffer & return it.
56 */
57STATIC char *xfs_fmtfsblock(xfs_fsblock_t bno)
58{
59 static char rval[50];
60
61 if (bno == NULLFSBLOCK)
62 sprintf(rval, "NULLFSBLOCK");
63 else if (isnullstartblock(bno))
64 sprintf(rval, "NULLSTARTBLOCK(%lld)", startblockval(bno));
65 else
66 sprintf(rval, "%lld", (xfs_dfsbno_t)bno);
67 return rval;
68}
69
70/*
71 * We include this last to have the helpers above available for the trace
72 * event implementations.
73 */
74#define CREATE_TRACE_POINTS
75#include "xfs_trace.h"
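xfs_trace.c defines CREATE_TRACE_POINTS before pulling in xfs_trace.h, which is the standard Linux tracepoint pattern: every other user includes the header normally and only gets the trace_*() declarations, while this single translation unit expands the TRACE_EVENT() definitions into the real tracepoints. A minimal sketch of that layout follows, with made-up names; the TRACE_INCLUDE_* tail is the usual boilerplate and is assumed here, since the end of xfs_trace.h is not part of the hunks shown below:

/*
 * Hypothetical sample_trace.h -- skeleton of the usual kernel tracepoint
 * header layout (names invented for illustration).
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM sample

#if !defined(_SAMPLE_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _SAMPLE_TRACE_H

#include <linux/tracepoint.h>

TRACE_EVENT(sample_event,
	TP_PROTO(int value),
	TP_ARGS(value),
	TP_STRUCT__entry(
		__field(int, value)
	),
	TP_fast_assign(
		__entry->value = value;
	),
	TP_printk("value %d", __entry->value)
);

#endif /* _SAMPLE_TRACE_H */

/* Must stay outside the guard so define_trace.h can re-read the header. */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE sample_trace
#include <trace/define_trace.h>

/*
 * Hypothetical sample_trace.c: the one translation unit that turns the
 * event descriptions into real tracepoint definitions.  Other files just
 * include the header and call trace_sample_event(42).
 */
#define CREATE_TRACE_POINTS
#include "sample_trace.h"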
diff --git a/fs/xfs/linux-2.6/xfs_trace.h b/fs/xfs/linux-2.6/xfs_trace.h
new file mode 100644
index 000000000000..c40834bdee58
--- /dev/null
+++ b/fs/xfs/linux-2.6/xfs_trace.h
@@ -0,0 +1,1369 @@
1/*
2 * Copyright (c) 2009, Christoph Hellwig
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18#undef TRACE_SYSTEM
19#define TRACE_SYSTEM xfs
20
21#if !defined(_TRACE_XFS_H) || defined(TRACE_HEADER_MULTI_READ)
22#define _TRACE_XFS_H
23
24#include <linux/tracepoint.h>
25
26struct xfs_agf;
27struct xfs_alloc_arg;
28struct xfs_attr_list_context;
29struct xfs_buf_log_item;
30struct xfs_da_args;
31struct xfs_da_node_entry;
32struct xfs_dquot;
33struct xlog_ticket;
34struct log;
35
36#define DEFINE_ATTR_LIST_EVENT(name) \
37TRACE_EVENT(name, \
38 TP_PROTO(struct xfs_attr_list_context *ctx), \
39 TP_ARGS(ctx), \
40 TP_STRUCT__entry( \
41 __field(dev_t, dev) \
42 __field(xfs_ino_t, ino) \
43 __field(u32, hashval) \
44 __field(u32, blkno) \
45 __field(u32, offset) \
46 __field(void *, alist) \
47 __field(int, bufsize) \
48 __field(int, count) \
49 __field(int, firstu) \
50 __field(int, dupcnt) \
51 __field(int, flags) \
52 ), \
53 TP_fast_assign( \
54 __entry->dev = VFS_I(ctx->dp)->i_sb->s_dev; \
55 __entry->ino = ctx->dp->i_ino; \
56 __entry->hashval = ctx->cursor->hashval; \
57 __entry->blkno = ctx->cursor->blkno; \
58 __entry->offset = ctx->cursor->offset; \
59 __entry->alist = ctx->alist; \
60 __entry->bufsize = ctx->bufsize; \
61 __entry->count = ctx->count; \
62 __entry->firstu = ctx->firstu; \
63 __entry->flags = ctx->flags; \
64 ), \
65 TP_printk("dev %d:%d ino 0x%llx cursor h/b/o 0x%x/0x%x/%u dupcnt %u " \
66 "alist 0x%p size %u count %u firstu %u flags %d %s", \
67 MAJOR(__entry->dev), MINOR(__entry->dev), \
68 __entry->ino, \
69 __entry->hashval, \
70 __entry->blkno, \
71 __entry->offset, \
72 __entry->dupcnt, \
73 __entry->alist, \
74 __entry->bufsize, \
75 __entry->count, \
76 __entry->firstu, \
77 __entry->flags, \
78 __print_flags(__entry->flags, "|", XFS_ATTR_FLAGS) \
79 ) \
80)
81DEFINE_ATTR_LIST_EVENT(xfs_attr_list_sf);
82DEFINE_ATTR_LIST_EVENT(xfs_attr_list_sf_all);
83DEFINE_ATTR_LIST_EVENT(xfs_attr_list_leaf);
84DEFINE_ATTR_LIST_EVENT(xfs_attr_list_leaf_end);
85DEFINE_ATTR_LIST_EVENT(xfs_attr_list_full);
86DEFINE_ATTR_LIST_EVENT(xfs_attr_list_add);
87DEFINE_ATTR_LIST_EVENT(xfs_attr_list_wrong_blk);
88DEFINE_ATTR_LIST_EVENT(xfs_attr_list_notfound);
89
90TRACE_EVENT(xfs_attr_list_node_descend,
91 TP_PROTO(struct xfs_attr_list_context *ctx,
92 struct xfs_da_node_entry *btree),
93 TP_ARGS(ctx, btree),
94 TP_STRUCT__entry(
95 __field(dev_t, dev)
96 __field(xfs_ino_t, ino)
97 __field(u32, hashval)
98 __field(u32, blkno)
99 __field(u32, offset)
100 __field(void *, alist)
101 __field(int, bufsize)
102 __field(int, count)
103 __field(int, firstu)
104 __field(int, dupcnt)
105 __field(int, flags)
106 __field(u32, bt_hashval)
107 __field(u32, bt_before)
108 ),
109 TP_fast_assign(
110 __entry->dev = VFS_I(ctx->dp)->i_sb->s_dev;
111 __entry->ino = ctx->dp->i_ino;
112 __entry->hashval = ctx->cursor->hashval;
113 __entry->blkno = ctx->cursor->blkno;
114 __entry->offset = ctx->cursor->offset;
115 __entry->alist = ctx->alist;
116 __entry->bufsize = ctx->bufsize;
117 __entry->count = ctx->count;
118 __entry->firstu = ctx->firstu;
119 __entry->flags = ctx->flags;
120 __entry->bt_hashval = be32_to_cpu(btree->hashval);
121 __entry->bt_before = be32_to_cpu(btree->before);
122 ),
123 TP_printk("dev %d:%d ino 0x%llx cursor h/b/o 0x%x/0x%x/%u dupcnt %u "
124 "alist 0x%p size %u count %u firstu %u flags %d %s "
125 "node hashval %u, node before %u",
126 MAJOR(__entry->dev), MINOR(__entry->dev),
127 __entry->ino,
128 __entry->hashval,
129 __entry->blkno,
130 __entry->offset,
131 __entry->dupcnt,
132 __entry->alist,
133 __entry->bufsize,
134 __entry->count,
135 __entry->firstu,
136 __entry->flags,
137 __print_flags(__entry->flags, "|", XFS_ATTR_FLAGS),
138 __entry->bt_hashval,
139 __entry->bt_before)
140);
141
142TRACE_EVENT(xfs_iext_insert,
143 TP_PROTO(struct xfs_inode *ip, xfs_extnum_t idx,
144 struct xfs_bmbt_irec *r, int state, unsigned long caller_ip),
145 TP_ARGS(ip, idx, r, state, caller_ip),
146 TP_STRUCT__entry(
147 __field(dev_t, dev)
148 __field(xfs_ino_t, ino)
149 __field(xfs_extnum_t, idx)
150 __field(xfs_fileoff_t, startoff)
151 __field(xfs_fsblock_t, startblock)
152 __field(xfs_filblks_t, blockcount)
153 __field(xfs_exntst_t, state)
154 __field(int, bmap_state)
155 __field(unsigned long, caller_ip)
156 ),
157 TP_fast_assign(
158 __entry->dev = VFS_I(ip)->i_sb->s_dev;
159 __entry->ino = ip->i_ino;
160 __entry->idx = idx;
161 __entry->startoff = r->br_startoff;
162 __entry->startblock = r->br_startblock;
163 __entry->blockcount = r->br_blockcount;
164 __entry->state = r->br_state;
165 __entry->bmap_state = state;
166 __entry->caller_ip = caller_ip;
167 ),
168 TP_printk("dev %d:%d ino 0x%llx state %s idx %ld "
169 "offset %lld block %s count %lld flag %d caller %pf",
170 MAJOR(__entry->dev), MINOR(__entry->dev),
171 __entry->ino,
172 __print_flags(__entry->bmap_state, "|", XFS_BMAP_EXT_FLAGS),
173 (long)__entry->idx,
174 __entry->startoff,
175 xfs_fmtfsblock(__entry->startblock),
176 __entry->blockcount,
177 __entry->state,
178 (char *)__entry->caller_ip)
179);
180
181#define DEFINE_BMAP_EVENT(name) \
182TRACE_EVENT(name, \
183 TP_PROTO(struct xfs_inode *ip, xfs_extnum_t idx, int state, \
184 unsigned long caller_ip), \
185 TP_ARGS(ip, idx, state, caller_ip), \
186 TP_STRUCT__entry( \
187 __field(dev_t, dev) \
188 __field(xfs_ino_t, ino) \
189 __field(xfs_extnum_t, idx) \
190 __field(xfs_fileoff_t, startoff) \
191 __field(xfs_fsblock_t, startblock) \
192 __field(xfs_filblks_t, blockcount) \
193 __field(xfs_exntst_t, state) \
194 __field(int, bmap_state) \
195 __field(unsigned long, caller_ip) \
196 ), \
197 TP_fast_assign( \
198 struct xfs_ifork *ifp = (state & BMAP_ATTRFORK) ? \
199 ip->i_afp : &ip->i_df; \
200 struct xfs_bmbt_irec r; \
201 \
202 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx), &r); \
203 __entry->dev = VFS_I(ip)->i_sb->s_dev; \
204 __entry->ino = ip->i_ino; \
205 __entry->idx = idx; \
206 __entry->startoff = r.br_startoff; \
207 __entry->startblock = r.br_startblock; \
208 __entry->blockcount = r.br_blockcount; \
209 __entry->state = r.br_state; \
210 __entry->bmap_state = state; \
211 __entry->caller_ip = caller_ip; \
212 ), \
213 TP_printk("dev %d:%d ino 0x%llx state %s idx %ld " \
214 "offset %lld block %s count %lld flag %d caller %pf", \
215 MAJOR(__entry->dev), MINOR(__entry->dev), \
216 __entry->ino, \
217 __print_flags(__entry->bmap_state, "|", XFS_BMAP_EXT_FLAGS), \
218 (long)__entry->idx, \
219 __entry->startoff, \
220 xfs_fmtfsblock(__entry->startblock), \
221 __entry->blockcount, \
222 __entry->state, \
223 (char *)__entry->caller_ip) \
224)
225
226DEFINE_BMAP_EVENT(xfs_iext_remove);
227DEFINE_BMAP_EVENT(xfs_bmap_pre_update);
228DEFINE_BMAP_EVENT(xfs_bmap_post_update);
229DEFINE_BMAP_EVENT(xfs_extlist);
230
231#define DEFINE_BUF_EVENT(tname) \
232TRACE_EVENT(tname, \
233 TP_PROTO(struct xfs_buf *bp, unsigned long caller_ip), \
234 TP_ARGS(bp, caller_ip), \
235 TP_STRUCT__entry( \
236 __field(dev_t, dev) \
237 __field(xfs_daddr_t, bno) \
238 __field(size_t, buffer_length) \
239 __field(int, hold) \
240 __field(int, pincount) \
241 __field(unsigned, lockval) \
242 __field(unsigned, flags) \
243 __field(unsigned long, caller_ip) \
244 ), \
245 TP_fast_assign( \
246 __entry->dev = bp->b_target->bt_dev; \
247 __entry->bno = bp->b_bn; \
248 __entry->buffer_length = bp->b_buffer_length; \
249 __entry->hold = atomic_read(&bp->b_hold); \
250 __entry->pincount = atomic_read(&bp->b_pin_count); \
251 __entry->lockval = xfs_buf_lock_value(bp); \
252 __entry->flags = bp->b_flags; \
253 __entry->caller_ip = caller_ip; \
254 ), \
255 TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d " \
256 "lock %d flags %s caller %pf", \
257 MAJOR(__entry->dev), MINOR(__entry->dev), \
258 (unsigned long long)__entry->bno, \
259 __entry->buffer_length, \
260 __entry->hold, \
261 __entry->pincount, \
262 __entry->lockval, \
263 __print_flags(__entry->flags, "|", XFS_BUF_FLAGS), \
264 (void *)__entry->caller_ip) \
265)
266DEFINE_BUF_EVENT(xfs_buf_init);
267DEFINE_BUF_EVENT(xfs_buf_free);
268DEFINE_BUF_EVENT(xfs_buf_hold);
269DEFINE_BUF_EVENT(xfs_buf_rele);
270DEFINE_BUF_EVENT(xfs_buf_pin);
271DEFINE_BUF_EVENT(xfs_buf_unpin);
272DEFINE_BUF_EVENT(xfs_buf_iodone);
273DEFINE_BUF_EVENT(xfs_buf_iorequest);
274DEFINE_BUF_EVENT(xfs_buf_bawrite);
275DEFINE_BUF_EVENT(xfs_buf_bdwrite);
276DEFINE_BUF_EVENT(xfs_buf_lock);
277DEFINE_BUF_EVENT(xfs_buf_lock_done);
278DEFINE_BUF_EVENT(xfs_buf_cond_lock);
279DEFINE_BUF_EVENT(xfs_buf_unlock);
280DEFINE_BUF_EVENT(xfs_buf_ordered_retry);
281DEFINE_BUF_EVENT(xfs_buf_iowait);
282DEFINE_BUF_EVENT(xfs_buf_iowait_done);
283DEFINE_BUF_EVENT(xfs_buf_delwri_queue);
284DEFINE_BUF_EVENT(xfs_buf_delwri_dequeue);
285DEFINE_BUF_EVENT(xfs_buf_delwri_split);
286DEFINE_BUF_EVENT(xfs_buf_get_noaddr);
287DEFINE_BUF_EVENT(xfs_bdstrat_shut);
288DEFINE_BUF_EVENT(xfs_buf_item_relse);
289DEFINE_BUF_EVENT(xfs_buf_item_iodone);
290DEFINE_BUF_EVENT(xfs_buf_item_iodone_async);
291DEFINE_BUF_EVENT(xfs_buf_error_relse);
292DEFINE_BUF_EVENT(xfs_trans_read_buf_io);
293DEFINE_BUF_EVENT(xfs_trans_read_buf_shut);
294
295/* not really buffer traces, but the buf provides useful information */
296DEFINE_BUF_EVENT(xfs_btree_corrupt);
297DEFINE_BUF_EVENT(xfs_da_btree_corrupt);
298DEFINE_BUF_EVENT(xfs_reset_dqcounts);
299DEFINE_BUF_EVENT(xfs_inode_item_push);
300
301/* pass flags explicitly */
302#define DEFINE_BUF_FLAGS_EVENT(tname) \
303TRACE_EVENT(tname, \
304 TP_PROTO(struct xfs_buf *bp, unsigned flags, unsigned long caller_ip), \
305 TP_ARGS(bp, flags, caller_ip), \
306 TP_STRUCT__entry( \
307 __field(dev_t, dev) \
308 __field(xfs_daddr_t, bno) \
309 __field(size_t, buffer_length) \
310 __field(int, hold) \
311 __field(int, pincount) \
312 __field(unsigned, lockval) \
313 __field(unsigned, flags) \
314 __field(unsigned long, caller_ip) \
315 ), \
316 TP_fast_assign( \
317 __entry->dev = bp->b_target->bt_dev; \
318 __entry->bno = bp->b_bn; \
319 __entry->buffer_length = bp->b_buffer_length; \
320 __entry->flags = flags; \
321 __entry->hold = atomic_read(&bp->b_hold); \
322 __entry->pincount = atomic_read(&bp->b_pin_count); \
323 __entry->lockval = xfs_buf_lock_value(bp); \
324 __entry->caller_ip = caller_ip; \
325 ), \
326 TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d " \
327 "lock %d flags %s caller %pf", \
328 MAJOR(__entry->dev), MINOR(__entry->dev), \
329 (unsigned long long)__entry->bno, \
330 __entry->buffer_length, \
331 __entry->hold, \
332 __entry->pincount, \
333 __entry->lockval, \
334 __print_flags(__entry->flags, "|", XFS_BUF_FLAGS), \
335 (void *)__entry->caller_ip) \
336)
337DEFINE_BUF_FLAGS_EVENT(xfs_buf_find);
338DEFINE_BUF_FLAGS_EVENT(xfs_buf_get);
339DEFINE_BUF_FLAGS_EVENT(xfs_buf_read);
340
341TRACE_EVENT(xfs_buf_ioerror,
342 TP_PROTO(struct xfs_buf *bp, int error, unsigned long caller_ip),
343 TP_ARGS(bp, error, caller_ip),
344 TP_STRUCT__entry(
345 __field(dev_t, dev)
346 __field(xfs_daddr_t, bno)
347 __field(size_t, buffer_length)
348 __field(unsigned, flags)
349 __field(int, hold)
350 __field(int, pincount)
351 __field(unsigned, lockval)
352 __field(int, error)
353 __field(unsigned long, caller_ip)
354 ),
355 TP_fast_assign(
356 __entry->dev = bp->b_target->bt_dev;
357 __entry->bno = bp->b_bn;
358 __entry->buffer_length = bp->b_buffer_length;
359 __entry->hold = atomic_read(&bp->b_hold);
360 __entry->pincount = atomic_read(&bp->b_pin_count);
361 __entry->lockval = xfs_buf_lock_value(bp);
362 __entry->error = error;
363 __entry->flags = bp->b_flags;
364 __entry->caller_ip = caller_ip;
365 ),
366 TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d "
367 "lock %d error %d flags %s caller %pf",
368 MAJOR(__entry->dev), MINOR(__entry->dev),
369 (unsigned long long)__entry->bno,
370 __entry->buffer_length,
371 __entry->hold,
372 __entry->pincount,
373 __entry->lockval,
374 __entry->error,
375 __print_flags(__entry->flags, "|", XFS_BUF_FLAGS),
376 (void *)__entry->caller_ip)
377);
378
379#define DEFINE_BUF_ITEM_EVENT(tname) \
380TRACE_EVENT(tname, \
381 TP_PROTO(struct xfs_buf_log_item *bip), \
382 TP_ARGS(bip), \
383 TP_STRUCT__entry( \
384 __field(dev_t, dev) \
385 __field(xfs_daddr_t, buf_bno) \
386 __field(size_t, buf_len) \
387 __field(int, buf_hold) \
388 __field(int, buf_pincount) \
389 __field(int, buf_lockval) \
390 __field(unsigned, buf_flags) \
391 __field(unsigned, bli_recur) \
392 __field(int, bli_refcount) \
393 __field(unsigned, bli_flags) \
394 __field(void *, li_desc) \
395 __field(unsigned, li_flags) \
396 ), \
397 TP_fast_assign( \
398 __entry->dev = bip->bli_buf->b_target->bt_dev; \
399 __entry->bli_flags = bip->bli_flags; \
400 __entry->bli_recur = bip->bli_recur; \
401 __entry->bli_refcount = atomic_read(&bip->bli_refcount); \
402 __entry->buf_bno = bip->bli_buf->b_bn; \
403 __entry->buf_len = bip->bli_buf->b_buffer_length; \
404 __entry->buf_flags = bip->bli_buf->b_flags; \
405 __entry->buf_hold = atomic_read(&bip->bli_buf->b_hold); \
406 __entry->buf_pincount = \
407 atomic_read(&bip->bli_buf->b_pin_count); \
408 __entry->buf_lockval = xfs_buf_lock_value(bip->bli_buf); \
409 __entry->li_desc = bip->bli_item.li_desc; \
410 __entry->li_flags = bip->bli_item.li_flags; \
411 ), \
412 TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d " \
413 "lock %d flags %s recur %d refcount %d bliflags %s " \
414 "lidesc 0x%p liflags %s", \
415 MAJOR(__entry->dev), MINOR(__entry->dev), \
416 (unsigned long long)__entry->buf_bno, \
417 __entry->buf_len, \
418 __entry->buf_hold, \
419 __entry->buf_pincount, \
420 __entry->buf_lockval, \
421 __print_flags(__entry->buf_flags, "|", XFS_BUF_FLAGS), \
422 __entry->bli_recur, \
423 __entry->bli_refcount, \
424 __print_flags(__entry->bli_flags, "|", XFS_BLI_FLAGS), \
425 __entry->li_desc, \
426 __print_flags(__entry->li_flags, "|", XFS_LI_FLAGS)) \
427)
428DEFINE_BUF_ITEM_EVENT(xfs_buf_item_size);
429DEFINE_BUF_ITEM_EVENT(xfs_buf_item_size_stale);
430DEFINE_BUF_ITEM_EVENT(xfs_buf_item_format);
431DEFINE_BUF_ITEM_EVENT(xfs_buf_item_format_stale);
432DEFINE_BUF_ITEM_EVENT(xfs_buf_item_pin);
433DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unpin);
434DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unpin_stale);
435DEFINE_BUF_ITEM_EVENT(xfs_buf_item_trylock);
436DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unlock);
437DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unlock_stale);
438DEFINE_BUF_ITEM_EVENT(xfs_buf_item_committed);
439DEFINE_BUF_ITEM_EVENT(xfs_buf_item_push);
440DEFINE_BUF_ITEM_EVENT(xfs_trans_get_buf);
441DEFINE_BUF_ITEM_EVENT(xfs_trans_get_buf_recur);
442DEFINE_BUF_ITEM_EVENT(xfs_trans_getsb);
443DEFINE_BUF_ITEM_EVENT(xfs_trans_getsb_recur);
444DEFINE_BUF_ITEM_EVENT(xfs_trans_read_buf);
445DEFINE_BUF_ITEM_EVENT(xfs_trans_read_buf_recur);
446DEFINE_BUF_ITEM_EVENT(xfs_trans_log_buf);
447DEFINE_BUF_ITEM_EVENT(xfs_trans_brelse);
448DEFINE_BUF_ITEM_EVENT(xfs_trans_bjoin);
449DEFINE_BUF_ITEM_EVENT(xfs_trans_bhold);
450DEFINE_BUF_ITEM_EVENT(xfs_trans_bhold_release);
451DEFINE_BUF_ITEM_EVENT(xfs_trans_binval);
452
453#define DEFINE_LOCK_EVENT(name) \
454TRACE_EVENT(name, \
455 TP_PROTO(struct xfs_inode *ip, unsigned lock_flags, \
456 unsigned long caller_ip), \
457 TP_ARGS(ip, lock_flags, caller_ip), \
458 TP_STRUCT__entry( \
459 __field(dev_t, dev) \
460 __field(xfs_ino_t, ino) \
461 __field(int, lock_flags) \
462 __field(unsigned long, caller_ip) \
463 ), \
464 TP_fast_assign( \
465 __entry->dev = VFS_I(ip)->i_sb->s_dev; \
466 __entry->ino = ip->i_ino; \
467 __entry->lock_flags = lock_flags; \
468 __entry->caller_ip = caller_ip; \
469 ), \
470 TP_printk("dev %d:%d ino 0x%llx flags %s caller %pf", \
471 MAJOR(__entry->dev), MINOR(__entry->dev), \
472 __entry->ino, \
473 __print_flags(__entry->lock_flags, "|", XFS_LOCK_FLAGS), \
474 (void *)__entry->caller_ip) \
475)
476
477DEFINE_LOCK_EVENT(xfs_ilock);
478DEFINE_LOCK_EVENT(xfs_ilock_nowait);
479DEFINE_LOCK_EVENT(xfs_ilock_demote);
480DEFINE_LOCK_EVENT(xfs_iunlock);
481
482#define DEFINE_IGET_EVENT(name) \
483TRACE_EVENT(name, \
484 TP_PROTO(struct xfs_inode *ip), \
485 TP_ARGS(ip), \
486 TP_STRUCT__entry( \
487 __field(dev_t, dev) \
488 __field(xfs_ino_t, ino) \
489 ), \
490 TP_fast_assign( \
491 __entry->dev = VFS_I(ip)->i_sb->s_dev; \
492 __entry->ino = ip->i_ino; \
493 ), \
494 TP_printk("dev %d:%d ino 0x%llx", \
495 MAJOR(__entry->dev), MINOR(__entry->dev), \
496 __entry->ino) \
497)
498DEFINE_IGET_EVENT(xfs_iget_skip);
499DEFINE_IGET_EVENT(xfs_iget_reclaim);
500DEFINE_IGET_EVENT(xfs_iget_found);
501DEFINE_IGET_EVENT(xfs_iget_alloc);
502
503#define DEFINE_INODE_EVENT(name) \
504TRACE_EVENT(name, \
505 TP_PROTO(struct xfs_inode *ip, unsigned long caller_ip), \
506 TP_ARGS(ip, caller_ip), \
507 TP_STRUCT__entry( \
508 __field(dev_t, dev) \
509 __field(xfs_ino_t, ino) \
510 __field(int, count) \
511 __field(unsigned long, caller_ip) \
512 ), \
513 TP_fast_assign( \
514 __entry->dev = VFS_I(ip)->i_sb->s_dev; \
515 __entry->ino = ip->i_ino; \
516 __entry->count = atomic_read(&VFS_I(ip)->i_count); \
517 __entry->caller_ip = caller_ip; \
518 ), \
519 TP_printk("dev %d:%d ino 0x%llx count %d caller %pf", \
520 MAJOR(__entry->dev), MINOR(__entry->dev), \
521 __entry->ino, \
522 __entry->count, \
523		  (void *)__entry->caller_ip) \
524)
525DEFINE_INODE_EVENT(xfs_ihold);
526DEFINE_INODE_EVENT(xfs_irele);
527/* the old xfs_itrace_entry tracer - to be replaced by something in the VFS */
528DEFINE_INODE_EVENT(xfs_inode);
529#define xfs_itrace_entry(ip) \
530 trace_xfs_inode(ip, _THIS_IP_)
531
532#define DEFINE_DQUOT_EVENT(tname) \
533TRACE_EVENT(tname, \
534 TP_PROTO(struct xfs_dquot *dqp), \
535 TP_ARGS(dqp), \
536 TP_STRUCT__entry( \
537 __field(dev_t, dev) \
538 __field(__be32, id) \
539 __field(unsigned, flags) \
540 __field(unsigned, nrefs) \
541 __field(unsigned long long, res_bcount) \
542 __field(unsigned long long, bcount) \
543 __field(unsigned long long, icount) \
544 __field(unsigned long long, blk_hardlimit) \
545 __field(unsigned long long, blk_softlimit) \
546 __field(unsigned long long, ino_hardlimit) \
547 __field(unsigned long long, ino_softlimit) \
548 ), \
549 TP_fast_assign( \
550 __entry->dev = dqp->q_mount->m_super->s_dev; \
551 __entry->id = dqp->q_core.d_id; \
552 __entry->flags = dqp->dq_flags; \
553 __entry->nrefs = dqp->q_nrefs; \
554 __entry->res_bcount = dqp->q_res_bcount; \
555 __entry->bcount = be64_to_cpu(dqp->q_core.d_bcount); \
556 __entry->icount = be64_to_cpu(dqp->q_core.d_icount); \
557 __entry->blk_hardlimit = \
558 be64_to_cpu(dqp->q_core.d_blk_hardlimit); \
559 __entry->blk_softlimit = \
560 be64_to_cpu(dqp->q_core.d_blk_softlimit); \
561 __entry->ino_hardlimit = \
562 be64_to_cpu(dqp->q_core.d_ino_hardlimit); \
563 __entry->ino_softlimit = \
564 be64_to_cpu(dqp->q_core.d_ino_softlimit); \
565 ), \
566 TP_printk("dev %d:%d id 0x%x flags %s nrefs %u res_bc 0x%llx " \
567 "bcnt 0x%llx [hard 0x%llx | soft 0x%llx] " \
568 "icnt 0x%llx [hard 0x%llx | soft 0x%llx]", \
569 MAJOR(__entry->dev), MINOR(__entry->dev), \
570 be32_to_cpu(__entry->id), \
571 __print_flags(__entry->flags, "|", XFS_DQ_FLAGS), \
572 __entry->nrefs, \
573 __entry->res_bcount, \
574 __entry->bcount, \
575 __entry->blk_hardlimit, \
576 __entry->blk_softlimit, \
577 __entry->icount, \
578 __entry->ino_hardlimit, \
579 __entry->ino_softlimit) \
580)
581DEFINE_DQUOT_EVENT(xfs_dqadjust);
582DEFINE_DQUOT_EVENT(xfs_dqshake_dirty);
583DEFINE_DQUOT_EVENT(xfs_dqshake_unlink);
584DEFINE_DQUOT_EVENT(xfs_dqreclaim_want);
585DEFINE_DQUOT_EVENT(xfs_dqreclaim_dirty);
586DEFINE_DQUOT_EVENT(xfs_dqreclaim_unlink);
587DEFINE_DQUOT_EVENT(xfs_dqattach_found);
588DEFINE_DQUOT_EVENT(xfs_dqattach_get);
589DEFINE_DQUOT_EVENT(xfs_dqinit);
590DEFINE_DQUOT_EVENT(xfs_dqreuse);
591DEFINE_DQUOT_EVENT(xfs_dqalloc);
592DEFINE_DQUOT_EVENT(xfs_dqtobp_read);
593DEFINE_DQUOT_EVENT(xfs_dqread);
594DEFINE_DQUOT_EVENT(xfs_dqread_fail);
595DEFINE_DQUOT_EVENT(xfs_dqlookup_found);
596DEFINE_DQUOT_EVENT(xfs_dqlookup_want);
597DEFINE_DQUOT_EVENT(xfs_dqlookup_freelist);
598DEFINE_DQUOT_EVENT(xfs_dqlookup_move);
599DEFINE_DQUOT_EVENT(xfs_dqlookup_done);
600DEFINE_DQUOT_EVENT(xfs_dqget_hit);
601DEFINE_DQUOT_EVENT(xfs_dqget_miss);
602DEFINE_DQUOT_EVENT(xfs_dqput);
603DEFINE_DQUOT_EVENT(xfs_dqput_wait);
604DEFINE_DQUOT_EVENT(xfs_dqput_free);
605DEFINE_DQUOT_EVENT(xfs_dqrele);
606DEFINE_DQUOT_EVENT(xfs_dqflush);
607DEFINE_DQUOT_EVENT(xfs_dqflush_force);
608DEFINE_DQUOT_EVENT(xfs_dqflush_done);
609/* not really iget events, but we re-use the format */
610DEFINE_IGET_EVENT(xfs_dquot_dqalloc);
611DEFINE_IGET_EVENT(xfs_dquot_dqdetach);
612
613
614#define DEFINE_LOGGRANT_EVENT(tname) \
615TRACE_EVENT(tname, \
616 TP_PROTO(struct log *log, struct xlog_ticket *tic), \
617 TP_ARGS(log, tic), \
618 TP_STRUCT__entry( \
619 __field(dev_t, dev) \
620 __field(unsigned, trans_type) \
621 __field(char, ocnt) \
622 __field(char, cnt) \
623 __field(int, curr_res) \
624 __field(int, unit_res) \
625 __field(unsigned int, flags) \
626 __field(void *, reserve_headq) \
627 __field(void *, write_headq) \
628 __field(int, grant_reserve_cycle) \
629 __field(int, grant_reserve_bytes) \
630 __field(int, grant_write_cycle) \
631 __field(int, grant_write_bytes) \
632 __field(int, curr_cycle) \
633 __field(int, curr_block) \
634 __field(xfs_lsn_t, tail_lsn) \
635 ), \
636 TP_fast_assign( \
637 __entry->dev = log->l_mp->m_super->s_dev; \
638 __entry->trans_type = tic->t_trans_type; \
639 __entry->ocnt = tic->t_ocnt; \
640 __entry->cnt = tic->t_cnt; \
641 __entry->curr_res = tic->t_curr_res; \
642 __entry->unit_res = tic->t_unit_res; \
643 __entry->flags = tic->t_flags; \
644 __entry->reserve_headq = log->l_reserve_headq; \
645 __entry->write_headq = log->l_write_headq; \
646 __entry->grant_reserve_cycle = log->l_grant_reserve_cycle; \
647 __entry->grant_reserve_bytes = log->l_grant_reserve_bytes; \
648 __entry->grant_write_cycle = log->l_grant_write_cycle; \
649 __entry->grant_write_bytes = log->l_grant_write_bytes; \
650 __entry->curr_cycle = log->l_curr_cycle; \
651 __entry->curr_block = log->l_curr_block; \
652 __entry->tail_lsn = log->l_tail_lsn; \
653 ), \
654 TP_printk("dev %d:%d type %s t_ocnt %u t_cnt %u t_curr_res %u " \
655 "t_unit_res %u t_flags %s reserve_headq 0x%p " \
656 "write_headq 0x%p grant_reserve_cycle %d " \
657 "grant_reserve_bytes %d grant_write_cycle %d " \
658 "grant_write_bytes %d curr_cycle %d curr_block %d " \
659 "tail_cycle %d tail_block %d", \
660 MAJOR(__entry->dev), MINOR(__entry->dev), \
661 __print_symbolic(__entry->trans_type, XFS_TRANS_TYPES), \
662 __entry->ocnt, \
663 __entry->cnt, \
664 __entry->curr_res, \
665 __entry->unit_res, \
666 __print_flags(__entry->flags, "|", XLOG_TIC_FLAGS), \
667 __entry->reserve_headq, \
668 __entry->write_headq, \
669 __entry->grant_reserve_cycle, \
670 __entry->grant_reserve_bytes, \
671 __entry->grant_write_cycle, \
672 __entry->grant_write_bytes, \
673 __entry->curr_cycle, \
674 __entry->curr_block, \
675 CYCLE_LSN(__entry->tail_lsn), \
676 BLOCK_LSN(__entry->tail_lsn) \
677 ) \
678)
679DEFINE_LOGGRANT_EVENT(xfs_log_done_nonperm);
680DEFINE_LOGGRANT_EVENT(xfs_log_done_perm);
681DEFINE_LOGGRANT_EVENT(xfs_log_reserve);
682DEFINE_LOGGRANT_EVENT(xfs_log_umount_write);
683DEFINE_LOGGRANT_EVENT(xfs_log_grant_enter);
684DEFINE_LOGGRANT_EVENT(xfs_log_grant_exit);
685DEFINE_LOGGRANT_EVENT(xfs_log_grant_error);
686DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep1);
687DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake1);
688DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep2);
689DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake2);
690DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_enter);
691DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_exit);
692DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_error);
693DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep1);
694DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake1);
695DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep2);
696DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake2);
697DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_enter);
698DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_exit);
699DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_sub);
700DEFINE_LOGGRANT_EVENT(xfs_log_ungrant_enter);
701DEFINE_LOGGRANT_EVENT(xfs_log_ungrant_exit);
702DEFINE_LOGGRANT_EVENT(xfs_log_ungrant_sub);
703
704#define DEFINE_RW_EVENT(name) \
705TRACE_EVENT(name, \
706 TP_PROTO(struct xfs_inode *ip, size_t count, loff_t offset, int flags), \
707 TP_ARGS(ip, count, offset, flags), \
708 TP_STRUCT__entry( \
709 __field(dev_t, dev) \
710 __field(xfs_ino_t, ino) \
711 __field(xfs_fsize_t, size) \
712 __field(xfs_fsize_t, new_size) \
713 __field(loff_t, offset) \
714 __field(size_t, count) \
715 __field(int, flags) \
716 ), \
717 TP_fast_assign( \
718 __entry->dev = VFS_I(ip)->i_sb->s_dev; \
719 __entry->ino = ip->i_ino; \
720 __entry->size = ip->i_d.di_size; \
721 __entry->new_size = ip->i_new_size; \
722 __entry->offset = offset; \
723 __entry->count = count; \
724 __entry->flags = flags; \
725 ), \
726 TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx " \
727 "offset 0x%llx count 0x%zx ioflags %s", \
728 MAJOR(__entry->dev), MINOR(__entry->dev), \
729 __entry->ino, \
730 __entry->size, \
731 __entry->new_size, \
732 __entry->offset, \
733 __entry->count, \
734 __print_flags(__entry->flags, "|", XFS_IO_FLAGS)) \
735)
736DEFINE_RW_EVENT(xfs_file_read);
737DEFINE_RW_EVENT(xfs_file_buffered_write);
738DEFINE_RW_EVENT(xfs_file_direct_write);
739DEFINE_RW_EVENT(xfs_file_splice_read);
740DEFINE_RW_EVENT(xfs_file_splice_write);
741
742
743#define DEFINE_PAGE_EVENT(name) \
744TRACE_EVENT(name, \
745 TP_PROTO(struct inode *inode, struct page *page, unsigned long off), \
746 TP_ARGS(inode, page, off), \
747 TP_STRUCT__entry( \
748 __field(dev_t, dev) \
749 __field(xfs_ino_t, ino) \
750 __field(pgoff_t, pgoff) \
751 __field(loff_t, size) \
752 __field(unsigned long, offset) \
753 __field(int, delalloc) \
754 __field(int, unmapped) \
755 __field(int, unwritten) \
756 ), \
757 TP_fast_assign( \
758 int delalloc = -1, unmapped = -1, unwritten = -1; \
759 \
760 if (page_has_buffers(page)) \
761 xfs_count_page_state(page, &delalloc, \
762 &unmapped, &unwritten); \
763 __entry->dev = inode->i_sb->s_dev; \
764 __entry->ino = XFS_I(inode)->i_ino; \
765 __entry->pgoff = page_offset(page); \
766 __entry->size = i_size_read(inode); \
767 __entry->offset = off; \
768 __entry->delalloc = delalloc; \
769 __entry->unmapped = unmapped; \
770 __entry->unwritten = unwritten; \
771 ), \
772 TP_printk("dev %d:%d ino 0x%llx pgoff 0x%lx size 0x%llx offset %lx " \
773 "delalloc %d unmapped %d unwritten %d", \
774 MAJOR(__entry->dev), MINOR(__entry->dev), \
775 __entry->ino, \
776 __entry->pgoff, \
777 __entry->size, \
778 __entry->offset, \
779 __entry->delalloc, \
780 __entry->unmapped, \
781 __entry->unwritten) \
782)
783DEFINE_PAGE_EVENT(xfs_writepage);
784DEFINE_PAGE_EVENT(xfs_releasepage);
785DEFINE_PAGE_EVENT(xfs_invalidatepage);
786
787#define DEFINE_IOMAP_EVENT(name) \
788TRACE_EVENT(name, \
789 TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count, \
790 int flags, struct xfs_bmbt_irec *irec), \
791 TP_ARGS(ip, offset, count, flags, irec), \
792 TP_STRUCT__entry( \
793 __field(dev_t, dev) \
794 __field(xfs_ino_t, ino) \
795 __field(loff_t, size) \
796 __field(loff_t, new_size) \
797 __field(loff_t, offset) \
798 __field(size_t, count) \
799 __field(int, flags) \
800 __field(xfs_fileoff_t, startoff) \
801 __field(xfs_fsblock_t, startblock) \
802 __field(xfs_filblks_t, blockcount) \
803 ), \
804 TP_fast_assign( \
805 __entry->dev = VFS_I(ip)->i_sb->s_dev; \
806 __entry->ino = ip->i_ino; \
807 __entry->size = ip->i_d.di_size; \
808 __entry->new_size = ip->i_new_size; \
809 __entry->offset = offset; \
810 __entry->count = count; \
811 __entry->flags = flags; \
812 __entry->startoff = irec ? irec->br_startoff : 0; \
813 __entry->startblock = irec ? irec->br_startblock : 0; \
814 __entry->blockcount = irec ? irec->br_blockcount : 0; \
815 ), \
816 TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx " \
817 "offset 0x%llx count %zd flags %s " \
818 "startoff 0x%llx startblock 0x%llx blockcount 0x%llx", \
819 MAJOR(__entry->dev), MINOR(__entry->dev), \
820 __entry->ino, \
821 __entry->size, \
822 __entry->new_size, \
823 __entry->offset, \
824 __entry->count, \
825 __print_flags(__entry->flags, "|", BMAPI_FLAGS), \
826 __entry->startoff, \
827 __entry->startblock, \
828 __entry->blockcount) \
829)
830DEFINE_IOMAP_EVENT(xfs_iomap_enter);
831DEFINE_IOMAP_EVENT(xfs_iomap_found);
832DEFINE_IOMAP_EVENT(xfs_iomap_alloc);
833
834#define DEFINE_SIMPLE_IO_EVENT(name) \
835TRACE_EVENT(name, \
836 TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count), \
837 TP_ARGS(ip, offset, count), \
838 TP_STRUCT__entry( \
839 __field(dev_t, dev) \
840 __field(xfs_ino_t, ino) \
841 __field(loff_t, size) \
842 __field(loff_t, new_size) \
843 __field(loff_t, offset) \
844 __field(size_t, count) \
845 ), \
846 TP_fast_assign( \
847 __entry->dev = VFS_I(ip)->i_sb->s_dev; \
848 __entry->ino = ip->i_ino; \
849 __entry->size = ip->i_d.di_size; \
850 __entry->new_size = ip->i_new_size; \
851 __entry->offset = offset; \
852 __entry->count = count; \
853 ), \
854 TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx " \
855 "offset 0x%llx count %zd", \
856 MAJOR(__entry->dev), MINOR(__entry->dev), \
857 __entry->ino, \
858 __entry->size, \
859 __entry->new_size, \
860 __entry->offset, \
861 __entry->count) \
862);
863DEFINE_SIMPLE_IO_EVENT(xfs_delalloc_enospc);
864DEFINE_SIMPLE_IO_EVENT(xfs_unwritten_convert);
865
866
867TRACE_EVENT(xfs_itruncate_start,
868 TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size, int flag,
869 xfs_off_t toss_start, xfs_off_t toss_finish),
870 TP_ARGS(ip, new_size, flag, toss_start, toss_finish),
871 TP_STRUCT__entry(
872 __field(dev_t, dev)
873 __field(xfs_ino_t, ino)
874 __field(xfs_fsize_t, size)
875 __field(xfs_fsize_t, new_size)
876 __field(xfs_off_t, toss_start)
877 __field(xfs_off_t, toss_finish)
878 __field(int, flag)
879 ),
880 TP_fast_assign(
881 __entry->dev = VFS_I(ip)->i_sb->s_dev;
882 __entry->ino = ip->i_ino;
883 __entry->size = ip->i_d.di_size;
884 __entry->new_size = new_size;
885 __entry->toss_start = toss_start;
886 __entry->toss_finish = toss_finish;
887 __entry->flag = flag;
888 ),
889 TP_printk("dev %d:%d ino 0x%llx %s size 0x%llx new_size 0x%llx "
890 "toss start 0x%llx toss finish 0x%llx",
891 MAJOR(__entry->dev), MINOR(__entry->dev),
892 __entry->ino,
893 __print_flags(__entry->flag, "|", XFS_ITRUNC_FLAGS),
894 __entry->size,
895 __entry->new_size,
896 __entry->toss_start,
897 __entry->toss_finish)
898);
899
900#define DEFINE_ITRUNC_EVENT(name) \
901TRACE_EVENT(name, \
902 TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size), \
903 TP_ARGS(ip, new_size), \
904 TP_STRUCT__entry( \
905 __field(dev_t, dev) \
906 __field(xfs_ino_t, ino) \
907 __field(xfs_fsize_t, size) \
908 __field(xfs_fsize_t, new_size) \
909 ), \
910 TP_fast_assign( \
911 __entry->dev = VFS_I(ip)->i_sb->s_dev; \
912 __entry->ino = ip->i_ino; \
913 __entry->size = ip->i_d.di_size; \
914 __entry->new_size = new_size; \
915 ), \
916 TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx", \
917 MAJOR(__entry->dev), MINOR(__entry->dev), \
918 __entry->ino, \
919 __entry->size, \
920 __entry->new_size) \
921)
922DEFINE_ITRUNC_EVENT(xfs_itruncate_finish_start);
923DEFINE_ITRUNC_EVENT(xfs_itruncate_finish_end);
924
925TRACE_EVENT(xfs_pagecache_inval,
926 TP_PROTO(struct xfs_inode *ip, xfs_off_t start, xfs_off_t finish),
927 TP_ARGS(ip, start, finish),
928 TP_STRUCT__entry(
929 __field(dev_t, dev)
930 __field(xfs_ino_t, ino)
931 __field(xfs_fsize_t, size)
932 __field(xfs_off_t, start)
933 __field(xfs_off_t, finish)
934 ),
935 TP_fast_assign(
936 __entry->dev = VFS_I(ip)->i_sb->s_dev;
937 __entry->ino = ip->i_ino;
938 __entry->size = ip->i_d.di_size;
939 __entry->start = start;
940 __entry->finish = finish;
941 ),
942 TP_printk("dev %d:%d ino 0x%llx size 0x%llx start 0x%llx finish 0x%llx",
943 MAJOR(__entry->dev), MINOR(__entry->dev),
944 __entry->ino,
945 __entry->size,
946 __entry->start,
947 __entry->finish)
948);
949
950TRACE_EVENT(xfs_bunmap,
951 TP_PROTO(struct xfs_inode *ip, xfs_fileoff_t bno, xfs_filblks_t len,
952 int flags, unsigned long caller_ip),
953 TP_ARGS(ip, bno, len, flags, caller_ip),
954 TP_STRUCT__entry(
955 __field(dev_t, dev)
956 __field(xfs_ino_t, ino)
957 __field(xfs_fsize_t, size)
958 __field(xfs_fileoff_t, bno)
959 __field(xfs_filblks_t, len)
960 __field(unsigned long, caller_ip)
961 __field(int, flags)
962 ),
963 TP_fast_assign(
964 __entry->dev = VFS_I(ip)->i_sb->s_dev;
965 __entry->ino = ip->i_ino;
966 __entry->size = ip->i_d.di_size;
967 __entry->bno = bno;
968 __entry->len = len;
969 __entry->caller_ip = caller_ip;
970 __entry->flags = flags;
971 ),
972	TP_printk("dev %d:%d ino 0x%llx size 0x%llx bno 0x%llx len 0x%llx "
973 "flags %s caller %pf",
974 MAJOR(__entry->dev), MINOR(__entry->dev),
975 __entry->ino,
976 __entry->size,
977 __entry->bno,
978 __entry->len,
979 __print_flags(__entry->flags, "|", XFS_BMAPI_FLAGS),
980 (void *)__entry->caller_ip)
981
982);
983
984TRACE_EVENT(xfs_alloc_busy,
985 TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agblock_t agbno,
986 xfs_extlen_t len, int slot),
987 TP_ARGS(mp, agno, agbno, len, slot),
988 TP_STRUCT__entry(
989 __field(dev_t, dev)
990 __field(xfs_agnumber_t, agno)
991 __field(xfs_agblock_t, agbno)
992 __field(xfs_extlen_t, len)
993 __field(int, slot)
994 ),
995 TP_fast_assign(
996 __entry->dev = mp->m_super->s_dev;
997 __entry->agno = agno;
998 __entry->agbno = agbno;
999 __entry->len = len;
1000 __entry->slot = slot;
1001 ),
1002 TP_printk("dev %d:%d agno %u agbno %u len %u slot %d",
1003 MAJOR(__entry->dev), MINOR(__entry->dev),
1004 __entry->agno,
1005 __entry->agbno,
1006 __entry->len,
1007 __entry->slot)
1008
1009);
1010
1011#define XFS_BUSY_STATES \
1012 { 0, "found" }, \
1013 { 1, "missing" }
1014
1015TRACE_EVENT(xfs_alloc_unbusy,
1016 TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
1017 int slot, int found),
1018 TP_ARGS(mp, agno, slot, found),
1019 TP_STRUCT__entry(
1020 __field(dev_t, dev)
1021 __field(xfs_agnumber_t, agno)
1022 __field(int, slot)
1023 __field(int, found)
1024 ),
1025 TP_fast_assign(
1026 __entry->dev = mp->m_super->s_dev;
1027 __entry->agno = agno;
1028 __entry->slot = slot;
1029 __entry->found = found;
1030 ),
1031 TP_printk("dev %d:%d agno %u slot %d %s",
1032 MAJOR(__entry->dev), MINOR(__entry->dev),
1033 __entry->agno,
1034 __entry->slot,
1035 __print_symbolic(__entry->found, XFS_BUSY_STATES))
1036);
1037
1038TRACE_EVENT(xfs_alloc_busysearch,
1039 TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agblock_t agbno,
1040 xfs_extlen_t len, int found),
1041 TP_ARGS(mp, agno, agbno, len, found),
1042 TP_STRUCT__entry(
1043 __field(dev_t, dev)
1044 __field(xfs_agnumber_t, agno)
1045 __field(xfs_agblock_t, agbno)
1046 __field(xfs_extlen_t, len)
1047 __field(int, found)
1048 ),
1049 TP_fast_assign(
1050 __entry->dev = mp->m_super->s_dev;
1051 __entry->agno = agno;
1052 __entry->agbno = agbno;
1053 __entry->len = len;
1054 __entry->found = found;
1055 ),
1056 TP_printk("dev %d:%d agno %u agbno %u len %u %s",
1057 MAJOR(__entry->dev), MINOR(__entry->dev),
1058 __entry->agno,
1059 __entry->agbno,
1060 __entry->len,
1061 __print_symbolic(__entry->found, XFS_BUSY_STATES))
1062);
1063
1064TRACE_EVENT(xfs_agf,
1065 TP_PROTO(struct xfs_mount *mp, struct xfs_agf *agf, int flags,
1066 unsigned long caller_ip),
1067 TP_ARGS(mp, agf, flags, caller_ip),
1068 TP_STRUCT__entry(
1069 __field(dev_t, dev)
1070 __field(xfs_agnumber_t, agno)
1071 __field(int, flags)
1072 __field(__u32, length)
1073 __field(__u32, bno_root)
1074 __field(__u32, cnt_root)
1075 __field(__u32, bno_level)
1076 __field(__u32, cnt_level)
1077 __field(__u32, flfirst)
1078 __field(__u32, fllast)
1079 __field(__u32, flcount)
1080 __field(__u32, freeblks)
1081 __field(__u32, longest)
1082 __field(unsigned long, caller_ip)
1083 ),
1084 TP_fast_assign(
1085 __entry->dev = mp->m_super->s_dev;
1086 __entry->agno = be32_to_cpu(agf->agf_seqno),
1087 __entry->flags = flags;
1088 __entry->length = be32_to_cpu(agf->agf_length),
1089 __entry->bno_root = be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]),
1090 __entry->cnt_root = be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]),
1091 __entry->bno_level =
1092 be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]),
1093 __entry->cnt_level =
1094 be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]),
1095 __entry->flfirst = be32_to_cpu(agf->agf_flfirst),
1096 __entry->fllast = be32_to_cpu(agf->agf_fllast),
1097 __entry->flcount = be32_to_cpu(agf->agf_flcount),
1098 __entry->freeblks = be32_to_cpu(agf->agf_freeblks),
1099 __entry->longest = be32_to_cpu(agf->agf_longest);
1100 __entry->caller_ip = caller_ip;
1101 ),
1102 TP_printk("dev %d:%d agno %u flags %s length %u roots b %u c %u "
1103 "levels b %u c %u flfirst %u fllast %u flcount %u "
1104 "freeblks %u longest %u caller %pf",
1105 MAJOR(__entry->dev), MINOR(__entry->dev),
1106 __entry->agno,
1107 __print_flags(__entry->flags, "|", XFS_AGF_FLAGS),
1108 __entry->length,
1109 __entry->bno_root,
1110 __entry->cnt_root,
1111 __entry->bno_level,
1112 __entry->cnt_level,
1113 __entry->flfirst,
1114 __entry->fllast,
1115 __entry->flcount,
1116 __entry->freeblks,
1117 __entry->longest,
1118 (void *)__entry->caller_ip)
1119);
1120
1121TRACE_EVENT(xfs_free_extent,
1122 TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agblock_t agbno,
1123 xfs_extlen_t len, bool isfl, int haveleft, int haveright),
1124 TP_ARGS(mp, agno, agbno, len, isfl, haveleft, haveright),
1125 TP_STRUCT__entry(
1126 __field(dev_t, dev)
1127 __field(xfs_agnumber_t, agno)
1128 __field(xfs_agblock_t, agbno)
1129 __field(xfs_extlen_t, len)
1130 __field(int, isfl)
1131 __field(int, haveleft)
1132 __field(int, haveright)
1133 ),
1134 TP_fast_assign(
1135 __entry->dev = mp->m_super->s_dev;
1136 __entry->agno = agno;
1137 __entry->agbno = agbno;
1138 __entry->len = len;
1139 __entry->isfl = isfl;
1140 __entry->haveleft = haveleft;
1141 __entry->haveright = haveright;
1142 ),
1143 TP_printk("dev %d:%d agno %u agbno %u len %u isfl %d %s",
1144 MAJOR(__entry->dev), MINOR(__entry->dev),
1145 __entry->agno,
1146 __entry->agbno,
1147 __entry->len,
1148 __entry->isfl,
1149 __entry->haveleft ?
1150 (__entry->haveright ? "both" : "left") :
1151 (__entry->haveright ? "right" : "none"))
1152
1153);
1154
1155#define DEFINE_ALLOC_EVENT(name) \
1156TRACE_EVENT(name, \
1157 TP_PROTO(struct xfs_alloc_arg *args), \
1158 TP_ARGS(args), \
1159 TP_STRUCT__entry( \
1160 __field(dev_t, dev) \
1161 __field(xfs_agnumber_t, agno) \
1162 __field(xfs_agblock_t, agbno) \
1163 __field(xfs_extlen_t, minlen) \
1164 __field(xfs_extlen_t, maxlen) \
1165 __field(xfs_extlen_t, mod) \
1166 __field(xfs_extlen_t, prod) \
1167 __field(xfs_extlen_t, minleft) \
1168 __field(xfs_extlen_t, total) \
1169 __field(xfs_extlen_t, alignment) \
1170 __field(xfs_extlen_t, minalignslop) \
1171 __field(xfs_extlen_t, len) \
1172 __field(short, type) \
1173 __field(short, otype) \
1174 __field(char, wasdel) \
1175 __field(char, wasfromfl) \
1176 __field(char, isfl) \
1177 __field(char, userdata) \
1178 __field(xfs_fsblock_t, firstblock) \
1179 ), \
1180 TP_fast_assign( \
1181 __entry->dev = args->mp->m_super->s_dev; \
1182 __entry->agno = args->agno; \
1183 __entry->agbno = args->agbno; \
1184 __entry->minlen = args->minlen; \
1185 __entry->maxlen = args->maxlen; \
1186 __entry->mod = args->mod; \
1187 __entry->prod = args->prod; \
1188 __entry->minleft = args->minleft; \
1189 __entry->total = args->total; \
1190 __entry->alignment = args->alignment; \
1191 __entry->minalignslop = args->minalignslop; \
1192 __entry->len = args->len; \
1193 __entry->type = args->type; \
1194 __entry->otype = args->otype; \
1195 __entry->wasdel = args->wasdel; \
1196 __entry->wasfromfl = args->wasfromfl; \
1197 __entry->isfl = args->isfl; \
1198 __entry->userdata = args->userdata; \
1199 __entry->firstblock = args->firstblock; \
1200 ), \
1201 TP_printk("dev %d:%d agno %u agbno %u minlen %u maxlen %u mod %u " \
1202 "prod %u minleft %u total %u alignment %u minalignslop %u " \
1203 "len %u type %s otype %s wasdel %d wasfromfl %d isfl %d " \
1204 "userdata %d firstblock 0x%llx", \
1205 MAJOR(__entry->dev), MINOR(__entry->dev), \
1206 __entry->agno, \
1207 __entry->agbno, \
1208 __entry->minlen, \
1209 __entry->maxlen, \
1210 __entry->mod, \
1211 __entry->prod, \
1212 __entry->minleft, \
1213 __entry->total, \
1214 __entry->alignment, \
1215 __entry->minalignslop, \
1216 __entry->len, \
1217 __print_symbolic(__entry->type, XFS_ALLOC_TYPES), \
1218 __print_symbolic(__entry->otype, XFS_ALLOC_TYPES), \
1219 __entry->wasdel, \
1220 __entry->wasfromfl, \
1221 __entry->isfl, \
1222 __entry->userdata, \
1223 __entry->firstblock) \
1224)
1225
1226DEFINE_ALLOC_EVENT(xfs_alloc_exact_done);
1227DEFINE_ALLOC_EVENT(xfs_alloc_exact_error);
1228DEFINE_ALLOC_EVENT(xfs_alloc_near_nominleft);
1229DEFINE_ALLOC_EVENT(xfs_alloc_near_first);
1230DEFINE_ALLOC_EVENT(xfs_alloc_near_greater);
1231DEFINE_ALLOC_EVENT(xfs_alloc_near_lesser);
1232DEFINE_ALLOC_EVENT(xfs_alloc_near_error);
1233DEFINE_ALLOC_EVENT(xfs_alloc_size_neither);
1234DEFINE_ALLOC_EVENT(xfs_alloc_size_noentry);
1235DEFINE_ALLOC_EVENT(xfs_alloc_size_nominleft);
1236DEFINE_ALLOC_EVENT(xfs_alloc_size_done);
1237DEFINE_ALLOC_EVENT(xfs_alloc_size_error);
1238DEFINE_ALLOC_EVENT(xfs_alloc_small_freelist);
1239DEFINE_ALLOC_EVENT(xfs_alloc_small_notenough);
1240DEFINE_ALLOC_EVENT(xfs_alloc_small_done);
1241DEFINE_ALLOC_EVENT(xfs_alloc_small_error);
1242DEFINE_ALLOC_EVENT(xfs_alloc_vextent_badargs);
1243DEFINE_ALLOC_EVENT(xfs_alloc_vextent_nofix);
1244DEFINE_ALLOC_EVENT(xfs_alloc_vextent_noagbp);
1245DEFINE_ALLOC_EVENT(xfs_alloc_vextent_loopfailed);
1246DEFINE_ALLOC_EVENT(xfs_alloc_vextent_allfailed);
1247
1248#define DEFINE_DIR2_TRACE(tname) \
1249TRACE_EVENT(tname, \
1250 TP_PROTO(struct xfs_da_args *args), \
1251 TP_ARGS(args), \
1252 TP_STRUCT__entry( \
1253 __field(dev_t, dev) \
1254 __field(xfs_ino_t, ino) \
1255 __dynamic_array(char, name, args->namelen) \
1256 __field(int, namelen) \
1257 __field(xfs_dahash_t, hashval) \
1258 __field(xfs_ino_t, inumber) \
1259 __field(int, op_flags) \
1260 ), \
1261 TP_fast_assign( \
1262 __entry->dev = VFS_I(args->dp)->i_sb->s_dev; \
1263 __entry->ino = args->dp->i_ino; \
1264 if (args->namelen) \
1265 memcpy(__get_str(name), args->name, args->namelen); \
1266 __entry->namelen = args->namelen; \
1267 __entry->hashval = args->hashval; \
1268 __entry->inumber = args->inumber; \
1269 __entry->op_flags = args->op_flags; \
1270 ), \
1271 TP_printk("dev %d:%d ino 0x%llx name %.*s namelen %d hashval 0x%x " \
1272 "inumber 0x%llx op_flags %s", \
1273 MAJOR(__entry->dev), MINOR(__entry->dev), \
1274 __entry->ino, \
1275 __entry->namelen, \
1276 __entry->namelen ? __get_str(name) : NULL, \
1277 __entry->namelen, \
1278 __entry->hashval, \
1279 __entry->inumber, \
1280 __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS)) \
1281)
1282DEFINE_DIR2_TRACE(xfs_dir2_sf_addname);
1283DEFINE_DIR2_TRACE(xfs_dir2_sf_create);
1284DEFINE_DIR2_TRACE(xfs_dir2_sf_lookup);
1285DEFINE_DIR2_TRACE(xfs_dir2_sf_replace);
1286DEFINE_DIR2_TRACE(xfs_dir2_sf_removename);
1287DEFINE_DIR2_TRACE(xfs_dir2_sf_toino4);
1288DEFINE_DIR2_TRACE(xfs_dir2_sf_toino8);
1289DEFINE_DIR2_TRACE(xfs_dir2_sf_to_block);
1290DEFINE_DIR2_TRACE(xfs_dir2_block_addname);
1291DEFINE_DIR2_TRACE(xfs_dir2_block_lookup);
1292DEFINE_DIR2_TRACE(xfs_dir2_block_replace);
1293DEFINE_DIR2_TRACE(xfs_dir2_block_removename);
1294DEFINE_DIR2_TRACE(xfs_dir2_block_to_sf);
1295DEFINE_DIR2_TRACE(xfs_dir2_block_to_leaf);
1296DEFINE_DIR2_TRACE(xfs_dir2_leaf_addname);
1297DEFINE_DIR2_TRACE(xfs_dir2_leaf_lookup);
1298DEFINE_DIR2_TRACE(xfs_dir2_leaf_replace);
1299DEFINE_DIR2_TRACE(xfs_dir2_leaf_removename);
1300DEFINE_DIR2_TRACE(xfs_dir2_leaf_to_block);
1301DEFINE_DIR2_TRACE(xfs_dir2_leaf_to_node);
1302DEFINE_DIR2_TRACE(xfs_dir2_node_addname);
1303DEFINE_DIR2_TRACE(xfs_dir2_node_lookup);
1304DEFINE_DIR2_TRACE(xfs_dir2_node_replace);
1305DEFINE_DIR2_TRACE(xfs_dir2_node_removename);
1306DEFINE_DIR2_TRACE(xfs_dir2_node_to_leaf);
1307
1308#define DEFINE_DIR2_SPACE_TRACE(tname) \
1309TRACE_EVENT(tname, \
1310 TP_PROTO(struct xfs_da_args *args, int idx), \
1311 TP_ARGS(args, idx), \
1312 TP_STRUCT__entry( \
1313 __field(dev_t, dev) \
1314 __field(xfs_ino_t, ino) \
1315 __field(int, op_flags) \
1316 __field(int, idx) \
1317 ), \
1318 TP_fast_assign( \
1319 __entry->dev = VFS_I(args->dp)->i_sb->s_dev; \
1320 __entry->ino = args->dp->i_ino; \
1321 __entry->op_flags = args->op_flags; \
1322 __entry->idx = idx; \
1323 ), \
1324 TP_printk("dev %d:%d ino 0x%llx op_flags %s index %d", \
1325 MAJOR(__entry->dev), MINOR(__entry->dev), \
1326 __entry->ino, \
1327 __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS), \
1328 __entry->idx) \
1329)
1330DEFINE_DIR2_SPACE_TRACE(xfs_dir2_leafn_add);
1331DEFINE_DIR2_SPACE_TRACE(xfs_dir2_leafn_remove);
1332DEFINE_DIR2_SPACE_TRACE(xfs_dir2_grow_inode);
1333DEFINE_DIR2_SPACE_TRACE(xfs_dir2_shrink_inode);
1334
1335TRACE_EVENT(xfs_dir2_leafn_moveents,
1336 TP_PROTO(struct xfs_da_args *args, int src_idx, int dst_idx, int count),
1337 TP_ARGS(args, src_idx, dst_idx, count),
1338 TP_STRUCT__entry(
1339 __field(dev_t, dev)
1340 __field(xfs_ino_t, ino)
1341 __field(int, op_flags)
1342 __field(int, src_idx)
1343 __field(int, dst_idx)
1344 __field(int, count)
1345 ),
1346 TP_fast_assign(
1347 __entry->dev = VFS_I(args->dp)->i_sb->s_dev;
1348 __entry->ino = args->dp->i_ino;
1349 __entry->op_flags = args->op_flags;
1350 __entry->src_idx = src_idx;
1351 __entry->dst_idx = dst_idx;
1352 __entry->count = count;
1353 ),
1354 TP_printk("dev %d:%d ino 0x%llx op_flags %s "
1355 "src_idx %d dst_idx %d count %d",
1356 MAJOR(__entry->dev), MINOR(__entry->dev),
1357 __entry->ino,
1358 __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS),
1359 __entry->src_idx,
1360 __entry->dst_idx,
1361 __entry->count)
1362);
1363
1364#endif /* _TRACE_XFS_H */
1365
1366#undef TRACE_INCLUDE_PATH
1367#define TRACE_INCLUDE_PATH .
1368#define TRACE_INCLUDE_FILE xfs_trace
1369#include <trace/define_trace.h>
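
Each DEFINE_*_EVENT above expands into a trace_<name>() function with the TP_PROTO signature shown, and the call sites elsewhere in this patch invoke those functions directly. A minimal sketch of such a call site, assuming the standard TRACE_EVENT plumbing this header relies on (the helper function is hypothetical, not part of this commit):

	#include "xfs_trace.h"

	/*
	 * Hypothetical helper: fires the xfs_buf_hold event generated by
	 * DEFINE_BUF_EVENT(xfs_buf_hold) above.  _RET_IP_ supplies the
	 * caller_ip argument, mirroring the trace_xfs_reset_dqcounts(bp,
	 * _RET_IP_) call visible in the quota changes further down.
	 */
	static void
	xfs_buf_hold_sketch(
		struct xfs_buf	*bp)
	{
		trace_xfs_buf_hold(bp, _RET_IP_);
		atomic_inc(&bp->b_hold);
	}
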
diff --git a/fs/xfs/linux-2.6/xfs_vnode.h b/fs/xfs/linux-2.6/xfs_vnode.h
index 00cabf5354d2..7c220b4227bc 100644
--- a/fs/xfs/linux-2.6/xfs_vnode.h
+++ b/fs/xfs/linux-2.6/xfs_vnode.h
@@ -39,6 +39,10 @@ struct attrlist_cursor_kern;
39#define IO_ISDIRECT 0x00004 /* bypass page cache */ 39#define IO_ISDIRECT 0x00004 /* bypass page cache */
40#define IO_INVIS 0x00020 /* don't update inode timestamps */ 40#define IO_INVIS 0x00020 /* don't update inode timestamps */
41 41
42#define XFS_IO_FLAGS \
43 { IO_ISDIRECT, "DIRECT" }, \
44 { IO_INVIS, "INVIS"}
45
42/* 46/*
43 * Flush/Invalidate options for vop_toss/flush/flushinval_pages. 47 * Flush/Invalidate options for vop_toss/flush/flushinval_pages.
44 */ 48 */
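
The XFS_IO_FLAGS table added here is the { mask, name } list that __print_flags() consumes in the DEFINE_RW_EVENT format string earlier in this patch. A rough illustration of the resulting output, using a hypothetical wrapper (not code from this commit):

	/*
	 * Illustration only: with both IO_ISDIRECT and IO_INVIS set,
	 * __print_flags(__entry->flags, "|", XFS_IO_FLAGS) renders the
	 * trailing field of the xfs_file_direct_write event as
	 * "ioflags DIRECT|INVIS".
	 */
	static void
	xfs_rw_event_sketch(
		struct xfs_inode	*ip,
		size_t			count,
		loff_t			offset)
	{
		trace_xfs_file_direct_write(ip, count, offset,
					    IO_ISDIRECT | IO_INVIS);
	}
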
diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c
index 2f3f2229eaaf..d7c7eea09fc2 100644
--- a/fs/xfs/quota/xfs_dquot.c
+++ b/fs/xfs/quota/xfs_dquot.c
@@ -47,6 +47,7 @@
47#include "xfs_trans_space.h" 47#include "xfs_trans_space.h"
48#include "xfs_trans_priv.h" 48#include "xfs_trans_priv.h"
49#include "xfs_qm.h" 49#include "xfs_qm.h"
50#include "xfs_trace.h"
50 51
51 52
52/* 53/*
@@ -112,10 +113,7 @@ xfs_qm_dqinit(
112 init_completion(&dqp->q_flush); 113 init_completion(&dqp->q_flush);
113 complete(&dqp->q_flush); 114 complete(&dqp->q_flush);
114 115
115#ifdef XFS_DQUOT_TRACE 116 trace_xfs_dqinit(dqp);
116 dqp->q_trace = ktrace_alloc(DQUOT_TRACE_SIZE, KM_NOFS);
117 xfs_dqtrace_entry(dqp, "DQINIT");
118#endif
119 } else { 117 } else {
120 /* 118 /*
121 * Only the q_core portion was zeroed in dqreclaim_one(). 119 * Only the q_core portion was zeroed in dqreclaim_one().
@@ -136,10 +134,7 @@ xfs_qm_dqinit(
136 dqp->q_hash = NULL; 134 dqp->q_hash = NULL;
137 ASSERT(dqp->dq_flnext == dqp->dq_flprev); 135 ASSERT(dqp->dq_flnext == dqp->dq_flprev);
138 136
139#ifdef XFS_DQUOT_TRACE 137 trace_xfs_dqreuse(dqp);
140 ASSERT(dqp->q_trace);
141 xfs_dqtrace_entry(dqp, "DQRECLAIMED_INIT");
142#endif
143 } 138 }
144 139
145 /* 140 /*
@@ -167,13 +162,8 @@ xfs_qm_dqdestroy(
167 162
168 mutex_destroy(&dqp->q_qlock); 163 mutex_destroy(&dqp->q_qlock);
169 sv_destroy(&dqp->q_pinwait); 164 sv_destroy(&dqp->q_pinwait);
170
171#ifdef XFS_DQUOT_TRACE
172 if (dqp->q_trace)
173 ktrace_free(dqp->q_trace);
174 dqp->q_trace = NULL;
175#endif
176 kmem_zone_free(xfs_Gqm->qm_dqzone, dqp); 165 kmem_zone_free(xfs_Gqm->qm_dqzone, dqp);
166
177 atomic_dec(&xfs_Gqm->qm_totaldquots); 167 atomic_dec(&xfs_Gqm->qm_totaldquots);
178} 168}
179 169
@@ -195,49 +185,6 @@ xfs_qm_dqinit_core(
195 d->dd_diskdq.d_flags = type; 185 d->dd_diskdq.d_flags = type;
196} 186}
197 187
198
199#ifdef XFS_DQUOT_TRACE
200/*
201 * Dquot tracing for debugging.
202 */
203/* ARGSUSED */
204void
205__xfs_dqtrace_entry(
206 xfs_dquot_t *dqp,
207 char *func,
208 void *retaddr,
209 xfs_inode_t *ip)
210{
211 xfs_dquot_t *udqp = NULL;
212 xfs_ino_t ino = 0;
213
214 ASSERT(dqp->q_trace);
215 if (ip) {
216 ino = ip->i_ino;
217 udqp = ip->i_udquot;
218 }
219 ktrace_enter(dqp->q_trace,
220 (void *)(__psint_t)DQUOT_KTRACE_ENTRY,
221 (void *)func,
222 (void *)(__psint_t)dqp->q_nrefs,
223 (void *)(__psint_t)dqp->dq_flags,
224 (void *)(__psint_t)dqp->q_res_bcount,
225 (void *)(__psint_t)be64_to_cpu(dqp->q_core.d_bcount),
226 (void *)(__psint_t)be64_to_cpu(dqp->q_core.d_icount),
227 (void *)(__psint_t)be64_to_cpu(dqp->q_core.d_blk_hardlimit),
228 (void *)(__psint_t)be64_to_cpu(dqp->q_core.d_blk_softlimit),
229 (void *)(__psint_t)be64_to_cpu(dqp->q_core.d_ino_hardlimit),
230 (void *)(__psint_t)be64_to_cpu(dqp->q_core.d_ino_softlimit),
231 (void *)(__psint_t)be32_to_cpu(dqp->q_core.d_id),
232 (void *)(__psint_t)current_pid(),
233 (void *)(__psint_t)ino,
234 (void *)(__psint_t)retaddr,
235 (void *)(__psint_t)udqp);
236 return;
237}
238#endif
239
240
241/* 188/*
242 * If default limits are in force, push them into the dquot now. 189 * If default limits are in force, push them into the dquot now.
243 * We overwrite the dquot limits only if they are zero and this 190 * We overwrite the dquot limits only if they are zero and this
@@ -425,7 +372,8 @@ xfs_qm_dqalloc(
425 xfs_trans_t *tp = *tpp; 372 xfs_trans_t *tp = *tpp;
426 373
427 ASSERT(tp != NULL); 374 ASSERT(tp != NULL);
428 xfs_dqtrace_entry(dqp, "DQALLOC"); 375
376 trace_xfs_dqalloc(dqp);
429 377
430 /* 378 /*
431 * Initialize the bmap freelist prior to calling bmapi code. 379 * Initialize the bmap freelist prior to calling bmapi code.
@@ -612,7 +560,8 @@ xfs_qm_dqtobp(
612 * (in which case we already have the buf). 560 * (in which case we already have the buf).
613 */ 561 */
614 if (! newdquot) { 562 if (! newdquot) {
615 xfs_dqtrace_entry(dqp, "DQTOBP READBUF"); 563 trace_xfs_dqtobp_read(dqp);
564
616 if ((error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, 565 if ((error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
617 dqp->q_blkno, 566 dqp->q_blkno,
618 XFS_QI_DQCHUNKLEN(mp), 567 XFS_QI_DQCHUNKLEN(mp),
@@ -670,11 +619,12 @@ xfs_qm_dqread(
670 619
671 ASSERT(tpp); 620 ASSERT(tpp);
672 621
622 trace_xfs_dqread(dqp);
623
673 /* 624 /*
674 * get a pointer to the on-disk dquot and the buffer containing it 625 * get a pointer to the on-disk dquot and the buffer containing it
675 * dqp already knows its own type (GROUP/USER). 626 * dqp already knows its own type (GROUP/USER).
676 */ 627 */
677 xfs_dqtrace_entry(dqp, "DQREAD");
678 if ((error = xfs_qm_dqtobp(tpp, dqp, &ddqp, &bp, flags))) { 628 if ((error = xfs_qm_dqtobp(tpp, dqp, &ddqp, &bp, flags))) {
679 return (error); 629 return (error);
680 } 630 }
@@ -763,7 +713,7 @@ xfs_qm_idtodq(
763 * or if the dquot didn't exist on disk and we ask to 713 * or if the dquot didn't exist on disk and we ask to
764 * allocate (ENOENT). 714 * allocate (ENOENT).
765 */ 715 */
766 xfs_dqtrace_entry(dqp, "DQREAD FAIL"); 716 trace_xfs_dqread_fail(dqp);
767 cancelflags |= XFS_TRANS_ABORT; 717 cancelflags |= XFS_TRANS_ABORT;
768 goto error0; 718 goto error0;
769 } 719 }
@@ -817,7 +767,8 @@ xfs_qm_dqlookup(
817 * id can't be modified without the hashlock anyway. 767 * id can't be modified without the hashlock anyway.
818 */ 768 */
819 if (be32_to_cpu(dqp->q_core.d_id) == id && dqp->q_mount == mp) { 769 if (be32_to_cpu(dqp->q_core.d_id) == id && dqp->q_mount == mp) {
820 xfs_dqtrace_entry(dqp, "DQFOUND BY LOOKUP"); 770 trace_xfs_dqlookup_found(dqp);
771
821 /* 772 /*
822 * All in core dquots must be on the dqlist of mp 773 * All in core dquots must be on the dqlist of mp
823 */ 774 */
@@ -827,7 +778,7 @@ xfs_qm_dqlookup(
827 if (dqp->q_nrefs == 0) { 778 if (dqp->q_nrefs == 0) {
828 ASSERT (XFS_DQ_IS_ON_FREELIST(dqp)); 779 ASSERT (XFS_DQ_IS_ON_FREELIST(dqp));
829 if (! xfs_qm_freelist_lock_nowait(xfs_Gqm)) { 780 if (! xfs_qm_freelist_lock_nowait(xfs_Gqm)) {
830 xfs_dqtrace_entry(dqp, "DQLOOKUP: WANT"); 781 trace_xfs_dqlookup_want(dqp);
831 782
832 /* 783 /*
833 * We may have raced with dqreclaim_one() 784 * We may have raced with dqreclaim_one()
@@ -857,8 +808,7 @@ xfs_qm_dqlookup(
857 /* 808 /*
858 * take it off the freelist 809 * take it off the freelist
859 */ 810 */
860 xfs_dqtrace_entry(dqp, 811 trace_xfs_dqlookup_freelist(dqp);
861 "DQLOOKUP: TAKEOFF FL");
862 XQM_FREELIST_REMOVE(dqp); 812 XQM_FREELIST_REMOVE(dqp);
863 /* xfs_qm_freelist_print(&(xfs_Gqm-> 813 /* xfs_qm_freelist_print(&(xfs_Gqm->
864 qm_dqfreelist), 814 qm_dqfreelist),
@@ -878,8 +828,7 @@ xfs_qm_dqlookup(
878 */ 828 */
879 ASSERT(mutex_is_locked(&qh->qh_lock)); 829 ASSERT(mutex_is_locked(&qh->qh_lock));
880 if (dqp->HL_PREVP != &qh->qh_next) { 830 if (dqp->HL_PREVP != &qh->qh_next) {
881 xfs_dqtrace_entry(dqp, 831 trace_xfs_dqlookup_move(dqp);
882 "DQLOOKUP: HASH MOVETOFRONT");
883 if ((d = dqp->HL_NEXT)) 832 if ((d = dqp->HL_NEXT))
884 d->HL_PREVP = dqp->HL_PREVP; 833 d->HL_PREVP = dqp->HL_PREVP;
885 *(dqp->HL_PREVP) = d; 834 *(dqp->HL_PREVP) = d;
@@ -889,7 +838,7 @@ xfs_qm_dqlookup(
889 dqp->HL_PREVP = &qh->qh_next; 838 dqp->HL_PREVP = &qh->qh_next;
890 qh->qh_next = dqp; 839 qh->qh_next = dqp;
891 } 840 }
892 xfs_dqtrace_entry(dqp, "LOOKUP END"); 841 trace_xfs_dqlookup_done(dqp);
893 *O_dqpp = dqp; 842 *O_dqpp = dqp;
894 ASSERT(mutex_is_locked(&qh->qh_lock)); 843 ASSERT(mutex_is_locked(&qh->qh_lock));
895 return (0); 844 return (0);
@@ -971,7 +920,7 @@ xfs_qm_dqget(
971 ASSERT(*O_dqpp); 920 ASSERT(*O_dqpp);
972 ASSERT(XFS_DQ_IS_LOCKED(*O_dqpp)); 921 ASSERT(XFS_DQ_IS_LOCKED(*O_dqpp));
973 mutex_unlock(&h->qh_lock); 922 mutex_unlock(&h->qh_lock);
974 xfs_dqtrace_entry(*O_dqpp, "DQGET DONE (FROM CACHE)"); 923 trace_xfs_dqget_hit(*O_dqpp);
975 return (0); /* success */ 924 return (0); /* success */
976 } 925 }
977 XQM_STATS_INC(xqmstats.xs_qm_dqcachemisses); 926 XQM_STATS_INC(xqmstats.xs_qm_dqcachemisses);
@@ -1104,7 +1053,7 @@ xfs_qm_dqget(
1104 mutex_unlock(&h->qh_lock); 1053 mutex_unlock(&h->qh_lock);
1105 dqret: 1054 dqret:
1106 ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL)); 1055 ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL));
1107 xfs_dqtrace_entry(dqp, "DQGET DONE"); 1056 trace_xfs_dqget_miss(dqp);
1108 *O_dqpp = dqp; 1057 *O_dqpp = dqp;
1109 return (0); 1058 return (0);
1110} 1059}
@@ -1124,7 +1073,8 @@ xfs_qm_dqput(
1124 1073
1125 ASSERT(dqp->q_nrefs > 0); 1074 ASSERT(dqp->q_nrefs > 0);
1126 ASSERT(XFS_DQ_IS_LOCKED(dqp)); 1075 ASSERT(XFS_DQ_IS_LOCKED(dqp));
1127 xfs_dqtrace_entry(dqp, "DQPUT"); 1076
1077 trace_xfs_dqput(dqp);
1128 1078
1129 if (dqp->q_nrefs != 1) { 1079 if (dqp->q_nrefs != 1) {
1130 dqp->q_nrefs--; 1080 dqp->q_nrefs--;
@@ -1137,7 +1087,7 @@ xfs_qm_dqput(
1137 * in the right order; but try to get it out-of-order first 1087 * in the right order; but try to get it out-of-order first
1138 */ 1088 */
1139 if (! xfs_qm_freelist_lock_nowait(xfs_Gqm)) { 1089 if (! xfs_qm_freelist_lock_nowait(xfs_Gqm)) {
1140 xfs_dqtrace_entry(dqp, "DQPUT: FLLOCK-WAIT"); 1090 trace_xfs_dqput_wait(dqp);
1141 xfs_dqunlock(dqp); 1091 xfs_dqunlock(dqp);
1142 xfs_qm_freelist_lock(xfs_Gqm); 1092 xfs_qm_freelist_lock(xfs_Gqm);
1143 xfs_dqlock(dqp); 1093 xfs_dqlock(dqp);
@@ -1148,7 +1098,8 @@ xfs_qm_dqput(
1148 1098
1149 /* We can't depend on nrefs being == 1 here */ 1099 /* We can't depend on nrefs being == 1 here */
1150 if (--dqp->q_nrefs == 0) { 1100 if (--dqp->q_nrefs == 0) {
1151 xfs_dqtrace_entry(dqp, "DQPUT: ON FREELIST"); 1101 trace_xfs_dqput_free(dqp);
1102
1152 /* 1103 /*
1153 * insert at end of the freelist. 1104 * insert at end of the freelist.
1154 */ 1105 */
@@ -1196,7 +1147,7 @@ xfs_qm_dqrele(
1196 if (!dqp) 1147 if (!dqp)
1197 return; 1148 return;
1198 1149
1199 xfs_dqtrace_entry(dqp, "DQRELE"); 1150 trace_xfs_dqrele(dqp);
1200 1151
1201 xfs_dqlock(dqp); 1152 xfs_dqlock(dqp);
1202 /* 1153 /*
@@ -1229,7 +1180,7 @@ xfs_qm_dqflush(
1229 1180
1230 ASSERT(XFS_DQ_IS_LOCKED(dqp)); 1181 ASSERT(XFS_DQ_IS_LOCKED(dqp));
1231 ASSERT(!completion_done(&dqp->q_flush)); 1182 ASSERT(!completion_done(&dqp->q_flush));
1232 xfs_dqtrace_entry(dqp, "DQFLUSH"); 1183 trace_xfs_dqflush(dqp);
1233 1184
1234 /* 1185 /*
1235 * If not dirty, or it's pinned and we are not supposed to 1186 * If not dirty, or it's pinned and we are not supposed to
@@ -1259,7 +1210,6 @@ xfs_qm_dqflush(
1259 * the ondisk-dquot has already been allocated for. 1210 * the ondisk-dquot has already been allocated for.
1260 */ 1211 */
1261 if ((error = xfs_qm_dqtobp(NULL, dqp, &ddqp, &bp, XFS_QMOPT_DOWARN))) { 1212 if ((error = xfs_qm_dqtobp(NULL, dqp, &ddqp, &bp, XFS_QMOPT_DOWARN))) {
1262 xfs_dqtrace_entry(dqp, "DQTOBP FAIL");
1263 ASSERT(error != ENOENT); 1213 ASSERT(error != ENOENT);
1264 /* 1214 /*
1265 * Quotas could have gotten turned off (ESRCH) 1215 * Quotas could have gotten turned off (ESRCH)
@@ -1297,7 +1247,7 @@ xfs_qm_dqflush(
1297 * get stuck waiting in the write for too long. 1247 * get stuck waiting in the write for too long.
1298 */ 1248 */
1299 if (XFS_BUF_ISPINNED(bp)) { 1249 if (XFS_BUF_ISPINNED(bp)) {
1300 xfs_dqtrace_entry(dqp, "DQFLUSH LOG FORCE"); 1250 trace_xfs_dqflush_force(dqp);
1301 xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE); 1251 xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
1302 } 1252 }
1303 1253
@@ -1308,7 +1258,9 @@ xfs_qm_dqflush(
1308 } else { 1258 } else {
1309 error = xfs_bwrite(mp, bp); 1259 error = xfs_bwrite(mp, bp);
1310 } 1260 }
1311 xfs_dqtrace_entry(dqp, "DQFLUSH END"); 1261
1262 trace_xfs_dqflush_done(dqp);
1263
1312 /* 1264 /*
1313 * dqp is still locked, but caller is free to unlock it now. 1265 * dqp is still locked, but caller is free to unlock it now.
1314 */ 1266 */
@@ -1483,7 +1435,7 @@ xfs_qm_dqpurge(
1483 */ 1435 */
1484 if (XFS_DQ_IS_DIRTY(dqp)) { 1436 if (XFS_DQ_IS_DIRTY(dqp)) {
1485 int error; 1437 int error;
1486 xfs_dqtrace_entry(dqp, "DQPURGE ->DQFLUSH: DQDIRTY"); 1438
1487 /* dqflush unlocks dqflock */ 1439 /* dqflush unlocks dqflock */
1488 /* 1440 /*
1489 * Given that dqpurge is a very rare occurrence, it is OK 1441 * Given that dqpurge is a very rare occurrence, it is OK
diff --git a/fs/xfs/quota/xfs_dquot.h b/fs/xfs/quota/xfs_dquot.h
index a2c16bcee90b..a0f7da586d1b 100644
--- a/fs/xfs/quota/xfs_dquot.h
+++ b/fs/xfs/quota/xfs_dquot.h
@@ -85,9 +85,6 @@ typedef struct xfs_dquot {
85 struct completion q_flush; /* flush completion queue */ 85 struct completion q_flush; /* flush completion queue */
86 atomic_t q_pincount; /* dquot pin count */ 86 atomic_t q_pincount; /* dquot pin count */
87 wait_queue_head_t q_pinwait; /* dquot pinning wait queue */ 87 wait_queue_head_t q_pinwait; /* dquot pinning wait queue */
88#ifdef XFS_DQUOT_TRACE
89 struct ktrace *q_trace; /* trace header structure */
90#endif
91} xfs_dquot_t; 88} xfs_dquot_t;
92 89
93 90
@@ -144,24 +141,6 @@ static inline void xfs_dqfunlock(xfs_dquot_t *dqp)
144 (XFS_IS_UQUOTA_ON((d)->q_mount)) : \ 141 (XFS_IS_UQUOTA_ON((d)->q_mount)) : \
145 (XFS_IS_OQUOTA_ON((d)->q_mount)))) 142 (XFS_IS_OQUOTA_ON((d)->q_mount))))
146 143
147#ifdef XFS_DQUOT_TRACE
148/*
149 * Dquot Tracing stuff.
150 */
151#define DQUOT_TRACE_SIZE 64
152#define DQUOT_KTRACE_ENTRY 1
153
154extern void __xfs_dqtrace_entry(xfs_dquot_t *dqp, char *func,
155 void *, xfs_inode_t *);
156#define xfs_dqtrace_entry_ino(a,b,ip) \
157 __xfs_dqtrace_entry((a), (b), (void*)__return_address, (ip))
158#define xfs_dqtrace_entry(a,b) \
159 __xfs_dqtrace_entry((a), (b), (void*)__return_address, NULL)
160#else
161#define xfs_dqtrace_entry(a,b)
162#define xfs_dqtrace_entry_ino(a,b,ip)
163#endif
164
165#ifdef QUOTADEBUG 144#ifdef QUOTADEBUG
166extern void xfs_qm_dqprint(xfs_dquot_t *); 145extern void xfs_qm_dqprint(xfs_dquot_t *);
167#else 146#else
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index 45b1bfef7388..9e627a8b5b0e 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -47,6 +47,7 @@
47#include "xfs_trans_space.h" 47#include "xfs_trans_space.h"
48#include "xfs_utils.h" 48#include "xfs_utils.h"
49#include "xfs_qm.h" 49#include "xfs_qm.h"
50#include "xfs_trace.h"
50 51
51/* 52/*
52 * The global quota manager. There is only one of these for the entire 53 * The global quota manager. There is only one of these for the entire
@@ -453,7 +454,7 @@ again:
453 xfs_dqunlock(dqp); 454 xfs_dqunlock(dqp);
454 continue; 455 continue;
455 } 456 }
456 xfs_dqtrace_entry(dqp, "FLUSHALL: DQDIRTY"); 457
457 /* XXX a sentinel would be better */ 458 /* XXX a sentinel would be better */
458 recl = XFS_QI_MPLRECLAIMS(mp); 459 recl = XFS_QI_MPLRECLAIMS(mp);
459 if (!xfs_dqflock_nowait(dqp)) { 460 if (!xfs_dqflock_nowait(dqp)) {
@@ -651,7 +652,7 @@ xfs_qm_dqattach_one(
651 */ 652 */
652 dqp = *IO_idqpp; 653 dqp = *IO_idqpp;
653 if (dqp) { 654 if (dqp) {
654 xfs_dqtrace_entry(dqp, "DQATTACH: found in ip"); 655 trace_xfs_dqattach_found(dqp);
655 return 0; 656 return 0;
656 } 657 }
657 658
@@ -704,7 +705,7 @@ xfs_qm_dqattach_one(
704 if (error) 705 if (error)
705 return error; 706 return error;
706 707
707 xfs_dqtrace_entry(dqp, "DQATTACH: found by dqget"); 708 trace_xfs_dqattach_get(dqp);
708 709
709 /* 710 /*
710 * dqget may have dropped and re-acquired the ilock, but it guarantees 711 * dqget may have dropped and re-acquired the ilock, but it guarantees
@@ -890,15 +891,15 @@ xfs_qm_dqdetach(
890 if (!(ip->i_udquot || ip->i_gdquot)) 891 if (!(ip->i_udquot || ip->i_gdquot))
891 return; 892 return;
892 893
894 trace_xfs_dquot_dqdetach(ip);
895
893 ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_uquotino); 896 ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_uquotino);
894 ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_gquotino); 897 ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_gquotino);
895 if (ip->i_udquot) { 898 if (ip->i_udquot) {
896 xfs_dqtrace_entry_ino(ip->i_udquot, "DQDETTACH", ip);
897 xfs_qm_dqrele(ip->i_udquot); 899 xfs_qm_dqrele(ip->i_udquot);
898 ip->i_udquot = NULL; 900 ip->i_udquot = NULL;
899 } 901 }
900 if (ip->i_gdquot) { 902 if (ip->i_gdquot) {
901 xfs_dqtrace_entry_ino(ip->i_gdquot, "DQDETTACH", ip);
902 xfs_qm_dqrele(ip->i_gdquot); 903 xfs_qm_dqrele(ip->i_gdquot);
903 ip->i_gdquot = NULL; 904 ip->i_gdquot = NULL;
904 } 905 }
@@ -977,7 +978,6 @@ xfs_qm_sync(
977 * across a disk write 978 * across a disk write
978 */ 979 */
979 xfs_qm_mplist_unlock(mp); 980 xfs_qm_mplist_unlock(mp);
980 xfs_dqtrace_entry(dqp, "XQM_SYNC: DQFLUSH");
981 error = xfs_qm_dqflush(dqp, flush_flags); 981 error = xfs_qm_dqflush(dqp, flush_flags);
982 xfs_dqunlock(dqp); 982 xfs_dqunlock(dqp);
983 if (error && XFS_FORCED_SHUTDOWN(mp)) 983 if (error && XFS_FORCED_SHUTDOWN(mp))
@@ -1350,7 +1350,8 @@ xfs_qm_reset_dqcounts(
1350 xfs_disk_dquot_t *ddq; 1350 xfs_disk_dquot_t *ddq;
1351 int j; 1351 int j;
1352 1352
1353 xfs_buftrace("RESET DQUOTS", bp); 1353 trace_xfs_reset_dqcounts(bp, _RET_IP_);
1354
1354 /* 1355 /*
1355 * Reset all counters and timers. They'll be 1356 * Reset all counters and timers. They'll be
1356 * started afresh by xfs_qm_quotacheck. 1357 * started afresh by xfs_qm_quotacheck.
@@ -1543,7 +1544,9 @@ xfs_qm_quotacheck_dqadjust(
1543 xfs_qcnt_t rtblks) 1544 xfs_qcnt_t rtblks)
1544{ 1545{
1545 ASSERT(XFS_DQ_IS_LOCKED(dqp)); 1546 ASSERT(XFS_DQ_IS_LOCKED(dqp));
1546 xfs_dqtrace_entry(dqp, "QCHECK DQADJUST"); 1547
1548 trace_xfs_dqadjust(dqp);
1549
1547 /* 1550 /*
1548 * Adjust the inode count and the block count to reflect this inode's 1551 * Adjust the inode count and the block count to reflect this inode's
1549 * resource usage. 1552 * resource usage.
@@ -1994,7 +1997,9 @@ xfs_qm_shake_freelist(
1994 */ 1997 */
1995 if (XFS_DQ_IS_DIRTY(dqp)) { 1998 if (XFS_DQ_IS_DIRTY(dqp)) {
1996 int error; 1999 int error;
1997 xfs_dqtrace_entry(dqp, "DQSHAKE: DQDIRTY"); 2000
2001 trace_xfs_dqshake_dirty(dqp);
2002
1998 /* 2003 /*
1999 * We flush it delayed write, so don't bother 2004 * We flush it delayed write, so don't bother
2000 * releasing the mplock. 2005 * releasing the mplock.
@@ -2038,7 +2043,9 @@ xfs_qm_shake_freelist(
2038 return nreclaimed; 2043 return nreclaimed;
2039 goto tryagain; 2044 goto tryagain;
2040 } 2045 }
2041 xfs_dqtrace_entry(dqp, "DQSHAKE: UNLINKING"); 2046
2047 trace_xfs_dqshake_unlink(dqp);
2048
2042#ifdef QUOTADEBUG 2049#ifdef QUOTADEBUG
2043 cmn_err(CE_DEBUG, "Shake 0x%p, ID 0x%x\n", 2050 cmn_err(CE_DEBUG, "Shake 0x%p, ID 0x%x\n",
2044 dqp, be32_to_cpu(dqp->q_core.d_id)); 2051 dqp, be32_to_cpu(dqp->q_core.d_id));
@@ -2125,7 +2132,9 @@ xfs_qm_dqreclaim_one(void)
2125 */ 2132 */
2126 if (dqp->dq_flags & XFS_DQ_WANT) { 2133 if (dqp->dq_flags & XFS_DQ_WANT) {
2127 ASSERT(! (dqp->dq_flags & XFS_DQ_INACTIVE)); 2134 ASSERT(! (dqp->dq_flags & XFS_DQ_INACTIVE));
2128 xfs_dqtrace_entry(dqp, "DQRECLAIM: DQWANT"); 2135
2136 trace_xfs_dqreclaim_want(dqp);
2137
2129 xfs_dqunlock(dqp); 2138 xfs_dqunlock(dqp);
2130 xfs_qm_freelist_unlock(xfs_Gqm); 2139 xfs_qm_freelist_unlock(xfs_Gqm);
2131 if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS) 2140 if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
@@ -2171,7 +2180,9 @@ xfs_qm_dqreclaim_one(void)
2171 */ 2180 */
2172 if (XFS_DQ_IS_DIRTY(dqp)) { 2181 if (XFS_DQ_IS_DIRTY(dqp)) {
2173 int error; 2182 int error;
2174 xfs_dqtrace_entry(dqp, "DQRECLAIM: DQDIRTY"); 2183
2184 trace_xfs_dqreclaim_dirty(dqp);
2185
2175 /* 2186 /*
2176 * We flush it delayed write, so don't bother 2187 * We flush it delayed write, so don't bother
2177 * releasing the freelist lock. 2188 * releasing the freelist lock.
@@ -2194,8 +2205,9 @@ xfs_qm_dqreclaim_one(void)
2194 if (!mutex_trylock(&dqp->q_hash->qh_lock)) 2205 if (!mutex_trylock(&dqp->q_hash->qh_lock))
2195 goto mplistunlock; 2206 goto mplistunlock;
2196 2207
2208 trace_xfs_dqreclaim_unlink(dqp);
2209
2197 ASSERT(dqp->q_nrefs == 0); 2210 ASSERT(dqp->q_nrefs == 0);
2198 xfs_dqtrace_entry(dqp, "DQRECLAIM: UNLINKING");
2199 XQM_MPLIST_REMOVE(&(XFS_QI_MPL_LIST(dqp->q_mount)), dqp); 2211 XQM_MPLIST_REMOVE(&(XFS_QI_MPL_LIST(dqp->q_mount)), dqp);
2200 XQM_HASHLIST_REMOVE(dqp->q_hash, dqp); 2212 XQM_HASHLIST_REMOVE(dqp->q_hash, dqp);
2201 XQM_FREELIST_REMOVE(dqp); 2213 XQM_FREELIST_REMOVE(dqp);
@@ -2430,7 +2442,7 @@ xfs_qm_vop_dqalloc(
2430 } 2442 }
2431 } 2443 }
2432 if (uq) 2444 if (uq)
2433 xfs_dqtrace_entry_ino(uq, "DQALLOC", ip); 2445 trace_xfs_dquot_dqalloc(ip);
2434 2446
2435 xfs_iunlock(ip, lockflags); 2447 xfs_iunlock(ip, lockflags);
2436 if (O_udqpp) 2448 if (O_udqpp)
diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c
index 5d1a3b98a6e6..71af76fe8a23 100644
--- a/fs/xfs/quota/xfs_qm_syscalls.c
+++ b/fs/xfs/quota/xfs_qm_syscalls.c
@@ -49,6 +49,7 @@
49#include "xfs_buf_item.h" 49#include "xfs_buf_item.h"
50#include "xfs_utils.h" 50#include "xfs_utils.h"
51#include "xfs_qm.h" 51#include "xfs_qm.h"
52#include "xfs_trace.h"
52 53
53#ifdef DEBUG 54#ifdef DEBUG
54# define qdprintk(s, args...) cmn_err(CE_DEBUG, s, ## args) 55# define qdprintk(s, args...) cmn_err(CE_DEBUG, s, ## args)
@@ -496,7 +497,6 @@ xfs_qm_scall_setqlim(
496 ASSERT(error != ENOENT); 497 ASSERT(error != ENOENT);
497 return (error); 498 return (error);
498 } 499 }
499 xfs_dqtrace_entry(dqp, "Q_SETQLIM: AFT DQGET");
500 xfs_trans_dqjoin(tp, dqp); 500 xfs_trans_dqjoin(tp, dqp);
501 ddq = &dqp->q_core; 501 ddq = &dqp->q_core;
502 502
@@ -602,7 +602,6 @@ xfs_qm_scall_setqlim(
602 dqp->dq_flags |= XFS_DQ_DIRTY; 602 dqp->dq_flags |= XFS_DQ_DIRTY;
603 xfs_trans_log_dquot(tp, dqp); 603 xfs_trans_log_dquot(tp, dqp);
604 604
605 xfs_dqtrace_entry(dqp, "Q_SETQLIM: COMMIT");
606 error = xfs_trans_commit(tp, 0); 605 error = xfs_trans_commit(tp, 0);
607 xfs_qm_dqprint(dqp); 606 xfs_qm_dqprint(dqp);
608 xfs_qm_dqrele(dqp); 607 xfs_qm_dqrele(dqp);
@@ -630,7 +629,6 @@ xfs_qm_scall_getquota(
630 return (error); 629 return (error);
631 } 630 }
632 631
633 xfs_dqtrace_entry(dqp, "Q_GETQUOTA SUCCESS");
634 /* 632 /*
635 * If everything's NULL, this dquot doesn't quite exist as far as 633 * If everything's NULL, this dquot doesn't quite exist as far as
636 * our utility programs are concerned. 634 * our utility programs are concerned.
diff --git a/fs/xfs/support/ktrace.c b/fs/xfs/support/ktrace.c
deleted file mode 100644
index 2d494c26717f..000000000000
--- a/fs/xfs/support/ktrace.c
+++ /dev/null
@@ -1,323 +0,0 @@
1/*
2 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18#include <xfs.h>
19
20static kmem_zone_t *ktrace_hdr_zone;
21static kmem_zone_t *ktrace_ent_zone;
22static int ktrace_zentries;
23
24void __init
25ktrace_init(int zentries)
26{
27 ktrace_zentries = roundup_pow_of_two(zentries);
28
29 ktrace_hdr_zone = kmem_zone_init(sizeof(ktrace_t),
30 "ktrace_hdr");
31 ASSERT(ktrace_hdr_zone);
32
33 ktrace_ent_zone = kmem_zone_init(ktrace_zentries
34 * sizeof(ktrace_entry_t),
35 "ktrace_ent");
36 ASSERT(ktrace_ent_zone);
37}
38
39void __exit
40ktrace_uninit(void)
41{
42 kmem_zone_destroy(ktrace_hdr_zone);
43 kmem_zone_destroy(ktrace_ent_zone);
44}
45
46/*
47 * ktrace_alloc()
48 *
49 * Allocate a ktrace header and enough buffering for the given
50 * number of entries. Round the number of entries up to a
51 * power of 2 so we can do fast masking to get the index from
52 * the atomic index counter.
53 */
54ktrace_t *
55ktrace_alloc(int nentries, unsigned int __nocast sleep)
56{
57 ktrace_t *ktp;
58 ktrace_entry_t *ktep;
59 int entries;
60
61 ktp = (ktrace_t*)kmem_zone_alloc(ktrace_hdr_zone, sleep);
62
63 if (ktp == (ktrace_t*)NULL) {
64 /*
65 * KM_SLEEP callers don't expect failure.
66 */
67 if (sleep & KM_SLEEP)
68 panic("ktrace_alloc: NULL memory on KM_SLEEP request!");
69
70 return NULL;
71 }
72
73 /*
 74 * Special treatment for buffers with exactly ktrace_zentries entries
75 */
76 entries = roundup_pow_of_two(nentries);
77 if (entries == ktrace_zentries) {
78 ktep = (ktrace_entry_t*)kmem_zone_zalloc(ktrace_ent_zone,
79 sleep);
80 } else {
81 ktep = (ktrace_entry_t*)kmem_zalloc((entries * sizeof(*ktep)),
82 sleep | KM_LARGE);
83 }
84
85 if (ktep == NULL) {
86 /*
87 * KM_SLEEP callers don't expect failure.
88 */
89 if (sleep & KM_SLEEP)
90 panic("ktrace_alloc: NULL memory on KM_SLEEP request!");
91
92 kmem_free(ktp);
93
94 return NULL;
95 }
96
97 ktp->kt_entries = ktep;
98 ktp->kt_nentries = entries;
99 ASSERT(is_power_of_2(entries));
100 ktp->kt_index_mask = entries - 1;
101 atomic_set(&ktp->kt_index, 0);
102 ktp->kt_rollover = 0;
103 return ktp;
104}
105
106
107/*
108 * ktrace_free()
109 *
110 * Free up the ktrace header and buffer. It is up to the caller
111 * to ensure that no-one is referencing it.
112 */
113void
114ktrace_free(ktrace_t *ktp)
115{
116 if (ktp == (ktrace_t *)NULL)
117 return;
118
119 /*
120 * Special treatment for the Vnode trace buffer.
121 */
122 if (ktp->kt_nentries == ktrace_zentries)
123 kmem_zone_free(ktrace_ent_zone, ktp->kt_entries);
124 else
125 kmem_free(ktp->kt_entries);
126
127 kmem_zone_free(ktrace_hdr_zone, ktp);
128}
129
130
131/*
132 * Enter the given values into the "next" entry in the trace buffer.
133 * kt_index is always the index of the next entry to be filled.
134 */
135void
136ktrace_enter(
137 ktrace_t *ktp,
138 void *val0,
139 void *val1,
140 void *val2,
141 void *val3,
142 void *val4,
143 void *val5,
144 void *val6,
145 void *val7,
146 void *val8,
147 void *val9,
148 void *val10,
149 void *val11,
150 void *val12,
151 void *val13,
152 void *val14,
153 void *val15)
154{
155 int index;
156 ktrace_entry_t *ktep;
157
158 ASSERT(ktp != NULL);
159
160 /*
161 * Grab an entry by pushing the index up to the next one.
162 */
163 index = atomic_add_return(1, &ktp->kt_index);
164 index = (index - 1) & ktp->kt_index_mask;
165 if (!ktp->kt_rollover && index == ktp->kt_nentries - 1)
166 ktp->kt_rollover = 1;
167
168 ASSERT((index >= 0) && (index < ktp->kt_nentries));
169
170 ktep = &(ktp->kt_entries[index]);
171
172 ktep->val[0] = val0;
173 ktep->val[1] = val1;
174 ktep->val[2] = val2;
175 ktep->val[3] = val3;
176 ktep->val[4] = val4;
177 ktep->val[5] = val5;
178 ktep->val[6] = val6;
179 ktep->val[7] = val7;
180 ktep->val[8] = val8;
181 ktep->val[9] = val9;
182 ktep->val[10] = val10;
183 ktep->val[11] = val11;
184 ktep->val[12] = val12;
185 ktep->val[13] = val13;
186 ktep->val[14] = val14;
187 ktep->val[15] = val15;
188}
189
190/*
191 * Return the number of entries in the trace buffer.
192 */
193int
194ktrace_nentries(
195 ktrace_t *ktp)
196{
197 int index;
198 if (ktp == NULL)
199 return 0;
200
201 index = atomic_read(&ktp->kt_index) & ktp->kt_index_mask;
202 return (ktp->kt_rollover ? ktp->kt_nentries : index);
203}
204
205/*
206 * ktrace_first()
207 *
208 * This is used to find the start of the trace buffer.
209 * In conjunction with ktrace_next() it can be used to
210 * iterate through the entire trace buffer. This code does
211 * not do any locking because it is assumed that it is called
212 * from the debugger.
213 *
214 * The caller must pass in a pointer to a ktrace_snap
215 * structure in which we will keep some state used to
216 * iterate through the buffer. This state must not be touched
217 * by any code outside of this module.
218 */
219ktrace_entry_t *
220ktrace_first(ktrace_t *ktp, ktrace_snap_t *ktsp)
221{
222 ktrace_entry_t *ktep;
223 int index;
224 int nentries;
225
226 if (ktp->kt_rollover)
227 index = atomic_read(&ktp->kt_index) & ktp->kt_index_mask;
228 else
229 index = 0;
230
231 ktsp->ks_start = index;
232 ktep = &(ktp->kt_entries[index]);
233
234 nentries = ktrace_nentries(ktp);
235 index++;
236 if (index < nentries) {
237 ktsp->ks_index = index;
238 } else {
239 ktsp->ks_index = 0;
240 if (index > nentries)
241 ktep = NULL;
242 }
243 return ktep;
244}
245
246/*
247 * ktrace_next()
248 *
249 * This is used to iterate through the entries of the given
250 * trace buffer. The caller must pass in the ktrace_snap_t
251 * structure initialized by ktrace_first(). The return value
252 * will be either a pointer to the next ktrace_entry or NULL
253 * if all of the entries have been traversed.
254 */
255ktrace_entry_t *
256ktrace_next(
257 ktrace_t *ktp,
258 ktrace_snap_t *ktsp)
259{
260 int index;
261 ktrace_entry_t *ktep;
262
263 index = ktsp->ks_index;
264 if (index == ktsp->ks_start) {
265 ktep = NULL;
266 } else {
267 ktep = &ktp->kt_entries[index];
268 }
269
270 index++;
271 if (index == ktrace_nentries(ktp)) {
272 ktsp->ks_index = 0;
273 } else {
274 ktsp->ks_index = index;
275 }
276
277 return ktep;
278}
279
280/*
281 * ktrace_skip()
282 *
283 * Skip the next "count" entries and return the entry after that.
284 * Return NULL if this causes us to iterate past the beginning again.
285 */
286ktrace_entry_t *
287ktrace_skip(
288 ktrace_t *ktp,
289 int count,
290 ktrace_snap_t *ktsp)
291{
292 int index;
293 int new_index;
294 ktrace_entry_t *ktep;
295 int nentries = ktrace_nentries(ktp);
296
297 index = ktsp->ks_index;
298 new_index = index + count;
299 while (new_index >= nentries) {
300 new_index -= nentries;
301 }
302 if (index == ktsp->ks_start) {
303 /*
304 * We've iterated around to the start, so we're done.
305 */
306 ktep = NULL;
307 } else if ((new_index < index) && (index < ktsp->ks_index)) {
308 /*
309 * We've skipped past the start again, so we're done.
310 */
311 ktep = NULL;
312 ktsp->ks_index = ktsp->ks_start;
313 } else {
314 ktep = &(ktp->kt_entries[new_index]);
315 new_index++;
316 if (new_index == nentries) {
317 ktsp->ks_index = 0;
318 } else {
319 ktsp->ks_index = new_index;
320 }
321 }
322 return ktep;
323}
diff --git a/fs/xfs/support/ktrace.h b/fs/xfs/support/ktrace.h
deleted file mode 100644
index 741d6947ca60..000000000000
--- a/fs/xfs/support/ktrace.h
+++ /dev/null
@@ -1,85 +0,0 @@
1/*
2 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18#ifndef __XFS_SUPPORT_KTRACE_H__
19#define __XFS_SUPPORT_KTRACE_H__
20
21/*
22 * Trace buffer entry structure.
23 */
24typedef struct ktrace_entry {
25 void *val[16];
26} ktrace_entry_t;
27
28/*
29 * Trace buffer header structure.
30 */
31typedef struct ktrace {
32 int kt_nentries; /* number of entries in trace buf */
33 atomic_t kt_index; /* current index in entries */
34 unsigned int kt_index_mask;
35 int kt_rollover;
36 ktrace_entry_t *kt_entries; /* buffer of entries */
37} ktrace_t;
38
39/*
40 * Trace buffer snapshot structure.
41 */
42typedef struct ktrace_snap {
43 int ks_start; /* kt_index at time of snap */
44 int ks_index; /* current index */
45} ktrace_snap_t;
46
47
48#ifdef CONFIG_XFS_TRACE
49
50extern void ktrace_init(int zentries);
51extern void ktrace_uninit(void);
52
53extern ktrace_t *ktrace_alloc(int, unsigned int __nocast);
54extern void ktrace_free(ktrace_t *);
55
56extern void ktrace_enter(
57 ktrace_t *,
58 void *,
59 void *,
60 void *,
61 void *,
62 void *,
63 void *,
64 void *,
65 void *,
66 void *,
67 void *,
68 void *,
69 void *,
70 void *,
71 void *,
72 void *,
73 void *);
74
75extern ktrace_entry_t *ktrace_first(ktrace_t *, ktrace_snap_t *);
76extern int ktrace_nentries(ktrace_t *);
77extern ktrace_entry_t *ktrace_next(ktrace_t *, ktrace_snap_t *);
78extern ktrace_entry_t *ktrace_skip(ktrace_t *, int, ktrace_snap_t *);
79
80#else
81#define ktrace_init(x) do { } while (0)
82#define ktrace_uninit() do { } while (0)
83#endif /* CONFIG_XFS_TRACE */
84
85#endif /* __XFS_SUPPORT_KTRACE_H__ */
diff --git a/fs/xfs/xfs.h b/fs/xfs/xfs.h
index 17254b529c54..5ad8ad3a1dcd 100644
--- a/fs/xfs/xfs.h
+++ b/fs/xfs/xfs.h
@@ -25,21 +25,5 @@
25/* #define QUOTADEBUG 1 */ 25/* #define QUOTADEBUG 1 */
26#endif 26#endif
27 27
28#ifdef CONFIG_XFS_TRACE
29#define XFS_ALLOC_TRACE 1
30#define XFS_ATTR_TRACE 1
31#define XFS_BLI_TRACE 1
32#define XFS_BMAP_TRACE 1
33#define XFS_BTREE_TRACE 1
34#define XFS_DIR2_TRACE 1
35#define XFS_DQUOT_TRACE 1
36#define XFS_ILOCK_TRACE 1
37#define XFS_LOG_TRACE 1
38#define XFS_RW_TRACE 1
39#define XFS_BUF_TRACE 1
40#define XFS_INODE_TRACE 1
41#define XFS_FILESTREAMS_TRACE 1
42#endif
43
44#include <linux-2.6/xfs_linux.h> 28#include <linux-2.6/xfs_linux.h>
45#endif /* __XFS_H__ */ 29#endif /* __XFS_H__ */
diff --git a/fs/xfs/xfs_ag.h b/fs/xfs/xfs_ag.h
index a5d54bf4931b..6702bd865811 100644
--- a/fs/xfs/xfs_ag.h
+++ b/fs/xfs/xfs_ag.h
@@ -86,6 +86,20 @@ typedef struct xfs_agf {
86#define XFS_AGF_NUM_BITS 12 86#define XFS_AGF_NUM_BITS 12
87#define XFS_AGF_ALL_BITS ((1 << XFS_AGF_NUM_BITS) - 1) 87#define XFS_AGF_ALL_BITS ((1 << XFS_AGF_NUM_BITS) - 1)
88 88
89#define XFS_AGF_FLAGS \
90 { XFS_AGF_MAGICNUM, "MAGICNUM" }, \
91 { XFS_AGF_VERSIONNUM, "VERSIONNUM" }, \
92 { XFS_AGF_SEQNO, "SEQNO" }, \
93 { XFS_AGF_LENGTH, "LENGTH" }, \
94 { XFS_AGF_ROOTS, "ROOTS" }, \
95 { XFS_AGF_LEVELS, "LEVELS" }, \
96 { XFS_AGF_FLFIRST, "FLFIRST" }, \
97 { XFS_AGF_FLLAST, "FLLAST" }, \
98 { XFS_AGF_FLCOUNT, "FLCOUNT" }, \
99 { XFS_AGF_FREEBLKS, "FREEBLKS" }, \
100 { XFS_AGF_LONGEST, "LONGEST" }, \
101 { XFS_AGF_BTREEBLKS, "BTREEBLKS" }
102
89/* disk block (xfs_daddr_t) in the AG */ 103/* disk block (xfs_daddr_t) in the AG */
90#define XFS_AGF_DADDR(mp) ((xfs_daddr_t)(1 << (mp)->m_sectbb_log)) 104#define XFS_AGF_DADDR(mp) ((xfs_daddr_t)(1 << (mp)->m_sectbb_log))
91#define XFS_AGF_BLOCK(mp) XFS_HDR_BLOCK(mp, XFS_AGF_DADDR(mp)) 105#define XFS_AGF_BLOCK(mp) XFS_HDR_BLOCK(mp, XFS_AGF_DADDR(mp))
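The new XFS_AGF_FLAGS table pairs each XFS_AGF_* logging bit with a printable name; it is presumably consumed by the flag-decoding helpers in the new xfs_trace.h (the ftrace __print_flags() macro takes exactly this { mask, "NAME" } shape) so trace_xfs_agf() can emit symbolic flag names instead of a raw bitmask. A hedged userspace sketch of the decoding pattern, with made-up flag values rather than the real XFS_AGF_* bits:

#include <stdio.h>

struct flag_name { unsigned int mask; const char *name; };

/* illustrative values only -- not the real XFS_AGF_* bit assignments */
#define DEMO_AGF_FLAGS \
	{ 0x01, "MAGICNUM" }, \
	{ 0x02, "VERSIONNUM" }, \
	{ 0x04, "SEQNO" }, \
	{ 0x08, "LENGTH" }

static void print_flags(unsigned int flags)
{
	static const struct flag_name table[] = { DEMO_AGF_FLAGS };
	const char *sep = "";
	size_t i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		if (flags & table[i].mask) {
			printf("%s%s", sep, table[i].name);
			sep = "|";
		}
	}
	printf("\n");
}

int main(void)
{
	print_flags(0x01 | 0x04);	/* prints "MAGICNUM|SEQNO" */
	return 0;
}

The same table shape appears again later in this patch as XFS_ATTR_FLAGS, and as XFS_ALLOC_TYPES (the latter mapping enum values rather than flag bits).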
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index 2cf944eb796d..a1c65fc6d9c4 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -38,6 +38,7 @@
38#include "xfs_ialloc.h" 38#include "xfs_ialloc.h"
39#include "xfs_alloc.h" 39#include "xfs_alloc.h"
40#include "xfs_error.h" 40#include "xfs_error.h"
41#include "xfs_trace.h"
41 42
42 43
43#define XFS_ABSDIFF(a,b) (((a) <= (b)) ? ((b) - (a)) : ((a) - (b))) 44#define XFS_ABSDIFF(a,b) (((a) <= (b)) ? ((b) - (a)) : ((a) - (b)))
@@ -51,30 +52,6 @@ xfs_alloc_search_busy(xfs_trans_t *tp,
51 xfs_agblock_t bno, 52 xfs_agblock_t bno,
52 xfs_extlen_t len); 53 xfs_extlen_t len);
53 54
54#if defined(XFS_ALLOC_TRACE)
55ktrace_t *xfs_alloc_trace_buf;
56
57#define TRACE_ALLOC(s,a) \
58 xfs_alloc_trace_alloc(__func__, s, a, __LINE__)
59#define TRACE_FREE(s,a,b,x,f) \
60 xfs_alloc_trace_free(__func__, s, mp, a, b, x, f, __LINE__)
61#define TRACE_MODAGF(s,a,f) \
62 xfs_alloc_trace_modagf(__func__, s, mp, a, f, __LINE__)
63#define TRACE_BUSY(__func__,s,ag,agb,l,sl,tp) \
64 xfs_alloc_trace_busy(__func__, s, mp, ag, agb, l, sl, tp, XFS_ALLOC_KTRACE_BUSY, __LINE__)
65#define TRACE_UNBUSY(__func__,s,ag,sl,tp) \
66 xfs_alloc_trace_busy(__func__, s, mp, ag, -1, -1, sl, tp, XFS_ALLOC_KTRACE_UNBUSY, __LINE__)
67#define TRACE_BUSYSEARCH(__func__,s,ag,agb,l,tp) \
68 xfs_alloc_trace_busy(__func__, s, mp, ag, agb, l, 0, tp, XFS_ALLOC_KTRACE_BUSYSEARCH, __LINE__)
69#else
70#define TRACE_ALLOC(s,a)
71#define TRACE_FREE(s,a,b,x,f)
72#define TRACE_MODAGF(s,a,f)
73#define TRACE_BUSY(s,a,ag,agb,l,sl,tp)
74#define TRACE_UNBUSY(fname,s,ag,sl,tp)
75#define TRACE_BUSYSEARCH(fname,s,ag,agb,l,tp)
76#endif /* XFS_ALLOC_TRACE */
77
78/* 55/*
79 * Prototypes for per-ag allocation routines 56 * Prototypes for per-ag allocation routines
80 */ 57 */
@@ -498,124 +475,6 @@ xfs_alloc_read_agfl(
498 return 0; 475 return 0;
499} 476}
500 477
501#if defined(XFS_ALLOC_TRACE)
502/*
503 * Add an allocation trace entry for an alloc call.
504 */
505STATIC void
506xfs_alloc_trace_alloc(
507 const char *name, /* function tag string */
508 char *str, /* additional string */
509 xfs_alloc_arg_t *args, /* allocation argument structure */
510 int line) /* source line number */
511{
512 ktrace_enter(xfs_alloc_trace_buf,
513 (void *)(__psint_t)(XFS_ALLOC_KTRACE_ALLOC | (line << 16)),
514 (void *)name,
515 (void *)str,
516 (void *)args->mp,
517 (void *)(__psunsigned_t)args->agno,
518 (void *)(__psunsigned_t)args->agbno,
519 (void *)(__psunsigned_t)args->minlen,
520 (void *)(__psunsigned_t)args->maxlen,
521 (void *)(__psunsigned_t)args->mod,
522 (void *)(__psunsigned_t)args->prod,
523 (void *)(__psunsigned_t)args->minleft,
524 (void *)(__psunsigned_t)args->total,
525 (void *)(__psunsigned_t)args->alignment,
526 (void *)(__psunsigned_t)args->len,
527 (void *)((((__psint_t)args->type) << 16) |
528 (__psint_t)args->otype),
529 (void *)(__psint_t)((args->wasdel << 3) |
530 (args->wasfromfl << 2) |
531 (args->isfl << 1) |
532 (args->userdata << 0)));
533}
534
535/*
536 * Add an allocation trace entry for a free call.
537 */
538STATIC void
539xfs_alloc_trace_free(
540 const char *name, /* function tag string */
541 char *str, /* additional string */
542 xfs_mount_t *mp, /* file system mount point */
543 xfs_agnumber_t agno, /* allocation group number */
544 xfs_agblock_t agbno, /* a.g. relative block number */
545 xfs_extlen_t len, /* length of extent */
546 int isfl, /* set if is freelist allocation/free */
547 int line) /* source line number */
548{
549 ktrace_enter(xfs_alloc_trace_buf,
550 (void *)(__psint_t)(XFS_ALLOC_KTRACE_FREE | (line << 16)),
551 (void *)name,
552 (void *)str,
553 (void *)mp,
554 (void *)(__psunsigned_t)agno,
555 (void *)(__psunsigned_t)agbno,
556 (void *)(__psunsigned_t)len,
557 (void *)(__psint_t)isfl,
558 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
559}
560
561/*
562 * Add an allocation trace entry for modifying an agf.
563 */
564STATIC void
565xfs_alloc_trace_modagf(
566 const char *name, /* function tag string */
567 char *str, /* additional string */
568 xfs_mount_t *mp, /* file system mount point */
569 xfs_agf_t *agf, /* new agf value */
570 int flags, /* logging flags for agf */
571 int line) /* source line number */
572{
573 ktrace_enter(xfs_alloc_trace_buf,
574 (void *)(__psint_t)(XFS_ALLOC_KTRACE_MODAGF | (line << 16)),
575 (void *)name,
576 (void *)str,
577 (void *)mp,
578 (void *)(__psint_t)flags,
579 (void *)(__psunsigned_t)be32_to_cpu(agf->agf_seqno),
580 (void *)(__psunsigned_t)be32_to_cpu(agf->agf_length),
581 (void *)(__psunsigned_t)be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]),
582 (void *)(__psunsigned_t)be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]),
583 (void *)(__psunsigned_t)be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]),
584 (void *)(__psunsigned_t)be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]),
585 (void *)(__psunsigned_t)be32_to_cpu(agf->agf_flfirst),
586 (void *)(__psunsigned_t)be32_to_cpu(agf->agf_fllast),
587 (void *)(__psunsigned_t)be32_to_cpu(agf->agf_flcount),
588 (void *)(__psunsigned_t)be32_to_cpu(agf->agf_freeblks),
589 (void *)(__psunsigned_t)be32_to_cpu(agf->agf_longest));
590}
591
592STATIC void
593xfs_alloc_trace_busy(
594 const char *name, /* function tag string */
595 char *str, /* additional string */
596 xfs_mount_t *mp, /* file system mount point */
597 xfs_agnumber_t agno, /* allocation group number */
598 xfs_agblock_t agbno, /* a.g. relative block number */
599 xfs_extlen_t len, /* length of extent */
600 int slot, /* perag Busy slot */
601 xfs_trans_t *tp,
602 int trtype, /* type: add, delete, search */
603 int line) /* source line number */
604{
605 ktrace_enter(xfs_alloc_trace_buf,
606 (void *)(__psint_t)(trtype | (line << 16)),
607 (void *)name,
608 (void *)str,
609 (void *)mp,
610 (void *)(__psunsigned_t)agno,
611 (void *)(__psunsigned_t)agbno,
612 (void *)(__psunsigned_t)len,
613 (void *)(__psint_t)slot,
614 (void *)tp,
615 NULL, NULL, NULL, NULL, NULL, NULL, NULL);
616}
617#endif /* XFS_ALLOC_TRACE */
618
619/* 478/*
620 * Allocation group level functions. 479 * Allocation group level functions.
621 */ 480 */
@@ -665,9 +524,6 @@ xfs_alloc_ag_vextent(
665 */ 524 */
666 if (args->agbno != NULLAGBLOCK) { 525 if (args->agbno != NULLAGBLOCK) {
667 xfs_agf_t *agf; /* allocation group freelist header */ 526 xfs_agf_t *agf; /* allocation group freelist header */
668#ifdef XFS_ALLOC_TRACE
669 xfs_mount_t *mp = args->mp;
670#endif
671 long slen = (long)args->len; 527 long slen = (long)args->len;
672 528
673 ASSERT(args->len >= args->minlen && args->len <= args->maxlen); 529 ASSERT(args->len >= args->minlen && args->len <= args->maxlen);
@@ -682,7 +538,6 @@ xfs_alloc_ag_vextent(
682 args->pag->pagf_freeblks -= args->len; 538 args->pag->pagf_freeblks -= args->len;
683 ASSERT(be32_to_cpu(agf->agf_freeblks) <= 539 ASSERT(be32_to_cpu(agf->agf_freeblks) <=
684 be32_to_cpu(agf->agf_length)); 540 be32_to_cpu(agf->agf_length));
685 TRACE_MODAGF(NULL, agf, XFS_AGF_FREEBLKS);
686 xfs_alloc_log_agf(args->tp, args->agbp, 541 xfs_alloc_log_agf(args->tp, args->agbp,
687 XFS_AGF_FREEBLKS); 542 XFS_AGF_FREEBLKS);
688 /* search the busylist for these blocks */ 543 /* search the busylist for these blocks */
@@ -792,13 +647,14 @@ xfs_alloc_ag_vextent_exact(
792 } 647 }
793 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR); 648 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
794 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); 649 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
795 TRACE_ALLOC("normal", args); 650
651 trace_xfs_alloc_exact_done(args);
796 args->wasfromfl = 0; 652 args->wasfromfl = 0;
797 return 0; 653 return 0;
798 654
799error0: 655error0:
800 xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR); 656 xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
801 TRACE_ALLOC("error", args); 657 trace_xfs_alloc_exact_error(args);
802 return error; 658 return error;
803} 659}
804 660
@@ -958,7 +814,7 @@ xfs_alloc_ag_vextent_near(
958 args->len = blen; 814 args->len = blen;
959 if (!xfs_alloc_fix_minleft(args)) { 815 if (!xfs_alloc_fix_minleft(args)) {
960 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); 816 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
961 TRACE_ALLOC("nominleft", args); 817 trace_xfs_alloc_near_nominleft(args);
962 return 0; 818 return 0;
963 } 819 }
964 blen = args->len; 820 blen = args->len;
@@ -981,7 +837,8 @@ xfs_alloc_ag_vextent_near(
981 goto error0; 837 goto error0;
982 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); 838 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
983 xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR); 839 xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
984 TRACE_ALLOC("first", args); 840
841 trace_xfs_alloc_near_first(args);
985 return 0; 842 return 0;
986 } 843 }
987 /* 844 /*
@@ -1272,7 +1129,7 @@ xfs_alloc_ag_vextent_near(
1272 * If we couldn't get anything, give up. 1129 * If we couldn't get anything, give up.
1273 */ 1130 */
1274 if (bno_cur_lt == NULL && bno_cur_gt == NULL) { 1131 if (bno_cur_lt == NULL && bno_cur_gt == NULL) {
1275 TRACE_ALLOC("neither", args); 1132 trace_xfs_alloc_size_neither(args);
1276 args->agbno = NULLAGBLOCK; 1133 args->agbno = NULLAGBLOCK;
1277 return 0; 1134 return 0;
1278 } 1135 }
@@ -1299,7 +1156,7 @@ xfs_alloc_ag_vextent_near(
1299 args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen); 1156 args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
1300 xfs_alloc_fix_len(args); 1157 xfs_alloc_fix_len(args);
1301 if (!xfs_alloc_fix_minleft(args)) { 1158 if (!xfs_alloc_fix_minleft(args)) {
1302 TRACE_ALLOC("nominleft", args); 1159 trace_xfs_alloc_near_nominleft(args);
1303 xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR); 1160 xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
1304 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); 1161 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1305 return 0; 1162 return 0;
@@ -1314,13 +1171,18 @@ xfs_alloc_ag_vextent_near(
1314 if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno, ltlen, 1171 if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno, ltlen,
1315 ltnew, rlen, XFSA_FIXUP_BNO_OK))) 1172 ltnew, rlen, XFSA_FIXUP_BNO_OK)))
1316 goto error0; 1173 goto error0;
1317 TRACE_ALLOC(j ? "gt" : "lt", args); 1174
1175 if (j)
1176 trace_xfs_alloc_near_greater(args);
1177 else
1178 trace_xfs_alloc_near_lesser(args);
1179
1318 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); 1180 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1319 xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR); 1181 xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
1320 return 0; 1182 return 0;
1321 1183
1322 error0: 1184 error0:
1323 TRACE_ALLOC("error", args); 1185 trace_xfs_alloc_near_error(args);
1324 if (cnt_cur != NULL) 1186 if (cnt_cur != NULL)
1325 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR); 1187 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
1326 if (bno_cur_lt != NULL) 1188 if (bno_cur_lt != NULL)
@@ -1371,7 +1233,7 @@ xfs_alloc_ag_vextent_size(
1371 goto error0; 1233 goto error0;
1372 if (i == 0 || flen == 0) { 1234 if (i == 0 || flen == 0) {
1373 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); 1235 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1374 TRACE_ALLOC("noentry", args); 1236 trace_xfs_alloc_size_noentry(args);
1375 return 0; 1237 return 0;
1376 } 1238 }
1377 ASSERT(i == 1); 1239 ASSERT(i == 1);
@@ -1448,7 +1310,7 @@ xfs_alloc_ag_vextent_size(
1448 xfs_alloc_fix_len(args); 1310 xfs_alloc_fix_len(args);
1449 if (rlen < args->minlen || !xfs_alloc_fix_minleft(args)) { 1311 if (rlen < args->minlen || !xfs_alloc_fix_minleft(args)) {
1450 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); 1312 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1451 TRACE_ALLOC("nominleft", args); 1313 trace_xfs_alloc_size_nominleft(args);
1452 args->agbno = NULLAGBLOCK; 1314 args->agbno = NULLAGBLOCK;
1453 return 0; 1315 return 0;
1454 } 1316 }
@@ -1471,11 +1333,11 @@ xfs_alloc_ag_vextent_size(
1471 args->agbno + args->len <= 1333 args->agbno + args->len <=
1472 be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length), 1334 be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
1473 error0); 1335 error0);
1474 TRACE_ALLOC("normal", args); 1336 trace_xfs_alloc_size_done(args);
1475 return 0; 1337 return 0;
1476 1338
1477error0: 1339error0:
1478 TRACE_ALLOC("error", args); 1340 trace_xfs_alloc_size_error(args);
1479 if (cnt_cur) 1341 if (cnt_cur)
1480 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR); 1342 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
1481 if (bno_cur) 1343 if (bno_cur)
@@ -1534,7 +1396,7 @@ xfs_alloc_ag_vextent_small(
1534 be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length), 1396 be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
1535 error0); 1397 error0);
1536 args->wasfromfl = 1; 1398 args->wasfromfl = 1;
1537 TRACE_ALLOC("freelist", args); 1399 trace_xfs_alloc_small_freelist(args);
1538 *stat = 0; 1400 *stat = 0;
1539 return 0; 1401 return 0;
1540 } 1402 }
@@ -1556,17 +1418,17 @@ xfs_alloc_ag_vextent_small(
1556 */ 1418 */
1557 if (flen < args->minlen) { 1419 if (flen < args->minlen) {
1558 args->agbno = NULLAGBLOCK; 1420 args->agbno = NULLAGBLOCK;
1559 TRACE_ALLOC("notenough", args); 1421 trace_xfs_alloc_small_notenough(args);
1560 flen = 0; 1422 flen = 0;
1561 } 1423 }
1562 *fbnop = fbno; 1424 *fbnop = fbno;
1563 *flenp = flen; 1425 *flenp = flen;
1564 *stat = 1; 1426 *stat = 1;
1565 TRACE_ALLOC("normal", args); 1427 trace_xfs_alloc_small_done(args);
1566 return 0; 1428 return 0;
1567 1429
1568error0: 1430error0:
1569 TRACE_ALLOC("error", args); 1431 trace_xfs_alloc_small_error(args);
1570 return error; 1432 return error;
1571} 1433}
1572 1434
@@ -1809,17 +1671,14 @@ xfs_free_ag_extent(
1809 be32_to_cpu(agf->agf_freeblks) <= 1671 be32_to_cpu(agf->agf_freeblks) <=
1810 be32_to_cpu(agf->agf_length), 1672 be32_to_cpu(agf->agf_length),
1811 error0); 1673 error0);
1812 TRACE_MODAGF(NULL, agf, XFS_AGF_FREEBLKS);
1813 xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS); 1674 xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS);
1814 if (!isfl) 1675 if (!isfl)
1815 xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, (long)len); 1676 xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, (long)len);
1816 XFS_STATS_INC(xs_freex); 1677 XFS_STATS_INC(xs_freex);
1817 XFS_STATS_ADD(xs_freeb, len); 1678 XFS_STATS_ADD(xs_freeb, len);
1818 } 1679 }
1819 TRACE_FREE(haveleft ? 1680
1820 (haveright ? "both" : "left") : 1681 trace_xfs_free_extent(mp, agno, bno, len, isfl, haveleft, haveright);
1821 (haveright ? "right" : "none"),
1822 agno, bno, len, isfl);
1823 1682
1824 /* 1683 /*
1825 * Since blocks move to the free list without the coordination 1684 * Since blocks move to the free list without the coordination
@@ -1836,7 +1695,7 @@ xfs_free_ag_extent(
1836 return 0; 1695 return 0;
1837 1696
1838 error0: 1697 error0:
1839 TRACE_FREE("error", agno, bno, len, isfl); 1698 trace_xfs_free_extent(mp, agno, bno, len, isfl, -1, -1);
1840 if (bno_cur) 1699 if (bno_cur)
1841 xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR); 1700 xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
1842 if (cnt_cur) 1701 if (cnt_cur)
@@ -2122,7 +1981,6 @@ xfs_alloc_get_freelist(
2122 logflags |= XFS_AGF_BTREEBLKS; 1981 logflags |= XFS_AGF_BTREEBLKS;
2123 } 1982 }
2124 1983
2125 TRACE_MODAGF(NULL, agf, logflags);
2126 xfs_alloc_log_agf(tp, agbp, logflags); 1984 xfs_alloc_log_agf(tp, agbp, logflags);
2127 *bnop = bno; 1985 *bnop = bno;
2128 1986
@@ -2165,6 +2023,8 @@ xfs_alloc_log_agf(
2165 sizeof(xfs_agf_t) 2023 sizeof(xfs_agf_t)
2166 }; 2024 };
2167 2025
2026 trace_xfs_agf(tp->t_mountp, XFS_BUF_TO_AGF(bp), fields, _RET_IP_);
2027
2168 xfs_btree_offsets(fields, offsets, XFS_AGF_NUM_BITS, &first, &last); 2028 xfs_btree_offsets(fields, offsets, XFS_AGF_NUM_BITS, &first, &last);
2169 xfs_trans_log_buf(tp, bp, (uint)first, (uint)last); 2029 xfs_trans_log_buf(tp, bp, (uint)first, (uint)last);
2170} 2030}
@@ -2230,13 +2090,11 @@ xfs_alloc_put_freelist(
2230 logflags |= XFS_AGF_BTREEBLKS; 2090 logflags |= XFS_AGF_BTREEBLKS;
2231 } 2091 }
2232 2092
2233 TRACE_MODAGF(NULL, agf, logflags);
2234 xfs_alloc_log_agf(tp, agbp, logflags); 2093 xfs_alloc_log_agf(tp, agbp, logflags);
2235 2094
2236 ASSERT(be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp)); 2095 ASSERT(be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp));
2237 blockp = &agfl->agfl_bno[be32_to_cpu(agf->agf_fllast)]; 2096 blockp = &agfl->agfl_bno[be32_to_cpu(agf->agf_fllast)];
2238 *blockp = cpu_to_be32(bno); 2097 *blockp = cpu_to_be32(bno);
2239 TRACE_MODAGF(NULL, agf, logflags);
2240 xfs_alloc_log_agf(tp, agbp, logflags); 2098 xfs_alloc_log_agf(tp, agbp, logflags);
2241 xfs_trans_log_buf(tp, agflbp, 2099 xfs_trans_log_buf(tp, agflbp,
2242 (int)((xfs_caddr_t)blockp - (xfs_caddr_t)agfl), 2100 (int)((xfs_caddr_t)blockp - (xfs_caddr_t)agfl),
@@ -2399,7 +2257,7 @@ xfs_alloc_vextent(
2399 args->minlen > args->maxlen || args->minlen > agsize || 2257 args->minlen > args->maxlen || args->minlen > agsize ||
2400 args->mod >= args->prod) { 2258 args->mod >= args->prod) {
2401 args->fsbno = NULLFSBLOCK; 2259 args->fsbno = NULLFSBLOCK;
2402 TRACE_ALLOC("badargs", args); 2260 trace_xfs_alloc_vextent_badargs(args);
2403 return 0; 2261 return 0;
2404 } 2262 }
2405 minleft = args->minleft; 2263 minleft = args->minleft;
@@ -2418,12 +2276,12 @@ xfs_alloc_vextent(
2418 error = xfs_alloc_fix_freelist(args, 0); 2276 error = xfs_alloc_fix_freelist(args, 0);
2419 args->minleft = minleft; 2277 args->minleft = minleft;
2420 if (error) { 2278 if (error) {
2421 TRACE_ALLOC("nofix", args); 2279 trace_xfs_alloc_vextent_nofix(args);
2422 goto error0; 2280 goto error0;
2423 } 2281 }
2424 if (!args->agbp) { 2282 if (!args->agbp) {
2425 up_read(&mp->m_peraglock); 2283 up_read(&mp->m_peraglock);
2426 TRACE_ALLOC("noagbp", args); 2284 trace_xfs_alloc_vextent_noagbp(args);
2427 break; 2285 break;
2428 } 2286 }
2429 args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno); 2287 args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
@@ -2488,7 +2346,7 @@ xfs_alloc_vextent(
2488 error = xfs_alloc_fix_freelist(args, flags); 2346 error = xfs_alloc_fix_freelist(args, flags);
2489 args->minleft = minleft; 2347 args->minleft = minleft;
2490 if (error) { 2348 if (error) {
2491 TRACE_ALLOC("nofix", args); 2349 trace_xfs_alloc_vextent_nofix(args);
2492 goto error0; 2350 goto error0;
2493 } 2351 }
2494 /* 2352 /*
@@ -2499,7 +2357,9 @@ xfs_alloc_vextent(
2499 goto error0; 2357 goto error0;
2500 break; 2358 break;
2501 } 2359 }
2502 TRACE_ALLOC("loopfailed", args); 2360
2361 trace_xfs_alloc_vextent_loopfailed(args);
2362
2503 /* 2363 /*
2504 * Didn't work, figure out the next iteration. 2364 * Didn't work, figure out the next iteration.
2505 */ 2365 */
@@ -2526,7 +2386,7 @@ xfs_alloc_vextent(
2526 if (args->agno == sagno) { 2386 if (args->agno == sagno) {
2527 if (no_min == 1) { 2387 if (no_min == 1) {
2528 args->agbno = NULLAGBLOCK; 2388 args->agbno = NULLAGBLOCK;
2529 TRACE_ALLOC("allfailed", args); 2389 trace_xfs_alloc_vextent_allfailed(args);
2530 break; 2390 break;
2531 } 2391 }
2532 if (flags == 0) { 2392 if (flags == 0) {
@@ -2642,16 +2502,16 @@ xfs_alloc_mark_busy(xfs_trans_t *tp,
2642 } 2502 }
2643 } 2503 }
2644 2504
2505 trace_xfs_alloc_busy(mp, agno, bno, len, n);
2506
2645 if (n < XFS_PAGB_NUM_SLOTS) { 2507 if (n < XFS_PAGB_NUM_SLOTS) {
2646 bsy = &mp->m_perag[agno].pagb_list[n]; 2508 bsy = &mp->m_perag[agno].pagb_list[n];
2647 mp->m_perag[agno].pagb_count++; 2509 mp->m_perag[agno].pagb_count++;
2648 TRACE_BUSY("xfs_alloc_mark_busy", "got", agno, bno, len, n, tp);
2649 bsy->busy_start = bno; 2510 bsy->busy_start = bno;
2650 bsy->busy_length = len; 2511 bsy->busy_length = len;
2651 bsy->busy_tp = tp; 2512 bsy->busy_tp = tp;
2652 xfs_trans_add_busy(tp, agno, n); 2513 xfs_trans_add_busy(tp, agno, n);
2653 } else { 2514 } else {
2654 TRACE_BUSY("xfs_alloc_mark_busy", "FULL", agno, bno, len, -1, tp);
2655 /* 2515 /*
2656 * The busy list is full! Since it is now not possible to 2516 * The busy list is full! Since it is now not possible to
2657 * track the free block, make this a synchronous transaction 2517 * track the free block, make this a synchronous transaction
@@ -2678,12 +2538,12 @@ xfs_alloc_clear_busy(xfs_trans_t *tp,
2678 list = mp->m_perag[agno].pagb_list; 2538 list = mp->m_perag[agno].pagb_list;
2679 2539
2680 ASSERT(idx < XFS_PAGB_NUM_SLOTS); 2540 ASSERT(idx < XFS_PAGB_NUM_SLOTS);
2541
2542 trace_xfs_alloc_unbusy(mp, agno, idx, list[idx].busy_tp == tp);
2543
2681 if (list[idx].busy_tp == tp) { 2544 if (list[idx].busy_tp == tp) {
2682 TRACE_UNBUSY("xfs_alloc_clear_busy", "found", agno, idx, tp);
2683 list[idx].busy_tp = NULL; 2545 list[idx].busy_tp = NULL;
2684 mp->m_perag[agno].pagb_count--; 2546 mp->m_perag[agno].pagb_count--;
2685 } else {
2686 TRACE_UNBUSY("xfs_alloc_clear_busy", "missing", agno, idx, tp);
2687 } 2547 }
2688 2548
2689 spin_unlock(&mp->m_perag[agno].pagb_lock); 2549 spin_unlock(&mp->m_perag[agno].pagb_lock);
@@ -2724,24 +2584,22 @@ xfs_alloc_search_busy(xfs_trans_t *tp,
2724 if ((bno > bend) || (uend < bsy->busy_start)) { 2584 if ((bno > bend) || (uend < bsy->busy_start)) {
2725 cnt--; 2585 cnt--;
2726 } else { 2586 } else {
2727 TRACE_BUSYSEARCH("xfs_alloc_search_busy",
2728 "found1", agno, bno, len, tp);
2729 break; 2587 break;
2730 } 2588 }
2731 } 2589 }
2732 } 2590 }
2733 2591
2592 trace_xfs_alloc_busysearch(mp, agno, bno, len, !!cnt);
2593
2734 /* 2594 /*
2735 * If a block was found, force the log through the LSN of the 2595 * If a block was found, force the log through the LSN of the
2736 * transaction that freed the block 2596 * transaction that freed the block
2737 */ 2597 */
2738 if (cnt) { 2598 if (cnt) {
2739 TRACE_BUSYSEARCH("xfs_alloc_search_busy", "found", agno, bno, len, tp);
2740 lsn = bsy->busy_tp->t_commit_lsn; 2599 lsn = bsy->busy_tp->t_commit_lsn;
2741 spin_unlock(&mp->m_perag[agno].pagb_lock); 2600 spin_unlock(&mp->m_perag[agno].pagb_lock);
2742 xfs_log_force(mp, lsn, XFS_LOG_FORCE|XFS_LOG_SYNC); 2601 xfs_log_force(mp, lsn, XFS_LOG_FORCE|XFS_LOG_SYNC);
2743 } else { 2602 } else {
2744 TRACE_BUSYSEARCH("xfs_alloc_search_busy", "not-found", agno, bno, len, tp);
2745 spin_unlock(&mp->m_perag[agno].pagb_lock); 2603 spin_unlock(&mp->m_perag[agno].pagb_lock);
2746 } 2604 }
2747} 2605}
diff --git a/fs/xfs/xfs_alloc.h b/fs/xfs/xfs_alloc.h
index e704caee10df..599bffa39784 100644
--- a/fs/xfs/xfs_alloc.h
+++ b/fs/xfs/xfs_alloc.h
@@ -37,6 +37,15 @@ typedef enum xfs_alloctype
37 XFS_ALLOCTYPE_THIS_BNO /* at exactly this block */ 37 XFS_ALLOCTYPE_THIS_BNO /* at exactly this block */
38} xfs_alloctype_t; 38} xfs_alloctype_t;
39 39
40#define XFS_ALLOC_TYPES \
41 { XFS_ALLOCTYPE_ANY_AG, "ANY_AG" }, \
42 { XFS_ALLOCTYPE_FIRST_AG, "FIRST_AG" }, \
43 { XFS_ALLOCTYPE_START_AG, "START_AG" }, \
44 { XFS_ALLOCTYPE_THIS_AG, "THIS_AG" }, \
45 { XFS_ALLOCTYPE_START_BNO, "START_BNO" }, \
46 { XFS_ALLOCTYPE_NEAR_BNO, "NEAR_BNO" }, \
47 { XFS_ALLOCTYPE_THIS_BNO, "THIS_BNO" }
48
40/* 49/*
41 * Flags for xfs_alloc_fix_freelist. 50 * Flags for xfs_alloc_fix_freelist.
42 */ 51 */
@@ -109,24 +118,6 @@ xfs_alloc_longest_free_extent(struct xfs_mount *mp,
109 118
110#ifdef __KERNEL__ 119#ifdef __KERNEL__
111 120
112#if defined(XFS_ALLOC_TRACE)
113/*
114 * Allocation tracing buffer size.
115 */
116#define XFS_ALLOC_TRACE_SIZE 4096
117extern ktrace_t *xfs_alloc_trace_buf;
118
119/*
120 * Types for alloc tracing.
121 */
122#define XFS_ALLOC_KTRACE_ALLOC 1
123#define XFS_ALLOC_KTRACE_FREE 2
124#define XFS_ALLOC_KTRACE_MODAGF 3
125#define XFS_ALLOC_KTRACE_BUSY 4
126#define XFS_ALLOC_KTRACE_UNBUSY 5
127#define XFS_ALLOC_KTRACE_BUSYSEARCH 6
128#endif
129
130void 121void
131xfs_alloc_mark_busy(xfs_trans_t *tp, 122xfs_alloc_mark_busy(xfs_trans_t *tp,
132 xfs_agnumber_t agno, 123 xfs_agnumber_t agno,
diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c
index c10c3a292d30..adbd9141aea1 100644
--- a/fs/xfs/xfs_alloc_btree.c
+++ b/fs/xfs/xfs_alloc_btree.c
@@ -39,6 +39,7 @@
39#include "xfs_ialloc.h" 39#include "xfs_ialloc.h"
40#include "xfs_alloc.h" 40#include "xfs_alloc.h"
41#include "xfs_error.h" 41#include "xfs_error.h"
42#include "xfs_trace.h"
42 43
43 44
44STATIC struct xfs_btree_cur * 45STATIC struct xfs_btree_cur *
diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c
index 8fe6f6b78a4a..e953b6cfb2a8 100644
--- a/fs/xfs/xfs_attr.c
+++ b/fs/xfs/xfs_attr.c
@@ -47,6 +47,7 @@
47#include "xfs_trans_space.h" 47#include "xfs_trans_space.h"
48#include "xfs_rw.h" 48#include "xfs_rw.h"
49#include "xfs_vnodeops.h" 49#include "xfs_vnodeops.h"
50#include "xfs_trace.h"
50 51
51/* 52/*
52 * xfs_attr.c 53 * xfs_attr.c
@@ -89,10 +90,6 @@ STATIC int xfs_attr_rmtval_remove(xfs_da_args_t *args);
89 90
90#define ATTR_RMTVALUE_MAPSIZE 1 /* # of map entries at once */ 91#define ATTR_RMTVALUE_MAPSIZE 1 /* # of map entries at once */
91 92
92#if defined(XFS_ATTR_TRACE)
93ktrace_t *xfs_attr_trace_buf;
94#endif
95
96STATIC int 93STATIC int
97xfs_attr_name_to_xname( 94xfs_attr_name_to_xname(
98 struct xfs_name *xname, 95 struct xfs_name *xname,
@@ -640,7 +637,6 @@ xfs_attr_list_int(xfs_attr_list_context_t *context)
640 return EIO; 637 return EIO;
641 638
642 xfs_ilock(dp, XFS_ILOCK_SHARED); 639 xfs_ilock(dp, XFS_ILOCK_SHARED);
643 xfs_attr_trace_l_c("syscall start", context);
644 640
645 /* 641 /*
646 * Decide on what work routines to call based on the inode size. 642 * Decide on what work routines to call based on the inode size.
@@ -656,7 +652,6 @@ xfs_attr_list_int(xfs_attr_list_context_t *context)
656 } 652 }
657 653
658 xfs_iunlock(dp, XFS_ILOCK_SHARED); 654 xfs_iunlock(dp, XFS_ILOCK_SHARED);
659 xfs_attr_trace_l_c("syscall end", context);
660 655
661 return error; 656 return error;
662} 657}
@@ -702,7 +697,7 @@ xfs_attr_put_listent(xfs_attr_list_context_t *context, int flags,
702 context->count * sizeof(alist->al_offset[0]); 697 context->count * sizeof(alist->al_offset[0]);
703 context->firstu -= ATTR_ENTSIZE(namelen); 698 context->firstu -= ATTR_ENTSIZE(namelen);
704 if (context->firstu < arraytop) { 699 if (context->firstu < arraytop) {
705 xfs_attr_trace_l_c("buffer full", context); 700 trace_xfs_attr_list_full(context);
706 alist->al_more = 1; 701 alist->al_more = 1;
707 context->seen_enough = 1; 702 context->seen_enough = 1;
708 return 1; 703 return 1;
@@ -714,7 +709,7 @@ xfs_attr_put_listent(xfs_attr_list_context_t *context, int flags,
714 aep->a_name[namelen] = 0; 709 aep->a_name[namelen] = 0;
715 alist->al_offset[context->count++] = context->firstu; 710 alist->al_offset[context->count++] = context->firstu;
716 alist->al_count = context->count; 711 alist->al_count = context->count;
717 xfs_attr_trace_l_c("add", context); 712 trace_xfs_attr_list_add(context);
718 return 0; 713 return 0;
719} 714}
720 715
@@ -1853,7 +1848,7 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
1853 node = bp->data; 1848 node = bp->data;
1854 switch (be16_to_cpu(node->hdr.info.magic)) { 1849 switch (be16_to_cpu(node->hdr.info.magic)) {
1855 case XFS_DA_NODE_MAGIC: 1850 case XFS_DA_NODE_MAGIC:
1856 xfs_attr_trace_l_cn("wrong blk", context, node); 1851 trace_xfs_attr_list_wrong_blk(context);
1857 xfs_da_brelse(NULL, bp); 1852 xfs_da_brelse(NULL, bp);
1858 bp = NULL; 1853 bp = NULL;
1859 break; 1854 break;
@@ -1861,20 +1856,18 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
1861 leaf = bp->data; 1856 leaf = bp->data;
1862 if (cursor->hashval > be32_to_cpu(leaf->entries[ 1857 if (cursor->hashval > be32_to_cpu(leaf->entries[
1863 be16_to_cpu(leaf->hdr.count)-1].hashval)) { 1858 be16_to_cpu(leaf->hdr.count)-1].hashval)) {
1864 xfs_attr_trace_l_cl("wrong blk", 1859 trace_xfs_attr_list_wrong_blk(context);
1865 context, leaf);
1866 xfs_da_brelse(NULL, bp); 1860 xfs_da_brelse(NULL, bp);
1867 bp = NULL; 1861 bp = NULL;
1868 } else if (cursor->hashval <= 1862 } else if (cursor->hashval <=
1869 be32_to_cpu(leaf->entries[0].hashval)) { 1863 be32_to_cpu(leaf->entries[0].hashval)) {
1870 xfs_attr_trace_l_cl("maybe wrong blk", 1864 trace_xfs_attr_list_wrong_blk(context);
1871 context, leaf);
1872 xfs_da_brelse(NULL, bp); 1865 xfs_da_brelse(NULL, bp);
1873 bp = NULL; 1866 bp = NULL;
1874 } 1867 }
1875 break; 1868 break;
1876 default: 1869 default:
1877 xfs_attr_trace_l_c("wrong blk - ??", context); 1870 trace_xfs_attr_list_wrong_blk(context);
1878 xfs_da_brelse(NULL, bp); 1871 xfs_da_brelse(NULL, bp);
1879 bp = NULL; 1872 bp = NULL;
1880 } 1873 }
@@ -1919,8 +1912,8 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
1919 if (cursor->hashval 1912 if (cursor->hashval
1920 <= be32_to_cpu(btree->hashval)) { 1913 <= be32_to_cpu(btree->hashval)) {
1921 cursor->blkno = be32_to_cpu(btree->before); 1914 cursor->blkno = be32_to_cpu(btree->before);
1922 xfs_attr_trace_l_cb("descending", 1915 trace_xfs_attr_list_node_descend(context,
1923 context, btree); 1916 btree);
1924 break; 1917 break;
1925 } 1918 }
1926 } 1919 }
@@ -2270,85 +2263,3 @@ xfs_attr_rmtval_remove(xfs_da_args_t *args)
2270 } 2263 }
2271 return(0); 2264 return(0);
2272} 2265}
2273
2274#if defined(XFS_ATTR_TRACE)
2275/*
2276 * Add a trace buffer entry for an attr_list context structure.
2277 */
2278void
2279xfs_attr_trace_l_c(char *where, struct xfs_attr_list_context *context)
2280{
2281 xfs_attr_trace_enter(XFS_ATTR_KTRACE_L_C, where, context,
2282 (__psunsigned_t)NULL,
2283 (__psunsigned_t)NULL,
2284 (__psunsigned_t)NULL);
2285}
2286
2287/*
2288 * Add a trace buffer entry for a context structure and a Btree node.
2289 */
2290void
2291xfs_attr_trace_l_cn(char *where, struct xfs_attr_list_context *context,
2292 struct xfs_da_intnode *node)
2293{
2294 xfs_attr_trace_enter(XFS_ATTR_KTRACE_L_CN, where, context,
2295 (__psunsigned_t)be16_to_cpu(node->hdr.count),
2296 (__psunsigned_t)be32_to_cpu(node->btree[0].hashval),
2297 (__psunsigned_t)be32_to_cpu(node->btree[
2298 be16_to_cpu(node->hdr.count)-1].hashval));
2299}
2300
2301/*
2302 * Add a trace buffer entry for a context structure and a Btree element.
2303 */
2304void
2305xfs_attr_trace_l_cb(char *where, struct xfs_attr_list_context *context,
2306 struct xfs_da_node_entry *btree)
2307{
2308 xfs_attr_trace_enter(XFS_ATTR_KTRACE_L_CB, where, context,
2309 (__psunsigned_t)be32_to_cpu(btree->hashval),
2310 (__psunsigned_t)be32_to_cpu(btree->before),
2311 (__psunsigned_t)NULL);
2312}
2313
2314/*
2315 * Add a trace buffer entry for a context structure and a leaf block.
2316 */
2317void
2318xfs_attr_trace_l_cl(char *where, struct xfs_attr_list_context *context,
2319 struct xfs_attr_leafblock *leaf)
2320{
2321 xfs_attr_trace_enter(XFS_ATTR_KTRACE_L_CL, where, context,
2322 (__psunsigned_t)be16_to_cpu(leaf->hdr.count),
2323 (__psunsigned_t)be32_to_cpu(leaf->entries[0].hashval),
2324 (__psunsigned_t)be32_to_cpu(leaf->entries[
2325 be16_to_cpu(leaf->hdr.count)-1].hashval));
2326}
2327
2328/*
2329 * Add a trace buffer entry for the arguments given to the routine,
2330 * generic form.
2331 */
2332void
2333xfs_attr_trace_enter(int type, char *where,
2334 struct xfs_attr_list_context *context,
2335 __psunsigned_t a13, __psunsigned_t a14,
2336 __psunsigned_t a15)
2337{
2338 ASSERT(xfs_attr_trace_buf);
2339 ktrace_enter(xfs_attr_trace_buf, (void *)((__psunsigned_t)type),
2340 (void *)((__psunsigned_t)where),
2341 (void *)((__psunsigned_t)context->dp),
2342 (void *)((__psunsigned_t)context->cursor->hashval),
2343 (void *)((__psunsigned_t)context->cursor->blkno),
2344 (void *)((__psunsigned_t)context->cursor->offset),
2345 (void *)((__psunsigned_t)context->alist),
2346 (void *)((__psunsigned_t)context->bufsize),
2347 (void *)((__psunsigned_t)context->count),
2348 (void *)((__psunsigned_t)context->firstu),
2349 NULL,
2350 (void *)((__psunsigned_t)context->dupcnt),
2351 (void *)((__psunsigned_t)context->flags),
2352 (void *)a13, (void *)a14, (void *)a15);
2353}
2354#endif /* XFS_ATTR_TRACE */
diff --git a/fs/xfs/xfs_attr.h b/fs/xfs/xfs_attr.h
index 12f0be3a73d4..59b410ce69a1 100644
--- a/fs/xfs/xfs_attr.h
+++ b/fs/xfs/xfs_attr.h
@@ -48,6 +48,16 @@ struct xfs_attr_list_context;
48#define ATTR_KERNOTIME 0x1000 /* [kernel] don't update inode timestamps */ 48#define ATTR_KERNOTIME 0x1000 /* [kernel] don't update inode timestamps */
49#define ATTR_KERNOVAL 0x2000 /* [kernel] get attr size only, not value */ 49#define ATTR_KERNOVAL 0x2000 /* [kernel] get attr size only, not value */
50 50
51#define XFS_ATTR_FLAGS \
52 { ATTR_DONTFOLLOW, "DONTFOLLOW" }, \
53 { ATTR_ROOT, "ROOT" }, \
54 { ATTR_TRUST, "TRUST" }, \
55 { ATTR_SECURE, "SECURE" }, \
56 { ATTR_CREATE, "CREATE" }, \
57 { ATTR_REPLACE, "REPLACE" }, \
58 { ATTR_KERNOTIME, "KERNOTIME" }, \
59 { ATTR_KERNOVAL, "KERNOVAL" }
60
51/* 61/*
52 * The maximum size (into the kernel or returned from the kernel) of an 62 * The maximum size (into the kernel or returned from the kernel) of an
53 * attribute value or the buffer used for an attr_list() call. Larger 63 * attribute value or the buffer used for an attr_list() call. Larger
diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c
index 0b687351293f..baf41b5af756 100644
--- a/fs/xfs/xfs_attr_leaf.c
+++ b/fs/xfs/xfs_attr_leaf.c
@@ -42,6 +42,7 @@
42#include "xfs_attr.h" 42#include "xfs_attr.h"
43#include "xfs_attr_leaf.h" 43#include "xfs_attr_leaf.h"
44#include "xfs_error.h" 44#include "xfs_error.h"
45#include "xfs_trace.h"
45 46
46/* 47/*
47 * xfs_attr_leaf.c 48 * xfs_attr_leaf.c
@@ -594,7 +595,7 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
594 cursor = context->cursor; 595 cursor = context->cursor;
595 ASSERT(cursor != NULL); 596 ASSERT(cursor != NULL);
596 597
597 xfs_attr_trace_l_c("sf start", context); 598 trace_xfs_attr_list_sf(context);
598 599
599 /* 600 /*
600 * If the buffer is large enough and the cursor is at the start, 601 * If the buffer is large enough and the cursor is at the start,
@@ -627,7 +628,7 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
627 return error; 628 return error;
628 sfe = XFS_ATTR_SF_NEXTENTRY(sfe); 629 sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
629 } 630 }
630 xfs_attr_trace_l_c("sf big-gulp", context); 631 trace_xfs_attr_list_sf_all(context);
631 return(0); 632 return(0);
632 } 633 }
633 634
@@ -653,7 +654,6 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
653 XFS_CORRUPTION_ERROR("xfs_attr_shortform_list", 654 XFS_CORRUPTION_ERROR("xfs_attr_shortform_list",
654 XFS_ERRLEVEL_LOW, 655 XFS_ERRLEVEL_LOW,
655 context->dp->i_mount, sfe); 656 context->dp->i_mount, sfe);
656 xfs_attr_trace_l_c("sf corrupted", context);
657 kmem_free(sbuf); 657 kmem_free(sbuf);
658 return XFS_ERROR(EFSCORRUPTED); 658 return XFS_ERROR(EFSCORRUPTED);
659 } 659 }
@@ -693,7 +693,6 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
693 } 693 }
694 if (i == nsbuf) { 694 if (i == nsbuf) {
695 kmem_free(sbuf); 695 kmem_free(sbuf);
696 xfs_attr_trace_l_c("blk end", context);
697 return(0); 696 return(0);
698 } 697 }
699 698
@@ -719,7 +718,6 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
719 } 718 }
720 719
721 kmem_free(sbuf); 720 kmem_free(sbuf);
722 xfs_attr_trace_l_c("sf E-O-F", context);
723 return(0); 721 return(0);
724} 722}
725 723
@@ -2323,7 +2321,7 @@ xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context)
2323 cursor = context->cursor; 2321 cursor = context->cursor;
2324 cursor->initted = 1; 2322 cursor->initted = 1;
2325 2323
2326 xfs_attr_trace_l_cl("blk start", context, leaf); 2324 trace_xfs_attr_list_leaf(context);
2327 2325
2328 /* 2326 /*
2329 * Re-find our place in the leaf block if this is a new syscall. 2327 * Re-find our place in the leaf block if this is a new syscall.
@@ -2344,7 +2342,7 @@ xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context)
2344 } 2342 }
2345 } 2343 }
2346 if (i == be16_to_cpu(leaf->hdr.count)) { 2344 if (i == be16_to_cpu(leaf->hdr.count)) {
2347 xfs_attr_trace_l_c("not found", context); 2345 trace_xfs_attr_list_notfound(context);
2348 return(0); 2346 return(0);
2349 } 2347 }
2350 } else { 2348 } else {
@@ -2419,7 +2417,7 @@ xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context)
2419 break; 2417 break;
2420 cursor->offset++; 2418 cursor->offset++;
2421 } 2419 }
2422 xfs_attr_trace_l_cl("blk end", context, leaf); 2420 trace_xfs_attr_list_leaf_end(context);
2423 return(retval); 2421 return(retval);
2424} 2422}
2425 2423
diff --git a/fs/xfs/xfs_attr_sf.h b/fs/xfs/xfs_attr_sf.h
index ea22839caed2..76ab7b0cbb3a 100644
--- a/fs/xfs/xfs_attr_sf.h
+++ b/fs/xfs/xfs_attr_sf.h
@@ -25,8 +25,6 @@
25 * to fit into the literal area of the inode. 25 * to fit into the literal area of the inode.
26 */ 26 */
27 27
28struct xfs_inode;
29
30/* 28/*
31 * Entries are packed toward the top as tight as possible. 29 * Entries are packed toward the top as tight as possible.
32 */ 30 */
@@ -69,42 +67,4 @@ typedef struct xfs_attr_sf_sort {
69 (be16_to_cpu(((xfs_attr_shortform_t *) \ 67 (be16_to_cpu(((xfs_attr_shortform_t *) \
70 ((dp)->i_afp->if_u1.if_data))->hdr.totsize)) 68 ((dp)->i_afp->if_u1.if_data))->hdr.totsize))
71 69
72#if defined(XFS_ATTR_TRACE)
73/*
74 * Kernel tracing support for attribute lists
75 */
76struct xfs_attr_list_context;
77struct xfs_da_intnode;
78struct xfs_da_node_entry;
79struct xfs_attr_leafblock;
80
81#define XFS_ATTR_TRACE_SIZE 4096 /* size of global trace buffer */
82extern ktrace_t *xfs_attr_trace_buf;
83
84/*
85 * Trace record types.
86 */
87#define XFS_ATTR_KTRACE_L_C 1 /* context */
88#define XFS_ATTR_KTRACE_L_CN 2 /* context, node */
89#define XFS_ATTR_KTRACE_L_CB 3 /* context, btree */
90#define XFS_ATTR_KTRACE_L_CL 4 /* context, leaf */
91
92void xfs_attr_trace_l_c(char *where, struct xfs_attr_list_context *context);
93void xfs_attr_trace_l_cn(char *where, struct xfs_attr_list_context *context,
94 struct xfs_da_intnode *node);
95void xfs_attr_trace_l_cb(char *where, struct xfs_attr_list_context *context,
96 struct xfs_da_node_entry *btree);
97void xfs_attr_trace_l_cl(char *where, struct xfs_attr_list_context *context,
98 struct xfs_attr_leafblock *leaf);
99void xfs_attr_trace_enter(int type, char *where,
100 struct xfs_attr_list_context *context,
101 __psunsigned_t a13, __psunsigned_t a14,
102 __psunsigned_t a15);
103#else
104#define xfs_attr_trace_l_c(w,c)
105#define xfs_attr_trace_l_cn(w,c,n)
106#define xfs_attr_trace_l_cb(w,c,b)
107#define xfs_attr_trace_l_cl(w,c,l)
108#endif /* XFS_ATTR_TRACE */
109
110#endif /* __XFS_ATTR_SF_H__ */ 70#endif /* __XFS_ATTR_SF_H__ */
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 8971fb09d387..98251cdc52aa 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -54,6 +54,7 @@
54#include "xfs_buf_item.h" 54#include "xfs_buf_item.h"
55#include "xfs_filestream.h" 55#include "xfs_filestream.h"
56#include "xfs_vnodeops.h" 56#include "xfs_vnodeops.h"
57#include "xfs_trace.h"
57 58
58 59
59#ifdef DEBUG 60#ifdef DEBUG
@@ -272,71 +273,6 @@ xfs_bmap_isaeof(
272 int whichfork, /* data or attribute fork */ 273 int whichfork, /* data or attribute fork */
273 char *aeof); /* return value */ 274 char *aeof); /* return value */
274 275
275#ifdef XFS_BMAP_TRACE
276/*
277 * Add bmap trace entry prior to a call to xfs_iext_remove.
278 */
279STATIC void
280xfs_bmap_trace_delete(
281 const char *fname, /* function name */
282 char *desc, /* operation description */
283 xfs_inode_t *ip, /* incore inode pointer */
284 xfs_extnum_t idx, /* index of entry(entries) deleted */
285 xfs_extnum_t cnt, /* count of entries deleted, 1 or 2 */
286 int whichfork); /* data or attr fork */
287
288/*
289 * Add bmap trace entry prior to a call to xfs_iext_insert, or
290 * reading in the extents list from the disk (in the btree).
291 */
292STATIC void
293xfs_bmap_trace_insert(
294 const char *fname, /* function name */
295 char *desc, /* operation description */
296 xfs_inode_t *ip, /* incore inode pointer */
297 xfs_extnum_t idx, /* index of entry(entries) inserted */
298 xfs_extnum_t cnt, /* count of entries inserted, 1 or 2 */
299 xfs_bmbt_irec_t *r1, /* inserted record 1 */
300 xfs_bmbt_irec_t *r2, /* inserted record 2 or null */
301 int whichfork); /* data or attr fork */
302
303/*
304 * Add bmap trace entry after updating an extent record in place.
305 */
306STATIC void
307xfs_bmap_trace_post_update(
308 const char *fname, /* function name */
309 char *desc, /* operation description */
310 xfs_inode_t *ip, /* incore inode pointer */
311 xfs_extnum_t idx, /* index of entry updated */
312 int whichfork); /* data or attr fork */
313
314/*
315 * Add bmap trace entry prior to updating an extent record in place.
316 */
317STATIC void
318xfs_bmap_trace_pre_update(
319 const char *fname, /* function name */
320 char *desc, /* operation description */
321 xfs_inode_t *ip, /* incore inode pointer */
322 xfs_extnum_t idx, /* index of entry to be updated */
323 int whichfork); /* data or attr fork */
324
325#define XFS_BMAP_TRACE_DELETE(d,ip,i,c,w) \
326 xfs_bmap_trace_delete(__func__,d,ip,i,c,w)
327#define XFS_BMAP_TRACE_INSERT(d,ip,i,c,r1,r2,w) \
328 xfs_bmap_trace_insert(__func__,d,ip,i,c,r1,r2,w)
329#define XFS_BMAP_TRACE_POST_UPDATE(d,ip,i,w) \
330 xfs_bmap_trace_post_update(__func__,d,ip,i,w)
331#define XFS_BMAP_TRACE_PRE_UPDATE(d,ip,i,w) \
332 xfs_bmap_trace_pre_update(__func__,d,ip,i,w)
333#else
334#define XFS_BMAP_TRACE_DELETE(d,ip,i,c,w)
335#define XFS_BMAP_TRACE_INSERT(d,ip,i,c,r1,r2,w)
336#define XFS_BMAP_TRACE_POST_UPDATE(d,ip,i,w)
337#define XFS_BMAP_TRACE_PRE_UPDATE(d,ip,i,w)
338#endif /* XFS_BMAP_TRACE */
339
340/* 276/*
341 * Compute the worst-case number of indirect blocks that will be used 277 * Compute the worst-case number of indirect blocks that will be used
342 * for ip's delayed extent of length "len". 278 * for ip's delayed extent of length "len".
@@ -363,18 +299,6 @@ xfs_bmap_validate_ret(
363#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) 299#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
364#endif /* DEBUG */ 300#endif /* DEBUG */
365 301
366#if defined(XFS_RW_TRACE)
367STATIC void
368xfs_bunmap_trace(
369 xfs_inode_t *ip,
370 xfs_fileoff_t bno,
371 xfs_filblks_t len,
372 int flags,
373 inst_t *ra);
374#else
375#define xfs_bunmap_trace(ip, bno, len, flags, ra)
376#endif /* XFS_RW_TRACE */
377
378STATIC int 302STATIC int
379xfs_bmap_count_tree( 303xfs_bmap_count_tree(
380 xfs_mount_t *mp, 304 xfs_mount_t *mp,
@@ -590,9 +514,9 @@ xfs_bmap_add_extent(
590 * already extents in the list. 514 * already extents in the list.
591 */ 515 */
592 if (nextents == 0) { 516 if (nextents == 0) {
593 XFS_BMAP_TRACE_INSERT("insert empty", ip, 0, 1, new, NULL, 517 xfs_iext_insert(ip, 0, 1, new,
594 whichfork); 518 whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0);
595 xfs_iext_insert(ifp, 0, 1, new); 519
596 ASSERT(cur == NULL); 520 ASSERT(cur == NULL);
597 ifp->if_lastex = 0; 521 ifp->if_lastex = 0;
598 if (!isnullstartblock(new->br_startblock)) { 522 if (!isnullstartblock(new->br_startblock)) {
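Note the new call shape at this site: the old sequence was XFS_BMAP_TRACE_INSERT followed by xfs_iext_insert(ifp, ...), while the new xfs_iext_insert(ip, ..., state-flags) hands the inode and the state word to the helper, which can then trace and pick the fork itself. A stand-alone toy with hypothetical names shows why a single flags argument is enough:

/*
 * Hypothetical model of the new call shape; names, flag value and the
 * "trace" hook are all stand-ins for the real xfs_iext_insert() path.
 */
#include <stdio.h>

#define DEMO_ATTRFORK	(1 << 8)

struct demo_inode {
	int data_fork_nextents;
	int attr_fork_nextents;
};

static void demo_trace_insert(int idx, int count, int state)
{
	printf("insert: idx=%d count=%d state=0x%x\n", idx, count, state);
}

static void demo_iext_insert(struct demo_inode *ip, int idx, int count,
			     int state)
{
	demo_trace_insert(idx, count, state);	/* tracing moved inside */

	if (state & DEMO_ATTRFORK)		/* fork chosen from flags */
		ip->attr_fork_nextents += count;
	else
		ip->data_fork_nextents += count;
}

int main(void)
{
	struct demo_inode ip = { 0, 0 };

	demo_iext_insert(&ip, 0, 1, 0);
	demo_iext_insert(&ip, 0, 1, DEMO_ATTRFORK);
	return 0;
}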
@@ -759,26 +683,10 @@ xfs_bmap_add_extent_delay_real(
759 xfs_filblks_t temp=0; /* value for dnew calculations */ 683 xfs_filblks_t temp=0; /* value for dnew calculations */
760 xfs_filblks_t temp2=0;/* value for dnew calculations */ 684 xfs_filblks_t temp2=0;/* value for dnew calculations */
761 int tmp_rval; /* partial logging flags */ 685 int tmp_rval; /* partial logging flags */
762 enum { /* bit number definitions for state */
763 LEFT_CONTIG, RIGHT_CONTIG,
764 LEFT_FILLING, RIGHT_FILLING,
765 LEFT_DELAY, RIGHT_DELAY,
766 LEFT_VALID, RIGHT_VALID
767 };
768 686
769#define LEFT r[0] 687#define LEFT r[0]
770#define RIGHT r[1] 688#define RIGHT r[1]
771#define PREV r[2] 689#define PREV r[2]
772#define MASK(b) (1 << (b))
773#define MASK2(a,b) (MASK(a) | MASK(b))
774#define MASK3(a,b,c) (MASK2(a,b) | MASK(c))
775#define MASK4(a,b,c,d) (MASK3(a,b,c) | MASK(d))
776#define STATE_SET(b,v) ((v) ? (state |= MASK(b)) : (state &= ~MASK(b)))
777#define STATE_TEST(b) (state & MASK(b))
778#define STATE_SET_TEST(b,v) ((v) ? ((state |= MASK(b)), 1) : \
779 ((state &= ~MASK(b)), 0))
780#define SWITCH_STATE \
781 (state & MASK4(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG))
782 690
783 /* 691 /*
784 * Set up a bunch of variables to make the tests simpler. 692 * Set up a bunch of variables to make the tests simpler.
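This hunk drops the per-function enum plus the MASK()/STATE_SET()/SWITCH_STATE macro family in favour of plain BMAP_* bit flags that are OR'ed into the state word and tested directly. A minimal compilable model of the new pattern follows; the flag values below are local stand-ins, not the tree's definitions.

/*
 * Illustrative only: a stand-alone model of the flag-based state handling
 * this hunk switches to.  The BMAP_* values below are local stand-ins for
 * the flags defined elsewhere in the XFS headers.
 */
#include <stdio.h>

#define BMAP_LEFT_CONTIG	(1 << 0)
#define BMAP_RIGHT_CONTIG	(1 << 1)
#define BMAP_LEFT_FILLING	(1 << 2)
#define BMAP_RIGHT_FILLING	(1 << 3)
#define BMAP_LEFT_DELAY		(1 << 4)
#define BMAP_RIGHT_DELAY	(1 << 5)
#define BMAP_LEFT_VALID		(1 << 6)
#define BMAP_RIGHT_VALID	(1 << 7)

static const char *classify(int state)
{
	/* Mask off everything except the bits the switch cares about. */
	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
	     BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		return "fill whole delayed extent, merge with both neighbours";
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
		return "fill first part, merge with the left neighbour";
	case 0:
		return "allocation lands in the middle, split in three";
	default:
		return "one of the other cases";
	}
}

int main(void)
{
	int state = 0;

	/* Conditions simply OR bits in; no SET/TEST macros needed. */
	state |= BMAP_LEFT_VALID;
	state |= BMAP_LEFT_FILLING;
	state |= BMAP_LEFT_CONTIG;

	printf("%s\n", classify(state));
	return 0;
}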
@@ -790,69 +698,80 @@ xfs_bmap_add_extent_delay_real(
790 new_endoff = new->br_startoff + new->br_blockcount; 698 new_endoff = new->br_startoff + new->br_blockcount;
791 ASSERT(PREV.br_startoff <= new->br_startoff); 699 ASSERT(PREV.br_startoff <= new->br_startoff);
792 ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff); 700 ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
701
793 /* 702 /*
794 * Set flags determining what part of the previous delayed allocation 703 * Set flags determining what part of the previous delayed allocation
795 * extent is being replaced by a real allocation. 704 * extent is being replaced by a real allocation.
796 */ 705 */
797 STATE_SET(LEFT_FILLING, PREV.br_startoff == new->br_startoff); 706 if (PREV.br_startoff == new->br_startoff)
798 STATE_SET(RIGHT_FILLING, 707 state |= BMAP_LEFT_FILLING;
799 PREV.br_startoff + PREV.br_blockcount == new_endoff); 708 if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
709 state |= BMAP_RIGHT_FILLING;
710
800 /* 711 /*
801 * Check and set flags if this segment has a left neighbor. 712 * Check and set flags if this segment has a left neighbor.
802 * Don't set contiguous if the combined extent would be too large. 713 * Don't set contiguous if the combined extent would be too large.
803 */ 714 */
804 if (STATE_SET_TEST(LEFT_VALID, idx > 0)) { 715 if (idx > 0) {
716 state |= BMAP_LEFT_VALID;
805 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &LEFT); 717 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &LEFT);
806 STATE_SET(LEFT_DELAY, isnullstartblock(LEFT.br_startblock)); 718
719 if (isnullstartblock(LEFT.br_startblock))
720 state |= BMAP_LEFT_DELAY;
807 } 721 }
808 STATE_SET(LEFT_CONTIG, 722
809 STATE_TEST(LEFT_VALID) && !STATE_TEST(LEFT_DELAY) && 723 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
810 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff && 724 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
811 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock && 725 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
812 LEFT.br_state == new->br_state && 726 LEFT.br_state == new->br_state &&
813 LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN); 727 LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
728 state |= BMAP_LEFT_CONTIG;
729
814 /* 730 /*
815 * Check and set flags if this segment has a right neighbor. 731 * Check and set flags if this segment has a right neighbor.
816 * Don't set contiguous if the combined extent would be too large. 732 * Don't set contiguous if the combined extent would be too large.
817 * Also check for all-three-contiguous being too large. 733 * Also check for all-three-contiguous being too large.
818 */ 734 */
819 if (STATE_SET_TEST(RIGHT_VALID, 735 if (idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) {
820 idx < 736 state |= BMAP_RIGHT_VALID;
821 ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1)) {
822 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx + 1), &RIGHT); 737 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx + 1), &RIGHT);
823 STATE_SET(RIGHT_DELAY, isnullstartblock(RIGHT.br_startblock)); 738
739 if (isnullstartblock(RIGHT.br_startblock))
740 state |= BMAP_RIGHT_DELAY;
824 } 741 }
825 STATE_SET(RIGHT_CONTIG, 742
826 STATE_TEST(RIGHT_VALID) && !STATE_TEST(RIGHT_DELAY) && 743 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
827 new_endoff == RIGHT.br_startoff && 744 new_endoff == RIGHT.br_startoff &&
828 new->br_startblock + new->br_blockcount == 745 new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
829 RIGHT.br_startblock && 746 new->br_state == RIGHT.br_state &&
830 new->br_state == RIGHT.br_state && 747 new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
831 new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN && 748 ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
832 ((state & MASK3(LEFT_CONTIG, LEFT_FILLING, RIGHT_FILLING)) != 749 BMAP_RIGHT_FILLING)) !=
833 MASK3(LEFT_CONTIG, LEFT_FILLING, RIGHT_FILLING) || 750 (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
834 LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount 751 BMAP_RIGHT_FILLING) ||
835 <= MAXEXTLEN)); 752 LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
753 <= MAXEXTLEN))
754 state |= BMAP_RIGHT_CONTIG;
755
836 error = 0; 756 error = 0;
837 /* 757 /*
838 * Switch out based on the FILLING and CONTIG state bits. 758 * Switch out based on the FILLING and CONTIG state bits.
839 */ 759 */
840 switch (SWITCH_STATE) { 760 switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
841 761 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
842 case MASK4(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG): 762 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
763 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
843 /* 764 /*
844 * Filling in all of a previously delayed allocation extent. 765 * Filling in all of a previously delayed allocation extent.
845 * The left and right neighbors are both contiguous with new. 766 * The left and right neighbors are both contiguous with new.
846 */ 767 */
847 XFS_BMAP_TRACE_PRE_UPDATE("LF|RF|LC|RC", ip, idx - 1, 768 trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
848 XFS_DATA_FORK);
849 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), 769 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
850 LEFT.br_blockcount + PREV.br_blockcount + 770 LEFT.br_blockcount + PREV.br_blockcount +
851 RIGHT.br_blockcount); 771 RIGHT.br_blockcount);
852 XFS_BMAP_TRACE_POST_UPDATE("LF|RF|LC|RC", ip, idx - 1, 772 trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
853 XFS_DATA_FORK); 773
854 XFS_BMAP_TRACE_DELETE("LF|RF|LC|RC", ip, idx, 2, XFS_DATA_FORK); 774 xfs_iext_remove(ip, idx, 2, state);
855 xfs_iext_remove(ifp, idx, 2);
856 ip->i_df.if_lastex = idx - 1; 775 ip->i_df.if_lastex = idx - 1;
857 ip->i_d.di_nextents--; 776 ip->i_d.di_nextents--;
858 if (cur == NULL) 777 if (cur == NULL)
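With the macros gone, the contiguity tests above read more directly: the left neighbour can be merged only if it ends exactly where the new extent starts, in both file offset and block number, has the same written/unwritten state, and the merged length stays within the maximum extent size. The same predicate is shown below as a self-contained function over a simplified record type; the MAXEXTLEN value is only an illustrative limit.

/*
 * The left-contiguity predicate over a simplified record type.  The
 * struct mirrors the fields used above; DEMO_MAXEXTLEN is illustrative.
 */
#include <stdbool.h>
#include <stdint.h>

#define DEMO_MAXEXTLEN	((1u << 21) - 1)

struct demo_irec {
	uint64_t br_startoff;	/* file offset of the extent */
	uint64_t br_startblock;	/* first filesystem block */
	uint64_t br_blockcount;	/* length in blocks */
	int	 br_state;	/* written vs. unwritten */
};

static bool demo_left_contig(const struct demo_irec *left,
			     const struct demo_irec *new)
{
	return left->br_startoff + left->br_blockcount == new->br_startoff &&
	       left->br_startblock + left->br_blockcount == new->br_startblock &&
	       left->br_state == new->br_state &&
	       left->br_blockcount + new->br_blockcount <= DEMO_MAXEXTLEN;
}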
@@ -885,20 +804,18 @@ xfs_bmap_add_extent_delay_real(
885 RIGHT.br_blockcount; 804 RIGHT.br_blockcount;
886 break; 805 break;
887 806
888 case MASK3(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG): 807 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
889 /* 808 /*
890 * Filling in all of a previously delayed allocation extent. 809 * Filling in all of a previously delayed allocation extent.
891 * The left neighbor is contiguous, the right is not. 810 * The left neighbor is contiguous, the right is not.
892 */ 811 */
893 XFS_BMAP_TRACE_PRE_UPDATE("LF|RF|LC", ip, idx - 1, 812 trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
894 XFS_DATA_FORK);
895 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), 813 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
896 LEFT.br_blockcount + PREV.br_blockcount); 814 LEFT.br_blockcount + PREV.br_blockcount);
897 XFS_BMAP_TRACE_POST_UPDATE("LF|RF|LC", ip, idx - 1, 815 trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
898 XFS_DATA_FORK); 816
899 ip->i_df.if_lastex = idx - 1; 817 ip->i_df.if_lastex = idx - 1;
900 XFS_BMAP_TRACE_DELETE("LF|RF|LC", ip, idx, 1, XFS_DATA_FORK); 818 xfs_iext_remove(ip, idx, 1, state);
901 xfs_iext_remove(ifp, idx, 1);
902 if (cur == NULL) 819 if (cur == NULL)
903 rval = XFS_ILOG_DEXT; 820 rval = XFS_ILOG_DEXT;
904 else { 821 else {
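Each of the single-neighbour cases follows the same two-step shape: widen the surviving record, then delete the record it absorbed (the explicit XFS_BMAP_TRACE_DELETE call disappears because xfs_iext_remove() now traces internally). A toy, array-backed version of that sequence:

/*
 * Toy model of the "merge into the left neighbour, then remove the old
 * record" pattern.  Not the real xfs_iext_* implementation.
 */
#include <string.h>

struct demo_rec {
	unsigned long long br_startoff;
	unsigned long long br_blockcount;
};

static void demo_merge_left(struct demo_rec *recs, int *nrecs, int idx)
{
	/* Fold the extent at idx into its left neighbour... */
	recs[idx - 1].br_blockcount += recs[idx].br_blockcount;

	/* ...then close the gap, as xfs_iext_remove(ip, idx, 1, state) does
	 * for the incore extent list (and traces while it is at it). */
	memmove(&recs[idx], &recs[idx + 1],
		(size_t)(*nrecs - idx - 1) * sizeof(recs[0]));
	(*nrecs)--;
}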
@@ -921,19 +838,19 @@ xfs_bmap_add_extent_delay_real(
921 PREV.br_blockcount; 838 PREV.br_blockcount;
922 break; 839 break;
923 840
924 case MASK3(LEFT_FILLING, RIGHT_FILLING, RIGHT_CONTIG): 841 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
925 /* 842 /*
926 * Filling in all of a previously delayed allocation extent. 843 * Filling in all of a previously delayed allocation extent.
927 * The right neighbor is contiguous, the left is not. 844 * The right neighbor is contiguous, the left is not.
928 */ 845 */
929 XFS_BMAP_TRACE_PRE_UPDATE("LF|RF|RC", ip, idx, XFS_DATA_FORK); 846 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
930 xfs_bmbt_set_startblock(ep, new->br_startblock); 847 xfs_bmbt_set_startblock(ep, new->br_startblock);
931 xfs_bmbt_set_blockcount(ep, 848 xfs_bmbt_set_blockcount(ep,
932 PREV.br_blockcount + RIGHT.br_blockcount); 849 PREV.br_blockcount + RIGHT.br_blockcount);
933 XFS_BMAP_TRACE_POST_UPDATE("LF|RF|RC", ip, idx, XFS_DATA_FORK); 850 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
851
934 ip->i_df.if_lastex = idx; 852 ip->i_df.if_lastex = idx;
935 XFS_BMAP_TRACE_DELETE("LF|RF|RC", ip, idx + 1, 1, XFS_DATA_FORK); 853 xfs_iext_remove(ip, idx + 1, 1, state);
936 xfs_iext_remove(ifp, idx + 1, 1);
937 if (cur == NULL) 854 if (cur == NULL)
938 rval = XFS_ILOG_DEXT; 855 rval = XFS_ILOG_DEXT;
939 else { 856 else {
@@ -956,15 +873,16 @@ xfs_bmap_add_extent_delay_real(
956 RIGHT.br_blockcount; 873 RIGHT.br_blockcount;
957 break; 874 break;
958 875
959 case MASK2(LEFT_FILLING, RIGHT_FILLING): 876 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
960 /* 877 /*
961 * Filling in all of a previously delayed allocation extent. 878 * Filling in all of a previously delayed allocation extent.
962 * Neither the left nor right neighbors are contiguous with 879 * Neither the left nor right neighbors are contiguous with
963 * the new one. 880 * the new one.
964 */ 881 */
965 XFS_BMAP_TRACE_PRE_UPDATE("LF|RF", ip, idx, XFS_DATA_FORK); 882 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
966 xfs_bmbt_set_startblock(ep, new->br_startblock); 883 xfs_bmbt_set_startblock(ep, new->br_startblock);
967 XFS_BMAP_TRACE_POST_UPDATE("LF|RF", ip, idx, XFS_DATA_FORK); 884 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
885
968 ip->i_df.if_lastex = idx; 886 ip->i_df.if_lastex = idx;
969 ip->i_d.di_nextents++; 887 ip->i_d.di_nextents++;
970 if (cur == NULL) 888 if (cur == NULL)
@@ -987,19 +905,20 @@ xfs_bmap_add_extent_delay_real(
987 temp2 = new->br_blockcount; 905 temp2 = new->br_blockcount;
988 break; 906 break;
989 907
990 case MASK2(LEFT_FILLING, LEFT_CONTIG): 908 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
991 /* 909 /*
992 * Filling in the first part of a previous delayed allocation. 910 * Filling in the first part of a previous delayed allocation.
993 * The left neighbor is contiguous. 911 * The left neighbor is contiguous.
994 */ 912 */
995 XFS_BMAP_TRACE_PRE_UPDATE("LF|LC", ip, idx - 1, XFS_DATA_FORK); 913 trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
996 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), 914 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
997 LEFT.br_blockcount + new->br_blockcount); 915 LEFT.br_blockcount + new->br_blockcount);
998 xfs_bmbt_set_startoff(ep, 916 xfs_bmbt_set_startoff(ep,
999 PREV.br_startoff + new->br_blockcount); 917 PREV.br_startoff + new->br_blockcount);
1000 XFS_BMAP_TRACE_POST_UPDATE("LF|LC", ip, idx - 1, XFS_DATA_FORK); 918 trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
919
1001 temp = PREV.br_blockcount - new->br_blockcount; 920 temp = PREV.br_blockcount - new->br_blockcount;
1002 XFS_BMAP_TRACE_PRE_UPDATE("LF|LC", ip, idx, XFS_DATA_FORK); 921 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
1003 xfs_bmbt_set_blockcount(ep, temp); 922 xfs_bmbt_set_blockcount(ep, temp);
1004 ip->i_df.if_lastex = idx - 1; 923 ip->i_df.if_lastex = idx - 1;
1005 if (cur == NULL) 924 if (cur == NULL)
@@ -1021,7 +940,7 @@ xfs_bmap_add_extent_delay_real(
1021 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 940 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
1022 startblockval(PREV.br_startblock)); 941 startblockval(PREV.br_startblock));
1023 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); 942 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
1024 XFS_BMAP_TRACE_POST_UPDATE("LF|LC", ip, idx, XFS_DATA_FORK); 943 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
1025 *dnew = temp; 944 *dnew = temp;
1026 /* DELTA: The boundary between two in-core extents moved. */ 945 /* DELTA: The boundary between two in-core extents moved. */
1027 temp = LEFT.br_startoff; 946 temp = LEFT.br_startoff;
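In the partial-fill cases the leftover delayed extent keeps no real start block; its br_startblock slot carries the worst-case indirect reservation, wrapped by nullstartblock() and read back with startblockval(), which is why the code above clamps the recomputed reservation to startblockval(PREV.br_startblock). A hypothetical encoding that demonstrates the round trip (the kernel's actual bit layout differs):

/*
 * Hypothetical delayed-allocation startblock encoding: one flag bit marks
 * "no real block yet" and the low bits carry the indirect reservation.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define DEMO_DELAYED_FLAG	(1ULL << 63)

static uint64_t demo_nullstartblock(unsigned int indlen)
{
	return DEMO_DELAYED_FLAG | indlen;
}

static bool demo_isnullstartblock(uint64_t startblock)
{
	return (startblock & DEMO_DELAYED_FLAG) != 0;
}

static unsigned int demo_startblockval(uint64_t startblock)
{
	return (unsigned int)(startblock & ~DEMO_DELAYED_FLAG);
}

int main(void)
{
	uint64_t sb = demo_nullstartblock(8);

	assert(demo_isnullstartblock(sb));
	assert(demo_startblockval(sb) == 8);
	return 0;
}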
@@ -1029,18 +948,16 @@ xfs_bmap_add_extent_delay_real(
1029 PREV.br_blockcount; 948 PREV.br_blockcount;
1030 break; 949 break;
1031 950
1032 case MASK(LEFT_FILLING): 951 case BMAP_LEFT_FILLING:
1033 /* 952 /*
1034 * Filling in the first part of a previous delayed allocation. 953 * Filling in the first part of a previous delayed allocation.
1035 * The left neighbor is not contiguous. 954 * The left neighbor is not contiguous.
1036 */ 955 */
1037 XFS_BMAP_TRACE_PRE_UPDATE("LF", ip, idx, XFS_DATA_FORK); 956 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
1038 xfs_bmbt_set_startoff(ep, new_endoff); 957 xfs_bmbt_set_startoff(ep, new_endoff);
1039 temp = PREV.br_blockcount - new->br_blockcount; 958 temp = PREV.br_blockcount - new->br_blockcount;
1040 xfs_bmbt_set_blockcount(ep, temp); 959 xfs_bmbt_set_blockcount(ep, temp);
1041 XFS_BMAP_TRACE_INSERT("LF", ip, idx, 1, new, NULL, 960 xfs_iext_insert(ip, idx, 1, new, state);
1042 XFS_DATA_FORK);
1043 xfs_iext_insert(ifp, idx, 1, new);
1044 ip->i_df.if_lastex = idx; 961 ip->i_df.if_lastex = idx;
1045 ip->i_d.di_nextents++; 962 ip->i_d.di_nextents++;
1046 if (cur == NULL) 963 if (cur == NULL)
@@ -1071,27 +988,27 @@ xfs_bmap_add_extent_delay_real(
1071 (cur ? cur->bc_private.b.allocated : 0)); 988 (cur ? cur->bc_private.b.allocated : 0));
1072 ep = xfs_iext_get_ext(ifp, idx + 1); 989 ep = xfs_iext_get_ext(ifp, idx + 1);
1073 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); 990 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
1074 XFS_BMAP_TRACE_POST_UPDATE("LF", ip, idx + 1, XFS_DATA_FORK); 991 trace_xfs_bmap_post_update(ip, idx + 1, state, _THIS_IP_);
1075 *dnew = temp; 992 *dnew = temp;
1076 /* DELTA: One in-core extent is split in two. */ 993 /* DELTA: One in-core extent is split in two. */
1077 temp = PREV.br_startoff; 994 temp = PREV.br_startoff;
1078 temp2 = PREV.br_blockcount; 995 temp2 = PREV.br_blockcount;
1079 break; 996 break;
1080 997
1081 case MASK2(RIGHT_FILLING, RIGHT_CONTIG): 998 case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1082 /* 999 /*
1083 * Filling in the last part of a previous delayed allocation. 1000 * Filling in the last part of a previous delayed allocation.
1084 * The right neighbor is contiguous with the new allocation. 1001 * The right neighbor is contiguous with the new allocation.
1085 */ 1002 */
1086 temp = PREV.br_blockcount - new->br_blockcount; 1003 temp = PREV.br_blockcount - new->br_blockcount;
1087 XFS_BMAP_TRACE_PRE_UPDATE("RF|RC", ip, idx, XFS_DATA_FORK); 1004 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
1088 XFS_BMAP_TRACE_PRE_UPDATE("RF|RC", ip, idx + 1, XFS_DATA_FORK); 1005 trace_xfs_bmap_pre_update(ip, idx + 1, state, _THIS_IP_);
1089 xfs_bmbt_set_blockcount(ep, temp); 1006 xfs_bmbt_set_blockcount(ep, temp);
1090 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, idx + 1), 1007 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, idx + 1),
1091 new->br_startoff, new->br_startblock, 1008 new->br_startoff, new->br_startblock,
1092 new->br_blockcount + RIGHT.br_blockcount, 1009 new->br_blockcount + RIGHT.br_blockcount,
1093 RIGHT.br_state); 1010 RIGHT.br_state);
1094 XFS_BMAP_TRACE_POST_UPDATE("RF|RC", ip, idx + 1, XFS_DATA_FORK); 1011 trace_xfs_bmap_post_update(ip, idx + 1, state, _THIS_IP_);
1095 ip->i_df.if_lastex = idx + 1; 1012 ip->i_df.if_lastex = idx + 1;
1096 if (cur == NULL) 1013 if (cur == NULL)
1097 rval = XFS_ILOG_DEXT; 1014 rval = XFS_ILOG_DEXT;
@@ -1112,7 +1029,7 @@ xfs_bmap_add_extent_delay_real(
1112 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 1029 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
1113 startblockval(PREV.br_startblock)); 1030 startblockval(PREV.br_startblock));
1114 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); 1031 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
1115 XFS_BMAP_TRACE_POST_UPDATE("RF|RC", ip, idx, XFS_DATA_FORK); 1032 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
1116 *dnew = temp; 1033 *dnew = temp;
1117 /* DELTA: The boundary between two in-core extents moved. */ 1034 /* DELTA: The boundary between two in-core extents moved. */
1118 temp = PREV.br_startoff; 1035 temp = PREV.br_startoff;
@@ -1120,17 +1037,15 @@ xfs_bmap_add_extent_delay_real(
1120 RIGHT.br_blockcount; 1037 RIGHT.br_blockcount;
1121 break; 1038 break;
1122 1039
1123 case MASK(RIGHT_FILLING): 1040 case BMAP_RIGHT_FILLING:
1124 /* 1041 /*
1125 * Filling in the last part of a previous delayed allocation. 1042 * Filling in the last part of a previous delayed allocation.
1126 * The right neighbor is not contiguous. 1043 * The right neighbor is not contiguous.
1127 */ 1044 */
1128 temp = PREV.br_blockcount - new->br_blockcount; 1045 temp = PREV.br_blockcount - new->br_blockcount;
1129 XFS_BMAP_TRACE_PRE_UPDATE("RF", ip, idx, XFS_DATA_FORK); 1046 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
1130 xfs_bmbt_set_blockcount(ep, temp); 1047 xfs_bmbt_set_blockcount(ep, temp);
1131 XFS_BMAP_TRACE_INSERT("RF", ip, idx + 1, 1, new, NULL, 1048 xfs_iext_insert(ip, idx + 1, 1, new, state);
1132 XFS_DATA_FORK);
1133 xfs_iext_insert(ifp, idx + 1, 1, new);
1134 ip->i_df.if_lastex = idx + 1; 1049 ip->i_df.if_lastex = idx + 1;
1135 ip->i_d.di_nextents++; 1050 ip->i_d.di_nextents++;
1136 if (cur == NULL) 1051 if (cur == NULL)
@@ -1161,7 +1076,7 @@ xfs_bmap_add_extent_delay_real(
1161 (cur ? cur->bc_private.b.allocated : 0)); 1076 (cur ? cur->bc_private.b.allocated : 0));
1162 ep = xfs_iext_get_ext(ifp, idx); 1077 ep = xfs_iext_get_ext(ifp, idx);
1163 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); 1078 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
1164 XFS_BMAP_TRACE_POST_UPDATE("RF", ip, idx, XFS_DATA_FORK); 1079 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
1165 *dnew = temp; 1080 *dnew = temp;
1166 /* DELTA: One in-core extent is split in two. */ 1081 /* DELTA: One in-core extent is split in two. */
1167 temp = PREV.br_startoff; 1082 temp = PREV.br_startoff;
@@ -1175,7 +1090,7 @@ xfs_bmap_add_extent_delay_real(
1175 * This case is avoided almost all the time. 1090 * This case is avoided almost all the time.
1176 */ 1091 */
1177 temp = new->br_startoff - PREV.br_startoff; 1092 temp = new->br_startoff - PREV.br_startoff;
1178 XFS_BMAP_TRACE_PRE_UPDATE("0", ip, idx, XFS_DATA_FORK); 1093 trace_xfs_bmap_pre_update(ip, idx, 0, _THIS_IP_);
1179 xfs_bmbt_set_blockcount(ep, temp); 1094 xfs_bmbt_set_blockcount(ep, temp);
1180 r[0] = *new; 1095 r[0] = *new;
1181 r[1].br_state = PREV.br_state; 1096 r[1].br_state = PREV.br_state;
@@ -1183,9 +1098,7 @@ xfs_bmap_add_extent_delay_real(
1183 r[1].br_startoff = new_endoff; 1098 r[1].br_startoff = new_endoff;
1184 temp2 = PREV.br_startoff + PREV.br_blockcount - new_endoff; 1099 temp2 = PREV.br_startoff + PREV.br_blockcount - new_endoff;
1185 r[1].br_blockcount = temp2; 1100 r[1].br_blockcount = temp2;
1186 XFS_BMAP_TRACE_INSERT("0", ip, idx + 1, 2, &r[0], &r[1], 1101 xfs_iext_insert(ip, idx + 1, 2, &r[0], state);
1187 XFS_DATA_FORK);
1188 xfs_iext_insert(ifp, idx + 1, 2, &r[0]);
1189 ip->i_df.if_lastex = idx + 1; 1102 ip->i_df.if_lastex = idx + 1;
1190 ip->i_d.di_nextents++; 1103 ip->i_d.di_nextents++;
1191 if (cur == NULL) 1104 if (cur == NULL)
@@ -1242,24 +1155,24 @@ xfs_bmap_add_extent_delay_real(
1242 } 1155 }
1243 ep = xfs_iext_get_ext(ifp, idx); 1156 ep = xfs_iext_get_ext(ifp, idx);
1244 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); 1157 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
1245 XFS_BMAP_TRACE_POST_UPDATE("0", ip, idx, XFS_DATA_FORK); 1158 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
1246 XFS_BMAP_TRACE_PRE_UPDATE("0", ip, idx + 2, XFS_DATA_FORK); 1159 trace_xfs_bmap_pre_update(ip, idx + 2, state, _THIS_IP_);
1247 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx + 2), 1160 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx + 2),
1248 nullstartblock((int)temp2)); 1161 nullstartblock((int)temp2));
1249 XFS_BMAP_TRACE_POST_UPDATE("0", ip, idx + 2, XFS_DATA_FORK); 1162 trace_xfs_bmap_post_update(ip, idx + 2, state, _THIS_IP_);
1250 *dnew = temp + temp2; 1163 *dnew = temp + temp2;
1251 /* DELTA: One in-core extent is split in three. */ 1164 /* DELTA: One in-core extent is split in three. */
1252 temp = PREV.br_startoff; 1165 temp = PREV.br_startoff;
1253 temp2 = PREV.br_blockcount; 1166 temp2 = PREV.br_blockcount;
1254 break; 1167 break;
1255 1168
1256 case MASK3(LEFT_FILLING, LEFT_CONTIG, RIGHT_CONTIG): 1169 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1257 case MASK3(RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG): 1170 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1258 case MASK2(LEFT_FILLING, RIGHT_CONTIG): 1171 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
1259 case MASK2(RIGHT_FILLING, LEFT_CONTIG): 1172 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
1260 case MASK2(LEFT_CONTIG, RIGHT_CONTIG): 1173 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1261 case MASK(LEFT_CONTIG): 1174 case BMAP_LEFT_CONTIG:
1262 case MASK(RIGHT_CONTIG): 1175 case BMAP_RIGHT_CONTIG:
1263 /* 1176 /*
1264 * These cases are all impossible. 1177 * These cases are all impossible.
1265 */ 1178 */
@@ -1279,14 +1192,6 @@ done:
1279#undef LEFT 1192#undef LEFT
1280#undef RIGHT 1193#undef RIGHT
1281#undef PREV 1194#undef PREV
1282#undef MASK
1283#undef MASK2
1284#undef MASK3
1285#undef MASK4
1286#undef STATE_SET
1287#undef STATE_TEST
1288#undef STATE_SET_TEST
1289#undef SWITCH_STATE
1290} 1195}
1291 1196
1292/* 1197/*
@@ -1316,27 +1221,10 @@ xfs_bmap_add_extent_unwritten_real(
1316 int state = 0;/* state bits, accessed thru macros */ 1221 int state = 0;/* state bits, accessed thru macros */
1317 xfs_filblks_t temp=0; 1222 xfs_filblks_t temp=0;
1318 xfs_filblks_t temp2=0; 1223 xfs_filblks_t temp2=0;
1319 enum { /* bit number definitions for state */
1320 LEFT_CONTIG, RIGHT_CONTIG,
1321 LEFT_FILLING, RIGHT_FILLING,
1322 LEFT_DELAY, RIGHT_DELAY,
1323 LEFT_VALID, RIGHT_VALID
1324 };
1325 1224
1326#define LEFT r[0] 1225#define LEFT r[0]
1327#define RIGHT r[1] 1226#define RIGHT r[1]
1328#define PREV r[2] 1227#define PREV r[2]
1329#define MASK(b) (1 << (b))
1330#define MASK2(a,b) (MASK(a) | MASK(b))
1331#define MASK3(a,b,c) (MASK2(a,b) | MASK(c))
1332#define MASK4(a,b,c,d) (MASK3(a,b,c) | MASK(d))
1333#define STATE_SET(b,v) ((v) ? (state |= MASK(b)) : (state &= ~MASK(b)))
1334#define STATE_TEST(b) (state & MASK(b))
1335#define STATE_SET_TEST(b,v) ((v) ? ((state |= MASK(b)), 1) : \
1336 ((state &= ~MASK(b)), 0))
1337#define SWITCH_STATE \
1338 (state & MASK4(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG))
1339
1340 /* 1228 /*
1341 * Set up a bunch of variables to make the tests simpler. 1229 * Set up a bunch of variables to make the tests simpler.
1342 */ 1230 */
@@ -1352,68 +1240,78 @@ xfs_bmap_add_extent_unwritten_real(
1352 new_endoff = new->br_startoff + new->br_blockcount; 1240 new_endoff = new->br_startoff + new->br_blockcount;
1353 ASSERT(PREV.br_startoff <= new->br_startoff); 1241 ASSERT(PREV.br_startoff <= new->br_startoff);
1354 ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff); 1242 ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
1243
1355 /* 1244 /*
1356 * Set flags determining what part of the previous oldext allocation 1245 * Set flags determining what part of the previous oldext allocation
1357 * extent is being replaced by a newext allocation. 1246 * extent is being replaced by a newext allocation.
1358 */ 1247 */
1359 STATE_SET(LEFT_FILLING, PREV.br_startoff == new->br_startoff); 1248 if (PREV.br_startoff == new->br_startoff)
1360 STATE_SET(RIGHT_FILLING, 1249 state |= BMAP_LEFT_FILLING;
1361 PREV.br_startoff + PREV.br_blockcount == new_endoff); 1250 if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
1251 state |= BMAP_RIGHT_FILLING;
1252
1362 /* 1253 /*
1363 * Check and set flags if this segment has a left neighbor. 1254 * Check and set flags if this segment has a left neighbor.
1364 * Don't set contiguous if the combined extent would be too large. 1255 * Don't set contiguous if the combined extent would be too large.
1365 */ 1256 */
1366 if (STATE_SET_TEST(LEFT_VALID, idx > 0)) { 1257 if (idx > 0) {
1258 state |= BMAP_LEFT_VALID;
1367 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &LEFT); 1259 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &LEFT);
1368 STATE_SET(LEFT_DELAY, isnullstartblock(LEFT.br_startblock)); 1260
1261 if (isnullstartblock(LEFT.br_startblock))
1262 state |= BMAP_LEFT_DELAY;
1369 } 1263 }
1370 STATE_SET(LEFT_CONTIG, 1264
1371 STATE_TEST(LEFT_VALID) && !STATE_TEST(LEFT_DELAY) && 1265 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
1372 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff && 1266 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
1373 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock && 1267 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
1374 LEFT.br_state == newext && 1268 LEFT.br_state == newext &&
1375 LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN); 1269 LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
1270 state |= BMAP_LEFT_CONTIG;
1271
1376 /* 1272 /*
1377 * Check and set flags if this segment has a right neighbor. 1273 * Check and set flags if this segment has a right neighbor.
1378 * Don't set contiguous if the combined extent would be too large. 1274 * Don't set contiguous if the combined extent would be too large.
1379 * Also check for all-three-contiguous being too large. 1275 * Also check for all-three-contiguous being too large.
1380 */ 1276 */
1381 if (STATE_SET_TEST(RIGHT_VALID, 1277 if (idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) {
1382 idx < 1278 state |= BMAP_RIGHT_VALID;
1383 ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1)) {
1384 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx + 1), &RIGHT); 1279 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx + 1), &RIGHT);
1385 STATE_SET(RIGHT_DELAY, isnullstartblock(RIGHT.br_startblock)); 1280 if (isnullstartblock(RIGHT.br_startblock))
1281 state |= BMAP_RIGHT_DELAY;
1386 } 1282 }
1387 STATE_SET(RIGHT_CONTIG, 1283
1388 STATE_TEST(RIGHT_VALID) && !STATE_TEST(RIGHT_DELAY) && 1284 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
1389 new_endoff == RIGHT.br_startoff && 1285 new_endoff == RIGHT.br_startoff &&
1390 new->br_startblock + new->br_blockcount == 1286 new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
1391 RIGHT.br_startblock && 1287 newext == RIGHT.br_state &&
1392 newext == RIGHT.br_state && 1288 new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
1393 new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN && 1289 ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
1394 ((state & MASK3(LEFT_CONTIG, LEFT_FILLING, RIGHT_FILLING)) != 1290 BMAP_RIGHT_FILLING)) !=
1395 MASK3(LEFT_CONTIG, LEFT_FILLING, RIGHT_FILLING) || 1291 (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
1396 LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount 1292 BMAP_RIGHT_FILLING) ||
1397 <= MAXEXTLEN)); 1293 LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
1294 <= MAXEXTLEN))
1295 state |= BMAP_RIGHT_CONTIG;
1296
1398 /* 1297 /*
1399 * Switch out based on the FILLING and CONTIG state bits. 1298 * Switch out based on the FILLING and CONTIG state bits.
1400 */ 1299 */
1401 switch (SWITCH_STATE) { 1300 switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
1402 1301 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
1403 case MASK4(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG): 1302 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
1303 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1404 /* 1304 /*
1405 * Setting all of a previous oldext extent to newext. 1305 * Setting all of a previous oldext extent to newext.
1406 * The left and right neighbors are both contiguous with new. 1306 * The left and right neighbors are both contiguous with new.
1407 */ 1307 */
1408 XFS_BMAP_TRACE_PRE_UPDATE("LF|RF|LC|RC", ip, idx - 1, 1308 trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
1409 XFS_DATA_FORK);
1410 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), 1309 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
1411 LEFT.br_blockcount + PREV.br_blockcount + 1310 LEFT.br_blockcount + PREV.br_blockcount +
1412 RIGHT.br_blockcount); 1311 RIGHT.br_blockcount);
1413 XFS_BMAP_TRACE_POST_UPDATE("LF|RF|LC|RC", ip, idx - 1, 1312 trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
1414 XFS_DATA_FORK); 1313
1415 XFS_BMAP_TRACE_DELETE("LF|RF|LC|RC", ip, idx, 2, XFS_DATA_FORK); 1314 xfs_iext_remove(ip, idx, 2, state);
1416 xfs_iext_remove(ifp, idx, 2);
1417 ip->i_df.if_lastex = idx - 1; 1315 ip->i_df.if_lastex = idx - 1;
1418 ip->i_d.di_nextents -= 2; 1316 ip->i_d.di_nextents -= 2;
1419 if (cur == NULL) 1317 if (cur == NULL)
@@ -1450,20 +1348,18 @@ xfs_bmap_add_extent_unwritten_real(
1450 RIGHT.br_blockcount; 1348 RIGHT.br_blockcount;
1451 break; 1349 break;
1452 1350
1453 case MASK3(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG): 1351 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
1454 /* 1352 /*
1455 * Setting all of a previous oldext extent to newext. 1353 * Setting all of a previous oldext extent to newext.
1456 * The left neighbor is contiguous, the right is not. 1354 * The left neighbor is contiguous, the right is not.
1457 */ 1355 */
1458 XFS_BMAP_TRACE_PRE_UPDATE("LF|RF|LC", ip, idx - 1, 1356 trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
1459 XFS_DATA_FORK);
1460 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), 1357 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
1461 LEFT.br_blockcount + PREV.br_blockcount); 1358 LEFT.br_blockcount + PREV.br_blockcount);
1462 XFS_BMAP_TRACE_POST_UPDATE("LF|RF|LC", ip, idx - 1, 1359 trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
1463 XFS_DATA_FORK); 1360
1464 ip->i_df.if_lastex = idx - 1; 1361 ip->i_df.if_lastex = idx - 1;
1465 XFS_BMAP_TRACE_DELETE("LF|RF|LC", ip, idx, 1, XFS_DATA_FORK); 1362 xfs_iext_remove(ip, idx, 1, state);
1466 xfs_iext_remove(ifp, idx, 1);
1467 ip->i_d.di_nextents--; 1363 ip->i_d.di_nextents--;
1468 if (cur == NULL) 1364 if (cur == NULL)
1469 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 1365 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
@@ -1492,21 +1388,18 @@ xfs_bmap_add_extent_unwritten_real(
1492 PREV.br_blockcount; 1388 PREV.br_blockcount;
1493 break; 1389 break;
1494 1390
1495 case MASK3(LEFT_FILLING, RIGHT_FILLING, RIGHT_CONTIG): 1391 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1496 /* 1392 /*
1497 * Setting all of a previous oldext extent to newext. 1393 * Setting all of a previous oldext extent to newext.
1498 * The right neighbor is contiguous, the left is not. 1394 * The right neighbor is contiguous, the left is not.
1499 */ 1395 */
1500 XFS_BMAP_TRACE_PRE_UPDATE("LF|RF|RC", ip, idx, 1396 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
1501 XFS_DATA_FORK);
1502 xfs_bmbt_set_blockcount(ep, 1397 xfs_bmbt_set_blockcount(ep,
1503 PREV.br_blockcount + RIGHT.br_blockcount); 1398 PREV.br_blockcount + RIGHT.br_blockcount);
1504 xfs_bmbt_set_state(ep, newext); 1399 xfs_bmbt_set_state(ep, newext);
1505 XFS_BMAP_TRACE_POST_UPDATE("LF|RF|RC", ip, idx, 1400 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
1506 XFS_DATA_FORK);
1507 ip->i_df.if_lastex = idx; 1401 ip->i_df.if_lastex = idx;
1508 XFS_BMAP_TRACE_DELETE("LF|RF|RC", ip, idx + 1, 1, XFS_DATA_FORK); 1402 xfs_iext_remove(ip, idx + 1, 1, state);
1509 xfs_iext_remove(ifp, idx + 1, 1);
1510 ip->i_d.di_nextents--; 1403 ip->i_d.di_nextents--;
1511 if (cur == NULL) 1404 if (cur == NULL)
1512 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 1405 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
@@ -1535,17 +1428,16 @@ xfs_bmap_add_extent_unwritten_real(
1535 RIGHT.br_blockcount; 1428 RIGHT.br_blockcount;
1536 break; 1429 break;
1537 1430
1538 case MASK2(LEFT_FILLING, RIGHT_FILLING): 1431 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
1539 /* 1432 /*
1540 * Setting all of a previous oldext extent to newext. 1433 * Setting all of a previous oldext extent to newext.
1541 * Neither the left nor right neighbors are contiguous with 1434 * Neither the left nor right neighbors are contiguous with
1542 * the new one. 1435 * the new one.
1543 */ 1436 */
1544 XFS_BMAP_TRACE_PRE_UPDATE("LF|RF", ip, idx, 1437 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
1545 XFS_DATA_FORK);
1546 xfs_bmbt_set_state(ep, newext); 1438 xfs_bmbt_set_state(ep, newext);
1547 XFS_BMAP_TRACE_POST_UPDATE("LF|RF", ip, idx, 1439 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
1548 XFS_DATA_FORK); 1440
1549 ip->i_df.if_lastex = idx; 1441 ip->i_df.if_lastex = idx;
1550 if (cur == NULL) 1442 if (cur == NULL)
1551 rval = XFS_ILOG_DEXT; 1443 rval = XFS_ILOG_DEXT;
@@ -1566,27 +1458,25 @@ xfs_bmap_add_extent_unwritten_real(
1566 temp2 = new->br_blockcount; 1458 temp2 = new->br_blockcount;
1567 break; 1459 break;
1568 1460
1569 case MASK2(LEFT_FILLING, LEFT_CONTIG): 1461 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
1570 /* 1462 /*
1571 * Setting the first part of a previous oldext extent to newext. 1463 * Setting the first part of a previous oldext extent to newext.
1572 * The left neighbor is contiguous. 1464 * The left neighbor is contiguous.
1573 */ 1465 */
1574 XFS_BMAP_TRACE_PRE_UPDATE("LF|LC", ip, idx - 1, 1466 trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
1575 XFS_DATA_FORK);
1576 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), 1467 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
1577 LEFT.br_blockcount + new->br_blockcount); 1468 LEFT.br_blockcount + new->br_blockcount);
1578 xfs_bmbt_set_startoff(ep, 1469 xfs_bmbt_set_startoff(ep,
1579 PREV.br_startoff + new->br_blockcount); 1470 PREV.br_startoff + new->br_blockcount);
1580 XFS_BMAP_TRACE_POST_UPDATE("LF|LC", ip, idx - 1, 1471 trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
1581 XFS_DATA_FORK); 1472
1582 XFS_BMAP_TRACE_PRE_UPDATE("LF|LC", ip, idx, 1473 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
1583 XFS_DATA_FORK);
1584 xfs_bmbt_set_startblock(ep, 1474 xfs_bmbt_set_startblock(ep,
1585 new->br_startblock + new->br_blockcount); 1475 new->br_startblock + new->br_blockcount);
1586 xfs_bmbt_set_blockcount(ep, 1476 xfs_bmbt_set_blockcount(ep,
1587 PREV.br_blockcount - new->br_blockcount); 1477 PREV.br_blockcount - new->br_blockcount);
1588 XFS_BMAP_TRACE_POST_UPDATE("LF|LC", ip, idx, 1478 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
1589 XFS_DATA_FORK); 1479
1590 ip->i_df.if_lastex = idx - 1; 1480 ip->i_df.if_lastex = idx - 1;
1591 if (cur == NULL) 1481 if (cur == NULL)
1592 rval = XFS_ILOG_DEXT; 1482 rval = XFS_ILOG_DEXT;
@@ -1617,22 +1507,21 @@ xfs_bmap_add_extent_unwritten_real(
1617 PREV.br_blockcount; 1507 PREV.br_blockcount;
1618 break; 1508 break;
1619 1509
1620 case MASK(LEFT_FILLING): 1510 case BMAP_LEFT_FILLING:
1621 /* 1511 /*
1622 * Setting the first part of a previous oldext extent to newext. 1512 * Setting the first part of a previous oldext extent to newext.
1623 * The left neighbor is not contiguous. 1513 * The left neighbor is not contiguous.
1624 */ 1514 */
1625 XFS_BMAP_TRACE_PRE_UPDATE("LF", ip, idx, XFS_DATA_FORK); 1515 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
1626 ASSERT(ep && xfs_bmbt_get_state(ep) == oldext); 1516 ASSERT(ep && xfs_bmbt_get_state(ep) == oldext);
1627 xfs_bmbt_set_startoff(ep, new_endoff); 1517 xfs_bmbt_set_startoff(ep, new_endoff);
1628 xfs_bmbt_set_blockcount(ep, 1518 xfs_bmbt_set_blockcount(ep,
1629 PREV.br_blockcount - new->br_blockcount); 1519 PREV.br_blockcount - new->br_blockcount);
1630 xfs_bmbt_set_startblock(ep, 1520 xfs_bmbt_set_startblock(ep,
1631 new->br_startblock + new->br_blockcount); 1521 new->br_startblock + new->br_blockcount);
1632 XFS_BMAP_TRACE_POST_UPDATE("LF", ip, idx, XFS_DATA_FORK); 1522 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
1633 XFS_BMAP_TRACE_INSERT("LF", ip, idx, 1, new, NULL, 1523
1634 XFS_DATA_FORK); 1524 xfs_iext_insert(ip, idx, 1, new, state);
1635 xfs_iext_insert(ifp, idx, 1, new);
1636 ip->i_df.if_lastex = idx; 1525 ip->i_df.if_lastex = idx;
1637 ip->i_d.di_nextents++; 1526 ip->i_d.di_nextents++;
1638 if (cur == NULL) 1527 if (cur == NULL)
@@ -1660,24 +1549,21 @@ xfs_bmap_add_extent_unwritten_real(
1660 temp2 = PREV.br_blockcount; 1549 temp2 = PREV.br_blockcount;
1661 break; 1550 break;
1662 1551
1663 case MASK2(RIGHT_FILLING, RIGHT_CONTIG): 1552 case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1664 /* 1553 /*
1665 * Setting the last part of a previous oldext extent to newext. 1554 * Setting the last part of a previous oldext extent to newext.
1666 * The right neighbor is contiguous with the new allocation. 1555 * The right neighbor is contiguous with the new allocation.
1667 */ 1556 */
1668 XFS_BMAP_TRACE_PRE_UPDATE("RF|RC", ip, idx, 1557 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
1669 XFS_DATA_FORK); 1558 trace_xfs_bmap_pre_update(ip, idx + 1, state, _THIS_IP_);
1670 XFS_BMAP_TRACE_PRE_UPDATE("RF|RC", ip, idx + 1,
1671 XFS_DATA_FORK);
1672 xfs_bmbt_set_blockcount(ep, 1559 xfs_bmbt_set_blockcount(ep,
1673 PREV.br_blockcount - new->br_blockcount); 1560 PREV.br_blockcount - new->br_blockcount);
1674 XFS_BMAP_TRACE_POST_UPDATE("RF|RC", ip, idx, 1561 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
1675 XFS_DATA_FORK);
1676 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, idx + 1), 1562 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, idx + 1),
1677 new->br_startoff, new->br_startblock, 1563 new->br_startoff, new->br_startblock,
1678 new->br_blockcount + RIGHT.br_blockcount, newext); 1564 new->br_blockcount + RIGHT.br_blockcount, newext);
1679 XFS_BMAP_TRACE_POST_UPDATE("RF|RC", ip, idx + 1, 1565 trace_xfs_bmap_post_update(ip, idx + 1, state, _THIS_IP_);
1680 XFS_DATA_FORK); 1566
1681 ip->i_df.if_lastex = idx + 1; 1567 ip->i_df.if_lastex = idx + 1;
1682 if (cur == NULL) 1568 if (cur == NULL)
1683 rval = XFS_ILOG_DEXT; 1569 rval = XFS_ILOG_DEXT;
@@ -1707,18 +1593,17 @@ xfs_bmap_add_extent_unwritten_real(
1707 RIGHT.br_blockcount; 1593 RIGHT.br_blockcount;
1708 break; 1594 break;
1709 1595
1710 case MASK(RIGHT_FILLING): 1596 case BMAP_RIGHT_FILLING:
1711 /* 1597 /*
1712 * Setting the last part of a previous oldext extent to newext. 1598 * Setting the last part of a previous oldext extent to newext.
1713 * The right neighbor is not contiguous. 1599 * The right neighbor is not contiguous.
1714 */ 1600 */
1715 XFS_BMAP_TRACE_PRE_UPDATE("RF", ip, idx, XFS_DATA_FORK); 1601 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
1716 xfs_bmbt_set_blockcount(ep, 1602 xfs_bmbt_set_blockcount(ep,
1717 PREV.br_blockcount - new->br_blockcount); 1603 PREV.br_blockcount - new->br_blockcount);
1718 XFS_BMAP_TRACE_POST_UPDATE("RF", ip, idx, XFS_DATA_FORK); 1604 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
1719 XFS_BMAP_TRACE_INSERT("RF", ip, idx + 1, 1, new, NULL, 1605
1720 XFS_DATA_FORK); 1606 xfs_iext_insert(ip, idx + 1, 1, new, state);
1721 xfs_iext_insert(ifp, idx + 1, 1, new);
1722 ip->i_df.if_lastex = idx + 1; 1607 ip->i_df.if_lastex = idx + 1;
1723 ip->i_d.di_nextents++; 1608 ip->i_d.di_nextents++;
1724 if (cur == NULL) 1609 if (cur == NULL)
@@ -1756,19 +1641,18 @@ xfs_bmap_add_extent_unwritten_real(
1756 * newext. Contiguity is impossible here. 1641 * newext. Contiguity is impossible here.
1757 * One extent becomes three extents. 1642 * One extent becomes three extents.
1758 */ 1643 */
1759 XFS_BMAP_TRACE_PRE_UPDATE("0", ip, idx, XFS_DATA_FORK); 1644 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
1760 xfs_bmbt_set_blockcount(ep, 1645 xfs_bmbt_set_blockcount(ep,
1761 new->br_startoff - PREV.br_startoff); 1646 new->br_startoff - PREV.br_startoff);
1762 XFS_BMAP_TRACE_POST_UPDATE("0", ip, idx, XFS_DATA_FORK); 1647 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
1648
1763 r[0] = *new; 1649 r[0] = *new;
1764 r[1].br_startoff = new_endoff; 1650 r[1].br_startoff = new_endoff;
1765 r[1].br_blockcount = 1651 r[1].br_blockcount =
1766 PREV.br_startoff + PREV.br_blockcount - new_endoff; 1652 PREV.br_startoff + PREV.br_blockcount - new_endoff;
1767 r[1].br_startblock = new->br_startblock + new->br_blockcount; 1653 r[1].br_startblock = new->br_startblock + new->br_blockcount;
1768 r[1].br_state = oldext; 1654 r[1].br_state = oldext;
1769 XFS_BMAP_TRACE_INSERT("0", ip, idx + 1, 2, &r[0], &r[1], 1655 xfs_iext_insert(ip, idx + 1, 2, &r[0], state);
1770 XFS_DATA_FORK);
1771 xfs_iext_insert(ifp, idx + 1, 2, &r[0]);
1772 ip->i_df.if_lastex = idx + 1; 1656 ip->i_df.if_lastex = idx + 1;
1773 ip->i_d.di_nextents += 2; 1657 ip->i_d.di_nextents += 2;
1774 if (cur == NULL) 1658 if (cur == NULL)
@@ -1813,13 +1697,13 @@ xfs_bmap_add_extent_unwritten_real(
1813 temp2 = PREV.br_blockcount; 1697 temp2 = PREV.br_blockcount;
1814 break; 1698 break;
1815 1699
1816 case MASK3(LEFT_FILLING, LEFT_CONTIG, RIGHT_CONTIG): 1700 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1817 case MASK3(RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG): 1701 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1818 case MASK2(LEFT_FILLING, RIGHT_CONTIG): 1702 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
1819 case MASK2(RIGHT_FILLING, LEFT_CONTIG): 1703 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
1820 case MASK2(LEFT_CONTIG, RIGHT_CONTIG): 1704 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1821 case MASK(LEFT_CONTIG): 1705 case BMAP_LEFT_CONTIG:
1822 case MASK(RIGHT_CONTIG): 1706 case BMAP_RIGHT_CONTIG:
1823 /* 1707 /*
1824 * These cases are all impossible. 1708 * These cases are all impossible.
1825 */ 1709 */
@@ -1839,14 +1723,6 @@ done:
1839#undef LEFT 1723#undef LEFT
1840#undef RIGHT 1724#undef RIGHT
1841#undef PREV 1725#undef PREV
1842#undef MASK
1843#undef MASK2
1844#undef MASK3
1845#undef MASK4
1846#undef STATE_SET
1847#undef STATE_TEST
1848#undef STATE_SET_TEST
1849#undef SWITCH_STATE
1850} 1726}
1851 1727
1852/* 1728/*
@@ -1872,62 +1748,57 @@ xfs_bmap_add_extent_hole_delay(
1872 int state; /* state bits, accessed thru macros */ 1748 int state; /* state bits, accessed thru macros */
1873 xfs_filblks_t temp=0; /* temp for indirect calculations */ 1749 xfs_filblks_t temp=0; /* temp for indirect calculations */
1874 xfs_filblks_t temp2=0; 1750 xfs_filblks_t temp2=0;
1875 enum { /* bit number definitions for state */
1876 LEFT_CONTIG, RIGHT_CONTIG,
1877 LEFT_DELAY, RIGHT_DELAY,
1878 LEFT_VALID, RIGHT_VALID
1879 };
1880
1881#define MASK(b) (1 << (b))
1882#define MASK2(a,b) (MASK(a) | MASK(b))
1883#define STATE_SET(b,v) ((v) ? (state |= MASK(b)) : (state &= ~MASK(b)))
1884#define STATE_TEST(b) (state & MASK(b))
1885#define STATE_SET_TEST(b,v) ((v) ? ((state |= MASK(b)), 1) : \
1886 ((state &= ~MASK(b)), 0))
1887#define SWITCH_STATE (state & MASK2(LEFT_CONTIG, RIGHT_CONTIG))
1888 1751
1889 ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); 1752 ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
1890 ep = xfs_iext_get_ext(ifp, idx); 1753 ep = xfs_iext_get_ext(ifp, idx);
1891 state = 0; 1754 state = 0;
1892 ASSERT(isnullstartblock(new->br_startblock)); 1755 ASSERT(isnullstartblock(new->br_startblock));
1756
1893 /* 1757 /*
1894 * Check and set flags if this segment has a left neighbor 1758 * Check and set flags if this segment has a left neighbor
1895 */ 1759 */
1896 if (STATE_SET_TEST(LEFT_VALID, idx > 0)) { 1760 if (idx > 0) {
1761 state |= BMAP_LEFT_VALID;
1897 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &left); 1762 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &left);
1898 STATE_SET(LEFT_DELAY, isnullstartblock(left.br_startblock)); 1763
1764 if (isnullstartblock(left.br_startblock))
1765 state |= BMAP_LEFT_DELAY;
1899 } 1766 }
1767
1900 /* 1768 /*
1901 * Check and set flags if the current (right) segment exists. 1769 * Check and set flags if the current (right) segment exists.
1902 * If it doesn't exist, we're converting the hole at end-of-file. 1770 * If it doesn't exist, we're converting the hole at end-of-file.
1903 */ 1771 */
1904 if (STATE_SET_TEST(RIGHT_VALID, 1772 if (idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) {
1905 idx < 1773 state |= BMAP_RIGHT_VALID;
1906 ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t))) {
1907 xfs_bmbt_get_all(ep, &right); 1774 xfs_bmbt_get_all(ep, &right);
1908 STATE_SET(RIGHT_DELAY, isnullstartblock(right.br_startblock)); 1775
1776 if (isnullstartblock(right.br_startblock))
1777 state |= BMAP_RIGHT_DELAY;
1909 } 1778 }
1779
1910 /* 1780 /*
1911 * Set contiguity flags on the left and right neighbors. 1781 * Set contiguity flags on the left and right neighbors.
1912 * Don't let extents get too large, even if the pieces are contiguous. 1782 * Don't let extents get too large, even if the pieces are contiguous.
1913 */ 1783 */
1914 STATE_SET(LEFT_CONTIG, 1784 if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) &&
1915 STATE_TEST(LEFT_VALID) && STATE_TEST(LEFT_DELAY) && 1785 left.br_startoff + left.br_blockcount == new->br_startoff &&
1916 left.br_startoff + left.br_blockcount == new->br_startoff && 1786 left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
1917 left.br_blockcount + new->br_blockcount <= MAXEXTLEN); 1787 state |= BMAP_LEFT_CONTIG;
1918 STATE_SET(RIGHT_CONTIG, 1788
1919 STATE_TEST(RIGHT_VALID) && STATE_TEST(RIGHT_DELAY) && 1789 if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) &&
1920 new->br_startoff + new->br_blockcount == right.br_startoff && 1790 new->br_startoff + new->br_blockcount == right.br_startoff &&
1921 new->br_blockcount + right.br_blockcount <= MAXEXTLEN && 1791 new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
1922 (!STATE_TEST(LEFT_CONTIG) || 1792 (!(state & BMAP_LEFT_CONTIG) ||
1923 (left.br_blockcount + new->br_blockcount + 1793 (left.br_blockcount + new->br_blockcount +
1924 right.br_blockcount <= MAXEXTLEN))); 1794 right.br_blockcount <= MAXEXTLEN)))
1795 state |= BMAP_RIGHT_CONTIG;
1796
1925 /* 1797 /*
1926 * Switch out based on the contiguity flags. 1798 * Switch out based on the contiguity flags.
1927 */ 1799 */
1928 switch (SWITCH_STATE) { 1800 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
1929 1801 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1930 case MASK2(LEFT_CONTIG, RIGHT_CONTIG):
1931 /* 1802 /*
1932 * New allocation is contiguous with delayed allocations 1803 * New allocation is contiguous with delayed allocations
1933 * on the left and on the right. 1804 * on the left and on the right.
@@ -1935,8 +1806,8 @@ xfs_bmap_add_extent_hole_delay(
1935 */ 1806 */
1936 temp = left.br_blockcount + new->br_blockcount + 1807 temp = left.br_blockcount + new->br_blockcount +
1937 right.br_blockcount; 1808 right.br_blockcount;
1938 XFS_BMAP_TRACE_PRE_UPDATE("LC|RC", ip, idx - 1, 1809
1939 XFS_DATA_FORK); 1810 trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
1940 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), temp); 1811 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), temp);
1941 oldlen = startblockval(left.br_startblock) + 1812 oldlen = startblockval(left.br_startblock) +
1942 startblockval(new->br_startblock) + 1813 startblockval(new->br_startblock) +
@@ -1944,53 +1815,52 @@ xfs_bmap_add_extent_hole_delay(
1944 newlen = xfs_bmap_worst_indlen(ip, temp); 1815 newlen = xfs_bmap_worst_indlen(ip, temp);
1945 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx - 1), 1816 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx - 1),
1946 nullstartblock((int)newlen)); 1817 nullstartblock((int)newlen));
1947 XFS_BMAP_TRACE_POST_UPDATE("LC|RC", ip, idx - 1, 1818 trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
1948 XFS_DATA_FORK); 1819
1949 XFS_BMAP_TRACE_DELETE("LC|RC", ip, idx, 1, XFS_DATA_FORK); 1820 xfs_iext_remove(ip, idx, 1, state);
1950 xfs_iext_remove(ifp, idx, 1);
1951 ip->i_df.if_lastex = idx - 1; 1821 ip->i_df.if_lastex = idx - 1;
1952 /* DELTA: Two in-core extents were replaced by one. */ 1822 /* DELTA: Two in-core extents were replaced by one. */
1953 temp2 = temp; 1823 temp2 = temp;
1954 temp = left.br_startoff; 1824 temp = left.br_startoff;
1955 break; 1825 break;
1956 1826
1957 case MASK(LEFT_CONTIG): 1827 case BMAP_LEFT_CONTIG:
1958 /* 1828 /*
1959 * New allocation is contiguous with a delayed allocation 1829 * New allocation is contiguous with a delayed allocation
1960 * on the left. 1830 * on the left.
1961 * Merge the new allocation with the left neighbor. 1831 * Merge the new allocation with the left neighbor.
1962 */ 1832 */
1963 temp = left.br_blockcount + new->br_blockcount; 1833 temp = left.br_blockcount + new->br_blockcount;
1964 XFS_BMAP_TRACE_PRE_UPDATE("LC", ip, idx - 1, 1834 trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
1965 XFS_DATA_FORK);
1966 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), temp); 1835 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), temp);
1967 oldlen = startblockval(left.br_startblock) + 1836 oldlen = startblockval(left.br_startblock) +
1968 startblockval(new->br_startblock); 1837 startblockval(new->br_startblock);
1969 newlen = xfs_bmap_worst_indlen(ip, temp); 1838 newlen = xfs_bmap_worst_indlen(ip, temp);
1970 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx - 1), 1839 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx - 1),
1971 nullstartblock((int)newlen)); 1840 nullstartblock((int)newlen));
1972 XFS_BMAP_TRACE_POST_UPDATE("LC", ip, idx - 1, 1841 trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
1973 XFS_DATA_FORK); 1842
1974 ip->i_df.if_lastex = idx - 1; 1843 ip->i_df.if_lastex = idx - 1;
1975 /* DELTA: One in-core extent grew into a hole. */ 1844 /* DELTA: One in-core extent grew into a hole. */
1976 temp2 = temp; 1845 temp2 = temp;
1977 temp = left.br_startoff; 1846 temp = left.br_startoff;
1978 break; 1847 break;
1979 1848
1980 case MASK(RIGHT_CONTIG): 1849 case BMAP_RIGHT_CONTIG:
1981 /* 1850 /*
1982 * New allocation is contiguous with a delayed allocation 1851 * New allocation is contiguous with a delayed allocation
1983 * on the right. 1852 * on the right.
1984 * Merge the new allocation with the right neighbor. 1853 * Merge the new allocation with the right neighbor.
1985 */ 1854 */
1986 XFS_BMAP_TRACE_PRE_UPDATE("RC", ip, idx, XFS_DATA_FORK); 1855 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
1987 temp = new->br_blockcount + right.br_blockcount; 1856 temp = new->br_blockcount + right.br_blockcount;
1988 oldlen = startblockval(new->br_startblock) + 1857 oldlen = startblockval(new->br_startblock) +
1989 startblockval(right.br_startblock); 1858 startblockval(right.br_startblock);
1990 newlen = xfs_bmap_worst_indlen(ip, temp); 1859 newlen = xfs_bmap_worst_indlen(ip, temp);
1991 xfs_bmbt_set_allf(ep, new->br_startoff, 1860 xfs_bmbt_set_allf(ep, new->br_startoff,
1992 nullstartblock((int)newlen), temp, right.br_state); 1861 nullstartblock((int)newlen), temp, right.br_state);
1993 XFS_BMAP_TRACE_POST_UPDATE("RC", ip, idx, XFS_DATA_FORK); 1862 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
1863
1994 ip->i_df.if_lastex = idx; 1864 ip->i_df.if_lastex = idx;
1995 /* DELTA: One in-core extent grew into a hole. */ 1865 /* DELTA: One in-core extent grew into a hole. */
1996 temp2 = temp; 1866 temp2 = temp;
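The delayed-allocation merge cases above re-derive the block reservation for the combined run: the old per-extent reservations (read with startblockval()) are summed into oldlen, a fresh worst case is computed for the merged length, and any surplus is later returned through the incore free-block counters. A toy version of that recombination, with a stand-in bound function:

/*
 * Toy recombination of delayed-allocation reservations after a merge.
 * demo_worst_case() is the same kind of illustrative bound sketched
 * earlier; the real code uses xfs_bmap_worst_indlen().
 */
static unsigned long long demo_worst_case(unsigned long long len)
{
	const unsigned int fanout = 125;	/* stand-in fanout */
	unsigned long long blocks = 0;

	while (len > 1) {
		len = (len + fanout - 1) / fanout;
		blocks += len;
	}
	return blocks;
}

static long long demo_reservation_surplus(unsigned long long left_len,
					  unsigned long long left_res,
					  unsigned long long new_len,
					  unsigned long long new_res)
{
	unsigned long long oldlen = left_res + new_res;
	unsigned long long newlen = demo_worst_case(left_len + new_len);

	/* A positive result is reservation that can be given back. */
	return (long long)oldlen - (long long)newlen;
}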
@@ -2004,9 +1874,7 @@ xfs_bmap_add_extent_hole_delay(
2004 * Insert a new entry. 1874 * Insert a new entry.
2005 */ 1875 */
2006 oldlen = newlen = 0; 1876 oldlen = newlen = 0;
2007 XFS_BMAP_TRACE_INSERT("0", ip, idx, 1, new, NULL, 1877 xfs_iext_insert(ip, idx, 1, new, state);
2008 XFS_DATA_FORK);
2009 xfs_iext_insert(ifp, idx, 1, new);
2010 ip->i_df.if_lastex = idx; 1878 ip->i_df.if_lastex = idx;
2011 /* DELTA: A new in-core extent was added in a hole. */ 1879 /* DELTA: A new in-core extent was added in a hole. */
2012 temp2 = new->br_blockcount; 1880 temp2 = new->br_blockcount;
@@ -2030,12 +1898,6 @@ xfs_bmap_add_extent_hole_delay(
2030 } 1898 }
2031 *logflagsp = 0; 1899 *logflagsp = 0;
2032 return 0; 1900 return 0;
2033#undef MASK
2034#undef MASK2
2035#undef STATE_SET
2036#undef STATE_TEST
2037#undef STATE_SET_TEST
2038#undef SWITCH_STATE
2039} 1901}
2040 1902
2041/* 1903/*
@@ -2062,83 +1924,75 @@ xfs_bmap_add_extent_hole_real(
2062 int state; /* state bits, accessed thru macros */ 1924 int state; /* state bits, accessed thru macros */
2063 xfs_filblks_t temp=0; 1925 xfs_filblks_t temp=0;
2064 xfs_filblks_t temp2=0; 1926 xfs_filblks_t temp2=0;
2065 enum { /* bit number definitions for state */
2066 LEFT_CONTIG, RIGHT_CONTIG,
2067 LEFT_DELAY, RIGHT_DELAY,
2068 LEFT_VALID, RIGHT_VALID
2069 };
2070
2071#define MASK(b) (1 << (b))
2072#define MASK2(a,b) (MASK(a) | MASK(b))
2073#define STATE_SET(b,v) ((v) ? (state |= MASK(b)) : (state &= ~MASK(b)))
2074#define STATE_TEST(b) (state & MASK(b))
2075#define STATE_SET_TEST(b,v) ((v) ? ((state |= MASK(b)), 1) : \
2076 ((state &= ~MASK(b)), 0))
2077#define SWITCH_STATE (state & MASK2(LEFT_CONTIG, RIGHT_CONTIG))
2078 1927
2079 ifp = XFS_IFORK_PTR(ip, whichfork); 1928 ifp = XFS_IFORK_PTR(ip, whichfork);
2080 ASSERT(idx <= ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)); 1929 ASSERT(idx <= ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t));
2081 ep = xfs_iext_get_ext(ifp, idx); 1930 ep = xfs_iext_get_ext(ifp, idx);
2082 state = 0; 1931 state = 0;
1932
1933 if (whichfork == XFS_ATTR_FORK)
1934 state |= BMAP_ATTRFORK;
1935
2083 /* 1936 /*
2084 * Check and set flags if this segment has a left neighbor. 1937 * Check and set flags if this segment has a left neighbor.
2085 */ 1938 */
2086 if (STATE_SET_TEST(LEFT_VALID, idx > 0)) { 1939 if (idx > 0) {
1940 state |= BMAP_LEFT_VALID;
2087 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &left); 1941 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &left);
2088 STATE_SET(LEFT_DELAY, isnullstartblock(left.br_startblock)); 1942 if (isnullstartblock(left.br_startblock))
1943 state |= BMAP_LEFT_DELAY;
2089 } 1944 }
1945
2090 /* 1946 /*
2091 * Check and set flags if this segment has a current value. 1947 * Check and set flags if this segment has a current value.
2092 * Not true if we're inserting into the "hole" at eof. 1948 * Not true if we're inserting into the "hole" at eof.
2093 */ 1949 */
2094 if (STATE_SET_TEST(RIGHT_VALID, 1950 if (idx < ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) {
2095 idx < 1951 state |= BMAP_RIGHT_VALID;
2096 ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))) {
2097 xfs_bmbt_get_all(ep, &right); 1952 xfs_bmbt_get_all(ep, &right);
2098 STATE_SET(RIGHT_DELAY, isnullstartblock(right.br_startblock)); 1953 if (isnullstartblock(right.br_startblock))
1954 state |= BMAP_RIGHT_DELAY;
2099 } 1955 }
1956
2100 /* 1957 /*
2101 * We're inserting a real allocation between "left" and "right". 1958 * We're inserting a real allocation between "left" and "right".
2102 * Set the contiguity flags. Don't let extents get too large. 1959 * Set the contiguity flags. Don't let extents get too large.
2103 */ 1960 */
2104 STATE_SET(LEFT_CONTIG, 1961 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
2105 STATE_TEST(LEFT_VALID) && !STATE_TEST(LEFT_DELAY) && 1962 left.br_startoff + left.br_blockcount == new->br_startoff &&
2106 left.br_startoff + left.br_blockcount == new->br_startoff && 1963 left.br_startblock + left.br_blockcount == new->br_startblock &&
2107 left.br_startblock + left.br_blockcount == new->br_startblock && 1964 left.br_state == new->br_state &&
2108 left.br_state == new->br_state && 1965 left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
2109 left.br_blockcount + new->br_blockcount <= MAXEXTLEN); 1966 state |= BMAP_LEFT_CONTIG;
2110 STATE_SET(RIGHT_CONTIG, 1967
2111 STATE_TEST(RIGHT_VALID) && !STATE_TEST(RIGHT_DELAY) && 1968 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
2112 new->br_startoff + new->br_blockcount == right.br_startoff && 1969 new->br_startoff + new->br_blockcount == right.br_startoff &&
2113 new->br_startblock + new->br_blockcount == 1970 new->br_startblock + new->br_blockcount == right.br_startblock &&
2114 right.br_startblock && 1971 new->br_state == right.br_state &&
2115 new->br_state == right.br_state && 1972 new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
2116 new->br_blockcount + right.br_blockcount <= MAXEXTLEN && 1973 (!(state & BMAP_LEFT_CONTIG) ||
2117 (!STATE_TEST(LEFT_CONTIG) || 1974 left.br_blockcount + new->br_blockcount +
2118 left.br_blockcount + new->br_blockcount + 1975 right.br_blockcount <= MAXEXTLEN))
2119 right.br_blockcount <= MAXEXTLEN)); 1976 state |= BMAP_RIGHT_CONTIG;
2120 1977
2121 error = 0; 1978 error = 0;
2122 /* 1979 /*
2123 * Select which case we're in here, and implement it. 1980 * Select which case we're in here, and implement it.
2124 */ 1981 */
2125 switch (SWITCH_STATE) { 1982 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
2126 1983 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2127 case MASK2(LEFT_CONTIG, RIGHT_CONTIG):
2128 /* 1984 /*
2129 * New allocation is contiguous with real allocations on the 1985 * New allocation is contiguous with real allocations on the
2130 * left and on the right. 1986 * left and on the right.
2131 * Merge all three into a single extent record. 1987 * Merge all three into a single extent record.
2132 */ 1988 */
2133 XFS_BMAP_TRACE_PRE_UPDATE("LC|RC", ip, idx - 1, 1989 trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
2134 whichfork);
2135 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), 1990 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
2136 left.br_blockcount + new->br_blockcount + 1991 left.br_blockcount + new->br_blockcount +
2137 right.br_blockcount); 1992 right.br_blockcount);
2138 XFS_BMAP_TRACE_POST_UPDATE("LC|RC", ip, idx - 1, 1993 trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
2139 whichfork); 1994
2140 XFS_BMAP_TRACE_DELETE("LC|RC", ip, idx, 1, whichfork); 1995 xfs_iext_remove(ip, idx, 1, state);
2141 xfs_iext_remove(ifp, idx, 1);
2142 ifp->if_lastex = idx - 1; 1996 ifp->if_lastex = idx - 1;
2143 XFS_IFORK_NEXT_SET(ip, whichfork, 1997 XFS_IFORK_NEXT_SET(ip, whichfork,
2144 XFS_IFORK_NEXTENTS(ip, whichfork) - 1); 1998 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
@@ -2173,16 +2027,17 @@ xfs_bmap_add_extent_hole_real(
2173 right.br_blockcount; 2027 right.br_blockcount;
2174 break; 2028 break;
2175 2029
2176 case MASK(LEFT_CONTIG): 2030 case BMAP_LEFT_CONTIG:
2177 /* 2031 /*
2178 * New allocation is contiguous with a real allocation 2032 * New allocation is contiguous with a real allocation
2179 * on the left. 2033 * on the left.
2180 * Merge the new allocation with the left neighbor. 2034 * Merge the new allocation with the left neighbor.
2181 */ 2035 */
2182 XFS_BMAP_TRACE_PRE_UPDATE("LC", ip, idx - 1, whichfork); 2036 trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
2183 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), 2037 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
2184 left.br_blockcount + new->br_blockcount); 2038 left.br_blockcount + new->br_blockcount);
2185 XFS_BMAP_TRACE_POST_UPDATE("LC", ip, idx - 1, whichfork); 2039 trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
2040
2186 ifp->if_lastex = idx - 1; 2041 ifp->if_lastex = idx - 1;
2187 if (cur == NULL) { 2042 if (cur == NULL) {
2188 rval = xfs_ilog_fext(whichfork); 2043 rval = xfs_ilog_fext(whichfork);
@@ -2207,17 +2062,18 @@ xfs_bmap_add_extent_hole_real(
2207 new->br_blockcount; 2062 new->br_blockcount;
2208 break; 2063 break;
2209 2064
2210 case MASK(RIGHT_CONTIG): 2065 case BMAP_RIGHT_CONTIG:
2211 /* 2066 /*
2212 * New allocation is contiguous with a real allocation 2067 * New allocation is contiguous with a real allocation
2213 * on the right. 2068 * on the right.
2214 * Merge the new allocation with the right neighbor. 2069 * Merge the new allocation with the right neighbor.
2215 */ 2070 */
2216 XFS_BMAP_TRACE_PRE_UPDATE("RC", ip, idx, whichfork); 2071 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
2217 xfs_bmbt_set_allf(ep, new->br_startoff, new->br_startblock, 2072 xfs_bmbt_set_allf(ep, new->br_startoff, new->br_startblock,
2218 new->br_blockcount + right.br_blockcount, 2073 new->br_blockcount + right.br_blockcount,
2219 right.br_state); 2074 right.br_state);
2220 XFS_BMAP_TRACE_POST_UPDATE("RC", ip, idx, whichfork); 2075 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
2076
2221 ifp->if_lastex = idx; 2077 ifp->if_lastex = idx;
2222 if (cur == NULL) { 2078 if (cur == NULL) {
2223 rval = xfs_ilog_fext(whichfork); 2079 rval = xfs_ilog_fext(whichfork);
@@ -2248,8 +2104,7 @@ xfs_bmap_add_extent_hole_real(
2248 * real allocation. 2104 * real allocation.
2249 * Insert a new entry. 2105 * Insert a new entry.
2250 */ 2106 */
2251 XFS_BMAP_TRACE_INSERT("0", ip, idx, 1, new, NULL, whichfork); 2107 xfs_iext_insert(ip, idx, 1, new, state);
2252 xfs_iext_insert(ifp, idx, 1, new);
2253 ifp->if_lastex = idx; 2108 ifp->if_lastex = idx;
2254 XFS_IFORK_NEXT_SET(ip, whichfork, 2109 XFS_IFORK_NEXT_SET(ip, whichfork,
2255 XFS_IFORK_NEXTENTS(ip, whichfork) + 1); 2110 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
@@ -2283,12 +2138,6 @@ xfs_bmap_add_extent_hole_real(
2283done: 2138done:
2284 *logflagsp = rval; 2139 *logflagsp = rval;
2285 return error; 2140 return error;
2286#undef MASK
2287#undef MASK2
2288#undef STATE_SET
2289#undef STATE_TEST
2290#undef STATE_SET_TEST
2291#undef SWITCH_STATE
2292} 2141}
2293 2142
2294/* 2143/*
@@ -3115,8 +2964,13 @@ xfs_bmap_del_extent(
3115 uint qfield; /* quota field to update */ 2964 uint qfield; /* quota field to update */
3116 xfs_filblks_t temp; /* for indirect length calculations */ 2965 xfs_filblks_t temp; /* for indirect length calculations */
3117 xfs_filblks_t temp2; /* for indirect length calculations */ 2966 xfs_filblks_t temp2; /* for indirect length calculations */
2967 int state = 0;
3118 2968
3119 XFS_STATS_INC(xs_del_exlist); 2969 XFS_STATS_INC(xs_del_exlist);
2970
2971 if (whichfork == XFS_ATTR_FORK)
2972 state |= BMAP_ATTRFORK;
2973
3120 mp = ip->i_mount; 2974 mp = ip->i_mount;
3121 ifp = XFS_IFORK_PTR(ip, whichfork); 2975 ifp = XFS_IFORK_PTR(ip, whichfork);
3122 ASSERT((idx >= 0) && (idx < ifp->if_bytes / 2976 ASSERT((idx >= 0) && (idx < ifp->if_bytes /
@@ -3196,8 +3050,8 @@ xfs_bmap_del_extent(
3196 /* 3050 /*
3197 * Matches the whole extent. Delete the entry. 3051 * Matches the whole extent. Delete the entry.
3198 */ 3052 */
3199 XFS_BMAP_TRACE_DELETE("3", ip, idx, 1, whichfork); 3053 xfs_iext_remove(ip, idx, 1,
3200 xfs_iext_remove(ifp, idx, 1); 3054 whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0);
3201 ifp->if_lastex = idx; 3055 ifp->if_lastex = idx;
3202 if (delay) 3056 if (delay)
3203 break; 3057 break;
@@ -3217,7 +3071,7 @@ xfs_bmap_del_extent(
3217 /* 3071 /*
3218 * Deleting the first part of the extent. 3072 * Deleting the first part of the extent.
3219 */ 3073 */
3220 XFS_BMAP_TRACE_PRE_UPDATE("2", ip, idx, whichfork); 3074 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
3221 xfs_bmbt_set_startoff(ep, del_endoff); 3075 xfs_bmbt_set_startoff(ep, del_endoff);
3222 temp = got.br_blockcount - del->br_blockcount; 3076 temp = got.br_blockcount - del->br_blockcount;
3223 xfs_bmbt_set_blockcount(ep, temp); 3077 xfs_bmbt_set_blockcount(ep, temp);
@@ -3226,13 +3080,12 @@ xfs_bmap_del_extent(
3226 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 3080 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
3227 da_old); 3081 da_old);
3228 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); 3082 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
3229 XFS_BMAP_TRACE_POST_UPDATE("2", ip, idx, 3083 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
3230 whichfork);
3231 da_new = temp; 3084 da_new = temp;
3232 break; 3085 break;
3233 } 3086 }
3234 xfs_bmbt_set_startblock(ep, del_endblock); 3087 xfs_bmbt_set_startblock(ep, del_endblock);
3235 XFS_BMAP_TRACE_POST_UPDATE("2", ip, idx, whichfork); 3088 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
3236 if (!cur) { 3089 if (!cur) {
3237 flags |= xfs_ilog_fext(whichfork); 3090 flags |= xfs_ilog_fext(whichfork);
3238 break; 3091 break;
@@ -3248,19 +3101,18 @@ xfs_bmap_del_extent(
3248 * Deleting the last part of the extent. 3101 * Deleting the last part of the extent.
3249 */ 3102 */
3250 temp = got.br_blockcount - del->br_blockcount; 3103 temp = got.br_blockcount - del->br_blockcount;
3251 XFS_BMAP_TRACE_PRE_UPDATE("1", ip, idx, whichfork); 3104 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
3252 xfs_bmbt_set_blockcount(ep, temp); 3105 xfs_bmbt_set_blockcount(ep, temp);
3253 ifp->if_lastex = idx; 3106 ifp->if_lastex = idx;
3254 if (delay) { 3107 if (delay) {
3255 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 3108 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
3256 da_old); 3109 da_old);
3257 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); 3110 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
3258 XFS_BMAP_TRACE_POST_UPDATE("1", ip, idx, 3111 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
3259 whichfork);
3260 da_new = temp; 3112 da_new = temp;
3261 break; 3113 break;
3262 } 3114 }
3263 XFS_BMAP_TRACE_POST_UPDATE("1", ip, idx, whichfork); 3115 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
3264 if (!cur) { 3116 if (!cur) {
3265 flags |= xfs_ilog_fext(whichfork); 3117 flags |= xfs_ilog_fext(whichfork);
3266 break; 3118 break;
@@ -3277,7 +3129,7 @@ xfs_bmap_del_extent(
3277 * Deleting the middle of the extent. 3129 * Deleting the middle of the extent.
3278 */ 3130 */
3279 temp = del->br_startoff - got.br_startoff; 3131 temp = del->br_startoff - got.br_startoff;
3280 XFS_BMAP_TRACE_PRE_UPDATE("0", ip, idx, whichfork); 3132 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
3281 xfs_bmbt_set_blockcount(ep, temp); 3133 xfs_bmbt_set_blockcount(ep, temp);
3282 new.br_startoff = del_endoff; 3134 new.br_startoff = del_endoff;
3283 temp2 = got_endoff - del_endoff; 3135 temp2 = got_endoff - del_endoff;
@@ -3364,10 +3216,8 @@ xfs_bmap_del_extent(
3364 } 3216 }
3365 } 3217 }
3366 } 3218 }
3367 XFS_BMAP_TRACE_POST_UPDATE("0", ip, idx, whichfork); 3219 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
3368 XFS_BMAP_TRACE_INSERT("0", ip, idx + 1, 1, &new, NULL, 3220 xfs_iext_insert(ip, idx + 1, 1, &new, state);
3369 whichfork);
3370 xfs_iext_insert(ifp, idx + 1, 1, &new);
3371 ifp->if_lastex = idx + 1; 3221 ifp->if_lastex = idx + 1;
3372 break; 3222 break;
3373 } 3223 }
@@ -3687,7 +3537,9 @@ xfs_bmap_local_to_extents(
3687 xfs_iext_add(ifp, 0, 1); 3537 xfs_iext_add(ifp, 0, 1);
3688 ep = xfs_iext_get_ext(ifp, 0); 3538 ep = xfs_iext_get_ext(ifp, 0);
3689 xfs_bmbt_set_allf(ep, 0, args.fsbno, 1, XFS_EXT_NORM); 3539 xfs_bmbt_set_allf(ep, 0, args.fsbno, 1, XFS_EXT_NORM);
3690 XFS_BMAP_TRACE_POST_UPDATE("new", ip, 0, whichfork); 3540 trace_xfs_bmap_post_update(ip, 0,
3541 whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0,
3542 _THIS_IP_);
3691 XFS_IFORK_NEXT_SET(ip, whichfork, 1); 3543 XFS_IFORK_NEXT_SET(ip, whichfork, 1);
3692 ip->i_d.di_nblocks = 1; 3544 ip->i_d.di_nblocks = 1;
3693 xfs_trans_mod_dquot_byino(tp, ip, 3545 xfs_trans_mod_dquot_byino(tp, ip,
@@ -3800,158 +3652,6 @@ xfs_bmap_search_extents(
3800 return ep; 3652 return ep;
3801} 3653}
3802 3654
3803
3804#ifdef XFS_BMAP_TRACE
3805ktrace_t *xfs_bmap_trace_buf;
3806
3807/*
3808 * Add a bmap trace buffer entry. Base routine for the others.
3809 */
3810STATIC void
3811xfs_bmap_trace_addentry(
3812 int opcode, /* operation */
3813 const char *fname, /* function name */
3814 char *desc, /* operation description */
3815 xfs_inode_t *ip, /* incore inode pointer */
3816 xfs_extnum_t idx, /* index of entry(ies) */
3817 xfs_extnum_t cnt, /* count of entries, 1 or 2 */
3818 xfs_bmbt_rec_host_t *r1, /* first record */
3819 xfs_bmbt_rec_host_t *r2, /* second record or null */
3820 int whichfork) /* data or attr fork */
3821{
3822 xfs_bmbt_rec_host_t tr2;
3823
3824 ASSERT(cnt == 1 || cnt == 2);
3825 ASSERT(r1 != NULL);
3826 if (cnt == 1) {
3827 ASSERT(r2 == NULL);
3828 r2 = &tr2;
3829 memset(&tr2, 0, sizeof(tr2));
3830 } else
3831 ASSERT(r2 != NULL);
3832 ktrace_enter(xfs_bmap_trace_buf,
3833 (void *)(__psint_t)(opcode | (whichfork << 16)),
3834 (void *)fname, (void *)desc, (void *)ip,
3835 (void *)(__psint_t)idx,
3836 (void *)(__psint_t)cnt,
3837 (void *)(__psunsigned_t)(ip->i_ino >> 32),
3838 (void *)(__psunsigned_t)(unsigned)ip->i_ino,
3839 (void *)(__psunsigned_t)(r1->l0 >> 32),
3840 (void *)(__psunsigned_t)(unsigned)(r1->l0),
3841 (void *)(__psunsigned_t)(r1->l1 >> 32),
3842 (void *)(__psunsigned_t)(unsigned)(r1->l1),
3843 (void *)(__psunsigned_t)(r2->l0 >> 32),
3844 (void *)(__psunsigned_t)(unsigned)(r2->l0),
3845 (void *)(__psunsigned_t)(r2->l1 >> 32),
3846 (void *)(__psunsigned_t)(unsigned)(r2->l1)
3847 );
3848 ASSERT(ip->i_xtrace);
3849 ktrace_enter(ip->i_xtrace,
3850 (void *)(__psint_t)(opcode | (whichfork << 16)),
3851 (void *)fname, (void *)desc, (void *)ip,
3852 (void *)(__psint_t)idx,
3853 (void *)(__psint_t)cnt,
3854 (void *)(__psunsigned_t)(ip->i_ino >> 32),
3855 (void *)(__psunsigned_t)(unsigned)ip->i_ino,
3856 (void *)(__psunsigned_t)(r1->l0 >> 32),
3857 (void *)(__psunsigned_t)(unsigned)(r1->l0),
3858 (void *)(__psunsigned_t)(r1->l1 >> 32),
3859 (void *)(__psunsigned_t)(unsigned)(r1->l1),
3860 (void *)(__psunsigned_t)(r2->l0 >> 32),
3861 (void *)(__psunsigned_t)(unsigned)(r2->l0),
3862 (void *)(__psunsigned_t)(r2->l1 >> 32),
3863 (void *)(__psunsigned_t)(unsigned)(r2->l1)
3864 );
3865}
3866
3867/*
3868 * Add bmap trace entry prior to a call to xfs_iext_remove.
3869 */
3870STATIC void
3871xfs_bmap_trace_delete(
3872 const char *fname, /* function name */
3873 char *desc, /* operation description */
3874 xfs_inode_t *ip, /* incore inode pointer */
3875 xfs_extnum_t idx, /* index of entry(entries) deleted */
3876 xfs_extnum_t cnt, /* count of entries deleted, 1 or 2 */
3877 int whichfork) /* data or attr fork */
3878{
3879 xfs_ifork_t *ifp; /* inode fork pointer */
3880
3881 ifp = XFS_IFORK_PTR(ip, whichfork);
3882 xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_DELETE, fname, desc, ip, idx,
3883 cnt, xfs_iext_get_ext(ifp, idx),
3884 cnt == 2 ? xfs_iext_get_ext(ifp, idx + 1) : NULL,
3885 whichfork);
3886}
3887
3888/*
3889 * Add bmap trace entry prior to a call to xfs_iext_insert, or
3890 * reading in the extents list from the disk (in the btree).
3891 */
3892STATIC void
3893xfs_bmap_trace_insert(
3894 const char *fname, /* function name */
3895 char *desc, /* operation description */
3896 xfs_inode_t *ip, /* incore inode pointer */
3897 xfs_extnum_t idx, /* index of entry(entries) inserted */
3898 xfs_extnum_t cnt, /* count of entries inserted, 1 or 2 */
3899 xfs_bmbt_irec_t *r1, /* inserted record 1 */
3900 xfs_bmbt_irec_t *r2, /* inserted record 2 or null */
3901 int whichfork) /* data or attr fork */
3902{
3903 xfs_bmbt_rec_host_t tr1; /* compressed record 1 */
3904 xfs_bmbt_rec_host_t tr2; /* compressed record 2 if needed */
3905
3906 xfs_bmbt_set_all(&tr1, r1);
3907 if (cnt == 2) {
3908 ASSERT(r2 != NULL);
3909 xfs_bmbt_set_all(&tr2, r2);
3910 } else {
3911 ASSERT(cnt == 1);
3912 ASSERT(r2 == NULL);
3913 }
3914 xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_INSERT, fname, desc, ip, idx,
3915 cnt, &tr1, cnt == 2 ? &tr2 : NULL, whichfork);
3916}
3917
3918/*
3919 * Add bmap trace entry after updating an extent record in place.
3920 */
3921STATIC void
3922xfs_bmap_trace_post_update(
3923 const char *fname, /* function name */
3924 char *desc, /* operation description */
3925 xfs_inode_t *ip, /* incore inode pointer */
3926 xfs_extnum_t idx, /* index of entry updated */
3927 int whichfork) /* data or attr fork */
3928{
3929 xfs_ifork_t *ifp; /* inode fork pointer */
3930
3931 ifp = XFS_IFORK_PTR(ip, whichfork);
3932 xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_POST_UP, fname, desc, ip, idx,
3933 1, xfs_iext_get_ext(ifp, idx), NULL, whichfork);
3934}
3935
3936/*
3937 * Add bmap trace entry prior to updating an extent record in place.
3938 */
3939STATIC void
3940xfs_bmap_trace_pre_update(
3941 const char *fname, /* function name */
3942 char *desc, /* operation description */
3943 xfs_inode_t *ip, /* incore inode pointer */
3944 xfs_extnum_t idx, /* index of entry to be updated */
3945 int whichfork) /* data or attr fork */
3946{
3947 xfs_ifork_t *ifp; /* inode fork pointer */
3948
3949 ifp = XFS_IFORK_PTR(ip, whichfork);
3950 xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_PRE_UP, fname, desc, ip, idx, 1,
3951 xfs_iext_get_ext(ifp, idx), NULL, whichfork);
3952}
3953#endif /* XFS_BMAP_TRACE */
3954
3955/* 3655/*
3956 * Compute the worst-case number of indirect blocks that will be used 3656 * Compute the worst-case number of indirect blocks that will be used
3957 * for ip's delayed extent of length "len". 3657 * for ip's delayed extent of length "len".
@@ -3983,37 +3683,6 @@ xfs_bmap_worst_indlen(
3983 return rval; 3683 return rval;
3984} 3684}
3985 3685
3986#if defined(XFS_RW_TRACE)
3987STATIC void
3988xfs_bunmap_trace(
3989 xfs_inode_t *ip,
3990 xfs_fileoff_t bno,
3991 xfs_filblks_t len,
3992 int flags,
3993 inst_t *ra)
3994{
3995 if (ip->i_rwtrace == NULL)
3996 return;
3997 ktrace_enter(ip->i_rwtrace,
3998 (void *)(__psint_t)XFS_BUNMAP,
3999 (void *)ip,
4000 (void *)(__psint_t)((ip->i_d.di_size >> 32) & 0xffffffff),
4001 (void *)(__psint_t)(ip->i_d.di_size & 0xffffffff),
4002 (void *)(__psint_t)(((xfs_dfiloff_t)bno >> 32) & 0xffffffff),
4003 (void *)(__psint_t)((xfs_dfiloff_t)bno & 0xffffffff),
4004 (void *)(__psint_t)len,
4005 (void *)(__psint_t)flags,
4006 (void *)(unsigned long)current_cpu(),
4007 (void *)ra,
4008 (void *)0,
4009 (void *)0,
4010 (void *)0,
4011 (void *)0,
4012 (void *)0,
4013 (void *)0);
4014}
4015#endif
4016
4017/* 3686/*
4018 * Convert inode from non-attributed to attributed. 3687 * Convert inode from non-attributed to attributed.
4019 * Must not be in a transaction, ip must not be locked. 3688 * Must not be in a transaction, ip must not be locked.
@@ -4702,34 +4371,30 @@ error0:
4702 return XFS_ERROR(EFSCORRUPTED); 4371 return XFS_ERROR(EFSCORRUPTED);
4703} 4372}
4704 4373
4705#ifdef XFS_BMAP_TRACE 4374#ifdef DEBUG
4706/* 4375/*
4707 * Add bmap trace insert entries for all the contents of the extent records. 4376 * Add bmap trace insert entries for all the contents of the extent records.
4708 */ 4377 */
4709void 4378void
4710xfs_bmap_trace_exlist( 4379xfs_bmap_trace_exlist(
4711 const char *fname, /* function name */
4712 xfs_inode_t *ip, /* incore inode pointer */ 4380 xfs_inode_t *ip, /* incore inode pointer */
4713 xfs_extnum_t cnt, /* count of entries in the list */ 4381 xfs_extnum_t cnt, /* count of entries in the list */
4714 int whichfork) /* data or attr fork */ 4382 int whichfork, /* data or attr fork */
4383 unsigned long caller_ip)
4715{ 4384{
4716 xfs_bmbt_rec_host_t *ep; /* current extent record */
4717 xfs_extnum_t idx; /* extent record index */ 4385 xfs_extnum_t idx; /* extent record index */
4718 xfs_ifork_t *ifp; /* inode fork pointer */ 4386 xfs_ifork_t *ifp; /* inode fork pointer */
4719 xfs_bmbt_irec_t s; /* file extent record */ 4387 int state = 0;
4388
4389 if (whichfork == XFS_ATTR_FORK)
4390 state |= BMAP_ATTRFORK;
4720 4391
4721 ifp = XFS_IFORK_PTR(ip, whichfork); 4392 ifp = XFS_IFORK_PTR(ip, whichfork);
4722 ASSERT(cnt == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))); 4393 ASSERT(cnt == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)));
4723 for (idx = 0; idx < cnt; idx++) { 4394 for (idx = 0; idx < cnt; idx++)
4724 ep = xfs_iext_get_ext(ifp, idx); 4395 trace_xfs_extlist(ip, idx, whichfork, caller_ip);
4725 xfs_bmbt_get_all(ep, &s);
4726 XFS_BMAP_TRACE_INSERT("exlist", ip, idx, 1, &s, NULL,
4727 whichfork);
4728 }
4729} 4396}
4730#endif
4731 4397
4732#ifdef DEBUG
4733/* 4398/*
4734 * Validate that the bmbt_irecs being returned from bmapi are valid 4399 * Validate that the bmbt_irecs being returned from bmapi are valid
4735 * given the callers original parameters. Specifically check the 4400 * given the callers original parameters. Specifically check the
@@ -5478,7 +5143,8 @@ xfs_bunmapi(
5478 int rsvd; /* OK to allocate reserved blocks */ 5143 int rsvd; /* OK to allocate reserved blocks */
5479 xfs_fsblock_t sum; 5144 xfs_fsblock_t sum;
5480 5145
5481 xfs_bunmap_trace(ip, bno, len, flags, (inst_t *)__return_address); 5146 trace_xfs_bunmap(ip, bno, len, flags, _RET_IP_);
5147
5482 whichfork = (flags & XFS_BMAPI_ATTRFORK) ? 5148 whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
5483 XFS_ATTR_FORK : XFS_DATA_FORK; 5149 XFS_ATTR_FORK : XFS_DATA_FORK;
5484 ifp = XFS_IFORK_PTR(ip, whichfork); 5150 ifp = XFS_IFORK_PTR(ip, whichfork);
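The xfs_bmap.c hunks above swap the string-tagged ktrace helpers (XFS_BMAP_TRACE_PRE_UPDATE("LC", ...), xfs_bunmap_trace) for tracepoints that identify the call site by instruction pointer, passing _THIS_IP_ or _RET_IP_ instead of a hand-written tag. Below is a minimal userspace sketch of that idea, not part of the patch; demo_trace_update, update_left and update_right are invented names, and __builtin_return_address(0) stands in for the kernel's _RET_IP_.

#include <stdio.h>

/* Hypothetical stand-in for a tracepoint: records which call site fired it. */
static void demo_trace_update(const char *what, int idx, void *caller_ip)
{
	printf("%s: idx=%d caller=%p\n", what, idx, caller_ip);
}

/* Each call site passes its own return address, much like _RET_IP_ in the
 * kernel, so no per-site string tag ("LC", "RC", ...) is needed any more. */
static void update_left(int idx)
{
	demo_trace_update("pre_update", idx - 1, __builtin_return_address(0));
}

static void update_right(int idx)
{
	demo_trace_update("pre_update", idx, __builtin_return_address(0));
}

int main(void)
{
	update_left(5);
	update_right(5);
	return 0;
}

The practical win mirrored here is that the trace entry always pinpoints the exact caller, while the old "LC"/"RC" strings had to be kept in sync with the code by hand.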
diff --git a/fs/xfs/xfs_bmap.h b/fs/xfs/xfs_bmap.h
index 56f62d2edc35..419dafb9d87d 100644
--- a/fs/xfs/xfs_bmap.h
+++ b/fs/xfs/xfs_bmap.h
@@ -95,6 +95,21 @@ typedef struct xfs_bmap_free
95 /* need write cache flushing and no */ 95 /* need write cache flushing and no */
96 /* additional allocation alignments */ 96 /* additional allocation alignments */
97 97
98#define XFS_BMAPI_FLAGS \
99 { XFS_BMAPI_WRITE, "WRITE" }, \
100 { XFS_BMAPI_DELAY, "DELAY" }, \
101 { XFS_BMAPI_ENTIRE, "ENTIRE" }, \
102 { XFS_BMAPI_METADATA, "METADATA" }, \
103 { XFS_BMAPI_EXACT, "EXACT" }, \
104 { XFS_BMAPI_ATTRFORK, "ATTRFORK" }, \
105 { XFS_BMAPI_ASYNC, "ASYNC" }, \
106 { XFS_BMAPI_RSVBLOCKS, "RSVBLOCKS" }, \
107 { XFS_BMAPI_PREALLOC, "PREALLOC" }, \
108 { XFS_BMAPI_IGSTATE, "IGSTATE" }, \
109 { XFS_BMAPI_CONTIG, "CONTIG" }, \
110 { XFS_BMAPI_CONVERT, "CONVERT" }
111
112
98static inline int xfs_bmapi_aflag(int w) 113static inline int xfs_bmapi_aflag(int w)
99{ 114{
100 return (w == XFS_ATTR_FORK ? XFS_BMAPI_ATTRFORK : 0); 115 return (w == XFS_ATTR_FORK ? XFS_BMAPI_ATTRFORK : 0);
@@ -135,36 +150,43 @@ typedef struct xfs_bmalloca {
135 char conv; /* overwriting unwritten extents */ 150 char conv; /* overwriting unwritten extents */
136} xfs_bmalloca_t; 151} xfs_bmalloca_t;
137 152
138#if defined(__KERNEL__) && defined(XFS_BMAP_TRACE)
139/* 153/*
140 * Trace operations for bmap extent tracing 154 * Flags for xfs_bmap_add_extent*.
141 */ 155 */
142#define XFS_BMAP_KTRACE_DELETE 1 156#define BMAP_LEFT_CONTIG (1 << 0)
143#define XFS_BMAP_KTRACE_INSERT 2 157#define BMAP_RIGHT_CONTIG (1 << 1)
144#define XFS_BMAP_KTRACE_PRE_UP 3 158#define BMAP_LEFT_FILLING (1 << 2)
145#define XFS_BMAP_KTRACE_POST_UP 4 159#define BMAP_RIGHT_FILLING (1 << 3)
146 160#define BMAP_LEFT_DELAY (1 << 4)
147#define XFS_BMAP_TRACE_SIZE 4096 /* size of global trace buffer */ 161#define BMAP_RIGHT_DELAY (1 << 5)
148#define XFS_BMAP_KTRACE_SIZE 32 /* size of per-inode trace buffer */ 162#define BMAP_LEFT_VALID (1 << 6)
149extern ktrace_t *xfs_bmap_trace_buf; 163#define BMAP_RIGHT_VALID (1 << 7)
164#define BMAP_ATTRFORK (1 << 8)
165
166#define XFS_BMAP_EXT_FLAGS \
167 { BMAP_LEFT_CONTIG, "LC" }, \
168 { BMAP_RIGHT_CONTIG, "RC" }, \
169 { BMAP_LEFT_FILLING, "LF" }, \
170 { BMAP_RIGHT_FILLING, "RF" }, \
171 { BMAP_ATTRFORK, "ATTR" }
150 172
151/* 173/*
152 * Add bmap trace insert entries for all the contents of the extent list. 174 * Add bmap trace insert entries for all the contents of the extent list.
175 *
176 * Quite excessive tracing. Only do this for debug builds.
153 */ 177 */
178#if defined(__KERNEL) && defined(DEBUG)
154void 179void
155xfs_bmap_trace_exlist( 180xfs_bmap_trace_exlist(
156 const char *fname, /* function name */
157 struct xfs_inode *ip, /* incore inode pointer */ 181 struct xfs_inode *ip, /* incore inode pointer */
158 xfs_extnum_t cnt, /* count of entries in list */ 182 xfs_extnum_t cnt, /* count of entries in list */
159 int whichfork); /* data or attr fork */ 183 int whichfork,
184 unsigned long caller_ip); /* data or attr fork */
160#define XFS_BMAP_TRACE_EXLIST(ip,c,w) \ 185#define XFS_BMAP_TRACE_EXLIST(ip,c,w) \
161 xfs_bmap_trace_exlist(__func__,ip,c,w) 186 xfs_bmap_trace_exlist(ip,c,w, _THIS_IP_)
162 187#else
163#else /* __KERNEL__ && XFS_BMAP_TRACE */
164
165#define XFS_BMAP_TRACE_EXLIST(ip,c,w) 188#define XFS_BMAP_TRACE_EXLIST(ip,c,w)
166 189#endif
167#endif /* __KERNEL__ && XFS_BMAP_TRACE */
168 190
169/* 191/*
170 * Convert inode from non-attributed to attributed. 192 * Convert inode from non-attributed to attributed.
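The BMAP_* bits introduced in this header replace the function-local MASK()/STATE_SET() macros removed from xfs_bmap.c earlier in the patch: neighbour state is accumulated in a plain int and the merge case is chosen by switching on the two *_CONTIG bits. The following standalone sketch shows only that pattern; the flag values mirror the header, everything else (merge_case and its arguments) is invented for illustration.

#include <stdio.h>

#define BMAP_LEFT_CONTIG	(1 << 0)
#define BMAP_RIGHT_CONTIG	(1 << 1)
#define BMAP_LEFT_VALID		(1 << 6)
#define BMAP_RIGHT_VALID	(1 << 7)

/* Decide how a new extent merges with its neighbours, using the same
 * "accumulate bits, then switch on the contiguity mask" shape as
 * xfs_bmap_add_extent_hole_real() after this patch. */
static const char *merge_case(int left_valid, int left_adjacent,
			      int right_valid, int right_adjacent)
{
	int state = 0;

	if (left_valid)
		state |= BMAP_LEFT_VALID;
	if (right_valid)
		state |= BMAP_RIGHT_VALID;
	if ((state & BMAP_LEFT_VALID) && left_adjacent)
		state |= BMAP_LEFT_CONTIG;
	if ((state & BMAP_RIGHT_VALID) && right_adjacent)
		state |= BMAP_RIGHT_CONTIG;

	switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
		return "merge with both neighbours";
	case BMAP_LEFT_CONTIG:
		return "merge with left neighbour";
	case BMAP_RIGHT_CONTIG:
		return "merge with right neighbour";
	default:
		return "insert a new record";
	}
}

int main(void)
{
	printf("%s\n", merge_case(1, 1, 1, 0));	/* left neighbour only */
	return 0;
}

Keeping the bits in one int also lets the same value be handed straight to the new tracepoints, which is why the state word is now passed to trace_xfs_bmap_pre_update()/post_update().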
diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c
index 6f5ccede63f9..38751d5fac6f 100644
--- a/fs/xfs/xfs_bmap_btree.c
+++ b/fs/xfs/xfs_bmap_btree.c
@@ -768,12 +768,6 @@ xfs_bmbt_trace_enter(
768 (void *)a0, (void *)a1, (void *)a2, (void *)a3, 768 (void *)a0, (void *)a1, (void *)a2, (void *)a3,
769 (void *)a4, (void *)a5, (void *)a6, (void *)a7, 769 (void *)a4, (void *)a5, (void *)a6, (void *)a7,
770 (void *)a8, (void *)a9, (void *)a10); 770 (void *)a8, (void *)a9, (void *)a10);
771 ktrace_enter(ip->i_btrace,
772 (void *)((__psint_t)type | (whichfork << 8) | (line << 16)),
773 (void *)func, (void *)s, (void *)ip, (void *)cur,
774 (void *)a0, (void *)a1, (void *)a2, (void *)a3,
775 (void *)a4, (void *)a5, (void *)a6, (void *)a7,
776 (void *)a8, (void *)a9, (void *)a10);
777} 771}
778 772
779STATIC void 773STATIC void
diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c
index 52b5f14d0c32..36a0992dd669 100644
--- a/fs/xfs/xfs_btree.c
+++ b/fs/xfs/xfs_btree.c
@@ -39,6 +39,7 @@
39#include "xfs_btree_trace.h" 39#include "xfs_btree_trace.h"
40#include "xfs_ialloc.h" 40#include "xfs_ialloc.h"
41#include "xfs_error.h" 41#include "xfs_error.h"
42#include "xfs_trace.h"
42 43
43/* 44/*
44 * Cursor allocation zone. 45 * Cursor allocation zone.
@@ -81,7 +82,7 @@ xfs_btree_check_lblock(
81 XFS_ERRTAG_BTREE_CHECK_LBLOCK, 82 XFS_ERRTAG_BTREE_CHECK_LBLOCK,
82 XFS_RANDOM_BTREE_CHECK_LBLOCK))) { 83 XFS_RANDOM_BTREE_CHECK_LBLOCK))) {
83 if (bp) 84 if (bp)
84 xfs_buftrace("LBTREE ERROR", bp); 85 trace_xfs_btree_corrupt(bp, _RET_IP_);
85 XFS_ERROR_REPORT("xfs_btree_check_lblock", XFS_ERRLEVEL_LOW, 86 XFS_ERROR_REPORT("xfs_btree_check_lblock", XFS_ERRLEVEL_LOW,
86 mp); 87 mp);
87 return XFS_ERROR(EFSCORRUPTED); 88 return XFS_ERROR(EFSCORRUPTED);
@@ -119,7 +120,7 @@ xfs_btree_check_sblock(
119 XFS_ERRTAG_BTREE_CHECK_SBLOCK, 120 XFS_ERRTAG_BTREE_CHECK_SBLOCK,
120 XFS_RANDOM_BTREE_CHECK_SBLOCK))) { 121 XFS_RANDOM_BTREE_CHECK_SBLOCK))) {
121 if (bp) 122 if (bp)
122 xfs_buftrace("SBTREE ERROR", bp); 123 trace_xfs_btree_corrupt(bp, _RET_IP_);
123 XFS_CORRUPTION_ERROR("xfs_btree_check_sblock", 124 XFS_CORRUPTION_ERROR("xfs_btree_check_sblock",
124 XFS_ERRLEVEL_LOW, cur->bc_mp, block); 125 XFS_ERRLEVEL_LOW, cur->bc_mp, block);
125 return XFS_ERROR(EFSCORRUPTED); 126 return XFS_ERROR(EFSCORRUPTED);
diff --git a/fs/xfs/xfs_btree_trace.h b/fs/xfs/xfs_btree_trace.h
index b3f5eb3c3c6c..2d8a309873ea 100644
--- a/fs/xfs/xfs_btree_trace.h
+++ b/fs/xfs/xfs_btree_trace.h
@@ -58,8 +58,6 @@ void xfs_btree_trace_argbi(const char *, struct xfs_btree_cur *,
58 struct xfs_buf *, int, int); 58 struct xfs_buf *, int, int);
59void xfs_btree_trace_argbii(const char *, struct xfs_btree_cur *, 59void xfs_btree_trace_argbii(const char *, struct xfs_btree_cur *,
60 struct xfs_buf *, int, int, int); 60 struct xfs_buf *, int, int, int);
61void xfs_btree_trace_argfffi(const char *, struct xfs_btree_cur *,
62 xfs_dfiloff_t, xfs_dfsbno_t, xfs_dfilblks_t, int, int);
63void xfs_btree_trace_argi(const char *, struct xfs_btree_cur *, int, int); 61void xfs_btree_trace_argi(const char *, struct xfs_btree_cur *, int, int);
64void xfs_btree_trace_argipk(const char *, struct xfs_btree_cur *, int, 62void xfs_btree_trace_argipk(const char *, struct xfs_btree_cur *, int,
65 union xfs_btree_ptr, union xfs_btree_key *, int); 63 union xfs_btree_ptr, union xfs_btree_key *, int);
@@ -71,24 +69,10 @@ void xfs_btree_trace_argr(const char *, struct xfs_btree_cur *,
71 union xfs_btree_rec *, int); 69 union xfs_btree_rec *, int);
72void xfs_btree_trace_cursor(const char *, struct xfs_btree_cur *, int, int); 70void xfs_btree_trace_cursor(const char *, struct xfs_btree_cur *, int, int);
73 71
74
75#define XFS_ALLOCBT_TRACE_SIZE 4096 /* size of global trace buffer */
76extern ktrace_t *xfs_allocbt_trace_buf;
77
78#define XFS_INOBT_TRACE_SIZE 4096 /* size of global trace buffer */
79extern ktrace_t *xfs_inobt_trace_buf;
80
81#define XFS_BMBT_TRACE_SIZE 4096 /* size of global trace buffer */
82#define XFS_BMBT_KTRACE_SIZE 32 /* size of per-inode trace buffer */
83extern ktrace_t *xfs_bmbt_trace_buf;
84
85
86#define XFS_BTREE_TRACE_ARGBI(c, b, i) \ 72#define XFS_BTREE_TRACE_ARGBI(c, b, i) \
87 xfs_btree_trace_argbi(__func__, c, b, i, __LINE__) 73 xfs_btree_trace_argbi(__func__, c, b, i, __LINE__)
88#define XFS_BTREE_TRACE_ARGBII(c, b, i, j) \ 74#define XFS_BTREE_TRACE_ARGBII(c, b, i, j) \
89 xfs_btree_trace_argbii(__func__, c, b, i, j, __LINE__) 75 xfs_btree_trace_argbii(__func__, c, b, i, j, __LINE__)
90#define XFS_BTREE_TRACE_ARGFFFI(c, o, b, i, j) \
91 xfs_btree_trace_argfffi(__func__, c, o, b, i, j, __LINE__)
92#define XFS_BTREE_TRACE_ARGI(c, i) \ 76#define XFS_BTREE_TRACE_ARGI(c, i) \
93 xfs_btree_trace_argi(__func__, c, i, __LINE__) 77 xfs_btree_trace_argi(__func__, c, i, __LINE__)
94#define XFS_BTREE_TRACE_ARGIPK(c, i, p, k) \ 78#define XFS_BTREE_TRACE_ARGIPK(c, i, p, k) \
@@ -104,7 +88,6 @@ extern ktrace_t *xfs_bmbt_trace_buf;
104#else 88#else
105#define XFS_BTREE_TRACE_ARGBI(c, b, i) 89#define XFS_BTREE_TRACE_ARGBI(c, b, i)
106#define XFS_BTREE_TRACE_ARGBII(c, b, i, j) 90#define XFS_BTREE_TRACE_ARGBII(c, b, i, j)
107#define XFS_BTREE_TRACE_ARGFFFI(c, o, b, i, j)
108#define XFS_BTREE_TRACE_ARGI(c, i) 91#define XFS_BTREE_TRACE_ARGI(c, i)
109#define XFS_BTREE_TRACE_ARGIPK(c, i, p, s) 92#define XFS_BTREE_TRACE_ARGIPK(c, i, p, s)
110#define XFS_BTREE_TRACE_ARGIPR(c, i, p, r) 93#define XFS_BTREE_TRACE_ARGIPR(c, i, p, r)
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 92af4098c7e8..a30f7e9eb2b9 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -29,6 +29,7 @@
29#include "xfs_buf_item.h" 29#include "xfs_buf_item.h"
30#include "xfs_trans_priv.h" 30#include "xfs_trans_priv.h"
31#include "xfs_error.h" 31#include "xfs_error.h"
32#include "xfs_trace.h"
32 33
33 34
34kmem_zone_t *xfs_buf_item_zone; 35kmem_zone_t *xfs_buf_item_zone;
@@ -164,7 +165,7 @@ xfs_buf_item_size(
164 * is the buf log format structure with the 165 * is the buf log format structure with the
165 * cancel flag in it. 166 * cancel flag in it.
166 */ 167 */
167 xfs_buf_item_trace("SIZE STALE", bip); 168 trace_xfs_buf_item_size_stale(bip);
168 ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL); 169 ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL);
169 return 1; 170 return 1;
170 } 171 }
@@ -206,7 +207,7 @@ xfs_buf_item_size(
206 } 207 }
207 } 208 }
208 209
209 xfs_buf_item_trace("SIZE NORM", bip); 210 trace_xfs_buf_item_size(bip);
210 return nvecs; 211 return nvecs;
211} 212}
212 213
@@ -259,7 +260,7 @@ xfs_buf_item_format(
259 * is the buf log format structure with the 260 * is the buf log format structure with the
260 * cancel flag in it. 261 * cancel flag in it.
261 */ 262 */
262 xfs_buf_item_trace("FORMAT STALE", bip); 263 trace_xfs_buf_item_format_stale(bip);
263 ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL); 264 ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL);
264 bip->bli_format.blf_size = nvecs; 265 bip->bli_format.blf_size = nvecs;
265 return; 266 return;
@@ -335,7 +336,7 @@ xfs_buf_item_format(
335 /* 336 /*
336 * Check to make sure everything is consistent. 337 * Check to make sure everything is consistent.
337 */ 338 */
338 xfs_buf_item_trace("FORMAT NORM", bip); 339 trace_xfs_buf_item_format(bip);
339 xfs_buf_item_log_check(bip); 340 xfs_buf_item_log_check(bip);
340} 341}
341 342
@@ -355,8 +356,7 @@ xfs_buf_item_pin(
355 ASSERT(atomic_read(&bip->bli_refcount) > 0); 356 ASSERT(atomic_read(&bip->bli_refcount) > 0);
356 ASSERT((bip->bli_flags & XFS_BLI_LOGGED) || 357 ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
357 (bip->bli_flags & XFS_BLI_STALE)); 358 (bip->bli_flags & XFS_BLI_STALE));
358 xfs_buf_item_trace("PIN", bip); 359 trace_xfs_buf_item_pin(bip);
359 xfs_buftrace("XFS_PIN", bp);
360 xfs_bpin(bp); 360 xfs_bpin(bp);
361} 361}
362 362
@@ -383,8 +383,7 @@ xfs_buf_item_unpin(
383 ASSERT(bp != NULL); 383 ASSERT(bp != NULL);
384 ASSERT(XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *) == bip); 384 ASSERT(XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *) == bip);
385 ASSERT(atomic_read(&bip->bli_refcount) > 0); 385 ASSERT(atomic_read(&bip->bli_refcount) > 0);
386 xfs_buf_item_trace("UNPIN", bip); 386 trace_xfs_buf_item_unpin(bip);
387 xfs_buftrace("XFS_UNPIN", bp);
388 387
389 freed = atomic_dec_and_test(&bip->bli_refcount); 388 freed = atomic_dec_and_test(&bip->bli_refcount);
390 ailp = bip->bli_item.li_ailp; 389 ailp = bip->bli_item.li_ailp;
@@ -395,8 +394,8 @@ xfs_buf_item_unpin(
395 ASSERT(!(XFS_BUF_ISDELAYWRITE(bp))); 394 ASSERT(!(XFS_BUF_ISDELAYWRITE(bp)));
396 ASSERT(XFS_BUF_ISSTALE(bp)); 395 ASSERT(XFS_BUF_ISSTALE(bp));
397 ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL); 396 ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL);
398 xfs_buf_item_trace("UNPIN STALE", bip); 397 trace_xfs_buf_item_unpin_stale(bip);
399 xfs_buftrace("XFS_UNPIN STALE", bp); 398
400 /* 399 /*
401 * If we get called here because of an IO error, we may 400 * If we get called here because of an IO error, we may
402 * or may not have the item on the AIL. xfs_trans_ail_delete() 401 * or may not have the item on the AIL. xfs_trans_ail_delete()
@@ -440,8 +439,8 @@ xfs_buf_item_unpin_remove(
440 if ((atomic_read(&bip->bli_refcount) == 1) && 439 if ((atomic_read(&bip->bli_refcount) == 1) &&
441 (bip->bli_flags & XFS_BLI_STALE)) { 440 (bip->bli_flags & XFS_BLI_STALE)) {
442 ASSERT(XFS_BUF_VALUSEMA(bip->bli_buf) <= 0); 441 ASSERT(XFS_BUF_VALUSEMA(bip->bli_buf) <= 0);
443 xfs_buf_item_trace("UNPIN REMOVE", bip); 442 trace_xfs_buf_item_unpin_stale(bip);
444 xfs_buftrace("XFS_UNPIN_REMOVE", bp); 443
445 /* 444 /*
446 * yes -- clear the xaction descriptor in-use flag 445 * yes -- clear the xaction descriptor in-use flag
447 * and free the chunk if required. We can safely 446 * and free the chunk if required. We can safely
@@ -495,7 +494,7 @@ xfs_buf_item_trylock(
495 XFS_BUF_HOLD(bp); 494 XFS_BUF_HOLD(bp);
496 495
497 ASSERT(!(bip->bli_flags & XFS_BLI_STALE)); 496 ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
498 xfs_buf_item_trace("TRYLOCK SUCCESS", bip); 497 trace_xfs_buf_item_trylock(bip);
499 return XFS_ITEM_SUCCESS; 498 return XFS_ITEM_SUCCESS;
500} 499}
501 500
@@ -524,7 +523,6 @@ xfs_buf_item_unlock(
524 uint hold; 523 uint hold;
525 524
526 bp = bip->bli_buf; 525 bp = bip->bli_buf;
527 xfs_buftrace("XFS_UNLOCK", bp);
528 526
529 /* 527 /*
530 * Clear the buffer's association with this transaction. 528 * Clear the buffer's association with this transaction.
@@ -547,7 +545,7 @@ xfs_buf_item_unlock(
547 */ 545 */
548 if (bip->bli_flags & XFS_BLI_STALE) { 546 if (bip->bli_flags & XFS_BLI_STALE) {
549 bip->bli_flags &= ~XFS_BLI_LOGGED; 547 bip->bli_flags &= ~XFS_BLI_LOGGED;
550 xfs_buf_item_trace("UNLOCK STALE", bip); 548 trace_xfs_buf_item_unlock_stale(bip);
551 ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL); 549 ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL);
552 if (!aborted) 550 if (!aborted)
553 return; 551 return;
@@ -574,7 +572,7 @@ xfs_buf_item_unlock(
574 * release the buffer at the end of this routine. 572 * release the buffer at the end of this routine.
575 */ 573 */
576 hold = bip->bli_flags & XFS_BLI_HOLD; 574 hold = bip->bli_flags & XFS_BLI_HOLD;
577 xfs_buf_item_trace("UNLOCK", bip); 575 trace_xfs_buf_item_unlock(bip);
578 576
579 /* 577 /*
580 * If the buf item isn't tracking any data, free it. 578 * If the buf item isn't tracking any data, free it.
@@ -618,7 +616,8 @@ xfs_buf_item_committed(
618 xfs_buf_log_item_t *bip, 616 xfs_buf_log_item_t *bip,
619 xfs_lsn_t lsn) 617 xfs_lsn_t lsn)
620{ 618{
621 xfs_buf_item_trace("COMMITTED", bip); 619 trace_xfs_buf_item_committed(bip);
620
622 if ((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) && 621 if ((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
623 (bip->bli_item.li_lsn != 0)) { 622 (bip->bli_item.li_lsn != 0)) {
624 return bip->bli_item.li_lsn; 623 return bip->bli_item.li_lsn;
@@ -640,7 +639,7 @@ xfs_buf_item_push(
640 xfs_buf_t *bp; 639 xfs_buf_t *bp;
641 640
642 ASSERT(!(bip->bli_flags & XFS_BLI_STALE)); 641 ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
643 xfs_buf_item_trace("PUSH", bip); 642 trace_xfs_buf_item_push(bip);
644 643
645 bp = bip->bli_buf; 644 bp = bip->bli_buf;
646 645
@@ -738,9 +737,6 @@ xfs_buf_item_init(
738 bip->bli_format.blf_blkno = (__int64_t)XFS_BUF_ADDR(bp); 737 bip->bli_format.blf_blkno = (__int64_t)XFS_BUF_ADDR(bp);
739 bip->bli_format.blf_len = (ushort)BTOBB(XFS_BUF_COUNT(bp)); 738 bip->bli_format.blf_len = (ushort)BTOBB(XFS_BUF_COUNT(bp));
740 bip->bli_format.blf_map_size = map_size; 739 bip->bli_format.blf_map_size = map_size;
741#ifdef XFS_BLI_TRACE
742 bip->bli_trace = ktrace_alloc(XFS_BLI_TRACE_SIZE, KM_NOFS);
743#endif
744 740
745#ifdef XFS_TRANS_DEBUG 741#ifdef XFS_TRANS_DEBUG
746 /* 742 /*
@@ -878,9 +874,6 @@ xfs_buf_item_free(
878 kmem_free(bip->bli_logged); 874 kmem_free(bip->bli_logged);
879#endif /* XFS_TRANS_DEBUG */ 875#endif /* XFS_TRANS_DEBUG */
880 876
881#ifdef XFS_BLI_TRACE
882 ktrace_free(bip->bli_trace);
883#endif
884 kmem_zone_free(xfs_buf_item_zone, bip); 877 kmem_zone_free(xfs_buf_item_zone, bip);
885} 878}
886 879
@@ -897,7 +890,8 @@ xfs_buf_item_relse(
897{ 890{
898 xfs_buf_log_item_t *bip; 891 xfs_buf_log_item_t *bip;
899 892
900 xfs_buftrace("XFS_RELSE", bp); 893 trace_xfs_buf_item_relse(bp, _RET_IP_);
894
901 bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*); 895 bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*);
902 XFS_BUF_SET_FSPRIVATE(bp, bip->bli_item.li_bio_list); 896 XFS_BUF_SET_FSPRIVATE(bp, bip->bli_item.li_bio_list);
903 if ((XFS_BUF_FSPRIVATE(bp, void *) == NULL) && 897 if ((XFS_BUF_FSPRIVATE(bp, void *) == NULL) &&
@@ -994,7 +988,7 @@ xfs_buf_iodone_callbacks(
994 if (XFS_FORCED_SHUTDOWN(mp)) { 988 if (XFS_FORCED_SHUTDOWN(mp)) {
995 ASSERT(XFS_BUF_TARGET(bp) == mp->m_ddev_targp); 989 ASSERT(XFS_BUF_TARGET(bp) == mp->m_ddev_targp);
996 XFS_BUF_SUPER_STALE(bp); 990 XFS_BUF_SUPER_STALE(bp);
997 xfs_buftrace("BUF_IODONE_CB", bp); 991 trace_xfs_buf_item_iodone(bp, _RET_IP_);
998 xfs_buf_do_callbacks(bp, lip); 992 xfs_buf_do_callbacks(bp, lip);
999 XFS_BUF_SET_FSPRIVATE(bp, NULL); 993 XFS_BUF_SET_FSPRIVATE(bp, NULL);
1000 XFS_BUF_CLR_IODONE_FUNC(bp); 994 XFS_BUF_CLR_IODONE_FUNC(bp);
@@ -1030,7 +1024,7 @@ xfs_buf_iodone_callbacks(
1030 XFS_BUF_SET_START(bp); 1024 XFS_BUF_SET_START(bp);
1031 } 1025 }
1032 ASSERT(XFS_BUF_IODONE_FUNC(bp)); 1026 ASSERT(XFS_BUF_IODONE_FUNC(bp));
1033 xfs_buftrace("BUF_IODONE ASYNC", bp); 1027 trace_xfs_buf_item_iodone_async(bp, _RET_IP_);
1034 xfs_buf_relse(bp); 1028 xfs_buf_relse(bp);
1035 } else { 1029 } else {
1036 /* 1030 /*
@@ -1053,9 +1047,7 @@ xfs_buf_iodone_callbacks(
1053 } 1047 }
1054 return; 1048 return;
1055 } 1049 }
1056#ifdef XFSERRORDEBUG 1050
1057 xfs_buftrace("XFS BUFCB NOERR", bp);
1058#endif
1059 xfs_buf_do_callbacks(bp, lip); 1051 xfs_buf_do_callbacks(bp, lip);
1060 XFS_BUF_SET_FSPRIVATE(bp, NULL); 1052 XFS_BUF_SET_FSPRIVATE(bp, NULL);
1061 XFS_BUF_CLR_IODONE_FUNC(bp); 1053 XFS_BUF_CLR_IODONE_FUNC(bp);
@@ -1081,7 +1073,9 @@ xfs_buf_error_relse(
1081 XFS_BUF_DONE(bp); 1073 XFS_BUF_DONE(bp);
1082 XFS_BUF_UNDELAYWRITE(bp); 1074 XFS_BUF_UNDELAYWRITE(bp);
1083 XFS_BUF_ERROR(bp,0); 1075 XFS_BUF_ERROR(bp,0);
1084 xfs_buftrace("BUF_ERROR_RELSE", bp); 1076
1077 trace_xfs_buf_error_relse(bp, _RET_IP_);
1078
1085 if (! XFS_FORCED_SHUTDOWN(mp)) 1079 if (! XFS_FORCED_SHUTDOWN(mp))
1086 xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR); 1080 xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
1087 /* 1081 /*
@@ -1128,34 +1122,3 @@ xfs_buf_iodone(
1128 xfs_trans_ail_delete(ailp, (xfs_log_item_t *)bip); 1122 xfs_trans_ail_delete(ailp, (xfs_log_item_t *)bip);
1129 xfs_buf_item_free(bip); 1123 xfs_buf_item_free(bip);
1130} 1124}
1131
1132#if defined(XFS_BLI_TRACE)
1133void
1134xfs_buf_item_trace(
1135 char *id,
1136 xfs_buf_log_item_t *bip)
1137{
1138 xfs_buf_t *bp;
1139 ASSERT(bip->bli_trace != NULL);
1140
1141 bp = bip->bli_buf;
1142 ktrace_enter(bip->bli_trace,
1143 (void *)id,
1144 (void *)bip->bli_buf,
1145 (void *)((unsigned long)bip->bli_flags),
1146 (void *)((unsigned long)bip->bli_recur),
1147 (void *)((unsigned long)atomic_read(&bip->bli_refcount)),
1148 (void *)((unsigned long)
1149 (0xFFFFFFFF & XFS_BUF_ADDR(bp) >> 32)),
1150 (void *)((unsigned long)(0xFFFFFFFF & XFS_BUF_ADDR(bp))),
1151 (void *)((unsigned long)XFS_BUF_COUNT(bp)),
1152 (void *)((unsigned long)XFS_BUF_BFLAGS(bp)),
1153 XFS_BUF_FSPRIVATE(bp, void *),
1154 XFS_BUF_FSPRIVATE2(bp, void *),
1155 (void *)(unsigned long)XFS_BUF_ISPINNED(bp),
1156 (void *)XFS_BUF_IODONE_FUNC(bp),
1157 (void *)((unsigned long)(XFS_BUF_VALUSEMA(bp))),
1158 (void *)bip->bli_item.li_desc,
1159 (void *)((unsigned long)bip->bli_item.li_flags));
1160}
1161#endif /* XFS_BLI_TRACE */
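The deleted xfs_buf_item_trace() packed the buf log item's fields into anonymous ktrace slots keyed by a string id; each former call site now has its own typed hook (trace_xfs_buf_item_pin, trace_xfs_buf_item_unpin_stale, and so on). A toy sketch of that shift follows, assuming nothing beyond this diff: demo_buf_item and the trace_demo_* functions are made-up stand-ins, with the event name baked into the function rather than passed as a string.

#include <stdio.h>

/* Toy stand-in for xfs_buf_log_item_t: just the fields the old ktrace
 * entry used to squeeze into void* slots. */
struct demo_buf_item {
	unsigned int flags;
	unsigned int recur;
	int          refcount;
};

/* One typed hook per event; the fields it records are fixed at the
 * definition, not at every call site. */
static void trace_demo_item_pin(const struct demo_buf_item *bip)
{
	printf("pin: flags=%#x recur=%u refcount=%d\n",
	       bip->flags, bip->recur, bip->refcount);
}

static void trace_demo_item_unpin(const struct demo_buf_item *bip)
{
	printf("unpin: flags=%#x recur=%u refcount=%d\n",
	       bip->flags, bip->recur, bip->refcount);
}

int main(void)
{
	struct demo_buf_item bip = { .flags = 0x1, .recur = 0, .refcount = 2 };

	trace_demo_item_pin(&bip);
	trace_demo_item_unpin(&bip);
	return 0;
}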
diff --git a/fs/xfs/xfs_buf_item.h b/fs/xfs/xfs_buf_item.h
index 5a41c348bb1c..217f34af00cb 100644
--- a/fs/xfs/xfs_buf_item.h
+++ b/fs/xfs/xfs_buf_item.h
@@ -70,22 +70,21 @@ typedef struct xfs_buf_log_format_t {
70#define XFS_BLI_INODE_ALLOC_BUF 0x10 70#define XFS_BLI_INODE_ALLOC_BUF 0x10
71#define XFS_BLI_STALE_INODE 0x20 71#define XFS_BLI_STALE_INODE 0x20
72 72
73#define XFS_BLI_FLAGS \
74 { XFS_BLI_HOLD, "HOLD" }, \
75 { XFS_BLI_DIRTY, "DIRTY" }, \
76 { XFS_BLI_STALE, "STALE" }, \
77 { XFS_BLI_LOGGED, "LOGGED" }, \
78 { XFS_BLI_INODE_ALLOC_BUF, "INODE_ALLOC" }, \
79 { XFS_BLI_STALE_INODE, "STALE_INODE" }
80
73 81
74#ifdef __KERNEL__ 82#ifdef __KERNEL__
75 83
76struct xfs_buf; 84struct xfs_buf;
77struct ktrace;
78struct xfs_mount; 85struct xfs_mount;
79struct xfs_buf_log_item; 86struct xfs_buf_log_item;
80 87
81#if defined(XFS_BLI_TRACE)
82#define XFS_BLI_TRACE_SIZE 32
83
84void xfs_buf_item_trace(char *, struct xfs_buf_log_item *);
85#else
86#define xfs_buf_item_trace(id, bip)
87#endif
88
89/* 88/*
90 * This is the in core log item structure used to track information 89 * This is the in core log item structure used to track information
91 * needed to log buffers. It tracks how many times the lock has been 90 * needed to log buffers. It tracks how many times the lock has been
@@ -97,9 +96,6 @@ typedef struct xfs_buf_log_item {
97 unsigned int bli_flags; /* misc flags */ 96 unsigned int bli_flags; /* misc flags */
98 unsigned int bli_recur; /* lock recursion count */ 97 unsigned int bli_recur; /* lock recursion count */
99 atomic_t bli_refcount; /* cnt of tp refs */ 98 atomic_t bli_refcount; /* cnt of tp refs */
100#ifdef XFS_BLI_TRACE
101 struct ktrace *bli_trace; /* event trace buf */
102#endif
103#ifdef XFS_TRANS_DEBUG 99#ifdef XFS_TRANS_DEBUG
104 char *bli_orig; /* original buffer copy */ 100 char *bli_orig; /* original buffer copy */
105 char *bli_logged; /* bytes logged (bitmap) */ 101 char *bli_logged; /* bytes logged (bitmap) */
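The new XFS_BLI_FLAGS table above, like the XFS_BMAPI_FLAGS and XFS_DA_OP_FLAGS tables added elsewhere in this patch, is a list of { value, "name" } pairs intended for the tracing code's flag printer, so trace output shows symbolic names instead of raw hex. A minimal sketch of how such a table can be consumed follows; print_flags() is a hypothetical stand-in for the kernel's formatter, and the DEMO_* flags are invented.

#include <stdio.h>

#define DEMO_HOLD	0x01
#define DEMO_DIRTY	0x02
#define DEMO_STALE	0x04

struct flag_name {
	unsigned int flag;
	const char  *name;
};

/* Same shape as the XFS_BLI_FLAGS table: value/name pairs, terminated
 * by a zero entry. */
static const struct flag_name demo_flags[] = {
	{ DEMO_HOLD,  "HOLD"  },
	{ DEMO_DIRTY, "DIRTY" },
	{ DEMO_STALE, "STALE" },
	{ 0, NULL }
};

/* Hypothetical flag printer: emits "HOLD|STALE" style output for the
 * bits that are set in val. */
static void print_flags(unsigned int val, const struct flag_name *tbl)
{
	const char *sep = "";

	for (; tbl->name; tbl++) {
		if (val & tbl->flag) {
			printf("%s%s", sep, tbl->name);
			sep = "|";
		}
	}
	printf("\n");
}

int main(void)
{
	print_flags(DEMO_HOLD | DEMO_STALE, demo_flags);
	return 0;
}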
diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c
index 2847bbc1c534..c0c8869115b1 100644
--- a/fs/xfs/xfs_da_btree.c
+++ b/fs/xfs/xfs_da_btree.c
@@ -46,6 +46,7 @@
46#include "xfs_dir2_block.h" 46#include "xfs_dir2_block.h"
47#include "xfs_dir2_node.h" 47#include "xfs_dir2_node.h"
48#include "xfs_error.h" 48#include "xfs_error.h"
49#include "xfs_trace.h"
49 50
50/* 51/*
51 * xfs_da_btree.c 52 * xfs_da_btree.c
@@ -2107,7 +2108,7 @@ xfs_da_do_buf(
2107 (be32_to_cpu(free->hdr.magic) != XFS_DIR2_FREE_MAGIC), 2108 (be32_to_cpu(free->hdr.magic) != XFS_DIR2_FREE_MAGIC),
2108 mp, XFS_ERRTAG_DA_READ_BUF, 2109 mp, XFS_ERRTAG_DA_READ_BUF,
2109 XFS_RANDOM_DA_READ_BUF))) { 2110 XFS_RANDOM_DA_READ_BUF))) {
2110 xfs_buftrace("DA READ ERROR", rbp->bps[0]); 2111 trace_xfs_da_btree_corrupt(rbp->bps[0], _RET_IP_);
2111 XFS_CORRUPTION_ERROR("xfs_da_do_buf(2)", 2112 XFS_CORRUPTION_ERROR("xfs_da_do_buf(2)",
2112 XFS_ERRLEVEL_LOW, mp, info); 2113 XFS_ERRLEVEL_LOW, mp, info);
2113 error = XFS_ERROR(EFSCORRUPTED); 2114 error = XFS_ERROR(EFSCORRUPTED);
diff --git a/fs/xfs/xfs_da_btree.h b/fs/xfs/xfs_da_btree.h
index 8c536167bf75..30cd08f56a3a 100644
--- a/fs/xfs/xfs_da_btree.h
+++ b/fs/xfs/xfs_da_btree.h
@@ -125,6 +125,13 @@ typedef struct xfs_da_args {
125#define XFS_DA_OP_OKNOENT 0x0008 /* lookup/add op, ENOENT ok, else die */ 125#define XFS_DA_OP_OKNOENT 0x0008 /* lookup/add op, ENOENT ok, else die */
126#define XFS_DA_OP_CILOOKUP 0x0010 /* lookup to return CI name if found */ 126#define XFS_DA_OP_CILOOKUP 0x0010 /* lookup to return CI name if found */
127 127
128#define XFS_DA_OP_FLAGS \
129 { XFS_DA_OP_JUSTCHECK, "JUSTCHECK" }, \
130 { XFS_DA_OP_RENAME, "RENAME" }, \
131 { XFS_DA_OP_ADDNAME, "ADDNAME" }, \
132 { XFS_DA_OP_OKNOENT, "OKNOENT" }, \
133 { XFS_DA_OP_CILOOKUP, "CILOOKUP" }
134
128/* 135/*
129 * Structure to describe buffer(s) for a block. 136 * Structure to describe buffer(s) for a block.
130 * This is needed in the directory version 2 format case, when 137 * This is needed in the directory version 2 format case, when
diff --git a/fs/xfs/xfs_dfrag.c b/fs/xfs/xfs_dfrag.c
index ab89a7e94a0f..d1483a4f71b8 100644
--- a/fs/xfs/xfs_dfrag.c
+++ b/fs/xfs/xfs_dfrag.c
@@ -43,6 +43,7 @@
43#include "xfs_error.h" 43#include "xfs_error.h"
44#include "xfs_rw.h" 44#include "xfs_rw.h"
45#include "xfs_vnodeops.h" 45#include "xfs_vnodeops.h"
46#include "xfs_trace.h"
46 47
47/* 48/*
48 * Syssgi interface for swapext 49 * Syssgi interface for swapext
@@ -168,7 +169,6 @@ xfs_swap_extents(
168 } 169 }
169 170
170 if (VN_CACHED(VFS_I(tip)) != 0) { 171 if (VN_CACHED(VFS_I(tip)) != 0) {
171 xfs_inval_cached_trace(tip, 0, -1, 0, -1);
172 error = xfs_flushinval_pages(tip, 0, -1, 172 error = xfs_flushinval_pages(tip, 0, -1,
173 FI_REMAPF_LOCKED); 173 FI_REMAPF_LOCKED);
174 if (error) 174 if (error)
diff --git a/fs/xfs/xfs_dir2.c b/fs/xfs/xfs_dir2.c
index bb1d58eb3982..93634a7e90e9 100644
--- a/fs/xfs/xfs_dir2.c
+++ b/fs/xfs/xfs_dir2.c
@@ -40,9 +40,9 @@
40#include "xfs_dir2_leaf.h" 40#include "xfs_dir2_leaf.h"
41#include "xfs_dir2_block.h" 41#include "xfs_dir2_block.h"
42#include "xfs_dir2_node.h" 42#include "xfs_dir2_node.h"
43#include "xfs_dir2_trace.h"
44#include "xfs_error.h" 43#include "xfs_error.h"
45#include "xfs_vnodeops.h" 44#include "xfs_vnodeops.h"
45#include "xfs_trace.h"
46 46
47struct xfs_name xfs_name_dotdot = {"..", 2}; 47struct xfs_name xfs_name_dotdot = {"..", 2};
48 48
@@ -525,7 +525,8 @@ xfs_dir2_grow_inode(
525 xfs_trans_t *tp; 525 xfs_trans_t *tp;
526 xfs_drfsbno_t nblks; 526 xfs_drfsbno_t nblks;
527 527
528 xfs_dir2_trace_args_s("grow_inode", args, space); 528 trace_xfs_dir2_grow_inode(args, space);
529
529 dp = args->dp; 530 dp = args->dp;
530 tp = args->trans; 531 tp = args->trans;
531 mp = dp->i_mount; 532 mp = dp->i_mount;
@@ -703,7 +704,8 @@ xfs_dir2_shrink_inode(
703 xfs_mount_t *mp; 704 xfs_mount_t *mp;
704 xfs_trans_t *tp; 705 xfs_trans_t *tp;
705 706
706 xfs_dir2_trace_args_db("shrink_inode", args, db, bp); 707 trace_xfs_dir2_shrink_inode(args, db);
708
707 dp = args->dp; 709 dp = args->dp;
708 mp = dp->i_mount; 710 mp = dp->i_mount;
709 tp = args->trans; 711 tp = args->trans;
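From here on the directory code drops the xfs_dir2_trace_args*() family in favour of trace_xfs_dir2_*() hooks that take the xfs_da_args structure plus at most one extra scalar (space, db, index); the buffers previously passed along are no longer needed because everything of interest is reachable from args. The sketch below models only that calling convention; demo_args and the trace_demo_* and demo_addname names are invented.

#include <stdio.h>

/* Toy version of xfs_da_args: the one context object every directory
 * operation already carries around. */
struct demo_args {
	const char *name;
	int         namelen;
	long        inumber;
};

/* Each tracepoint takes the args struct and pulls out what it needs,
 * instead of the caller packing individual fields by hand. */
static void trace_demo_addname(const struct demo_args *args)
{
	printf("addname: %.*s -> inode %ld\n",
	       args->namelen, args->name, args->inumber);
}

static void trace_demo_removename(const struct demo_args *args)
{
	printf("removename: %.*s\n", args->namelen, args->name);
}

static int demo_addname(struct demo_args *args)
{
	trace_demo_addname(args);	/* first thing the operation does */
	/* ... the real work would follow here ... */
	return 0;
}

int main(void)
{
	struct demo_args args = { "lost+found", 10, 128L };

	demo_addname(&args);
	trace_demo_removename(&args);
	return 0;
}

Passing the whole context struct keeps every call site to a single line, which is why the converted sites in xfs_dir2_block.c, xfs_dir2_leaf.c and xfs_dir2_node.c all reduce to one trace_xfs_dir2_*() call.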
diff --git a/fs/xfs/xfs_dir2_block.c b/fs/xfs/xfs_dir2_block.c
index ab52e9e1c1ee..ddc4ecc7807f 100644
--- a/fs/xfs/xfs_dir2_block.c
+++ b/fs/xfs/xfs_dir2_block.c
@@ -36,8 +36,8 @@
36#include "xfs_dir2_data.h" 36#include "xfs_dir2_data.h"
37#include "xfs_dir2_leaf.h" 37#include "xfs_dir2_leaf.h"
38#include "xfs_dir2_block.h" 38#include "xfs_dir2_block.h"
39#include "xfs_dir2_trace.h"
40#include "xfs_error.h" 39#include "xfs_error.h"
40#include "xfs_trace.h"
41 41
42/* 42/*
43 * Local function prototypes. 43 * Local function prototypes.
@@ -94,7 +94,8 @@ xfs_dir2_block_addname(
94 __be16 *tagp; /* pointer to tag value */ 94 __be16 *tagp; /* pointer to tag value */
95 xfs_trans_t *tp; /* transaction structure */ 95 xfs_trans_t *tp; /* transaction structure */
96 96
97 xfs_dir2_trace_args("block_addname", args); 97 trace_xfs_dir2_block_addname(args);
98
98 dp = args->dp; 99 dp = args->dp;
99 tp = args->trans; 100 tp = args->trans;
100 mp = dp->i_mount; 101 mp = dp->i_mount;
@@ -590,7 +591,8 @@ xfs_dir2_block_lookup(
590 int error; /* error return value */ 591 int error; /* error return value */
591 xfs_mount_t *mp; /* filesystem mount point */ 592 xfs_mount_t *mp; /* filesystem mount point */
592 593
593 xfs_dir2_trace_args("block_lookup", args); 594 trace_xfs_dir2_block_lookup(args);
595
594 /* 596 /*
595 * Get the buffer, look up the entry. 597 * Get the buffer, look up the entry.
596 * If not found (ENOENT) then return, have no buffer. 598 * If not found (ENOENT) then return, have no buffer.
@@ -747,7 +749,8 @@ xfs_dir2_block_removename(
747 int size; /* shortform size */ 749 int size; /* shortform size */
748 xfs_trans_t *tp; /* transaction pointer */ 750 xfs_trans_t *tp; /* transaction pointer */
749 751
750 xfs_dir2_trace_args("block_removename", args); 752 trace_xfs_dir2_block_removename(args);
753
751 /* 754 /*
752 * Look up the entry in the block. Gets the buffer and entry index. 755 * Look up the entry in the block. Gets the buffer and entry index.
753 * It will always be there, the vnodeops level does a lookup first. 756 * It will always be there, the vnodeops level does a lookup first.
@@ -823,7 +826,8 @@ xfs_dir2_block_replace(
823 int error; /* error return value */ 826 int error; /* error return value */
824 xfs_mount_t *mp; /* filesystem mount point */ 827 xfs_mount_t *mp; /* filesystem mount point */
825 828
826 xfs_dir2_trace_args("block_replace", args); 829 trace_xfs_dir2_block_replace(args);
830
827 /* 831 /*
828 * Lookup the entry in the directory. Get buffer and entry index. 832 * Lookup the entry in the directory. Get buffer and entry index.
829 * This will always succeed since the caller has already done a lookup. 833 * This will always succeed since the caller has already done a lookup.
@@ -897,7 +901,8 @@ xfs_dir2_leaf_to_block(
897 int to; /* block/leaf to index */ 901 int to; /* block/leaf to index */
898 xfs_trans_t *tp; /* transaction pointer */ 902 xfs_trans_t *tp; /* transaction pointer */
899 903
900 xfs_dir2_trace_args_bb("leaf_to_block", args, lbp, dbp); 904 trace_xfs_dir2_leaf_to_block(args);
905
901 dp = args->dp; 906 dp = args->dp;
902 tp = args->trans; 907 tp = args->trans;
903 mp = dp->i_mount; 908 mp = dp->i_mount;
@@ -1044,7 +1049,8 @@ xfs_dir2_sf_to_block(
1044 xfs_trans_t *tp; /* transaction pointer */ 1049 xfs_trans_t *tp; /* transaction pointer */
1045 struct xfs_name name; 1050 struct xfs_name name;
1046 1051
1047 xfs_dir2_trace_args("sf_to_block", args); 1052 trace_xfs_dir2_sf_to_block(args);
1053
1048 dp = args->dp; 1054 dp = args->dp;
1049 tp = args->trans; 1055 tp = args->trans;
1050 mp = dp->i_mount; 1056 mp = dp->i_mount;
diff --git a/fs/xfs/xfs_dir2_leaf.c b/fs/xfs/xfs_dir2_leaf.c
index 41ad537c49e9..29f484c11b3a 100644
--- a/fs/xfs/xfs_dir2_leaf.c
+++ b/fs/xfs/xfs_dir2_leaf.c
@@ -38,8 +38,8 @@
38#include "xfs_dir2_leaf.h" 38#include "xfs_dir2_leaf.h"
39#include "xfs_dir2_block.h" 39#include "xfs_dir2_block.h"
40#include "xfs_dir2_node.h" 40#include "xfs_dir2_node.h"
41#include "xfs_dir2_trace.h"
42#include "xfs_error.h" 41#include "xfs_error.h"
42#include "xfs_trace.h"
43 43
44/* 44/*
45 * Local function declarations. 45 * Local function declarations.
@@ -80,7 +80,8 @@ xfs_dir2_block_to_leaf(
80 int needscan; /* need to rescan bestfree */ 80 int needscan; /* need to rescan bestfree */
81 xfs_trans_t *tp; /* transaction pointer */ 81 xfs_trans_t *tp; /* transaction pointer */
82 82
83 xfs_dir2_trace_args_b("block_to_leaf", args, dbp); 83 trace_xfs_dir2_block_to_leaf(args);
84
84 dp = args->dp; 85 dp = args->dp;
85 mp = dp->i_mount; 86 mp = dp->i_mount;
86 tp = args->trans; 87 tp = args->trans;
@@ -188,7 +189,8 @@ xfs_dir2_leaf_addname(
188 xfs_trans_t *tp; /* transaction pointer */ 189 xfs_trans_t *tp; /* transaction pointer */
189 xfs_dir2_db_t use_block; /* data block number */ 190 xfs_dir2_db_t use_block; /* data block number */
190 191
191 xfs_dir2_trace_args("leaf_addname", args); 192 trace_xfs_dir2_leaf_addname(args);
193
192 dp = args->dp; 194 dp = args->dp;
193 tp = args->trans; 195 tp = args->trans;
194 mp = dp->i_mount; 196 mp = dp->i_mount;
@@ -1266,7 +1268,8 @@ xfs_dir2_leaf_lookup(
1266 xfs_dir2_leaf_entry_t *lep; /* leaf entry */ 1268 xfs_dir2_leaf_entry_t *lep; /* leaf entry */
1267 xfs_trans_t *tp; /* transaction pointer */ 1269 xfs_trans_t *tp; /* transaction pointer */
1268 1270
1269 xfs_dir2_trace_args("leaf_lookup", args); 1271 trace_xfs_dir2_leaf_lookup(args);
1272
1270 /* 1273 /*
1271 * Look up name in the leaf block, returning both buffers and index. 1274 * Look up name in the leaf block, returning both buffers and index.
1272 */ 1275 */
@@ -1454,7 +1457,8 @@ xfs_dir2_leaf_removename(
1454 xfs_dir2_data_off_t oldbest; /* old value of best free */ 1457 xfs_dir2_data_off_t oldbest; /* old value of best free */
1455 xfs_trans_t *tp; /* transaction pointer */ 1458 xfs_trans_t *tp; /* transaction pointer */
1456 1459
1457 xfs_dir2_trace_args("leaf_removename", args); 1460 trace_xfs_dir2_leaf_removename(args);
1461
1458 /* 1462 /*
1459 * Lookup the leaf entry, get the leaf and data blocks read in. 1463 * Lookup the leaf entry, get the leaf and data blocks read in.
1460 */ 1464 */
@@ -1586,7 +1590,8 @@ xfs_dir2_leaf_replace(
1586 xfs_dir2_leaf_entry_t *lep; /* leaf entry */ 1590 xfs_dir2_leaf_entry_t *lep; /* leaf entry */
1587 xfs_trans_t *tp; /* transaction pointer */ 1591 xfs_trans_t *tp; /* transaction pointer */
1588 1592
1589 xfs_dir2_trace_args("leaf_replace", args); 1593 trace_xfs_dir2_leaf_replace(args);
1594
1590 /* 1595 /*
1591 * Look up the entry. 1596 * Look up the entry.
1592 */ 1597 */
@@ -1766,7 +1771,9 @@ xfs_dir2_node_to_leaf(
1766 if (state->path.active > 1) 1771 if (state->path.active > 1)
1767 return 0; 1772 return 0;
1768 args = state->args; 1773 args = state->args;
1769 xfs_dir2_trace_args("node_to_leaf", args); 1774
1775 trace_xfs_dir2_node_to_leaf(args);
1776
1770 mp = state->mp; 1777 mp = state->mp;
1771 dp = args->dp; 1778 dp = args->dp;
1772 tp = args->trans; 1779 tp = args->trans;
diff --git a/fs/xfs/xfs_dir2_node.c b/fs/xfs/xfs_dir2_node.c
index 5a81ccd1045b..ce6e355199b5 100644
--- a/fs/xfs/xfs_dir2_node.c
+++ b/fs/xfs/xfs_dir2_node.c
@@ -37,8 +37,8 @@
37#include "xfs_dir2_leaf.h" 37#include "xfs_dir2_leaf.h"
38#include "xfs_dir2_block.h" 38#include "xfs_dir2_block.h"
39#include "xfs_dir2_node.h" 39#include "xfs_dir2_node.h"
40#include "xfs_dir2_trace.h"
41#include "xfs_error.h" 40#include "xfs_error.h"
41#include "xfs_trace.h"
42 42
43/* 43/*
44 * Function declarations. 44 * Function declarations.
@@ -123,7 +123,8 @@ xfs_dir2_leaf_to_node(
123 __be16 *to; /* pointer to freespace entry */ 123 __be16 *to; /* pointer to freespace entry */
124 xfs_trans_t *tp; /* transaction pointer */ 124 xfs_trans_t *tp; /* transaction pointer */
125 125
126 xfs_dir2_trace_args_b("leaf_to_node", args, lbp); 126 trace_xfs_dir2_leaf_to_node(args);
127
127 dp = args->dp; 128 dp = args->dp;
128 mp = dp->i_mount; 129 mp = dp->i_mount;
129 tp = args->trans; 130 tp = args->trans;
@@ -196,7 +197,8 @@ xfs_dir2_leafn_add(
196 xfs_mount_t *mp; /* filesystem mount point */ 197 xfs_mount_t *mp; /* filesystem mount point */
197 xfs_trans_t *tp; /* transaction pointer */ 198 xfs_trans_t *tp; /* transaction pointer */
198 199
199 xfs_dir2_trace_args_sb("leafn_add", args, index, bp); 200 trace_xfs_dir2_leafn_add(args, index);
201
200 dp = args->dp; 202 dp = args->dp;
201 mp = dp->i_mount; 203 mp = dp->i_mount;
202 tp = args->trans; 204 tp = args->trans;
@@ -711,8 +713,8 @@ xfs_dir2_leafn_moveents(
711 int stale; /* count stale leaves copied */ 713 int stale; /* count stale leaves copied */
712 xfs_trans_t *tp; /* transaction pointer */ 714 xfs_trans_t *tp; /* transaction pointer */
713 715
714 xfs_dir2_trace_args_bibii("leafn_moveents", args, bp_s, start_s, bp_d, 716 trace_xfs_dir2_leafn_moveents(args, start_s, start_d, count);
715 start_d, count); 717
716 /* 718 /*
717 * Silently return if nothing to do. 719 * Silently return if nothing to do.
718 */ 720 */
@@ -933,7 +935,8 @@ xfs_dir2_leafn_remove(
933 int needscan; /* need to rescan data frees */ 935 int needscan; /* need to rescan data frees */
934 xfs_trans_t *tp; /* transaction pointer */ 936 xfs_trans_t *tp; /* transaction pointer */
935 937
936 xfs_dir2_trace_args_sb("leafn_remove", args, index, bp); 938 trace_xfs_dir2_leafn_remove(args, index);
939
937 dp = args->dp; 940 dp = args->dp;
938 tp = args->trans; 941 tp = args->trans;
939 mp = dp->i_mount; 942 mp = dp->i_mount;
@@ -1363,7 +1366,8 @@ xfs_dir2_node_addname(
1363 int rval; /* sub-return value */ 1366 int rval; /* sub-return value */
1364 xfs_da_state_t *state; /* btree cursor */ 1367 xfs_da_state_t *state; /* btree cursor */
1365 1368
1366 xfs_dir2_trace_args("node_addname", args); 1369 trace_xfs_dir2_node_addname(args);
1370
1367 /* 1371 /*
1368 * Allocate and initialize the state (btree cursor). 1372 * Allocate and initialize the state (btree cursor).
1369 */ 1373 */
@@ -1822,7 +1826,8 @@ xfs_dir2_node_lookup(
1822 int rval; /* operation return value */ 1826 int rval; /* operation return value */
1823 xfs_da_state_t *state; /* btree cursor */ 1827 xfs_da_state_t *state; /* btree cursor */
1824 1828
1825 xfs_dir2_trace_args("node_lookup", args); 1829 trace_xfs_dir2_node_lookup(args);
1830
1826 /* 1831 /*
1827 * Allocate and initialize the btree cursor. 1832 * Allocate and initialize the btree cursor.
1828 */ 1833 */
@@ -1875,7 +1880,8 @@ xfs_dir2_node_removename(
1875 int rval; /* operation return value */ 1880 int rval; /* operation return value */
1876 xfs_da_state_t *state; /* btree cursor */ 1881 xfs_da_state_t *state; /* btree cursor */
1877 1882
1878 xfs_dir2_trace_args("node_removename", args); 1883 trace_xfs_dir2_node_removename(args);
1884
1879 /* 1885 /*
1880 * Allocate and initialize the btree cursor. 1886 * Allocate and initialize the btree cursor.
1881 */ 1887 */
@@ -1944,7 +1950,8 @@ xfs_dir2_node_replace(
1944 int rval; /* internal return value */ 1950 int rval; /* internal return value */
1945 xfs_da_state_t *state; /* btree cursor */ 1951 xfs_da_state_t *state; /* btree cursor */
1946 1952
1947 xfs_dir2_trace_args("node_replace", args); 1953 trace_xfs_dir2_node_replace(args);
1954
1948 /* 1955 /*
1949 * Allocate and initialize the btree cursor. 1956 * Allocate and initialize the btree cursor.
1950 */ 1957 */
diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
index e89734e84646..9d4f17a69676 100644
--- a/fs/xfs/xfs_dir2_sf.c
+++ b/fs/xfs/xfs_dir2_sf.c
@@ -37,7 +37,7 @@
37#include "xfs_dir2_data.h" 37#include "xfs_dir2_data.h"
38#include "xfs_dir2_leaf.h" 38#include "xfs_dir2_leaf.h"
39#include "xfs_dir2_block.h" 39#include "xfs_dir2_block.h"
40#include "xfs_dir2_trace.h" 40#include "xfs_trace.h"
41 41
42/* 42/*
43 * Prototypes for internal functions. 43 * Prototypes for internal functions.
@@ -169,7 +169,8 @@ xfs_dir2_block_to_sf(
169 xfs_dir2_sf_t *sfp; /* shortform structure */ 169 xfs_dir2_sf_t *sfp; /* shortform structure */
170 xfs_ino_t temp; 170 xfs_ino_t temp;
171 171
172 xfs_dir2_trace_args_sb("block_to_sf", args, size, bp); 172 trace_xfs_dir2_block_to_sf(args);
173
173 dp = args->dp; 174 dp = args->dp;
174 mp = dp->i_mount; 175 mp = dp->i_mount;
175 176
@@ -281,7 +282,8 @@ xfs_dir2_sf_addname(
281 xfs_dir2_sf_t *sfp; /* shortform structure */ 282 xfs_dir2_sf_t *sfp; /* shortform structure */
282 xfs_dir2_sf_entry_t *sfep = NULL; /* shortform entry */ 283 xfs_dir2_sf_entry_t *sfep = NULL; /* shortform entry */
283 284
284 xfs_dir2_trace_args("sf_addname", args); 285 trace_xfs_dir2_sf_addname(args);
286
285 ASSERT(xfs_dir2_sf_lookup(args) == ENOENT); 287 ASSERT(xfs_dir2_sf_lookup(args) == ENOENT);
286 dp = args->dp; 288 dp = args->dp;
287 ASSERT(dp->i_df.if_flags & XFS_IFINLINE); 289 ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
@@ -654,7 +656,8 @@ xfs_dir2_sf_create(
654 xfs_dir2_sf_t *sfp; /* shortform structure */ 656 xfs_dir2_sf_t *sfp; /* shortform structure */
655 int size; /* directory size */ 657 int size; /* directory size */
656 658
657 xfs_dir2_trace_args_i("sf_create", args, pino); 659 trace_xfs_dir2_sf_create(args);
660
658 dp = args->dp; 661 dp = args->dp;
659 662
660 ASSERT(dp != NULL); 663 ASSERT(dp != NULL);
@@ -808,7 +811,8 @@ xfs_dir2_sf_lookup(
808 enum xfs_dacmp cmp; /* comparison result */ 811 enum xfs_dacmp cmp; /* comparison result */
809 xfs_dir2_sf_entry_t *ci_sfep; /* case-insens. entry */ 812 xfs_dir2_sf_entry_t *ci_sfep; /* case-insens. entry */
810 813
811 xfs_dir2_trace_args("sf_lookup", args); 814 trace_xfs_dir2_sf_lookup(args);
815
812 xfs_dir2_sf_check(args); 816 xfs_dir2_sf_check(args);
813 dp = args->dp; 817 dp = args->dp;
814 818
@@ -891,7 +895,8 @@ xfs_dir2_sf_removename(
891 xfs_dir2_sf_entry_t *sfep; /* shortform directory entry */ 895 xfs_dir2_sf_entry_t *sfep; /* shortform directory entry */
892 xfs_dir2_sf_t *sfp; /* shortform structure */ 896 xfs_dir2_sf_t *sfp; /* shortform structure */
893 897
894 xfs_dir2_trace_args("sf_removename", args); 898 trace_xfs_dir2_sf_removename(args);
899
895 dp = args->dp; 900 dp = args->dp;
896 901
897 ASSERT(dp->i_df.if_flags & XFS_IFINLINE); 902 ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
@@ -982,7 +987,8 @@ xfs_dir2_sf_replace(
982 xfs_dir2_sf_entry_t *sfep; /* shortform directory entry */ 987 xfs_dir2_sf_entry_t *sfep; /* shortform directory entry */
983 xfs_dir2_sf_t *sfp; /* shortform structure */ 988 xfs_dir2_sf_t *sfp; /* shortform structure */
984 989
985 xfs_dir2_trace_args("sf_replace", args); 990 trace_xfs_dir2_sf_replace(args);
991
986 dp = args->dp; 992 dp = args->dp;
987 993
988 ASSERT(dp->i_df.if_flags & XFS_IFINLINE); 994 ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
@@ -1125,7 +1131,8 @@ xfs_dir2_sf_toino4(
1125 xfs_dir2_sf_entry_t *sfep; /* new sf entry */ 1131 xfs_dir2_sf_entry_t *sfep; /* new sf entry */
1126 xfs_dir2_sf_t *sfp; /* new sf directory */ 1132 xfs_dir2_sf_t *sfp; /* new sf directory */
1127 1133
1128 xfs_dir2_trace_args("sf_toino4", args); 1134 trace_xfs_dir2_sf_toino4(args);
1135
1129 dp = args->dp; 1136 dp = args->dp;
1130 1137
1131 /* 1138 /*
@@ -1202,7 +1209,8 @@ xfs_dir2_sf_toino8(
1202 xfs_dir2_sf_entry_t *sfep; /* new sf entry */ 1209 xfs_dir2_sf_entry_t *sfep; /* new sf entry */
1203 xfs_dir2_sf_t *sfp; /* new sf directory */ 1210 xfs_dir2_sf_t *sfp; /* new sf directory */
1204 1211
1205 xfs_dir2_trace_args("sf_toino8", args); 1212 trace_xfs_dir2_sf_toino8(args);
1213
1206 dp = args->dp; 1214 dp = args->dp;
1207 1215
1208 /* 1216 /*
diff --git a/fs/xfs/xfs_dir2_trace.c b/fs/xfs/xfs_dir2_trace.c
deleted file mode 100644
index 6cc7c0c681ac..000000000000
--- a/fs/xfs/xfs_dir2_trace.c
+++ /dev/null
@@ -1,216 +0,0 @@
1/*
2 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18#include "xfs.h"
19#include "xfs_fs.h"
20#include "xfs_types.h"
21#include "xfs_inum.h"
22#include "xfs_dir2.h"
23#include "xfs_da_btree.h"
24#include "xfs_bmap_btree.h"
25#include "xfs_dir2_sf.h"
26#include "xfs_attr_sf.h"
27#include "xfs_dinode.h"
28#include "xfs_inode.h"
29#include "xfs_dir2_trace.h"
30
31#ifdef XFS_DIR2_TRACE
32ktrace_t *xfs_dir2_trace_buf;
33
34/*
35 * Enter something in the trace buffers.
36 */
37static void
38xfs_dir2_trace_enter(
39 xfs_inode_t *dp,
40 int type,
41 char *where,
42 char *name,
43 int namelen,
44 void *a0,
45 void *a1,
46 void *a2,
47 void *a3,
48 void *a4,
49 void *a5,
50 void *a6,
51 void *a7)
52{
53 void *n[5];
54
55 ASSERT(xfs_dir2_trace_buf);
56 ASSERT(dp->i_dir_trace);
57 if (name)
58 memcpy(n, name, min((int)sizeof(n), namelen));
59 else
60 memset((char *)n, 0, sizeof(n));
61 ktrace_enter(xfs_dir2_trace_buf,
62 (void *)(long)type, (void *)where,
63 (void *)a0, (void *)a1, (void *)a2, (void *)a3,
64 (void *)a4, (void *)a5, (void *)a6, (void *)a7,
65 (void *)(long)namelen,
66 (void *)n[0], (void *)n[1], (void *)n[2],
67 (void *)n[3], (void *)n[4]);
68 ktrace_enter(dp->i_dir_trace,
69 (void *)(long)type, (void *)where,
70 (void *)a0, (void *)a1, (void *)a2, (void *)a3,
71 (void *)a4, (void *)a5, (void *)a6, (void *)a7,
72 (void *)(long)namelen,
73 (void *)n[0], (void *)n[1], (void *)n[2],
74 (void *)n[3], (void *)n[4]);
75}
76
77void
78xfs_dir2_trace_args(
79 char *where,
80 xfs_da_args_t *args)
81{
82 xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS, where,
83 (char *)args->name, (int)args->namelen,
84 (void *)(unsigned long)args->hashval,
85 (void *)((unsigned long)(args->inumber >> 32)),
86 (void *)((unsigned long)(args->inumber & 0xFFFFFFFF)),
87 (void *)args->dp, (void *)args->trans,
88 (void *)(unsigned long)(args->op_flags & XFS_DA_OP_JUSTCHECK),
89 NULL, NULL);
90}
91
92void
93xfs_dir2_trace_args_b(
94 char *where,
95 xfs_da_args_t *args,
96 xfs_dabuf_t *bp)
97{
98 xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS_B, where,
99 (char *)args->name, (int)args->namelen,
100 (void *)(unsigned long)args->hashval,
101 (void *)((unsigned long)(args->inumber >> 32)),
102 (void *)((unsigned long)(args->inumber & 0xFFFFFFFF)),
103 (void *)args->dp, (void *)args->trans,
104 (void *)(unsigned long)(args->op_flags & XFS_DA_OP_JUSTCHECK),
105 (void *)(bp ? bp->bps[0] : NULL), NULL);
106}
107
108void
109xfs_dir2_trace_args_bb(
110 char *where,
111 xfs_da_args_t *args,
112 xfs_dabuf_t *lbp,
113 xfs_dabuf_t *dbp)
114{
115 xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS_BB, where,
116 (char *)args->name, (int)args->namelen,
117 (void *)(unsigned long)args->hashval,
118 (void *)((unsigned long)(args->inumber >> 32)),
119 (void *)((unsigned long)(args->inumber & 0xFFFFFFFF)),
120 (void *)args->dp, (void *)args->trans,
121 (void *)(unsigned long)(args->op_flags & XFS_DA_OP_JUSTCHECK),
122 (void *)(lbp ? lbp->bps[0] : NULL),
123 (void *)(dbp ? dbp->bps[0] : NULL));
124}
125
126void
127xfs_dir2_trace_args_bibii(
128 char *where,
129 xfs_da_args_t *args,
130 xfs_dabuf_t *bs,
131 int ss,
132 xfs_dabuf_t *bd,
133 int sd,
134 int c)
135{
136 xfs_buf_t *bpbs = bs ? bs->bps[0] : NULL;
137 xfs_buf_t *bpbd = bd ? bd->bps[0] : NULL;
138
139 xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS_BIBII, where,
140 (char *)args->name, (int)args->namelen,
141 (void *)args->dp, (void *)args->trans,
142 (void *)bpbs, (void *)(long)ss, (void *)bpbd, (void *)(long)sd,
143 (void *)(long)c, NULL);
144}
145
146void
147xfs_dir2_trace_args_db(
148 char *where,
149 xfs_da_args_t *args,
150 xfs_dir2_db_t db,
151 xfs_dabuf_t *bp)
152{
153 xfs_buf_t *dbp = bp ? bp->bps[0] : NULL;
154
155 xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS_DB, where,
156 (char *)args->name, (int)args->namelen,
157 (void *)(unsigned long)args->hashval,
158 (void *)((unsigned long)(args->inumber >> 32)),
159 (void *)((unsigned long)(args->inumber & 0xFFFFFFFF)),
160 (void *)args->dp, (void *)args->trans,
161 (void *)(unsigned long)(args->op_flags & XFS_DA_OP_JUSTCHECK),
162 (void *)(long)db, (void *)dbp);
163}
164
165void
166xfs_dir2_trace_args_i(
167 char *where,
168 xfs_da_args_t *args,
169 xfs_ino_t i)
170{
171 xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS_I, where,
172 (char *)args->name, (int)args->namelen,
173 (void *)(unsigned long)args->hashval,
174 (void *)((unsigned long)(args->inumber >> 32)),
175 (void *)((unsigned long)(args->inumber & 0xFFFFFFFF)),
176 (void *)args->dp, (void *)args->trans,
177 (void *)(unsigned long)(args->op_flags & XFS_DA_OP_JUSTCHECK),
178 (void *)((unsigned long)(i >> 32)),
179 (void *)((unsigned long)(i & 0xFFFFFFFF)));
180}
181
182void
183xfs_dir2_trace_args_s(
184 char *where,
185 xfs_da_args_t *args,
186 int s)
187{
188 xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS_S, where,
189 (char *)args->name, (int)args->namelen,
190 (void *)(unsigned long)args->hashval,
191 (void *)((unsigned long)(args->inumber >> 32)),
192 (void *)((unsigned long)(args->inumber & 0xFFFFFFFF)),
193 (void *)args->dp, (void *)args->trans,
194 (void *)(unsigned long)(args->op_flags & XFS_DA_OP_JUSTCHECK),
195 (void *)(long)s, NULL);
196}
197
198void
199xfs_dir2_trace_args_sb(
200 char *where,
201 xfs_da_args_t *args,
202 int s,
203 xfs_dabuf_t *bp)
204{
205 xfs_buf_t *dbp = bp ? bp->bps[0] : NULL;
206
207 xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS_SB, where,
208 (char *)args->name, (int)args->namelen,
209 (void *)(unsigned long)args->hashval,
210 (void *)((unsigned long)(args->inumber >> 32)),
211 (void *)((unsigned long)(args->inumber & 0xFFFFFFFF)),
212 (void *)args->dp, (void *)args->trans,
213 (void *)(unsigned long)(args->op_flags & XFS_DA_OP_JUSTCHECK),
214 (void *)(long)s, (void *)dbp);
215}
216#endif /* XFS_DIR2_TRACE */
diff --git a/fs/xfs/xfs_dir2_trace.h b/fs/xfs/xfs_dir2_trace.h
deleted file mode 100644
index ca3c754f4822..000000000000
--- a/fs/xfs/xfs_dir2_trace.h
+++ /dev/null
@@ -1,72 +0,0 @@
1/*
2 * Copyright (c) 2000,2005 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18#ifndef __XFS_DIR2_TRACE_H__
19#define __XFS_DIR2_TRACE_H__
20
21/*
22 * Tracing for xfs v2 directories.
23 */
24
25#if defined(XFS_DIR2_TRACE)
26
27struct ktrace;
28struct xfs_dabuf;
29struct xfs_da_args;
30
31#define XFS_DIR2_GTRACE_SIZE 4096 /* global buffer */
32#define XFS_DIR2_KTRACE_SIZE 32 /* per-inode buffer */
33extern struct ktrace *xfs_dir2_trace_buf;
34
35#define XFS_DIR2_KTRACE_ARGS 1 /* args only */
36#define XFS_DIR2_KTRACE_ARGS_B 2 /* args + buffer */
37#define XFS_DIR2_KTRACE_ARGS_BB 3 /* args + 2 buffers */
38#define XFS_DIR2_KTRACE_ARGS_DB 4 /* args, db, buffer */
39#define XFS_DIR2_KTRACE_ARGS_I 5 /* args, inum */
40#define XFS_DIR2_KTRACE_ARGS_S 6 /* args, int */
41#define XFS_DIR2_KTRACE_ARGS_SB 7 /* args, int, buffer */
42#define XFS_DIR2_KTRACE_ARGS_BIBII 8 /* args, buf/int/buf/int/int */
43
44void xfs_dir2_trace_args(char *where, struct xfs_da_args *args);
45void xfs_dir2_trace_args_b(char *where, struct xfs_da_args *args,
46 struct xfs_dabuf *bp);
47void xfs_dir2_trace_args_bb(char *where, struct xfs_da_args *args,
48 struct xfs_dabuf *lbp, struct xfs_dabuf *dbp);
49void xfs_dir2_trace_args_bibii(char *where, struct xfs_da_args *args,
50 struct xfs_dabuf *bs, int ss,
51 struct xfs_dabuf *bd, int sd, int c);
52void xfs_dir2_trace_args_db(char *where, struct xfs_da_args *args,
53 xfs_dir2_db_t db, struct xfs_dabuf *bp);
54void xfs_dir2_trace_args_i(char *where, struct xfs_da_args *args, xfs_ino_t i);
55void xfs_dir2_trace_args_s(char *where, struct xfs_da_args *args, int s);
56void xfs_dir2_trace_args_sb(char *where, struct xfs_da_args *args, int s,
57 struct xfs_dabuf *bp);
58
59#else /* XFS_DIR2_TRACE */
60
61#define xfs_dir2_trace_args(where, args)
62#define xfs_dir2_trace_args_b(where, args, bp)
63#define xfs_dir2_trace_args_bb(where, args, lbp, dbp)
64#define xfs_dir2_trace_args_bibii(where, args, bs, ss, bd, sd, c)
65#define xfs_dir2_trace_args_db(where, args, db, bp)
66#define xfs_dir2_trace_args_i(where, args, i)
67#define xfs_dir2_trace_args_s(where, args, s)
68#define xfs_dir2_trace_args_sb(where, args, s, bp)
69
70#endif /* XFS_DIR2_TRACE */
71
72#endif /* __XFS_DIR2_TRACE_H__ */
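With xfs_dir2_trace.[ch] deleted above, every xfs_dir2_trace_args*() call becomes a dedicated trace_xfs_dir2_*() tracepoint that takes only the xfs_da_args pointer; the buffer pointers and extra integers the old helpers packed into ktrace slots are either recorded as typed event fields or dropped. The event definitions themselves live in linux-2.6/xfs_trace.h, which this patch adds but which is not shown in this hunk. As a rough illustration only -- the real definitions may record different fields -- one such event could be declared with the kernel's TRACE_EVENT() macro along these lines:

/*
 * Illustrative sketch only; not the actual linux-2.6/xfs_trace.h
 * contents. Records a few of the xfs_da_args fields the old
 * xfs_dir2_trace_args() used to stuff into ktrace slots.
 */
TRACE_EVENT(xfs_dir2_sf_addname,
	TP_PROTO(struct xfs_da_args *args),
	TP_ARGS(args),
	TP_STRUCT__entry(
		__field(xfs_ino_t, ino)		/* directory inode */
		__field(xfs_dahash_t, hashval)	/* hash of entry name */
		__field(int, namelen)
	),
	TP_fast_assign(
		__entry->ino = args->dp->i_ino;
		__entry->hashval = args->hashval;
		__entry->namelen = args->namelen;
	),
	TP_printk("dir ino 0x%llx hashval 0x%x namelen %d",
		  (unsigned long long)__entry->ino,
		  (unsigned int)__entry->hashval,
		  __entry->namelen)
);

The existing call sites, e.g. trace_xfs_dir2_sf_addname(args), then fire this event; when tracing is disabled the call compiles down to a cheap conditional rather than the unconditional ktrace_enter() of the removed code.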
diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c
index edf8bdf4141f..a631e1451abb 100644
--- a/fs/xfs/xfs_filestream.c
+++ b/fs/xfs/xfs_filestream.c
@@ -34,6 +34,7 @@
34#include "xfs_utils.h" 34#include "xfs_utils.h"
35#include "xfs_mru_cache.h" 35#include "xfs_mru_cache.h"
36#include "xfs_filestream.h" 36#include "xfs_filestream.h"
37#include "xfs_trace.h"
37 38
38#ifdef XFS_FILESTREAMS_TRACE 39#ifdef XFS_FILESTREAMS_TRACE
39 40
@@ -394,9 +395,7 @@ xfs_filestream_init(void)
394 item_zone = kmem_zone_init(sizeof(fstrm_item_t), "fstrm_item"); 395 item_zone = kmem_zone_init(sizeof(fstrm_item_t), "fstrm_item");
395 if (!item_zone) 396 if (!item_zone)
396 return -ENOMEM; 397 return -ENOMEM;
397#ifdef XFS_FILESTREAMS_TRACE 398
398 xfs_filestreams_trace_buf = ktrace_alloc(XFS_FSTRM_KTRACE_SIZE, KM_NOFS);
399#endif
400 return 0; 399 return 0;
401} 400}
402 401
@@ -407,9 +406,6 @@ xfs_filestream_init(void)
407void 406void
408xfs_filestream_uninit(void) 407xfs_filestream_uninit(void)
409{ 408{
410#ifdef XFS_FILESTREAMS_TRACE
411 ktrace_free(xfs_filestreams_trace_buf);
412#endif
413 kmem_zone_destroy(item_zone); 409 kmem_zone_destroy(item_zone);
414} 410}
415 411
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index 36079aa91344..a13919a6a364 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -45,6 +45,7 @@
45#include "xfs_rtalloc.h" 45#include "xfs_rtalloc.h"
46#include "xfs_rw.h" 46#include "xfs_rw.h"
47#include "xfs_filestream.h" 47#include "xfs_filestream.h"
48#include "xfs_trace.h"
48 49
49/* 50/*
50 * File system operations 51 * File system operations
@@ -347,6 +348,7 @@ xfs_growfs_data_private(
347 be32_add_cpu(&agf->agf_length, new); 348 be32_add_cpu(&agf->agf_length, new);
348 ASSERT(be32_to_cpu(agf->agf_length) == 349 ASSERT(be32_to_cpu(agf->agf_length) ==
349 be32_to_cpu(agi->agi_length)); 350 be32_to_cpu(agi->agi_length));
351
350 xfs_alloc_log_agf(tp, bp, XFS_AGF_LENGTH); 352 xfs_alloc_log_agf(tp, bp, XFS_AGF_LENGTH);
351 /* 353 /*
352 * Free the new space. 354 * Free the new space.
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index 073bb4a26b19..f5c904a10c11 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -43,7 +43,7 @@
43#include "xfs_inode_item.h" 43#include "xfs_inode_item.h"
44#include "xfs_bmap.h" 44#include "xfs_bmap.h"
45#include "xfs_btree_trace.h" 45#include "xfs_btree_trace.h"
46#include "xfs_dir2_trace.h" 46#include "xfs_trace.h"
47 47
48 48
49/* 49/*
@@ -90,28 +90,6 @@ xfs_inode_alloc(
90 ip->i_size = 0; 90 ip->i_size = 0;
91 ip->i_new_size = 0; 91 ip->i_new_size = 0;
92 92
93 /*
94 * Initialize inode's trace buffers.
95 */
96#ifdef XFS_INODE_TRACE
97 ip->i_trace = ktrace_alloc(INODE_TRACE_SIZE, KM_NOFS);
98#endif
99#ifdef XFS_BMAP_TRACE
100 ip->i_xtrace = ktrace_alloc(XFS_BMAP_KTRACE_SIZE, KM_NOFS);
101#endif
102#ifdef XFS_BTREE_TRACE
103 ip->i_btrace = ktrace_alloc(XFS_BMBT_KTRACE_SIZE, KM_NOFS);
104#endif
105#ifdef XFS_RW_TRACE
106 ip->i_rwtrace = ktrace_alloc(XFS_RW_KTRACE_SIZE, KM_NOFS);
107#endif
108#ifdef XFS_ILOCK_TRACE
109 ip->i_lock_trace = ktrace_alloc(XFS_ILOCK_KTRACE_SIZE, KM_NOFS);
110#endif
111#ifdef XFS_DIR2_TRACE
112 ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_NOFS);
113#endif
114
115 /* prevent anyone from using this yet */ 93 /* prevent anyone from using this yet */
116 VFS_I(ip)->i_state = I_NEW|I_LOCK; 94 VFS_I(ip)->i_state = I_NEW|I_LOCK;
117 95
@@ -133,25 +111,6 @@ xfs_inode_free(
133 if (ip->i_afp) 111 if (ip->i_afp)
134 xfs_idestroy_fork(ip, XFS_ATTR_FORK); 112 xfs_idestroy_fork(ip, XFS_ATTR_FORK);
135 113
136#ifdef XFS_INODE_TRACE
137 ktrace_free(ip->i_trace);
138#endif
139#ifdef XFS_BMAP_TRACE
140 ktrace_free(ip->i_xtrace);
141#endif
142#ifdef XFS_BTREE_TRACE
143 ktrace_free(ip->i_btrace);
144#endif
145#ifdef XFS_RW_TRACE
146 ktrace_free(ip->i_rwtrace);
147#endif
148#ifdef XFS_ILOCK_TRACE
149 ktrace_free(ip->i_lock_trace);
150#endif
151#ifdef XFS_DIR2_TRACE
152 ktrace_free(ip->i_dir_trace);
153#endif
154
155 if (ip->i_itemp) { 114 if (ip->i_itemp) {
156 /* 115 /*
157 * Only if we are shutting down the fs will we see an 116 * Only if we are shutting down the fs will we see an
@@ -210,6 +169,7 @@ xfs_iget_cache_hit(
210 * instead of polling for it. 169 * instead of polling for it.
211 */ 170 */
212 if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) { 171 if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
172 trace_xfs_iget_skip(ip);
213 XFS_STATS_INC(xs_ig_frecycle); 173 XFS_STATS_INC(xs_ig_frecycle);
214 error = EAGAIN; 174 error = EAGAIN;
215 goto out_error; 175 goto out_error;
@@ -228,7 +188,7 @@ xfs_iget_cache_hit(
228 * Need to carefully get it back into useable state. 188 * Need to carefully get it back into useable state.
229 */ 189 */
230 if (ip->i_flags & XFS_IRECLAIMABLE) { 190 if (ip->i_flags & XFS_IRECLAIMABLE) {
231 xfs_itrace_exit_tag(ip, "xfs_iget.alloc"); 191 trace_xfs_iget_reclaim(ip);
232 192
233 /* 193 /*
234 * We need to set XFS_INEW atomically with clearing the 194 * We need to set XFS_INEW atomically with clearing the
@@ -254,6 +214,7 @@ xfs_iget_cache_hit(
254 ip->i_flags &= ~XFS_INEW; 214 ip->i_flags &= ~XFS_INEW;
255 ip->i_flags |= XFS_IRECLAIMABLE; 215 ip->i_flags |= XFS_IRECLAIMABLE;
256 __xfs_inode_set_reclaim_tag(pag, ip); 216 __xfs_inode_set_reclaim_tag(pag, ip);
217 trace_xfs_iget_reclaim(ip);
257 goto out_error; 218 goto out_error;
258 } 219 }
259 inode->i_state = I_LOCK|I_NEW; 220 inode->i_state = I_LOCK|I_NEW;
@@ -273,8 +234,9 @@ xfs_iget_cache_hit(
273 xfs_ilock(ip, lock_flags); 234 xfs_ilock(ip, lock_flags);
274 235
275 xfs_iflags_clear(ip, XFS_ISTALE); 236 xfs_iflags_clear(ip, XFS_ISTALE);
276 xfs_itrace_exit_tag(ip, "xfs_iget.found");
277 XFS_STATS_INC(xs_ig_found); 237 XFS_STATS_INC(xs_ig_found);
238
239 trace_xfs_iget_found(ip);
278 return 0; 240 return 0;
279 241
280out_error: 242out_error:
@@ -308,7 +270,7 @@ xfs_iget_cache_miss(
308 if (error) 270 if (error)
309 goto out_destroy; 271 goto out_destroy;
310 272
311 xfs_itrace_exit_tag(ip, "xfs_iget.alloc"); 273 xfs_itrace_entry(ip);
312 274
313 if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) { 275 if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
314 error = ENOENT; 276 error = ENOENT;
@@ -353,6 +315,8 @@ xfs_iget_cache_miss(
353 315
354 write_unlock(&pag->pag_ici_lock); 316 write_unlock(&pag->pag_ici_lock);
355 radix_tree_preload_end(); 317 radix_tree_preload_end();
318
319 trace_xfs_iget_alloc(ip);
356 *ipp = ip; 320 *ipp = ip;
357 return 0; 321 return 0;
358 322
@@ -639,7 +603,7 @@ xfs_ilock(
639 else if (lock_flags & XFS_ILOCK_SHARED) 603 else if (lock_flags & XFS_ILOCK_SHARED)
640 mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags)); 604 mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
641 605
642 xfs_ilock_trace(ip, 1, lock_flags, (inst_t *)__return_address); 606 trace_xfs_ilock(ip, lock_flags, _RET_IP_);
643} 607}
644 608
645/* 609/*
@@ -684,7 +648,7 @@ xfs_ilock_nowait(
684 if (!mrtryaccess(&ip->i_lock)) 648 if (!mrtryaccess(&ip->i_lock))
685 goto out_undo_iolock; 649 goto out_undo_iolock;
686 } 650 }
687 xfs_ilock_trace(ip, 2, lock_flags, (inst_t *)__return_address); 651 trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);
688 return 1; 652 return 1;
689 653
690 out_undo_iolock: 654 out_undo_iolock:
@@ -746,7 +710,7 @@ xfs_iunlock(
746 xfs_trans_unlocked_item(ip->i_itemp->ili_item.li_ailp, 710 xfs_trans_unlocked_item(ip->i_itemp->ili_item.li_ailp,
747 (xfs_log_item_t*)(ip->i_itemp)); 711 (xfs_log_item_t*)(ip->i_itemp));
748 } 712 }
749 xfs_ilock_trace(ip, 3, lock_flags, (inst_t *)__return_address); 713 trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
750} 714}
751 715
752/* 716/*
@@ -765,6 +729,8 @@ xfs_ilock_demote(
765 mrdemote(&ip->i_lock); 729 mrdemote(&ip->i_lock);
766 if (lock_flags & XFS_IOLOCK_EXCL) 730 if (lock_flags & XFS_IOLOCK_EXCL)
767 mrdemote(&ip->i_iolock); 731 mrdemote(&ip->i_iolock);
732
733 trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
768} 734}
769 735
770#ifdef DEBUG 736#ifdef DEBUG
@@ -795,52 +761,3 @@ xfs_isilocked(
795 return 1; 761 return 1;
796} 762}
797#endif 763#endif
798
799#ifdef XFS_INODE_TRACE
800
801#define KTRACE_ENTER(ip, vk, s, line, ra) \
802 ktrace_enter((ip)->i_trace, \
803/* 0 */ (void *)(__psint_t)(vk), \
804/* 1 */ (void *)(s), \
805/* 2 */ (void *)(__psint_t) line, \
806/* 3 */ (void *)(__psint_t)atomic_read(&VFS_I(ip)->i_count), \
807/* 4 */ (void *)(ra), \
808/* 5 */ NULL, \
809/* 6 */ (void *)(__psint_t)current_cpu(), \
810/* 7 */ (void *)(__psint_t)current_pid(), \
811/* 8 */ (void *)__return_address, \
812/* 9 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL)
813
814/*
815 * Vnode tracing code.
816 */
817void
818_xfs_itrace_entry(xfs_inode_t *ip, const char *func, inst_t *ra)
819{
820 KTRACE_ENTER(ip, INODE_KTRACE_ENTRY, func, 0, ra);
821}
822
823void
824_xfs_itrace_exit(xfs_inode_t *ip, const char *func, inst_t *ra)
825{
826 KTRACE_ENTER(ip, INODE_KTRACE_EXIT, func, 0, ra);
827}
828
829void
830xfs_itrace_hold(xfs_inode_t *ip, char *file, int line, inst_t *ra)
831{
832 KTRACE_ENTER(ip, INODE_KTRACE_HOLD, file, line, ra);
833}
834
835void
836_xfs_itrace_ref(xfs_inode_t *ip, char *file, int line, inst_t *ra)
837{
838 KTRACE_ENTER(ip, INODE_KTRACE_REF, file, line, ra);
839}
840
841void
842xfs_itrace_rele(xfs_inode_t *ip, char *file, int line, inst_t *ra)
843{
844 KTRACE_ENTER(ip, INODE_KTRACE_RELE, file, line, ra);
845}
846#endif /* XFS_INODE_TRACE */
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index b92a4fa2a0a1..ce278b3ae7fc 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -47,10 +47,10 @@
47#include "xfs_rw.h" 47#include "xfs_rw.h"
48#include "xfs_error.h" 48#include "xfs_error.h"
49#include "xfs_utils.h" 49#include "xfs_utils.h"
50#include "xfs_dir2_trace.h"
51#include "xfs_quota.h" 50#include "xfs_quota.h"
52#include "xfs_filestream.h" 51#include "xfs_filestream.h"
53#include "xfs_vnodeops.h" 52#include "xfs_vnodeops.h"
53#include "xfs_trace.h"
54 54
55kmem_zone_t *xfs_ifork_zone; 55kmem_zone_t *xfs_ifork_zone;
56kmem_zone_t *xfs_inode_zone; 56kmem_zone_t *xfs_inode_zone;
@@ -1291,42 +1291,6 @@ xfs_file_last_byte(
1291 return last_byte; 1291 return last_byte;
1292} 1292}
1293 1293
1294#if defined(XFS_RW_TRACE)
1295STATIC void
1296xfs_itrunc_trace(
1297 int tag,
1298 xfs_inode_t *ip,
1299 int flag,
1300 xfs_fsize_t new_size,
1301 xfs_off_t toss_start,
1302 xfs_off_t toss_finish)
1303{
1304 if (ip->i_rwtrace == NULL) {
1305 return;
1306 }
1307
1308 ktrace_enter(ip->i_rwtrace,
1309 (void*)((long)tag),
1310 (void*)ip,
1311 (void*)(unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff),
1312 (void*)(unsigned long)(ip->i_d.di_size & 0xffffffff),
1313 (void*)((long)flag),
1314 (void*)(unsigned long)((new_size >> 32) & 0xffffffff),
1315 (void*)(unsigned long)(new_size & 0xffffffff),
1316 (void*)(unsigned long)((toss_start >> 32) & 0xffffffff),
1317 (void*)(unsigned long)(toss_start & 0xffffffff),
1318 (void*)(unsigned long)((toss_finish >> 32) & 0xffffffff),
1319 (void*)(unsigned long)(toss_finish & 0xffffffff),
1320 (void*)(unsigned long)current_cpu(),
1321 (void*)(unsigned long)current_pid(),
1322 (void*)NULL,
1323 (void*)NULL,
1324 (void*)NULL);
1325}
1326#else
1327#define xfs_itrunc_trace(tag, ip, flag, new_size, toss_start, toss_finish)
1328#endif
1329
1330/* 1294/*
1331 * Start the truncation of the file to new_size. The new size 1295 * Start the truncation of the file to new_size. The new size
1332 * must be smaller than the current size. This routine will 1296 * must be smaller than the current size. This routine will
@@ -1409,8 +1373,7 @@ xfs_itruncate_start(
1409 return 0; 1373 return 0;
1410 } 1374 }
1411 last_byte = xfs_file_last_byte(ip); 1375 last_byte = xfs_file_last_byte(ip);
1412 xfs_itrunc_trace(XFS_ITRUNC_START, ip, flags, new_size, toss_start, 1376 trace_xfs_itruncate_start(ip, flags, new_size, toss_start, last_byte);
1413 last_byte);
1414 if (last_byte > toss_start) { 1377 if (last_byte > toss_start) {
1415 if (flags & XFS_ITRUNC_DEFINITE) { 1378 if (flags & XFS_ITRUNC_DEFINITE) {
1416 xfs_tosspages(ip, toss_start, 1379 xfs_tosspages(ip, toss_start,
@@ -1514,7 +1477,8 @@ xfs_itruncate_finish(
1514 new_size = 0LL; 1477 new_size = 0LL;
1515 } 1478 }
1516 first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size); 1479 first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
1517 xfs_itrunc_trace(XFS_ITRUNC_FINISH1, ip, 0, new_size, 0, 0); 1480 trace_xfs_itruncate_finish_start(ip, new_size);
1481
1518 /* 1482 /*
1519 * The first thing we do is set the size to new_size permanently 1483 * The first thing we do is set the size to new_size permanently
1520 * on disk. This way we don't have to worry about anyone ever 1484 * on disk. This way we don't have to worry about anyone ever
@@ -1731,7 +1695,7 @@ xfs_itruncate_finish(
1731 ASSERT((new_size != 0) || 1695 ASSERT((new_size != 0) ||
1732 (fork == XFS_ATTR_FORK) || 1696 (fork == XFS_ATTR_FORK) ||
1733 (ip->i_d.di_nextents == 0)); 1697 (ip->i_d.di_nextents == 0));
1734 xfs_itrunc_trace(XFS_ITRUNC_FINISH2, ip, 0, new_size, 0, 0); 1698 trace_xfs_itruncate_finish_end(ip, new_size);
1735 return 0; 1699 return 0;
1736} 1700}
1737 1701
@@ -3252,23 +3216,6 @@ corrupt_out:
3252 return XFS_ERROR(EFSCORRUPTED); 3216 return XFS_ERROR(EFSCORRUPTED);
3253} 3217}
3254 3218
3255
3256
3257#ifdef XFS_ILOCK_TRACE
3258void
3259xfs_ilock_trace(xfs_inode_t *ip, int lock, unsigned int lockflags, inst_t *ra)
3260{
3261 ktrace_enter(ip->i_lock_trace,
3262 (void *)ip,
3263 (void *)(unsigned long)lock, /* 1 = LOCK, 3=UNLOCK, etc */
3264 (void *)(unsigned long)lockflags, /* XFS_ILOCK_EXCL etc */
3265 (void *)ra, /* caller of ilock */
3266 (void *)(unsigned long)current_cpu(),
3267 (void *)(unsigned long)current_pid(),
3268 NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL);
3269}
3270#endif
3271
3272/* 3219/*
3273 * Return a pointer to the extent record at file index idx. 3220 * Return a pointer to the extent record at file index idx.
3274 */ 3221 */
@@ -3300,13 +3247,17 @@ xfs_iext_get_ext(
3300 */ 3247 */
3301void 3248void
3302xfs_iext_insert( 3249xfs_iext_insert(
3303 xfs_ifork_t *ifp, /* inode fork pointer */ 3250 xfs_inode_t *ip, /* incore inode pointer */
3304 xfs_extnum_t idx, /* starting index of new items */ 3251 xfs_extnum_t idx, /* starting index of new items */
3305 xfs_extnum_t count, /* number of inserted items */ 3252 xfs_extnum_t count, /* number of inserted items */
3306 xfs_bmbt_irec_t *new) /* items to insert */ 3253 xfs_bmbt_irec_t *new, /* items to insert */
3254 int state) /* type of extent conversion */
3307{ 3255{
3256 xfs_ifork_t *ifp = (state & BMAP_ATTRFORK) ? ip->i_afp : &ip->i_df;
3308 xfs_extnum_t i; /* extent record index */ 3257 xfs_extnum_t i; /* extent record index */
3309 3258
3259 trace_xfs_iext_insert(ip, idx, new, state, _RET_IP_);
3260
3310 ASSERT(ifp->if_flags & XFS_IFEXTENTS); 3261 ASSERT(ifp->if_flags & XFS_IFEXTENTS);
3311 xfs_iext_add(ifp, idx, count); 3262 xfs_iext_add(ifp, idx, count);
3312 for (i = idx; i < idx + count; i++, new++) 3263 for (i = idx; i < idx + count; i++, new++)
@@ -3549,13 +3500,17 @@ xfs_iext_add_indirect_multi(
3549 */ 3500 */
3550void 3501void
3551xfs_iext_remove( 3502xfs_iext_remove(
3552 xfs_ifork_t *ifp, /* inode fork pointer */ 3503 xfs_inode_t *ip, /* incore inode pointer */
3553 xfs_extnum_t idx, /* index to begin removing exts */ 3504 xfs_extnum_t idx, /* index to begin removing exts */
3554 int ext_diff) /* number of extents to remove */ 3505 int ext_diff, /* number of extents to remove */
3506 int state) /* type of extent conversion */
3555{ 3507{
3508 xfs_ifork_t *ifp = (state & BMAP_ATTRFORK) ? ip->i_afp : &ip->i_df;
3556 xfs_extnum_t nextents; /* number of extents in file */ 3509 xfs_extnum_t nextents; /* number of extents in file */
3557 int new_size; /* size of extents after removal */ 3510 int new_size; /* size of extents after removal */
3558 3511
3512 trace_xfs_iext_remove(ip, idx, state, _RET_IP_);
3513
3559 ASSERT(ext_diff > 0); 3514 ASSERT(ext_diff > 0);
3560 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 3515 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3561 new_size = (nextents - ext_diff) * sizeof(xfs_bmbt_rec_t); 3516 new_size = (nextents - ext_diff) * sizeof(xfs_bmbt_rec_t);
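Note the signature change that accompanies the extent tracing: xfs_iext_insert() and xfs_iext_remove() now take the inode and a state bitmask instead of a bare fork pointer, and derive the fork internally from BMAP_ATTRFORK so that trace_xfs_iext_insert()/trace_xfs_iext_remove() can record which inode and fork is being modified. Callers (mostly in xfs_bmap.c, outside this hunk) have to be updated accordingly; a hypothetical call-site update, for illustration only, looks like this:

/* Hypothetical caller update; the real call sites are in xfs_bmap.c. */

/* old: fork pointer, no context available for tracing */
xfs_iext_remove(ifp, idx, 1);

/*
 * new: pass the inode plus a state bitmask. BMAP_ATTRFORK in `state`
 * makes xfs_iext_remove() operate on ip->i_afp instead of &ip->i_df,
 * and the same bits are handed to the tracepoint.
 */
xfs_iext_remove(ip, idx, 1,
		whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0);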
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 41555de1d1db..ec1f28c4fc4f 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -213,7 +213,6 @@ typedef struct xfs_icdinode {
213 213
214struct bhv_desc; 214struct bhv_desc;
215struct cred; 215struct cred;
216struct ktrace;
217struct xfs_buf; 216struct xfs_buf;
218struct xfs_bmap_free; 217struct xfs_bmap_free;
219struct xfs_bmbt_irec; 218struct xfs_bmbt_irec;
@@ -222,13 +221,6 @@ struct xfs_mount;
222struct xfs_trans; 221struct xfs_trans;
223struct xfs_dquot; 222struct xfs_dquot;
224 223
225#if defined(XFS_ILOCK_TRACE)
226#define XFS_ILOCK_KTRACE_SIZE 32
227extern void xfs_ilock_trace(struct xfs_inode *, int, unsigned int, inst_t *);
228#else
229#define xfs_ilock_trace(i,n,f,ra)
230#endif
231
232typedef struct dm_attrs_s { 224typedef struct dm_attrs_s {
233 __uint32_t da_dmevmask; /* DMIG event mask */ 225 __uint32_t da_dmevmask; /* DMIG event mask */
234 __uint16_t da_dmstate; /* DMIG state info */ 226 __uint16_t da_dmstate; /* DMIG state info */
@@ -271,26 +263,6 @@ typedef struct xfs_inode {
271 263
272 /* VFS inode */ 264 /* VFS inode */
273 struct inode i_vnode; /* embedded VFS inode */ 265 struct inode i_vnode; /* embedded VFS inode */
274
275 /* Trace buffers per inode. */
276#ifdef XFS_INODE_TRACE
277 struct ktrace *i_trace; /* general inode trace */
278#endif
279#ifdef XFS_BMAP_TRACE
280 struct ktrace *i_xtrace; /* inode extent list trace */
281#endif
282#ifdef XFS_BTREE_TRACE
283 struct ktrace *i_btrace; /* inode bmap btree trace */
284#endif
285#ifdef XFS_RW_TRACE
286 struct ktrace *i_rwtrace; /* inode read/write trace */
287#endif
288#ifdef XFS_ILOCK_TRACE
289 struct ktrace *i_lock_trace; /* inode lock/unlock trace */
290#endif
291#ifdef XFS_DIR2_TRACE
292 struct ktrace *i_dir_trace; /* inode directory trace */
293#endif
294} xfs_inode_t; 266} xfs_inode_t;
295 267
296#define XFS_ISIZE(ip) (((ip)->i_d.di_mode & S_IFMT) == S_IFREG) ? \ 268#define XFS_ISIZE(ip) (((ip)->i_d.di_mode & S_IFMT) == S_IFREG) ? \
@@ -406,6 +378,14 @@ static inline void xfs_ifunlock(xfs_inode_t *ip)
406#define XFS_LOCK_MASK (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED \ 378#define XFS_LOCK_MASK (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED \
407 | XFS_ILOCK_EXCL | XFS_ILOCK_SHARED) 379 | XFS_ILOCK_EXCL | XFS_ILOCK_SHARED)
408 380
381#define XFS_LOCK_FLAGS \
382 { XFS_IOLOCK_EXCL, "IOLOCK_EXCL" }, \
383 { XFS_IOLOCK_SHARED, "IOLOCK_SHARED" }, \
384 { XFS_ILOCK_EXCL, "ILOCK_EXCL" }, \
385 { XFS_ILOCK_SHARED, "ILOCK_SHARED" }, \
386 { XFS_IUNLOCK_NONOTIFY, "IUNLOCK_NONOTIFY" }
387
388
409/* 389/*
410 * Flags for lockdep annotations. 390 * Flags for lockdep annotations.
411 * 391 *
@@ -455,6 +435,10 @@ static inline void xfs_ifunlock(xfs_inode_t *ip)
455#define XFS_ITRUNC_DEFINITE 0x1 435#define XFS_ITRUNC_DEFINITE 0x1
456#define XFS_ITRUNC_MAYBE 0x2 436#define XFS_ITRUNC_MAYBE 0x2
457 437
438#define XFS_ITRUNC_FLAGS \
439 { XFS_ITRUNC_DEFINITE, "DEFINITE" }, \
440 { XFS_ITRUNC_MAYBE, "MAYBE" }
441
458/* 442/*
459 * For multiple groups support: if S_ISGID bit is set in the parent 443 * For multiple groups support: if S_ISGID bit is set in the parent
460 * directory, group of new file is set to that of the parent, and 444 * directory, group of new file is set to that of the parent, and
@@ -507,48 +491,16 @@ void xfs_lock_two_inodes(xfs_inode_t *, xfs_inode_t *, uint);
507void xfs_synchronize_times(xfs_inode_t *); 491void xfs_synchronize_times(xfs_inode_t *);
508void xfs_mark_inode_dirty_sync(xfs_inode_t *); 492void xfs_mark_inode_dirty_sync(xfs_inode_t *);
509 493
510#if defined(XFS_INODE_TRACE)
511
512#define INODE_TRACE_SIZE 16 /* number of trace entries */
513#define INODE_KTRACE_ENTRY 1
514#define INODE_KTRACE_EXIT 2
515#define INODE_KTRACE_HOLD 3
516#define INODE_KTRACE_REF 4
517#define INODE_KTRACE_RELE 5
518
519extern void _xfs_itrace_entry(struct xfs_inode *, const char *, inst_t *);
520extern void _xfs_itrace_exit(struct xfs_inode *, const char *, inst_t *);
521extern void xfs_itrace_hold(struct xfs_inode *, char *, int, inst_t *);
522extern void _xfs_itrace_ref(struct xfs_inode *, char *, int, inst_t *);
523extern void xfs_itrace_rele(struct xfs_inode *, char *, int, inst_t *);
524#define xfs_itrace_entry(ip) \
525 _xfs_itrace_entry(ip, __func__, (inst_t *)__return_address)
526#define xfs_itrace_exit(ip) \
527 _xfs_itrace_exit(ip, __func__, (inst_t *)__return_address)
528#define xfs_itrace_exit_tag(ip, tag) \
529 _xfs_itrace_exit(ip, tag, (inst_t *)__return_address)
530#define xfs_itrace_ref(ip) \
531 _xfs_itrace_ref(ip, __FILE__, __LINE__, (inst_t *)__return_address)
532
533#else
534#define xfs_itrace_entry(a)
535#define xfs_itrace_exit(a)
536#define xfs_itrace_exit_tag(a, b)
537#define xfs_itrace_hold(a, b, c, d)
538#define xfs_itrace_ref(a)
539#define xfs_itrace_rele(a, b, c, d)
540#endif
541
542#define IHOLD(ip) \ 494#define IHOLD(ip) \
543do { \ 495do { \
544 ASSERT(atomic_read(&VFS_I(ip)->i_count) > 0) ; \ 496 ASSERT(atomic_read(&VFS_I(ip)->i_count) > 0) ; \
545 atomic_inc(&(VFS_I(ip)->i_count)); \ 497 atomic_inc(&(VFS_I(ip)->i_count)); \
546 xfs_itrace_hold((ip), __FILE__, __LINE__, (inst_t *)__return_address); \ 498 trace_xfs_ihold(ip, _THIS_IP_); \
547} while (0) 499} while (0)
548 500
549#define IRELE(ip) \ 501#define IRELE(ip) \
550do { \ 502do { \
551 xfs_itrace_rele((ip), __FILE__, __LINE__, (inst_t *)__return_address); \ 503 trace_xfs_irele(ip, _THIS_IP_); \
552 iput(VFS_I(ip)); \ 504 iput(VFS_I(ip)); \
553} while (0) 505} while (0)
554 506
@@ -577,11 +529,11 @@ int xfs_iread_extents(struct xfs_trans *, struct xfs_inode *, int);
577int xfs_iextents_copy(struct xfs_inode *, xfs_bmbt_rec_t *, int); 529int xfs_iextents_copy(struct xfs_inode *, xfs_bmbt_rec_t *, int);
578 530
579xfs_bmbt_rec_host_t *xfs_iext_get_ext(xfs_ifork_t *, xfs_extnum_t); 531xfs_bmbt_rec_host_t *xfs_iext_get_ext(xfs_ifork_t *, xfs_extnum_t);
580void xfs_iext_insert(xfs_ifork_t *, xfs_extnum_t, xfs_extnum_t, 532void xfs_iext_insert(xfs_inode_t *, xfs_extnum_t, xfs_extnum_t,
581 xfs_bmbt_irec_t *); 533 xfs_bmbt_irec_t *, int);
582void xfs_iext_add(xfs_ifork_t *, xfs_extnum_t, int); 534void xfs_iext_add(xfs_ifork_t *, xfs_extnum_t, int);
583void xfs_iext_add_indirect_multi(xfs_ifork_t *, int, xfs_extnum_t, int); 535void xfs_iext_add_indirect_multi(xfs_ifork_t *, int, xfs_extnum_t, int);
584void xfs_iext_remove(xfs_ifork_t *, xfs_extnum_t, int); 536void xfs_iext_remove(xfs_inode_t *, xfs_extnum_t, int, int);
585void xfs_iext_remove_inline(xfs_ifork_t *, xfs_extnum_t, int); 537void xfs_iext_remove_inline(xfs_ifork_t *, xfs_extnum_t, int);
586void xfs_iext_remove_direct(xfs_ifork_t *, xfs_extnum_t, int); 538void xfs_iext_remove_direct(xfs_ifork_t *, xfs_extnum_t, int);
587void xfs_iext_remove_indirect(xfs_ifork_t *, xfs_extnum_t, int); 539void xfs_iext_remove_indirect(xfs_ifork_t *, xfs_extnum_t, int);
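The new XFS_LOCK_FLAGS and XFS_ITRUNC_FLAGS tables added above are { mask, "name" } pairs in the form the tracing core's __print_flags() helper expects, so TP_printk() can render a flags word symbolically. The lock tracepoints also take the caller address via the generic _RET_IP_ helper ((unsigned long)__builtin_return_address(0)) instead of the XFS-local (inst_t *)__return_address. A hedged sketch of how such a lock event could be declared -- the real definition is in linux-2.6/xfs_trace.h and may differ:

/*
 * Illustrative sketch only, not the actual xfs_trace.h definition.
 * Relies on XFS_LOCK_FLAGS from xfs_inode.h being visible where the
 * trace header is expanded.
 */
TRACE_EVENT(xfs_ilock,
	TP_PROTO(struct xfs_inode *ip, unsigned lock_flags,
		 unsigned long caller_ip),
	TP_ARGS(ip, lock_flags, caller_ip),
	TP_STRUCT__entry(
		__field(xfs_ino_t, ino)
		__field(int, lock_flags)
		__field(unsigned long, caller_ip)
	),
	TP_fast_assign(
		__entry->ino = ip->i_ino;
		__entry->lock_flags = lock_flags;
		__entry->caller_ip = caller_ip;
	),
	TP_printk("ino 0x%llx flags %s caller %pS",
		  (unsigned long long)__entry->ino,
		  __print_flags(__entry->lock_flags, "|", XFS_LOCK_FLAGS),
		  (void *)__entry->caller_ip)
);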
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 9794b876d6ff..f38855d21ea5 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -41,6 +41,7 @@
41#include "xfs_ialloc.h" 41#include "xfs_ialloc.h"
42#include "xfs_rw.h" 42#include "xfs_rw.h"
43#include "xfs_error.h" 43#include "xfs_error.h"
44#include "xfs_trace.h"
44 45
45 46
46kmem_zone_t *xfs_ili_zone; /* inode log item zone */ 47kmem_zone_t *xfs_ili_zone; /* inode log item zone */
@@ -800,7 +801,9 @@ xfs_inode_item_pushbuf(
800 !completion_done(&ip->i_flush)); 801 !completion_done(&ip->i_flush));
801 iip->ili_pushbuf_flag = 0; 802 iip->ili_pushbuf_flag = 0;
802 xfs_iunlock(ip, XFS_ILOCK_SHARED); 803 xfs_iunlock(ip, XFS_ILOCK_SHARED);
803 xfs_buftrace("INODE ITEM PUSH", bp); 804
805 trace_xfs_inode_item_push(bp, _RET_IP_);
806
804 if (XFS_BUF_ISPINNED(bp)) { 807 if (XFS_BUF_ISPINNED(bp)) {
805 xfs_log_force(mp, (xfs_lsn_t)0, 808 xfs_log_force(mp, (xfs_lsn_t)0,
806 XFS_LOG_FORCE); 809 XFS_LOG_FORCE);
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 7294abce6ef2..0b65039951a0 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -47,72 +47,8 @@
47#include "xfs_trans_space.h" 47#include "xfs_trans_space.h"
48#include "xfs_utils.h" 48#include "xfs_utils.h"
49#include "xfs_iomap.h" 49#include "xfs_iomap.h"
50#include "xfs_trace.h"
50 51
51#if defined(XFS_RW_TRACE)
52void
53xfs_iomap_enter_trace(
54 int tag,
55 xfs_inode_t *ip,
56 xfs_off_t offset,
57 ssize_t count)
58{
59 if (!ip->i_rwtrace)
60 return;
61
62 ktrace_enter(ip->i_rwtrace,
63 (void *)((unsigned long)tag),
64 (void *)ip,
65 (void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
66 (void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
67 (void *)((unsigned long)((offset >> 32) & 0xffffffff)),
68 (void *)((unsigned long)(offset & 0xffffffff)),
69 (void *)((unsigned long)count),
70 (void *)((unsigned long)((ip->i_new_size >> 32) & 0xffffffff)),
71 (void *)((unsigned long)(ip->i_new_size & 0xffffffff)),
72 (void *)((unsigned long)current_pid()),
73 (void *)NULL,
74 (void *)NULL,
75 (void *)NULL,
76 (void *)NULL,
77 (void *)NULL,
78 (void *)NULL);
79}
80
81void
82xfs_iomap_map_trace(
83 int tag,
84 xfs_inode_t *ip,
85 xfs_off_t offset,
86 ssize_t count,
87 xfs_iomap_t *iomapp,
88 xfs_bmbt_irec_t *imapp,
89 int flags)
90{
91 if (!ip->i_rwtrace)
92 return;
93
94 ktrace_enter(ip->i_rwtrace,
95 (void *)((unsigned long)tag),
96 (void *)ip,
97 (void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
98 (void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
99 (void *)((unsigned long)((offset >> 32) & 0xffffffff)),
100 (void *)((unsigned long)(offset & 0xffffffff)),
101 (void *)((unsigned long)count),
102 (void *)((unsigned long)flags),
103 (void *)((unsigned long)((iomapp->iomap_offset >> 32) & 0xffffffff)),
104 (void *)((unsigned long)(iomapp->iomap_offset & 0xffffffff)),
105 (void *)((unsigned long)(iomapp->iomap_delta)),
106 (void *)((unsigned long)(iomapp->iomap_bsize)),
107 (void *)((unsigned long)(iomapp->iomap_bn)),
108 (void *)(__psint_t)(imapp->br_startoff),
109 (void *)((unsigned long)(imapp->br_blockcount)),
110 (void *)(__psint_t)(imapp->br_startblock));
111}
112#else
113#define xfs_iomap_enter_trace(tag, io, offset, count)
114#define xfs_iomap_map_trace(tag, io, offset, count, iomapp, imapp, flags)
115#endif
116 52
117#define XFS_WRITEIO_ALIGN(mp,off) (((off) >> mp->m_writeio_log) \ 53#define XFS_WRITEIO_ALIGN(mp,off) (((off) >> mp->m_writeio_log) \
118 << mp->m_writeio_log) 54 << mp->m_writeio_log)
@@ -187,21 +123,20 @@ xfs_iomap(
187 if (XFS_FORCED_SHUTDOWN(mp)) 123 if (XFS_FORCED_SHUTDOWN(mp))
188 return XFS_ERROR(EIO); 124 return XFS_ERROR(EIO);
189 125
126 trace_xfs_iomap_enter(ip, offset, count, flags, NULL);
127
190 switch (flags & (BMAPI_READ | BMAPI_WRITE | BMAPI_ALLOCATE)) { 128 switch (flags & (BMAPI_READ | BMAPI_WRITE | BMAPI_ALLOCATE)) {
191 case BMAPI_READ: 129 case BMAPI_READ:
192 xfs_iomap_enter_trace(XFS_IOMAP_READ_ENTER, ip, offset, count);
193 lockmode = xfs_ilock_map_shared(ip); 130 lockmode = xfs_ilock_map_shared(ip);
194 bmapi_flags = XFS_BMAPI_ENTIRE; 131 bmapi_flags = XFS_BMAPI_ENTIRE;
195 break; 132 break;
196 case BMAPI_WRITE: 133 case BMAPI_WRITE:
197 xfs_iomap_enter_trace(XFS_IOMAP_WRITE_ENTER, ip, offset, count);
198 lockmode = XFS_ILOCK_EXCL; 134 lockmode = XFS_ILOCK_EXCL;
199 if (flags & BMAPI_IGNSTATE) 135 if (flags & BMAPI_IGNSTATE)
200 bmapi_flags |= XFS_BMAPI_IGSTATE|XFS_BMAPI_ENTIRE; 136 bmapi_flags |= XFS_BMAPI_IGSTATE|XFS_BMAPI_ENTIRE;
201 xfs_ilock(ip, lockmode); 137 xfs_ilock(ip, lockmode);
202 break; 138 break;
203 case BMAPI_ALLOCATE: 139 case BMAPI_ALLOCATE:
204 xfs_iomap_enter_trace(XFS_IOMAP_ALLOC_ENTER, ip, offset, count);
205 lockmode = XFS_ILOCK_SHARED; 140 lockmode = XFS_ILOCK_SHARED;
206 bmapi_flags = XFS_BMAPI_ENTIRE; 141 bmapi_flags = XFS_BMAPI_ENTIRE;
207 142
@@ -237,8 +172,7 @@ xfs_iomap(
237 if (nimaps && 172 if (nimaps &&
238 (imap.br_startblock != HOLESTARTBLOCK) && 173 (imap.br_startblock != HOLESTARTBLOCK) &&
239 (imap.br_startblock != DELAYSTARTBLOCK)) { 174 (imap.br_startblock != DELAYSTARTBLOCK)) {
240 xfs_iomap_map_trace(XFS_IOMAP_WRITE_MAP, ip, 175 trace_xfs_iomap_found(ip, offset, count, flags, &imap);
241 offset, count, iomapp, &imap, flags);
242 break; 176 break;
243 } 177 }
244 178
@@ -250,8 +184,7 @@ xfs_iomap(
250 &imap, &nimaps); 184 &imap, &nimaps);
251 } 185 }
252 if (!error) { 186 if (!error) {
253 xfs_iomap_map_trace(XFS_IOMAP_ALLOC_MAP, ip, 187 trace_xfs_iomap_alloc(ip, offset, count, flags, &imap);
254 offset, count, iomapp, &imap, flags);
255 } 188 }
256 iomap_flags = IOMAP_NEW; 189 iomap_flags = IOMAP_NEW;
257 break; 190 break;
@@ -261,8 +194,7 @@ xfs_iomap(
261 lockmode = 0; 194 lockmode = 0;
262 195
263 if (nimaps && !isnullstartblock(imap.br_startblock)) { 196 if (nimaps && !isnullstartblock(imap.br_startblock)) {
264 xfs_iomap_map_trace(XFS_IOMAP_WRITE_MAP, ip, 197 trace_xfs_iomap_found(ip, offset, count, flags, &imap);
265 offset, count, iomapp, &imap, flags);
266 break; 198 break;
267 } 199 }
268 200
@@ -623,8 +555,7 @@ retry:
623 * delalloc blocks and retry without EOF preallocation. 555 * delalloc blocks and retry without EOF preallocation.
624 */ 556 */
625 if (nimaps == 0) { 557 if (nimaps == 0) {
626 xfs_iomap_enter_trace(XFS_IOMAP_WRITE_NOSPACE, 558 trace_xfs_delalloc_enospc(ip, offset, count);
627 ip, offset, count);
628 if (flushed) 559 if (flushed)
629 return XFS_ERROR(ENOSPC); 560 return XFS_ERROR(ENOSPC);
630 561
@@ -837,7 +768,7 @@ xfs_iomap_write_unwritten(
837 int committed; 768 int committed;
838 int error; 769 int error;
839 770
840 xfs_iomap_enter_trace(XFS_IOMAP_UNWRITTEN, ip, offset, count); 771 trace_xfs_unwritten_convert(ip, offset, count);
841 772
842 offset_fsb = XFS_B_TO_FSBT(mp, offset); 773 offset_fsb = XFS_B_TO_FSBT(mp, offset);
843 count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count); 774 count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
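To keep the xfs_iomap.c conversion straight, the old XFS_RW_TRACE tags visible in the removed code map onto the new tracepoints as follows (read directly off this hunk):

- XFS_IOMAP_READ_ENTER, XFS_IOMAP_WRITE_ENTER, XFS_IOMAP_ALLOC_ENTER: a single trace_xfs_iomap_enter(ip, offset, count, flags, NULL) call hoisted above the switch
- XFS_IOMAP_WRITE_MAP: trace_xfs_iomap_found(ip, offset, count, flags, &imap)
- XFS_IOMAP_ALLOC_MAP: trace_xfs_iomap_alloc(ip, offset, count, flags, &imap)
- XFS_IOMAP_WRITE_NOSPACE: trace_xfs_delalloc_enospc(ip, offset, count)
- XFS_IOMAP_UNWRITTEN: trace_xfs_unwritten_convert(ip, offset, count)

The new calls pass the xfs_bmbt_irec directly and drop the xfs_iomap_t argument, so the iomapp parameter of the removed xfs_iomap_map_trace() has no equivalent.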
diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h
index fdcf7b82747f..174f29990991 100644
--- a/fs/xfs/xfs_iomap.h
+++ b/fs/xfs/xfs_iomap.h
@@ -43,6 +43,14 @@ typedef enum {
43 BMAPI_TRYLOCK = (1 << 7), /* non-blocking request */ 43 BMAPI_TRYLOCK = (1 << 7), /* non-blocking request */
44} bmapi_flags_t; 44} bmapi_flags_t;
45 45
46#define BMAPI_FLAGS \
47 { BMAPI_READ, "READ" }, \
48 { BMAPI_WRITE, "WRITE" }, \
49 { BMAPI_ALLOCATE, "ALLOCATE" }, \
50 { BMAPI_IGNSTATE, "IGNSTATE" }, \
51 { BMAPI_DIRECT, "DIRECT" }, \
52 { BMAPI_MMAP, "MMAP" }, \
53 { BMAPI_TRYLOCK, "TRYLOCK" }
46 54
47/* 55/*
48 * xfs_iomap_t: File system I/O map 56 * xfs_iomap_t: File system I/O map
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 9dbdff3ea484..4cb1792040e3 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -40,6 +40,7 @@
40#include "xfs_dinode.h" 40#include "xfs_dinode.h"
41#include "xfs_inode.h" 41#include "xfs_inode.h"
42#include "xfs_rw.h" 42#include "xfs_rw.h"
43#include "xfs_trace.h"
43 44
44kmem_zone_t *xfs_log_ticket_zone; 45kmem_zone_t *xfs_log_ticket_zone;
45 46
@@ -122,85 +123,6 @@ STATIC void xlog_verify_tail_lsn(xlog_t *log, xlog_in_core_t *iclog,
122 123
123STATIC int xlog_iclogs_empty(xlog_t *log); 124STATIC int xlog_iclogs_empty(xlog_t *log);
124 125
125#if defined(XFS_LOG_TRACE)
126
127#define XLOG_TRACE_LOGGRANT_SIZE 2048
128#define XLOG_TRACE_ICLOG_SIZE 256
129
130void
131xlog_trace_loggrant_alloc(xlog_t *log)
132{
133 log->l_grant_trace = ktrace_alloc(XLOG_TRACE_LOGGRANT_SIZE, KM_NOFS);
134}
135
136void
137xlog_trace_loggrant_dealloc(xlog_t *log)
138{
139 ktrace_free(log->l_grant_trace);
140}
141
142void
143xlog_trace_loggrant(xlog_t *log, xlog_ticket_t *tic, xfs_caddr_t string)
144{
145 unsigned long cnts;
146
147 /* ticket counts are 1 byte each */
148 cnts = ((unsigned long)tic->t_ocnt) | ((unsigned long)tic->t_cnt) << 8;
149
150 ktrace_enter(log->l_grant_trace,
151 (void *)tic,
152 (void *)log->l_reserve_headq,
153 (void *)log->l_write_headq,
154 (void *)((unsigned long)log->l_grant_reserve_cycle),
155 (void *)((unsigned long)log->l_grant_reserve_bytes),
156 (void *)((unsigned long)log->l_grant_write_cycle),
157 (void *)((unsigned long)log->l_grant_write_bytes),
158 (void *)((unsigned long)log->l_curr_cycle),
159 (void *)((unsigned long)log->l_curr_block),
160 (void *)((unsigned long)CYCLE_LSN(log->l_tail_lsn)),
161 (void *)((unsigned long)BLOCK_LSN(log->l_tail_lsn)),
162 (void *)string,
163 (void *)((unsigned long)tic->t_trans_type),
164 (void *)cnts,
165 (void *)((unsigned long)tic->t_curr_res),
166 (void *)((unsigned long)tic->t_unit_res));
167}
168
169void
170xlog_trace_iclog_alloc(xlog_in_core_t *iclog)
171{
172 iclog->ic_trace = ktrace_alloc(XLOG_TRACE_ICLOG_SIZE, KM_NOFS);
173}
174
175void
176xlog_trace_iclog_dealloc(xlog_in_core_t *iclog)
177{
178 ktrace_free(iclog->ic_trace);
179}
180
181void
182xlog_trace_iclog(xlog_in_core_t *iclog, uint state)
183{
184 ktrace_enter(iclog->ic_trace,
185 (void *)((unsigned long)state),
186 (void *)((unsigned long)current_pid()),
187 (void *)NULL, (void *)NULL, (void *)NULL, (void *)NULL,
188 (void *)NULL, (void *)NULL, (void *)NULL, (void *)NULL,
189 (void *)NULL, (void *)NULL, (void *)NULL, (void *)NULL,
190 (void *)NULL, (void *)NULL);
191}
192#else
193
194#define xlog_trace_loggrant_alloc(log)
195#define xlog_trace_loggrant_dealloc(log)
196#define xlog_trace_loggrant(log,tic,string)
197
198#define xlog_trace_iclog_alloc(iclog)
199#define xlog_trace_iclog_dealloc(iclog)
200#define xlog_trace_iclog(iclog,state)
201
202#endif /* XFS_LOG_TRACE */
203
204 126
205static void 127static void
206xlog_ins_ticketq(struct xlog_ticket **qp, struct xlog_ticket *tic) 128xlog_ins_ticketq(struct xlog_ticket **qp, struct xlog_ticket *tic)
@@ -353,15 +275,17 @@ xfs_log_done(xfs_mount_t *mp,
353 275
354 if ((ticket->t_flags & XLOG_TIC_PERM_RESERV) == 0 || 276 if ((ticket->t_flags & XLOG_TIC_PERM_RESERV) == 0 ||
355 (flags & XFS_LOG_REL_PERM_RESERV)) { 277 (flags & XFS_LOG_REL_PERM_RESERV)) {
278 trace_xfs_log_done_nonperm(log, ticket);
279
356 /* 280 /*
357 * Release ticket if not permanent reservation or a specific 281 * Release ticket if not permanent reservation or a specific
358 * request has been made to release a permanent reservation. 282 * request has been made to release a permanent reservation.
359 */ 283 */
360 xlog_trace_loggrant(log, ticket, "xfs_log_done: (non-permanent)");
361 xlog_ungrant_log_space(log, ticket); 284 xlog_ungrant_log_space(log, ticket);
362 xfs_log_ticket_put(ticket); 285 xfs_log_ticket_put(ticket);
363 } else { 286 } else {
364 xlog_trace_loggrant(log, ticket, "xfs_log_done: (permanent)"); 287 trace_xfs_log_done_perm(log, ticket);
288
365 xlog_regrant_reserve_log_space(log, ticket); 289 xlog_regrant_reserve_log_space(log, ticket);
366 /* If this ticket was a permanent reservation and we aren't 290 /* If this ticket was a permanent reservation and we aren't
367 * trying to release it, reset the inited flags; so next time 291 * trying to release it, reset the inited flags; so next time
@@ -505,10 +429,13 @@ xfs_log_reserve(xfs_mount_t *mp,
505 429
506 XFS_STATS_INC(xs_try_logspace); 430 XFS_STATS_INC(xs_try_logspace);
507 431
432
508 if (*ticket != NULL) { 433 if (*ticket != NULL) {
509 ASSERT(flags & XFS_LOG_PERM_RESERV); 434 ASSERT(flags & XFS_LOG_PERM_RESERV);
510 internal_ticket = (xlog_ticket_t *)*ticket; 435 internal_ticket = (xlog_ticket_t *)*ticket;
511 xlog_trace_loggrant(log, internal_ticket, "xfs_log_reserve: existing ticket (permanent trans)"); 436
437 trace_xfs_log_reserve(log, internal_ticket);
438
512 xlog_grant_push_ail(mp, internal_ticket->t_unit_res); 439 xlog_grant_push_ail(mp, internal_ticket->t_unit_res);
513 retval = xlog_regrant_write_log_space(log, internal_ticket); 440 retval = xlog_regrant_write_log_space(log, internal_ticket);
514 } else { 441 } else {
@@ -519,10 +446,9 @@ xfs_log_reserve(xfs_mount_t *mp,
519 return XFS_ERROR(ENOMEM); 446 return XFS_ERROR(ENOMEM);
520 internal_ticket->t_trans_type = t_type; 447 internal_ticket->t_trans_type = t_type;
521 *ticket = internal_ticket; 448 *ticket = internal_ticket;
522 xlog_trace_loggrant(log, internal_ticket, 449
523 (internal_ticket->t_flags & XLOG_TIC_PERM_RESERV) ? 450 trace_xfs_log_reserve(log, internal_ticket);
524 "xfs_log_reserve: create new ticket (permanent trans)" : 451
525 "xfs_log_reserve: create new ticket");
526 xlog_grant_push_ail(mp, 452 xlog_grant_push_ail(mp,
527 (internal_ticket->t_unit_res * 453 (internal_ticket->t_unit_res *
528 internal_ticket->t_cnt)); 454 internal_ticket->t_cnt));
@@ -734,7 +660,7 @@ xfs_log_unmount_write(xfs_mount_t *mp)
734 spin_unlock(&log->l_icloglock); 660 spin_unlock(&log->l_icloglock);
735 } 661 }
736 if (tic) { 662 if (tic) {
737 xlog_trace_loggrant(log, tic, "unmount rec"); 663 trace_xfs_log_umount_write(log, tic);
738 xlog_ungrant_log_space(log, tic); 664 xlog_ungrant_log_space(log, tic);
739 xfs_log_ticket_put(tic); 665 xfs_log_ticket_put(tic);
740 } 666 }
@@ -1030,7 +956,6 @@ xlog_iodone(xfs_buf_t *bp)
1030 xfs_fs_cmn_err(CE_WARN, l->l_mp, 956 xfs_fs_cmn_err(CE_WARN, l->l_mp,
1031 "xlog_iodone: Barriers are no longer supported" 957 "xlog_iodone: Barriers are no longer supported"
1032 " by device. Disabling barriers\n"); 958 " by device. Disabling barriers\n");
1033 xfs_buftrace("XLOG_IODONE BARRIERS OFF", bp);
1034 } 959 }
1035 960
1036 /* 961 /*
@@ -1085,13 +1010,10 @@ xlog_bdstrat_cb(struct xfs_buf *bp)
1085 return 0; 1010 return 0;
1086 } 1011 }
1087 1012
1088 xfs_buftrace("XLOG__BDSTRAT IOERROR", bp);
1089 XFS_BUF_ERROR(bp, EIO); 1013 XFS_BUF_ERROR(bp, EIO);
1090 XFS_BUF_STALE(bp); 1014 XFS_BUF_STALE(bp);
1091 xfs_biodone(bp); 1015 xfs_biodone(bp);
1092 return XFS_ERROR(EIO); 1016 return XFS_ERROR(EIO);
1093
1094
1095} 1017}
1096 1018
1097/* 1019/*
@@ -1246,7 +1168,6 @@ xlog_alloc_log(xfs_mount_t *mp,
1246 spin_lock_init(&log->l_grant_lock); 1168 spin_lock_init(&log->l_grant_lock);
1247 sv_init(&log->l_flush_wait, 0, "flush_wait"); 1169 sv_init(&log->l_flush_wait, 0, "flush_wait");
1248 1170
1249 xlog_trace_loggrant_alloc(log);
1250 /* log record size must be multiple of BBSIZE; see xlog_rec_header_t */ 1171 /* log record size must be multiple of BBSIZE; see xlog_rec_header_t */
1251 ASSERT((XFS_BUF_SIZE(bp) & BBMASK) == 0); 1172 ASSERT((XFS_BUF_SIZE(bp) & BBMASK) == 0);
1252 1173
@@ -1305,8 +1226,6 @@ xlog_alloc_log(xfs_mount_t *mp,
1305 sv_init(&iclog->ic_force_wait, SV_DEFAULT, "iclog-force"); 1226 sv_init(&iclog->ic_force_wait, SV_DEFAULT, "iclog-force");
1306 sv_init(&iclog->ic_write_wait, SV_DEFAULT, "iclog-write"); 1227 sv_init(&iclog->ic_write_wait, SV_DEFAULT, "iclog-write");
1307 1228
1308 xlog_trace_iclog_alloc(iclog);
1309
1310 iclogp = &iclog->ic_next; 1229 iclogp = &iclog->ic_next;
1311 } 1230 }
1312 *iclogp = log->l_iclog; /* complete ring */ 1231 *iclogp = log->l_iclog; /* complete ring */
@@ -1321,13 +1240,11 @@ out_free_iclog:
1321 sv_destroy(&iclog->ic_force_wait); 1240 sv_destroy(&iclog->ic_force_wait);
1322 sv_destroy(&iclog->ic_write_wait); 1241 sv_destroy(&iclog->ic_write_wait);
1323 xfs_buf_free(iclog->ic_bp); 1242 xfs_buf_free(iclog->ic_bp);
1324 xlog_trace_iclog_dealloc(iclog);
1325 } 1243 }
1326 kmem_free(iclog); 1244 kmem_free(iclog);
1327 } 1245 }
1328 spinlock_destroy(&log->l_icloglock); 1246 spinlock_destroy(&log->l_icloglock);
1329 spinlock_destroy(&log->l_grant_lock); 1247 spinlock_destroy(&log->l_grant_lock);
1330 xlog_trace_loggrant_dealloc(log);
1331 xfs_buf_free(log->l_xbuf); 1248 xfs_buf_free(log->l_xbuf);
1332out_free_log: 1249out_free_log:
1333 kmem_free(log); 1250 kmem_free(log);
@@ -1607,7 +1524,6 @@ xlog_dealloc_log(xlog_t *log)
1607 sv_destroy(&iclog->ic_force_wait); 1524 sv_destroy(&iclog->ic_force_wait);
1608 sv_destroy(&iclog->ic_write_wait); 1525 sv_destroy(&iclog->ic_write_wait);
1609 xfs_buf_free(iclog->ic_bp); 1526 xfs_buf_free(iclog->ic_bp);
1610 xlog_trace_iclog_dealloc(iclog);
1611 next_iclog = iclog->ic_next; 1527 next_iclog = iclog->ic_next;
1612 kmem_free(iclog); 1528 kmem_free(iclog);
1613 iclog = next_iclog; 1529 iclog = next_iclog;
@@ -1616,7 +1532,6 @@ xlog_dealloc_log(xlog_t *log)
1616 spinlock_destroy(&log->l_grant_lock); 1532 spinlock_destroy(&log->l_grant_lock);
1617 1533
1618 xfs_buf_free(log->l_xbuf); 1534 xfs_buf_free(log->l_xbuf);
1619 xlog_trace_loggrant_dealloc(log);
1620 log->l_mp->m_log = NULL; 1535 log->l_mp->m_log = NULL;
1621 kmem_free(log); 1536 kmem_free(log);
1622} /* xlog_dealloc_log */ 1537} /* xlog_dealloc_log */
@@ -2414,7 +2329,6 @@ restart:
2414 2329
2415 iclog = log->l_iclog; 2330 iclog = log->l_iclog;
2416 if (iclog->ic_state != XLOG_STATE_ACTIVE) { 2331 if (iclog->ic_state != XLOG_STATE_ACTIVE) {
2417 xlog_trace_iclog(iclog, XLOG_TRACE_SLEEP_FLUSH);
2418 XFS_STATS_INC(xs_log_noiclogs); 2332 XFS_STATS_INC(xs_log_noiclogs);
2419 2333
2420 /* Wait for log writes to have flushed */ 2334 /* Wait for log writes to have flushed */
@@ -2520,13 +2434,15 @@ xlog_grant_log_space(xlog_t *log,
2520 2434
2521 /* Is there space or do we need to sleep? */ 2435 /* Is there space or do we need to sleep? */
2522 spin_lock(&log->l_grant_lock); 2436 spin_lock(&log->l_grant_lock);
2523 xlog_trace_loggrant(log, tic, "xlog_grant_log_space: enter"); 2437
2438 trace_xfs_log_grant_enter(log, tic);
2524 2439
2525 /* something is already sleeping; insert new transaction at end */ 2440 /* something is already sleeping; insert new transaction at end */
2526 if (log->l_reserve_headq) { 2441 if (log->l_reserve_headq) {
2527 xlog_ins_ticketq(&log->l_reserve_headq, tic); 2442 xlog_ins_ticketq(&log->l_reserve_headq, tic);
2528 xlog_trace_loggrant(log, tic, 2443
2529 "xlog_grant_log_space: sleep 1"); 2444 trace_xfs_log_grant_sleep1(log, tic);
2445
2530 /* 2446 /*
2531 * Gotta check this before going to sleep, while we're 2447 * Gotta check this before going to sleep, while we're
2532 * holding the grant lock. 2448 * holding the grant lock.
@@ -2540,8 +2456,7 @@ xlog_grant_log_space(xlog_t *log,
2540 * If we got an error, and the filesystem is shutting down, 2456 * If we got an error, and the filesystem is shutting down,
2541 * we'll catch it down below. So just continue... 2457 * we'll catch it down below. So just continue...
2542 */ 2458 */
2543 xlog_trace_loggrant(log, tic, 2459 trace_xfs_log_grant_wake1(log, tic);
2544 "xlog_grant_log_space: wake 1");
2545 spin_lock(&log->l_grant_lock); 2460 spin_lock(&log->l_grant_lock);
2546 } 2461 }
2547 if (tic->t_flags & XFS_LOG_PERM_RESERV) 2462 if (tic->t_flags & XFS_LOG_PERM_RESERV)
@@ -2558,8 +2473,9 @@ redo:
2558 if (free_bytes < need_bytes) { 2473 if (free_bytes < need_bytes) {
2559 if ((tic->t_flags & XLOG_TIC_IN_Q) == 0) 2474 if ((tic->t_flags & XLOG_TIC_IN_Q) == 0)
2560 xlog_ins_ticketq(&log->l_reserve_headq, tic); 2475 xlog_ins_ticketq(&log->l_reserve_headq, tic);
2561 xlog_trace_loggrant(log, tic, 2476
2562 "xlog_grant_log_space: sleep 2"); 2477 trace_xfs_log_grant_sleep2(log, tic);
2478
2563 spin_unlock(&log->l_grant_lock); 2479 spin_unlock(&log->l_grant_lock);
2564 xlog_grant_push_ail(log->l_mp, need_bytes); 2480 xlog_grant_push_ail(log->l_mp, need_bytes);
2565 spin_lock(&log->l_grant_lock); 2481 spin_lock(&log->l_grant_lock);
@@ -2571,8 +2487,8 @@ redo:
2571 if (XLOG_FORCED_SHUTDOWN(log)) 2487 if (XLOG_FORCED_SHUTDOWN(log))
2572 goto error_return; 2488 goto error_return;
2573 2489
2574 xlog_trace_loggrant(log, tic, 2490 trace_xfs_log_grant_wake2(log, tic);
2575 "xlog_grant_log_space: wake 2"); 2491
2576 goto redo; 2492 goto redo;
2577 } else if (tic->t_flags & XLOG_TIC_IN_Q) 2493 } else if (tic->t_flags & XLOG_TIC_IN_Q)
2578 xlog_del_ticketq(&log->l_reserve_headq, tic); 2494 xlog_del_ticketq(&log->l_reserve_headq, tic);
@@ -2592,7 +2508,7 @@ redo:
2592 ASSERT(log->l_grant_write_bytes <= BBTOB(BLOCK_LSN(tail_lsn))); 2508 ASSERT(log->l_grant_write_bytes <= BBTOB(BLOCK_LSN(tail_lsn)));
2593 } 2509 }
2594#endif 2510#endif
2595 xlog_trace_loggrant(log, tic, "xlog_grant_log_space: exit"); 2511 trace_xfs_log_grant_exit(log, tic);
2596 xlog_verify_grant_head(log, 1); 2512 xlog_verify_grant_head(log, 1);
2597 spin_unlock(&log->l_grant_lock); 2513 spin_unlock(&log->l_grant_lock);
2598 return 0; 2514 return 0;
@@ -2600,7 +2516,9 @@ redo:
2600 error_return: 2516 error_return:
2601 if (tic->t_flags & XLOG_TIC_IN_Q) 2517 if (tic->t_flags & XLOG_TIC_IN_Q)
2602 xlog_del_ticketq(&log->l_reserve_headq, tic); 2518 xlog_del_ticketq(&log->l_reserve_headq, tic);
2603 xlog_trace_loggrant(log, tic, "xlog_grant_log_space: err_ret"); 2519
2520 trace_xfs_log_grant_error(log, tic);
2521
2604 /* 2522 /*
2605 * If we are failing, make sure the ticket doesn't have any 2523 * If we are failing, make sure the ticket doesn't have any
2606 * current reservations. We don't want to add this back when 2524 * current reservations. We don't want to add this back when
@@ -2640,7 +2558,8 @@ xlog_regrant_write_log_space(xlog_t *log,
2640#endif 2558#endif
2641 2559
2642 spin_lock(&log->l_grant_lock); 2560 spin_lock(&log->l_grant_lock);
2643 xlog_trace_loggrant(log, tic, "xlog_regrant_write_log_space: enter"); 2561
2562 trace_xfs_log_regrant_write_enter(log, tic);
2644 2563
2645 if (XLOG_FORCED_SHUTDOWN(log)) 2564 if (XLOG_FORCED_SHUTDOWN(log))
2646 goto error_return; 2565 goto error_return;
@@ -2669,8 +2588,8 @@ xlog_regrant_write_log_space(xlog_t *log,
2669 if ((tic->t_flags & XLOG_TIC_IN_Q) == 0) 2588 if ((tic->t_flags & XLOG_TIC_IN_Q) == 0)
2670 xlog_ins_ticketq(&log->l_write_headq, tic); 2589 xlog_ins_ticketq(&log->l_write_headq, tic);
2671 2590
2672 xlog_trace_loggrant(log, tic, 2591 trace_xfs_log_regrant_write_sleep1(log, tic);
2673 "xlog_regrant_write_log_space: sleep 1"); 2592
2674 spin_unlock(&log->l_grant_lock); 2593 spin_unlock(&log->l_grant_lock);
2675 xlog_grant_push_ail(log->l_mp, need_bytes); 2594 xlog_grant_push_ail(log->l_mp, need_bytes);
2676 spin_lock(&log->l_grant_lock); 2595 spin_lock(&log->l_grant_lock);
@@ -2685,8 +2604,7 @@ xlog_regrant_write_log_space(xlog_t *log,
2685 if (XLOG_FORCED_SHUTDOWN(log)) 2604 if (XLOG_FORCED_SHUTDOWN(log))
2686 goto error_return; 2605 goto error_return;
2687 2606
2688 xlog_trace_loggrant(log, tic, 2607 trace_xfs_log_regrant_write_wake1(log, tic);
2689 "xlog_regrant_write_log_space: wake 1");
2690 } 2608 }
2691 } 2609 }
2692 2610
@@ -2704,6 +2622,8 @@ redo:
2704 spin_lock(&log->l_grant_lock); 2622 spin_lock(&log->l_grant_lock);
2705 2623
2706 XFS_STATS_INC(xs_sleep_logspace); 2624 XFS_STATS_INC(xs_sleep_logspace);
2625 trace_xfs_log_regrant_write_sleep2(log, tic);
2626
2707 sv_wait(&tic->t_wait, PINOD|PLTWAIT, &log->l_grant_lock, s); 2627 sv_wait(&tic->t_wait, PINOD|PLTWAIT, &log->l_grant_lock, s);
2708 2628
2709 /* If we're shutting down, this tic is already off the queue */ 2629 /* If we're shutting down, this tic is already off the queue */
@@ -2711,8 +2631,7 @@ redo:
2711 if (XLOG_FORCED_SHUTDOWN(log)) 2631 if (XLOG_FORCED_SHUTDOWN(log))
2712 goto error_return; 2632 goto error_return;
2713 2633
2714 xlog_trace_loggrant(log, tic, 2634 trace_xfs_log_regrant_write_wake2(log, tic);
2715 "xlog_regrant_write_log_space: wake 2");
2716 goto redo; 2635 goto redo;
2717 } else if (tic->t_flags & XLOG_TIC_IN_Q) 2636 } else if (tic->t_flags & XLOG_TIC_IN_Q)
2718 xlog_del_ticketq(&log->l_write_headq, tic); 2637 xlog_del_ticketq(&log->l_write_headq, tic);
@@ -2727,7 +2646,8 @@ redo:
2727 } 2646 }
2728#endif 2647#endif
2729 2648
2730 xlog_trace_loggrant(log, tic, "xlog_regrant_write_log_space: exit"); 2649 trace_xfs_log_regrant_write_exit(log, tic);
2650
2731 xlog_verify_grant_head(log, 1); 2651 xlog_verify_grant_head(log, 1);
2732 spin_unlock(&log->l_grant_lock); 2652 spin_unlock(&log->l_grant_lock);
2733 return 0; 2653 return 0;
@@ -2736,7 +2656,9 @@ redo:
2736 error_return: 2656 error_return:
2737 if (tic->t_flags & XLOG_TIC_IN_Q) 2657 if (tic->t_flags & XLOG_TIC_IN_Q)
2738 xlog_del_ticketq(&log->l_reserve_headq, tic); 2658 xlog_del_ticketq(&log->l_reserve_headq, tic);
2739 xlog_trace_loggrant(log, tic, "xlog_regrant_write_log_space: err_ret"); 2659
2660 trace_xfs_log_regrant_write_error(log, tic);
2661
2740 /* 2662 /*
2741 * If we are failing, make sure the ticket doesn't have any 2663 * If we are failing, make sure the ticket doesn't have any
2742 * current reservations. We don't want to add this back when 2664 * current reservations. We don't want to add this back when
@@ -2760,8 +2682,8 @@ STATIC void
2760xlog_regrant_reserve_log_space(xlog_t *log, 2682xlog_regrant_reserve_log_space(xlog_t *log,
2761 xlog_ticket_t *ticket) 2683 xlog_ticket_t *ticket)
2762{ 2684{
2763 xlog_trace_loggrant(log, ticket, 2685 trace_xfs_log_regrant_reserve_enter(log, ticket);
2764 "xlog_regrant_reserve_log_space: enter"); 2686
2765 if (ticket->t_cnt > 0) 2687 if (ticket->t_cnt > 0)
2766 ticket->t_cnt--; 2688 ticket->t_cnt--;
2767 2689
@@ -2769,8 +2691,9 @@ xlog_regrant_reserve_log_space(xlog_t *log,
2769 xlog_grant_sub_space(log, ticket->t_curr_res); 2691 xlog_grant_sub_space(log, ticket->t_curr_res);
2770 ticket->t_curr_res = ticket->t_unit_res; 2692 ticket->t_curr_res = ticket->t_unit_res;
2771 xlog_tic_reset_res(ticket); 2693 xlog_tic_reset_res(ticket);
2772 xlog_trace_loggrant(log, ticket, 2694
2773 "xlog_regrant_reserve_log_space: sub current res"); 2695 trace_xfs_log_regrant_reserve_sub(log, ticket);
2696
2774 xlog_verify_grant_head(log, 1); 2697 xlog_verify_grant_head(log, 1);
2775 2698
2776 /* just return if we still have some of the pre-reserved space */ 2699 /* just return if we still have some of the pre-reserved space */
@@ -2780,8 +2703,9 @@ xlog_regrant_reserve_log_space(xlog_t *log,
2780 } 2703 }
2781 2704
2782 xlog_grant_add_space_reserve(log, ticket->t_unit_res); 2705 xlog_grant_add_space_reserve(log, ticket->t_unit_res);
2783 xlog_trace_loggrant(log, ticket, 2706
2784 "xlog_regrant_reserve_log_space: exit"); 2707 trace_xfs_log_regrant_reserve_exit(log, ticket);
2708
2785 xlog_verify_grant_head(log, 0); 2709 xlog_verify_grant_head(log, 0);
2786 spin_unlock(&log->l_grant_lock); 2710 spin_unlock(&log->l_grant_lock);
2787 ticket->t_curr_res = ticket->t_unit_res; 2711 ticket->t_curr_res = ticket->t_unit_res;
@@ -2811,11 +2735,11 @@ xlog_ungrant_log_space(xlog_t *log,
2811 ticket->t_cnt--; 2735 ticket->t_cnt--;
2812 2736
2813 spin_lock(&log->l_grant_lock); 2737 spin_lock(&log->l_grant_lock);
2814 xlog_trace_loggrant(log, ticket, "xlog_ungrant_log_space: enter"); 2738 trace_xfs_log_ungrant_enter(log, ticket);
2815 2739
2816 xlog_grant_sub_space(log, ticket->t_curr_res); 2740 xlog_grant_sub_space(log, ticket->t_curr_res);
2817 2741
2818 xlog_trace_loggrant(log, ticket, "xlog_ungrant_log_space: sub current"); 2742 trace_xfs_log_ungrant_sub(log, ticket);
2819 2743
2820 /* If this is a permanent reservation ticket, we may be able to free 2744 /* If this is a permanent reservation ticket, we may be able to free
2821 * up more space based on the remaining count. 2745 * up more space based on the remaining count.
@@ -2825,7 +2749,8 @@ xlog_ungrant_log_space(xlog_t *log,
2825 xlog_grant_sub_space(log, ticket->t_unit_res*ticket->t_cnt); 2749 xlog_grant_sub_space(log, ticket->t_unit_res*ticket->t_cnt);
2826 } 2750 }
2827 2751
2828 xlog_trace_loggrant(log, ticket, "xlog_ungrant_log_space: exit"); 2752 trace_xfs_log_ungrant_exit(log, ticket);
2753
2829 xlog_verify_grant_head(log, 1); 2754 xlog_verify_grant_head(log, 1);
2830 spin_unlock(&log->l_grant_lock); 2755 spin_unlock(&log->l_grant_lock);
2831 xfs_log_move_tail(log->l_mp, 1); 2756 xfs_log_move_tail(log->l_mp, 1);
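The hunks above convert the old string-based xlog_trace_loggrant() calls into compiled-in tracepoints (trace_xfs_log_grant_enter() and friends) generated from linux-2.6/xfs_trace.h. As a rough sketch of what one of those tracepoints could look like when declared with the kernel's TRACE_EVENT() macro — the field selection and format string below are illustrative assumptions, not the actual xfs_trace.h definitions, and the usual TRACE_SYSTEM/include-guard boilerplate is omitted:

/* Illustrative only: field names and layout are assumptions, not the real xfs_trace.h. */
TRACE_EVENT(xfs_log_grant_enter,
	TP_PROTO(struct log *log, struct xlog_ticket *tic),
	TP_ARGS(log, tic),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(unsigned int, trans_type)
		__field(int, curr_res)
		__field(int, unit_res)
		__field(unsigned int, flags)
	),
	TP_fast_assign(
		__entry->dev = log->l_mp->m_super->s_dev;
		__entry->trans_type = tic->t_trans_type;
		__entry->curr_res = tic->t_curr_res;
		__entry->unit_res = tic->t_unit_res;
		__entry->flags = tic->t_flags;
	),
	TP_printk("dev %d:%d type %s curr_res %d unit_res %d flags %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __print_symbolic(__entry->trans_type, XFS_TRANS_TYPES),
		  __entry->curr_res, __entry->unit_res,
		  __print_flags(__entry->flags, "|", XLOG_TIC_FLAGS))
);

The XFS_TRANS_TYPES and XLOG_TIC_FLAGS tables referenced in TP_printk() are the { value, "name" } lists added further down in this patch (in xfs_trans.h and xfs_log_priv.h); unlike the removed ktrace buffers, a disabled tracepoint costs only a branch at the call site until someone enables the event.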
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index 679c7c4926a2..d55662db7077 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -19,7 +19,6 @@
19#define __XFS_LOG_PRIV_H__ 19#define __XFS_LOG_PRIV_H__
20 20
21struct xfs_buf; 21struct xfs_buf;
22struct ktrace;
23struct log; 22struct log;
24struct xlog_ticket; 23struct xlog_ticket;
25struct xfs_buf_cancel; 24struct xfs_buf_cancel;
@@ -135,6 +134,12 @@ static inline uint xlog_get_client_id(__be32 i)
135#define XLOG_TIC_INITED 0x1 /* has been initialized */ 134#define XLOG_TIC_INITED 0x1 /* has been initialized */
136#define XLOG_TIC_PERM_RESERV 0x2 /* permanent reservation */ 135#define XLOG_TIC_PERM_RESERV 0x2 /* permanent reservation */
137#define XLOG_TIC_IN_Q 0x4 136#define XLOG_TIC_IN_Q 0x4
137
138#define XLOG_TIC_FLAGS \
139 { XLOG_TIC_INITED, "XLOG_TIC_INITED" }, \
140 { XLOG_TIC_PERM_RESERV, "XLOG_TIC_PERM_RESERV" }, \
141 { XLOG_TIC_IN_Q, "XLOG_TIC_IN_Q" }
142
138#endif /* __KERNEL__ */ 143#endif /* __KERNEL__ */
139 144
140#define XLOG_UNMOUNT_TYPE 0x556e /* Un for Unmount */ 145#define XLOG_UNMOUNT_TYPE 0x556e /* Un for Unmount */
@@ -361,9 +366,6 @@ typedef struct xlog_in_core {
361 int ic_bwritecnt; 366 int ic_bwritecnt;
362 unsigned short ic_state; 367 unsigned short ic_state;
363 char *ic_datap; /* pointer to iclog data */ 368 char *ic_datap; /* pointer to iclog data */
364#ifdef XFS_LOG_TRACE
365 struct ktrace *ic_trace;
366#endif
367 369
368 /* Callback structures need their own cacheline */ 370 /* Callback structures need their own cacheline */
369 spinlock_t ic_callback_lock ____cacheline_aligned_in_smp; 371 spinlock_t ic_callback_lock ____cacheline_aligned_in_smp;
@@ -429,10 +431,6 @@ typedef struct log {
429 int l_grant_write_cycle; 431 int l_grant_write_cycle;
430 int l_grant_write_bytes; 432 int l_grant_write_bytes;
431 433
432#ifdef XFS_LOG_TRACE
433 struct ktrace *l_grant_trace;
434#endif
435
436 /* The following field are used for debugging; need to hold icloglock */ 434 /* The following field are used for debugging; need to hold icloglock */
437#ifdef DEBUG 435#ifdef DEBUG
438 char *l_iclog_bak[XLOG_MAX_ICLOGS]; 436 char *l_iclog_bak[XLOG_MAX_ICLOGS];
@@ -456,12 +454,6 @@ extern void xlog_put_bp(struct xfs_buf *);
456 454
457extern kmem_zone_t *xfs_log_ticket_zone; 455extern kmem_zone_t *xfs_log_ticket_zone;
458 456
459/* iclog tracing */
460#define XLOG_TRACE_GRAB_FLUSH 1
461#define XLOG_TRACE_REL_FLUSH 2
462#define XLOG_TRACE_SLEEP_FLUSH 3
463#define XLOG_TRACE_WAKE_FLUSH 4
464
465/* 457/*
466 * Unmount record type is used as a pseudo transaction type for the ticket. 458 * Unmount record type is used as a pseudo transaction type for the ticket.
467 * It's value must be outside the range of XFS_TRANS_* values. 459 * It's value must be outside the range of XFS_TRANS_* values.
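XLOG_TIC_FLAGS above is nothing more than a { value, "name" } pair table; trace formatting helpers such as __print_flags() walk such a table to render a flag word as a pipe-separated string in the trace output. A self-contained illustration of the decoding idea in plain C (a conceptual stand-in, not the kernel macro):

#include <stdio.h>

#define XLOG_TIC_INITED		0x1
#define XLOG_TIC_PERM_RESERV	0x2
#define XLOG_TIC_IN_Q		0x4

/* Same shape as the kernel's { value, "name" } trace tables. */
static const struct { unsigned int flag; const char *name; } tic_flags[] = {
	{ XLOG_TIC_INITED,	"XLOG_TIC_INITED" },
	{ XLOG_TIC_PERM_RESERV,	"XLOG_TIC_PERM_RESERV" },
	{ XLOG_TIC_IN_Q,	"XLOG_TIC_IN_Q" },
};

/* Emit "A|B|C" for the bits that are set, roughly what __print_flags() produces. */
static void print_tic_flags(unsigned int flags)
{
	const char *sep = "";
	size_t i;

	for (i = 0; i < sizeof(tic_flags) / sizeof(tic_flags[0]); i++) {
		if (flags & tic_flags[i].flag) {
			printf("%s%s", sep, tic_flags[i].name);
			sep = "|";
		}
	}
	printf("\n");
}

int main(void)
{
	/* Prints: XLOG_TIC_INITED|XLOG_TIC_IN_Q */
	print_tic_flags(XLOG_TIC_INITED | XLOG_TIC_IN_Q);
	return 0;
}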
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 1ec98ed914d4..69ac2e5ef20c 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -46,6 +46,7 @@
46#include "xfs_quota.h" 46#include "xfs_quota.h"
47#include "xfs_rw.h" 47#include "xfs_rw.h"
48#include "xfs_utils.h" 48#include "xfs_utils.h"
49#include "xfs_trace.h"
49 50
50STATIC int xlog_find_zeroed(xlog_t *, xfs_daddr_t *); 51STATIC int xlog_find_zeroed(xlog_t *, xfs_daddr_t *);
51STATIC int xlog_clear_stale_blocks(xlog_t *, xfs_lsn_t); 52STATIC int xlog_clear_stale_blocks(xlog_t *, xfs_lsn_t);
@@ -225,16 +226,10 @@ xlog_header_check_dump(
225 xfs_mount_t *mp, 226 xfs_mount_t *mp,
226 xlog_rec_header_t *head) 227 xlog_rec_header_t *head)
227{ 228{
228 int b; 229 cmn_err(CE_DEBUG, "%s: SB : uuid = %pU, fmt = %d\n",
229 230 __func__, &mp->m_sb.sb_uuid, XLOG_FMT);
230 cmn_err(CE_DEBUG, "%s: SB : uuid = ", __func__); 231 cmn_err(CE_DEBUG, " log : uuid = %pU, fmt = %d\n",
231 for (b = 0; b < 16; b++) 232 &head->h_fs_uuid, be32_to_cpu(head->h_fmt));
232 cmn_err(CE_DEBUG, "%02x", ((__uint8_t *)&mp->m_sb.sb_uuid)[b]);
233 cmn_err(CE_DEBUG, ", fmt = %d\n", XLOG_FMT);
234 cmn_err(CE_DEBUG, " log : uuid = ");
235 for (b = 0; b < 16; b++)
236 cmn_err(CE_DEBUG, "%02x", ((__uint8_t *)&head->h_fs_uuid)[b]);
237 cmn_err(CE_DEBUG, ", fmt = %d\n", be32_to_cpu(head->h_fmt));
238} 233}
239#else 234#else
240#define xlog_header_check_dump(mp, head) 235#define xlog_header_check_dump(mp, head)
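The xlog_header_check_dump() rewrite above leans on the %pU extension in the kernel's vsprintf(), which formats a pointer to 16 bytes of UUID data in the canonical dashed form and replaces the old per-byte hex loops. A minimal sketch of the idiom (the helper name is made up for illustration):

#include <linux/kernel.h>

/*
 * %pU expects a pointer to 16 bytes of UUID data and prints it as
 * xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx in a single printk() call.
 */
static void dump_one_uuid(const unsigned char *uuid)
{
	printk(KERN_DEBUG "uuid = %pU\n", uuid);
}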
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 66a888a9ad6f..eb403b40e120 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -44,6 +44,8 @@
44#include "xfs_quota.h" 44#include "xfs_quota.h"
45#include "xfs_fsops.h" 45#include "xfs_fsops.h"
46#include "xfs_utils.h" 46#include "xfs_utils.h"
47#include "xfs_trace.h"
48
47 49
48STATIC void xfs_unmountfs_wait(xfs_mount_t *); 50STATIC void xfs_unmountfs_wait(xfs_mount_t *);
49 51
@@ -2389,12 +2391,12 @@ xfs_icsb_modify_counters(
2389{ 2391{
2390 xfs_icsb_cnts_t *icsbp; 2392 xfs_icsb_cnts_t *icsbp;
2391 long long lcounter; /* long counter for 64 bit fields */ 2393 long long lcounter; /* long counter for 64 bit fields */
2392 int cpu, ret = 0; 2394 int ret = 0;
2393 2395
2394 might_sleep(); 2396 might_sleep();
2395again: 2397again:
2396 cpu = get_cpu(); 2398 preempt_disable();
2397 icsbp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, cpu); 2399 icsbp = this_cpu_ptr(mp->m_sb_cnts);
2398 2400
2399 /* 2401 /*
2400 * if the counter is disabled, go to slow path 2402 * if the counter is disabled, go to slow path
@@ -2438,11 +2440,11 @@ again:
2438 break; 2440 break;
2439 } 2441 }
2440 xfs_icsb_unlock_cntr(icsbp); 2442 xfs_icsb_unlock_cntr(icsbp);
2441 put_cpu(); 2443 preempt_enable();
2442 return 0; 2444 return 0;
2443 2445
2444slow_path: 2446slow_path:
2445 put_cpu(); 2447 preempt_enable();
2446 2448
2447 /* 2449 /*
2448 * serialise with a mutex so we don't burn lots of cpu on 2450 * serialise with a mutex so we don't burn lots of cpu on
@@ -2490,7 +2492,7 @@ slow_path:
2490 2492
2491balance_counter: 2493balance_counter:
2492 xfs_icsb_unlock_cntr(icsbp); 2494 xfs_icsb_unlock_cntr(icsbp);
2493 put_cpu(); 2495 preempt_enable();
2494 2496
2495 /* 2497 /*
2496 * We may have multiple threads here if multiple per-cpu 2498 * We may have multiple threads here if multiple per-cpu
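The xfs_icsb_modify_counters() hunks above replace the get_cpu()/per_cpu_ptr()/put_cpu() sequence with preempt_disable()/this_cpu_ptr()/preempt_enable(), so the CPU number never needs to be materialised. A minimal sketch of the two patterns, using a made-up per-cpu counter rather than the real xfs_icsb_cnts_t:

#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/smp.h>

/* Illustrative per-cpu counter, allocated elsewhere with alloc_percpu(long). */
static long *demo_counter;

/* Old pattern: look up the CPU number, then index the per-cpu data with it. */
static void demo_add_old(long delta)
{
	int cpu = get_cpu();		/* disables preemption, returns CPU id */

	*per_cpu_ptr(demo_counter, cpu) += delta;
	put_cpu();
}

/* New pattern: this_cpu_ptr() resolves the local CPU's copy directly. */
static void demo_add_new(long delta)
{
	preempt_disable();
	*this_cpu_ptr(demo_counter) += delta;
	preempt_enable();
}

Both versions run the update with preemption disabled; the new form simply lets the per-cpu accessor do the address arithmetic for the local CPU.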
diff --git a/fs/xfs/xfs_quota.h b/fs/xfs/xfs_quota.h
index 3ec91ac74c2a..91bfd60f4c74 100644
--- a/fs/xfs/xfs_quota.h
+++ b/fs/xfs/xfs_quota.h
@@ -92,6 +92,14 @@ typedef struct xfs_dqblk {
92 92
93#define XFS_DQ_ALLTYPES (XFS_DQ_USER|XFS_DQ_PROJ|XFS_DQ_GROUP) 93#define XFS_DQ_ALLTYPES (XFS_DQ_USER|XFS_DQ_PROJ|XFS_DQ_GROUP)
94 94
95#define XFS_DQ_FLAGS \
96 { XFS_DQ_USER, "USER" }, \
97 { XFS_DQ_PROJ, "PROJ" }, \
98 { XFS_DQ_GROUP, "GROUP" }, \
99 { XFS_DQ_DIRTY, "DIRTY" }, \
100 { XFS_DQ_WANT, "WANT" }, \
101 { XFS_DQ_INACTIVE, "INACTIVE" }
102
95/* 103/*
96 * In the worst case, when both user and group quotas are on, 104 * In the worst case, when both user and group quotas are on,
97 * we can have a max of three dquots changing in a single transaction. 105 * we can have a max of three dquots changing in a single transaction.
diff --git a/fs/xfs/xfs_rename.c b/fs/xfs/xfs_rename.c
index b81deea0ce19..fc1cda23b817 100644
--- a/fs/xfs/xfs_rename.c
+++ b/fs/xfs/xfs_rename.c
@@ -39,6 +39,7 @@
39#include "xfs_utils.h" 39#include "xfs_utils.h"
40#include "xfs_trans_space.h" 40#include "xfs_trans_space.h"
41#include "xfs_vnodeops.h" 41#include "xfs_vnodeops.h"
42#include "xfs_trace.h"
42 43
43 44
44/* 45/*
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index 385f6dceba5d..9e15a1185362 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -45,6 +45,7 @@
45#include "xfs_inode_item.h" 45#include "xfs_inode_item.h"
46#include "xfs_trans_space.h" 46#include "xfs_trans_space.h"
47#include "xfs_utils.h" 47#include "xfs_utils.h"
48#include "xfs_trace.h"
48 49
49 50
50/* 51/*
diff --git a/fs/xfs/xfs_rw.c b/fs/xfs/xfs_rw.c
index 4c199d18f850..5aa07caea5f1 100644
--- a/fs/xfs/xfs_rw.c
+++ b/fs/xfs/xfs_rw.c
@@ -44,6 +44,7 @@
44#include "xfs_error.h" 44#include "xfs_error.h"
45#include "xfs_buf_item.h" 45#include "xfs_buf_item.h"
46#include "xfs_rw.h" 46#include "xfs_rw.h"
47#include "xfs_trace.h"
47 48
48/* 49/*
49 * This is a subroutine for xfs_write() and other writers (xfs_ioctl) 50 * This is a subroutine for xfs_write() and other writers (xfs_ioctl)
@@ -171,7 +172,6 @@ xfs_bioerror(
171 * No need to wait until the buffer is unpinned. 172 * No need to wait until the buffer is unpinned.
172 * We aren't flushing it. 173 * We aren't flushing it.
173 */ 174 */
174 xfs_buftrace("XFS IOERROR", bp);
175 XFS_BUF_ERROR(bp, EIO); 175 XFS_BUF_ERROR(bp, EIO);
176 /* 176 /*
177 * We're calling biodone, so delete B_DONE flag. Either way 177 * We're calling biodone, so delete B_DONE flag. Either way
@@ -205,7 +205,6 @@ xfs_bioerror_relse(
205 ASSERT(XFS_BUF_IODONE_FUNC(bp) != xfs_buf_iodone_callbacks); 205 ASSERT(XFS_BUF_IODONE_FUNC(bp) != xfs_buf_iodone_callbacks);
206 ASSERT(XFS_BUF_IODONE_FUNC(bp) != xlog_iodone); 206 ASSERT(XFS_BUF_IODONE_FUNC(bp) != xlog_iodone);
207 207
208 xfs_buftrace("XFS IOERRELSE", bp);
209 fl = XFS_BUF_BFLAGS(bp); 208 fl = XFS_BUF_BFLAGS(bp);
210 /* 209 /*
211 * No need to wait until the buffer is unpinned. 210 * No need to wait until the buffer is unpinned.
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index a0574f593f52..ca64f33c63a3 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -100,6 +100,49 @@ typedef struct xfs_trans_header {
100#define XFS_TRANS_TYPE_MAX 41 100#define XFS_TRANS_TYPE_MAX 41
101/* new transaction types need to be reflected in xfs_logprint(8) */ 101/* new transaction types need to be reflected in xfs_logprint(8) */
102 102
103#define XFS_TRANS_TYPES \
104 { XFS_TRANS_SETATTR_NOT_SIZE, "SETATTR_NOT_SIZE" }, \
105 { XFS_TRANS_SETATTR_SIZE, "SETATTR_SIZE" }, \
106 { XFS_TRANS_INACTIVE, "INACTIVE" }, \
107 { XFS_TRANS_CREATE, "CREATE" }, \
108 { XFS_TRANS_CREATE_TRUNC, "CREATE_TRUNC" }, \
109 { XFS_TRANS_TRUNCATE_FILE, "TRUNCATE_FILE" }, \
110 { XFS_TRANS_REMOVE, "REMOVE" }, \
111 { XFS_TRANS_LINK, "LINK" }, \
112 { XFS_TRANS_RENAME, "RENAME" }, \
113 { XFS_TRANS_MKDIR, "MKDIR" }, \
114 { XFS_TRANS_RMDIR, "RMDIR" }, \
115 { XFS_TRANS_SYMLINK, "SYMLINK" }, \
116 { XFS_TRANS_SET_DMATTRS, "SET_DMATTRS" }, \
117 { XFS_TRANS_GROWFS, "GROWFS" }, \
118 { XFS_TRANS_STRAT_WRITE, "STRAT_WRITE" }, \
119 { XFS_TRANS_DIOSTRAT, "DIOSTRAT" }, \
120 { XFS_TRANS_WRITEID, "WRITEID" }, \
121 { XFS_TRANS_ADDAFORK, "ADDAFORK" }, \
122 { XFS_TRANS_ATTRINVAL, "ATTRINVAL" }, \
123 { XFS_TRANS_ATRUNCATE, "ATRUNCATE" }, \
124 { XFS_TRANS_ATTR_SET, "ATTR_SET" }, \
125 { XFS_TRANS_ATTR_RM, "ATTR_RM" }, \
126 { XFS_TRANS_ATTR_FLAG, "ATTR_FLAG" }, \
127 { XFS_TRANS_CLEAR_AGI_BUCKET, "CLEAR_AGI_BUCKET" }, \
128 { XFS_TRANS_QM_SBCHANGE, "QM_SBCHANGE" }, \
129 { XFS_TRANS_QM_QUOTAOFF, "QM_QUOTAOFF" }, \
130 { XFS_TRANS_QM_DQALLOC, "QM_DQALLOC" }, \
131 { XFS_TRANS_QM_SETQLIM, "QM_SETQLIM" }, \
132 { XFS_TRANS_QM_DQCLUSTER, "QM_DQCLUSTER" }, \
133 { XFS_TRANS_QM_QINOCREATE, "QM_QINOCREATE" }, \
134 { XFS_TRANS_QM_QUOTAOFF_END, "QM_QOFF_END" }, \
135 { XFS_TRANS_SB_UNIT, "SB_UNIT" }, \
136 { XFS_TRANS_FSYNC_TS, "FSYNC_TS" }, \
137 { XFS_TRANS_GROWFSRT_ALLOC, "GROWFSRT_ALLOC" }, \
138 { XFS_TRANS_GROWFSRT_ZERO, "GROWFSRT_ZERO" }, \
139 { XFS_TRANS_GROWFSRT_FREE, "GROWFSRT_FREE" }, \
140 { XFS_TRANS_SWAPEXT, "SWAPEXT" }, \
141 { XFS_TRANS_SB_COUNT, "SB_COUNT" }, \
142 { XFS_TRANS_DUMMY1, "DUMMY1" }, \
143 { XFS_TRANS_DUMMY2, "DUMMY2" }, \
144 { XLOG_UNMOUNT_REC_TYPE, "UNMOUNT" }
145
103/* 146/*
104 * This structure is used to track log items associated with 147 * This structure is used to track log items associated with
105 * a transaction. It points to the log item and keeps some 148 * a transaction. It points to the log item and keeps some
@@ -782,6 +825,10 @@ typedef struct xfs_log_item {
782#define XFS_LI_IN_AIL 0x1 825#define XFS_LI_IN_AIL 0x1
783#define XFS_LI_ABORTED 0x2 826#define XFS_LI_ABORTED 0x2
784 827
828#define XFS_LI_FLAGS \
829 { XFS_LI_IN_AIL, "IN_AIL" }, \
830 { XFS_LI_ABORTED, "ABORTED" }
831
785typedef struct xfs_item_ops { 832typedef struct xfs_item_ops {
786 uint (*iop_size)(xfs_log_item_t *); 833 uint (*iop_size)(xfs_log_item_t *);
787 void (*iop_format)(xfs_log_item_t *, struct xfs_log_iovec *); 834 void (*iop_format)(xfs_log_item_t *, struct xfs_log_iovec *);
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index 03a1f701fea8..49130628d5ef 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -38,6 +38,7 @@
38#include "xfs_trans_priv.h" 38#include "xfs_trans_priv.h"
39#include "xfs_error.h" 39#include "xfs_error.h"
40#include "xfs_rw.h" 40#include "xfs_rw.h"
41#include "xfs_trace.h"
41 42
42 43
43STATIC xfs_buf_t *xfs_trans_buf_item_match(xfs_trans_t *, xfs_buftarg_t *, 44STATIC xfs_buf_t *xfs_trans_buf_item_match(xfs_trans_t *, xfs_buftarg_t *,
@@ -95,26 +96,23 @@ xfs_trans_get_buf(xfs_trans_t *tp,
95 } 96 }
96 if (bp != NULL) { 97 if (bp != NULL) {
97 ASSERT(XFS_BUF_VALUSEMA(bp) <= 0); 98 ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
98 if (XFS_FORCED_SHUTDOWN(tp->t_mountp)) { 99 if (XFS_FORCED_SHUTDOWN(tp->t_mountp))
99 xfs_buftrace("TRANS GET RECUR SHUT", bp);
100 XFS_BUF_SUPER_STALE(bp); 100 XFS_BUF_SUPER_STALE(bp);
101 } 101
102 /* 102 /*
103 * If the buffer is stale then it was binval'ed 103 * If the buffer is stale then it was binval'ed
104 * since last read. This doesn't matter since the 104 * since last read. This doesn't matter since the
105 * caller isn't allowed to use the data anyway. 105 * caller isn't allowed to use the data anyway.
106 */ 106 */
107 else if (XFS_BUF_ISSTALE(bp)) { 107 else if (XFS_BUF_ISSTALE(bp))
108 xfs_buftrace("TRANS GET RECUR STALE", bp);
109 ASSERT(!XFS_BUF_ISDELAYWRITE(bp)); 108 ASSERT(!XFS_BUF_ISDELAYWRITE(bp));
110 } 109
111 ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp); 110 ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
112 bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *); 111 bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
113 ASSERT(bip != NULL); 112 ASSERT(bip != NULL);
114 ASSERT(atomic_read(&bip->bli_refcount) > 0); 113 ASSERT(atomic_read(&bip->bli_refcount) > 0);
115 bip->bli_recur++; 114 bip->bli_recur++;
116 xfs_buftrace("TRANS GET RECUR", bp); 115 trace_xfs_trans_get_buf_recur(bip);
117 xfs_buf_item_trace("GET RECUR", bip);
118 return (bp); 116 return (bp);
119 } 117 }
120 118
@@ -166,8 +164,7 @@ xfs_trans_get_buf(xfs_trans_t *tp,
166 */ 164 */
167 XFS_BUF_SET_FSPRIVATE2(bp, tp); 165 XFS_BUF_SET_FSPRIVATE2(bp, tp);
168 166
169 xfs_buftrace("TRANS GET", bp); 167 trace_xfs_trans_get_buf(bip);
170 xfs_buf_item_trace("GET", bip);
171 return (bp); 168 return (bp);
172} 169}
173 170
@@ -207,7 +204,7 @@ xfs_trans_getsb(xfs_trans_t *tp,
207 ASSERT(bip != NULL); 204 ASSERT(bip != NULL);
208 ASSERT(atomic_read(&bip->bli_refcount) > 0); 205 ASSERT(atomic_read(&bip->bli_refcount) > 0);
209 bip->bli_recur++; 206 bip->bli_recur++;
210 xfs_buf_item_trace("GETSB RECUR", bip); 207 trace_xfs_trans_getsb_recur(bip);
211 return (bp); 208 return (bp);
212 } 209 }
213 210
@@ -249,7 +246,7 @@ xfs_trans_getsb(xfs_trans_t *tp,
249 */ 246 */
250 XFS_BUF_SET_FSPRIVATE2(bp, tp); 247 XFS_BUF_SET_FSPRIVATE2(bp, tp);
251 248
252 xfs_buf_item_trace("GETSB", bip); 249 trace_xfs_trans_getsb(bip);
253 return (bp); 250 return (bp);
254} 251}
255 252
@@ -347,7 +344,7 @@ xfs_trans_read_buf(
347 ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); 344 ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
348 ASSERT((XFS_BUF_ISERROR(bp)) == 0); 345 ASSERT((XFS_BUF_ISERROR(bp)) == 0);
349 if (!(XFS_BUF_ISDONE(bp))) { 346 if (!(XFS_BUF_ISDONE(bp))) {
350 xfs_buftrace("READ_BUF_INCORE !DONE", bp); 347 trace_xfs_trans_read_buf_io(bp, _RET_IP_);
351 ASSERT(!XFS_BUF_ISASYNC(bp)); 348 ASSERT(!XFS_BUF_ISASYNC(bp));
352 XFS_BUF_READ(bp); 349 XFS_BUF_READ(bp);
353 xfsbdstrat(tp->t_mountp, bp); 350 xfsbdstrat(tp->t_mountp, bp);
@@ -372,7 +369,7 @@ xfs_trans_read_buf(
372 * brelse it either. Just get out. 369 * brelse it either. Just get out.
373 */ 370 */
374 if (XFS_FORCED_SHUTDOWN(mp)) { 371 if (XFS_FORCED_SHUTDOWN(mp)) {
375 xfs_buftrace("READ_BUF_INCORE XFSSHUTDN", bp); 372 trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
376 *bpp = NULL; 373 *bpp = NULL;
377 return XFS_ERROR(EIO); 374 return XFS_ERROR(EIO);
378 } 375 }
@@ -382,7 +379,7 @@ xfs_trans_read_buf(
382 bip->bli_recur++; 379 bip->bli_recur++;
383 380
384 ASSERT(atomic_read(&bip->bli_refcount) > 0); 381 ASSERT(atomic_read(&bip->bli_refcount) > 0);
385 xfs_buf_item_trace("READ RECUR", bip); 382 trace_xfs_trans_read_buf_recur(bip);
386 *bpp = bp; 383 *bpp = bp;
387 return 0; 384 return 0;
388 } 385 }
@@ -402,7 +399,6 @@ xfs_trans_read_buf(
402 } 399 }
403 if (XFS_BUF_GETERROR(bp) != 0) { 400 if (XFS_BUF_GETERROR(bp) != 0) {
404 XFS_BUF_SUPER_STALE(bp); 401 XFS_BUF_SUPER_STALE(bp);
405 xfs_buftrace("READ ERROR", bp);
406 error = XFS_BUF_GETERROR(bp); 402 error = XFS_BUF_GETERROR(bp);
407 403
408 xfs_ioerror_alert("xfs_trans_read_buf", mp, 404 xfs_ioerror_alert("xfs_trans_read_buf", mp,
@@ -461,8 +457,7 @@ xfs_trans_read_buf(
461 */ 457 */
462 XFS_BUF_SET_FSPRIVATE2(bp, tp); 458 XFS_BUF_SET_FSPRIVATE2(bp, tp);
463 459
464 xfs_buftrace("TRANS READ", bp); 460 trace_xfs_trans_read_buf(bip);
465 xfs_buf_item_trace("READ", bip);
466 *bpp = bp; 461 *bpp = bp;
467 return 0; 462 return 0;
468 463
@@ -480,7 +475,7 @@ shutdown_abort:
480 ASSERT((XFS_BUF_BFLAGS(bp) & (XFS_B_STALE|XFS_B_DELWRI)) != 475 ASSERT((XFS_BUF_BFLAGS(bp) & (XFS_B_STALE|XFS_B_DELWRI)) !=
481 (XFS_B_STALE|XFS_B_DELWRI)); 476 (XFS_B_STALE|XFS_B_DELWRI));
482 477
483 xfs_buftrace("READ_BUF XFSSHUTDN", bp); 478 trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
484 xfs_buf_relse(bp); 479 xfs_buf_relse(bp);
485 *bpp = NULL; 480 *bpp = NULL;
486 return XFS_ERROR(EIO); 481 return XFS_ERROR(EIO);
@@ -546,13 +541,14 @@ xfs_trans_brelse(xfs_trans_t *tp,
546 lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)bip); 541 lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)bip);
547 ASSERT(lidp != NULL); 542 ASSERT(lidp != NULL);
548 543
544 trace_xfs_trans_brelse(bip);
545
549 /* 546 /*
550 * If the release is just for a recursive lock, 547 * If the release is just for a recursive lock,
551 * then decrement the count and return. 548 * then decrement the count and return.
552 */ 549 */
553 if (bip->bli_recur > 0) { 550 if (bip->bli_recur > 0) {
554 bip->bli_recur--; 551 bip->bli_recur--;
555 xfs_buf_item_trace("RELSE RECUR", bip);
556 return; 552 return;
557 } 553 }
558 554
@@ -560,10 +556,8 @@ xfs_trans_brelse(xfs_trans_t *tp,
560 * If the buffer is dirty within this transaction, we can't 556 * If the buffer is dirty within this transaction, we can't
561 * release it until we commit. 557 * release it until we commit.
562 */ 558 */
563 if (lidp->lid_flags & XFS_LID_DIRTY) { 559 if (lidp->lid_flags & XFS_LID_DIRTY)
564 xfs_buf_item_trace("RELSE DIRTY", bip);
565 return; 560 return;
566 }
567 561
568 /* 562 /*
569 * If the buffer has been invalidated, then we can't release 563 * If the buffer has been invalidated, then we can't release
@@ -571,13 +565,10 @@ xfs_trans_brelse(xfs_trans_t *tp,
571 * as part of this transaction. This prevents us from pulling 565 * as part of this transaction. This prevents us from pulling
572 * the item from the AIL before we should. 566 * the item from the AIL before we should.
573 */ 567 */
574 if (bip->bli_flags & XFS_BLI_STALE) { 568 if (bip->bli_flags & XFS_BLI_STALE)
575 xfs_buf_item_trace("RELSE STALE", bip);
576 return; 569 return;
577 }
578 570
579 ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED)); 571 ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
580 xfs_buf_item_trace("RELSE", bip);
581 572
582 /* 573 /*
583 * Free up the log item descriptor tracking the released item. 574 * Free up the log item descriptor tracking the released item.
@@ -674,7 +665,7 @@ xfs_trans_bjoin(xfs_trans_t *tp,
674 */ 665 */
675 XFS_BUF_SET_FSPRIVATE2(bp, tp); 666 XFS_BUF_SET_FSPRIVATE2(bp, tp);
676 667
677 xfs_buf_item_trace("BJOIN", bip); 668 trace_xfs_trans_bjoin(bip);
678} 669}
679 670
680/* 671/*
@@ -698,7 +689,7 @@ xfs_trans_bhold(xfs_trans_t *tp,
698 ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL)); 689 ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL));
699 ASSERT(atomic_read(&bip->bli_refcount) > 0); 690 ASSERT(atomic_read(&bip->bli_refcount) > 0);
700 bip->bli_flags |= XFS_BLI_HOLD; 691 bip->bli_flags |= XFS_BLI_HOLD;
701 xfs_buf_item_trace("BHOLD", bip); 692 trace_xfs_trans_bhold(bip);
702} 693}
703 694
704/* 695/*
@@ -721,7 +712,8 @@ xfs_trans_bhold_release(xfs_trans_t *tp,
721 ASSERT(atomic_read(&bip->bli_refcount) > 0); 712 ASSERT(atomic_read(&bip->bli_refcount) > 0);
722 ASSERT(bip->bli_flags & XFS_BLI_HOLD); 713 ASSERT(bip->bli_flags & XFS_BLI_HOLD);
723 bip->bli_flags &= ~XFS_BLI_HOLD; 714 bip->bli_flags &= ~XFS_BLI_HOLD;
724 xfs_buf_item_trace("BHOLD RELEASE", bip); 715
716 trace_xfs_trans_bhold_release(bip);
725} 717}
726 718
727/* 719/*
@@ -767,6 +759,8 @@ xfs_trans_log_buf(xfs_trans_t *tp,
767 XFS_BUF_SET_IODONE_FUNC(bp, xfs_buf_iodone_callbacks); 759 XFS_BUF_SET_IODONE_FUNC(bp, xfs_buf_iodone_callbacks);
768 bip->bli_item.li_cb = (void(*)(xfs_buf_t*,xfs_log_item_t*))xfs_buf_iodone; 760 bip->bli_item.li_cb = (void(*)(xfs_buf_t*,xfs_log_item_t*))xfs_buf_iodone;
769 761
762 trace_xfs_trans_log_buf(bip);
763
770 /* 764 /*
771 * If we invalidated the buffer within this transaction, then 765 * If we invalidated the buffer within this transaction, then
772 * cancel the invalidation now that we're dirtying the buffer 766 * cancel the invalidation now that we're dirtying the buffer
@@ -774,7 +768,6 @@ xfs_trans_log_buf(xfs_trans_t *tp,
774 * because we have a reference to the buffer this entire time. 768 * because we have a reference to the buffer this entire time.
775 */ 769 */
776 if (bip->bli_flags & XFS_BLI_STALE) { 770 if (bip->bli_flags & XFS_BLI_STALE) {
777 xfs_buf_item_trace("BLOG UNSTALE", bip);
778 bip->bli_flags &= ~XFS_BLI_STALE; 771 bip->bli_flags &= ~XFS_BLI_STALE;
779 ASSERT(XFS_BUF_ISSTALE(bp)); 772 ASSERT(XFS_BUF_ISSTALE(bp));
780 XFS_BUF_UNSTALE(bp); 773 XFS_BUF_UNSTALE(bp);
@@ -789,7 +782,6 @@ xfs_trans_log_buf(xfs_trans_t *tp,
789 lidp->lid_flags &= ~XFS_LID_BUF_STALE; 782 lidp->lid_flags &= ~XFS_LID_BUF_STALE;
790 bip->bli_flags |= XFS_BLI_LOGGED; 783 bip->bli_flags |= XFS_BLI_LOGGED;
791 xfs_buf_item_log(bip, first, last); 784 xfs_buf_item_log(bip, first, last);
792 xfs_buf_item_trace("BLOG", bip);
793} 785}
794 786
795 787
@@ -828,6 +820,8 @@ xfs_trans_binval(
828 ASSERT(lidp != NULL); 820 ASSERT(lidp != NULL);
829 ASSERT(atomic_read(&bip->bli_refcount) > 0); 821 ASSERT(atomic_read(&bip->bli_refcount) > 0);
830 822
823 trace_xfs_trans_binval(bip);
824
831 if (bip->bli_flags & XFS_BLI_STALE) { 825 if (bip->bli_flags & XFS_BLI_STALE) {
832 /* 826 /*
833 * If the buffer is already invalidated, then 827 * If the buffer is already invalidated, then
@@ -840,8 +834,6 @@ xfs_trans_binval(
840 ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL); 834 ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL);
841 ASSERT(lidp->lid_flags & XFS_LID_DIRTY); 835 ASSERT(lidp->lid_flags & XFS_LID_DIRTY);
842 ASSERT(tp->t_flags & XFS_TRANS_DIRTY); 836 ASSERT(tp->t_flags & XFS_TRANS_DIRTY);
843 xfs_buftrace("XFS_BINVAL RECUR", bp);
844 xfs_buf_item_trace("BINVAL RECUR", bip);
845 return; 837 return;
846 } 838 }
847 839
@@ -875,8 +867,6 @@ xfs_trans_binval(
875 (bip->bli_format.blf_map_size * sizeof(uint))); 867 (bip->bli_format.blf_map_size * sizeof(uint)));
876 lidp->lid_flags |= XFS_LID_DIRTY|XFS_LID_BUF_STALE; 868 lidp->lid_flags |= XFS_LID_DIRTY|XFS_LID_BUF_STALE;
877 tp->t_flags |= XFS_TRANS_DIRTY; 869 tp->t_flags |= XFS_TRANS_DIRTY;
878 xfs_buftrace("XFS_BINVAL", bp);
879 xfs_buf_item_trace("BINVAL", bip);
880} 870}
881 871
882/* 872/*
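The xfs_trans_buf.c hunks above collapse the old xfs_buftrace()/xfs_buf_item_trace() string pairs into one tracepoint per call site (trace_xfs_trans_get_buf(), trace_xfs_trans_brelse(), ...), each taking only the xfs_buf_log_item. On kernels that provide DECLARE_EVENT_CLASS()/DEFINE_EVENT(), a shared event class could cover all of them; the sketch below is an assumption about how that might look, not the actual xfs_trace.h contents, and the recorded fields are guesses:

/* TRACE_SYSTEM / include-guard boilerplate omitted; fields are assumptions. */
DECLARE_EVENT_CLASS(xfs_buf_item_class,
	TP_PROTO(struct xfs_buf_log_item *bip),
	TP_ARGS(bip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_daddr_t, blkno)
		__field(int, recur)
		__field(int, refcount)
	),
	TP_fast_assign(
		__entry->dev = bip->bli_buf->b_target->bt_dev;	/* assumed field path */
		__entry->blkno = XFS_BUF_ADDR(bip->bli_buf);
		__entry->recur = bip->bli_recur;
		__entry->refcount = atomic_read(&bip->bli_refcount);
	),
	TP_printk("dev %d:%d blkno 0x%llx recur %d refcount %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long long)__entry->blkno,
		  __entry->recur, __entry->refcount)
);

#define DEFINE_BUF_ITEM_EVENT(name) \
DEFINE_EVENT(xfs_buf_item_class, name, \
	TP_PROTO(struct xfs_buf_log_item *bip), \
	TP_ARGS(bip))
DEFINE_BUF_ITEM_EVENT(xfs_trans_get_buf);
DEFINE_BUF_ITEM_EVENT(xfs_trans_get_buf_recur);
DEFINE_BUF_ITEM_EVENT(xfs_trans_brelse);
DEFINE_BUF_ITEM_EVENT(xfs_trans_bhold);

Once compiled in, the individual events can be switched on at run time (for example under /sys/kernel/debug/tracing/events/xfs/, assuming the events register under an "xfs" trace system) instead of requiring the old compile-time ktrace buffers.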
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index 578f3f59b789..6558ffd8d140 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -53,6 +53,7 @@
53#include "xfs_log_priv.h" 53#include "xfs_log_priv.h"
54#include "xfs_filestream.h" 54#include "xfs_filestream.h"
55#include "xfs_vnodeops.h" 55#include "xfs_vnodeops.h"
56#include "xfs_trace.h"
56 57
57int 58int
58xfs_setattr( 59xfs_setattr(
@@ -1397,7 +1398,6 @@ xfs_lookup(
1397 if (error) 1398 if (error)
1398 goto out_free_name; 1399 goto out_free_name;
1399 1400
1400 xfs_itrace_ref(*ipp);
1401 return 0; 1401 return 0;
1402 1402
1403out_free_name: 1403out_free_name:
@@ -1543,7 +1543,6 @@ xfs_create(
1543 * At this point, we've gotten a newly allocated inode. 1543 * At this point, we've gotten a newly allocated inode.
1544 * It is locked (and joined to the transaction). 1544 * It is locked (and joined to the transaction).
1545 */ 1545 */
1546 xfs_itrace_ref(ip);
1547 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 1546 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1548 1547
1549 /* 1548 /*
@@ -2003,9 +2002,6 @@ xfs_remove(
2003 if (!is_dir && link_zero && xfs_inode_is_filestream(ip)) 2002 if (!is_dir && link_zero && xfs_inode_is_filestream(ip))
2004 xfs_filestream_deassociate(ip); 2003 xfs_filestream_deassociate(ip);
2005 2004
2006 xfs_itrace_exit(ip);
2007 xfs_itrace_exit(dp);
2008
2009 std_return: 2005 std_return:
2010 if (DM_EVENT_ENABLED(dp, DM_EVENT_POSTREMOVE)) { 2006 if (DM_EVENT_ENABLED(dp, DM_EVENT_POSTREMOVE)) {
2011 XFS_SEND_NAMESP(mp, DM_EVENT_POSTREMOVE, dp, DM_RIGHT_NULL, 2007 XFS_SEND_NAMESP(mp, DM_EVENT_POSTREMOVE, dp, DM_RIGHT_NULL,
@@ -2302,7 +2298,6 @@ xfs_symlink(
2302 goto error_return; 2298 goto error_return;
2303 goto error1; 2299 goto error1;
2304 } 2300 }
2305 xfs_itrace_ref(ip);
2306 2301
2307 /* 2302 /*
2308 * An error after we've joined dp to the transaction will result in the 2303 * An error after we've joined dp to the transaction will result in the
@@ -2845,7 +2840,6 @@ xfs_free_file_space(
2845 ioffset = offset & ~(rounding - 1); 2840 ioffset = offset & ~(rounding - 1);
2846 2841
2847 if (VN_CACHED(VFS_I(ip)) != 0) { 2842 if (VN_CACHED(VFS_I(ip)) != 0) {
2848 xfs_inval_cached_trace(ip, ioffset, -1, ioffset, -1);
2849 error = xfs_flushinval_pages(ip, ioffset, -1, FI_REMAPF_LOCKED); 2843 error = xfs_flushinval_pages(ip, ioffset, -1, FI_REMAPF_LOCKED);
2850 if (error) 2844 if (error)
2851 goto out_unlock_iolock; 2845 goto out_unlock_iolock;