author    David Chinner <dgc@sgi.com>  2008-03-05 21:45:35 -0500
committer Lachlan McIlroy <lachlan@redback.melbourne.sgi.com>  2008-04-17 21:39:55 -0400
commit    6ee4752ffe782be6e86bea1403a2fe0f682aa71a (patch)
tree      aea65e2a9f1f6ca5c3f8dd44b78c5a9dd5adaee3 /fs/xfs/support/ktrace.c
parent    44d814ced4cffbfe6a775c5bb8b941a6e734e7d9 (diff)
[XFS] Use atomic counters for ktrace buffer indexes
ktrace_enter() is consuming vast amounts of CPU time due to the use of a
single global lock for protecting buffer index increments. Change it to
use per-buffer atomic counters - this reduces ktrace_enter() overhead
during a trace intensive test on a 4p machine from 58% of all CPU time
to 12% and halves test runtime.

SGI-PV: 977546
SGI-Modid: xfs-linux-melb:xfs-kern:30537a

Signed-off-by: David Chinner <dgc@sgi.com>
Signed-off-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Lachlan McIlroy <lachlan@sgi.com>
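For illustration, a minimal userspace sketch of the pattern the patch moves
to, using C11 <stdatomic.h> in place of the kernel's atomic_t API. The type
and function names (trace_buf, claim_slot) are hypothetical, not from
ktrace.c; the point is only that each buffer owns its counter, so concurrent
tracers contend on that buffer's cache line rather than on one global lock:

	#include <stdatomic.h>

	struct trace_buf {
		atomic_int	index;		/* monotonic claim counter */
		int		nentries;	/* ring size */
	};

	/*
	 * Claim the next slot lock-free. Note: C11 atomic_fetch_add()
	 * returns the value *before* the add, so no "- 1" is needed here;
	 * the kernel's atomic_add_return() returns the value *after* the
	 * add, which is why the patch computes (index - 1) % kt_nentries.
	 */
	static int claim_slot(struct trace_buf *tb)
	{
		int raw = atomic_fetch_add(&tb->index, 1);
		/* Fold the monotonic counter into the ring. A long-running
		 * counter eventually overflows a signed int; masking with a
		 * power-of-two ring size would avoid a negative remainder. */
		return raw % tb->nentries;
	}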
Diffstat (limited to 'fs/xfs/support/ktrace.c')
-rw-r--r--  fs/xfs/support/ktrace.c  21
1 file changed, 8 insertions(+), 13 deletions(-)
diff --git a/fs/xfs/support/ktrace.c b/fs/xfs/support/ktrace.c
index 129067cfcb86..4e0444c0aca6 100644
--- a/fs/xfs/support/ktrace.c
+++ b/fs/xfs/support/ktrace.c
@@ -92,7 +92,7 @@ ktrace_alloc(int nentries, unsigned int __nocast sleep)
 
 	ktp->kt_entries = ktep;
 	ktp->kt_nentries = nentries;
-	ktp->kt_index = 0;
+	atomic_set(&ktp->kt_index, 0);
 	ktp->kt_rollover = 0;
 	return ktp;
 }
@@ -151,8 +151,6 @@ ktrace_enter(
 	void		*val14,
 	void		*val15)
 {
-	static DEFINE_SPINLOCK(wrap_lock);
-	unsigned long	flags;
 	int		index;
 	ktrace_entry_t	*ktep;
 
@@ -161,12 +159,8 @@ ktrace_enter(
 	/*
 	 * Grab an entry by pushing the index up to the next one.
 	 */
-	spin_lock_irqsave(&wrap_lock, flags);
-	index = ktp->kt_index;
-	if (++ktp->kt_index == ktp->kt_nentries)
-		ktp->kt_index = 0;
-	spin_unlock_irqrestore(&wrap_lock, flags);
-
+	index = atomic_add_return(1, &ktp->kt_index);
+	index = (index - 1) % ktp->kt_nentries;
 	if (!ktp->kt_rollover && index == ktp->kt_nentries - 1)
 		ktp->kt_rollover = 1;
 
@@ -199,11 +193,12 @@ int
 ktrace_nentries(
 	ktrace_t	*ktp)
 {
-	if (ktp == NULL) {
+	int	index;
+	if (ktp == NULL)
 		return 0;
-	}
 
-	return (ktp->kt_rollover ? ktp->kt_nentries : ktp->kt_index);
+	index = atomic_read(&ktp->kt_index) % ktp->kt_nentries;
+	return (ktp->kt_rollover ? ktp->kt_nentries : index);
 }
 
 /*
@@ -228,7 +223,7 @@ ktrace_first(ktrace_t *ktp, ktrace_snap_t *ktsp)
 	int		nentries;
 
 	if (ktp->kt_rollover)
-		index = ktp->kt_index;
+		index = atomic_read(&ktp->kt_index) % ktp->kt_nentries;
 	else
 		index = 0;
 
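A note on the read side, continuing the same hypothetical C11 analogue as
above: after this patch kt_index grows monotonically and is only folded back
into the ring when it is read, so every reader applies the same modulo
reduction. Once the buffer has rolled over, the oldest entry sits at the
current claim index modulo the ring size; before rollover the buffer is
valid from entry 0, mirroring what ktrace_first() does here:

	/* Reader-side sketch (same hypothetical trace_buf type as above):
	 * find the oldest valid entry for a snapshot. */
	static int first_slot(struct trace_buf *tb, int rollover)
	{
		int raw = atomic_load(&tb->index);
		return rollover ? raw % tb->nentries : 0;
	}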