path: root/fs/xfs/support/ktrace.c
author	David Chinner <dgc@sgi.com>	2008-03-05 21:45:43 -0500
committer	Lachlan McIlroy <lachlan@redback.melbourne.sgi.com>	2008-04-17 21:40:04 -0400
commit	d234154125197053d5215711b5df867979e55ebd (patch)
tree	366a6ff91b514a8f9846ac82926e0bbfd0a1fbed	/fs/xfs/support/ktrace.c
parent	6ee4752ffe782be6e86bea1403a2fe0f682aa71a (diff)
[XFS] Use power-of-2 sized buffers to reduce overhead
Now that the ktrace_enter() code is using atomics, the non-power-of-2
buffer sizes - which require modulus operations to get the index - are
showing up as using substantial CPU time in the profiles.

Force the buffer sizes to be rounded up to the nearest power of two and
use masking rather than modulus operations to convert the index counter
to the buffer index. This reduces ktrace_enter() overhead to 8% of a CPU
and again almost halves the trace-intensive test runtime.

SGI-PV: 977546
SGI-Modid: xfs-linux-melb:xfs-kern:30538a

Signed-off-by: David Chinner <dgc@sgi.com>
Signed-off-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Lachlan McIlroy <lachlan@sgi.com>
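The core of the change is the classic power-of-two masking trick: when the buffer size is a power of two, index % size and index & (size - 1) select the same slot, but the mask avoids an integer division on the hot path. Below is a minimal userspace sketch of that equivalence; it is not code from the patch, and roundup_pow2() is only a stand-in for the kernel's roundup_pow_of_two():

#include <assert.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's roundup_pow_of_two(). */
static unsigned long roundup_pow2(unsigned long v)
{
        unsigned long p = 1;

        while (p < v)
                p <<= 1;
        return p;
}

int main(void)
{
        unsigned long size = roundup_pow2(1000);        /* -> 1024 */
        unsigned long mask = size - 1;
        unsigned long i;

        /* For power-of-two sizes, masking and modulus pick the same slot. */
        for (i = 0; i < 5000; i++)
                assert((i & mask) == (i % size));

        printf("size=%lu mask=%#lx\n", size, mask);
        return 0;
}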
Diffstat (limited to 'fs/xfs/support/ktrace.c')
-rw-r--r--	fs/xfs/support/ktrace.c	| 22
1 file changed, 14 insertions(+), 8 deletions(-)
diff --git a/fs/xfs/support/ktrace.c b/fs/xfs/support/ktrace.c
index 4e0444c0aca6..0b75d302508f 100644
--- a/fs/xfs/support/ktrace.c
+++ b/fs/xfs/support/ktrace.c
@@ -24,7 +24,7 @@ static int ktrace_zentries;
 void __init
 ktrace_init(int zentries)
 {
-        ktrace_zentries = zentries;
+        ktrace_zentries = roundup_pow_of_two(zentries);
 
         ktrace_hdr_zone = kmem_zone_init(sizeof(ktrace_t),
                                         "ktrace_hdr");
@@ -47,13 +47,16 @@ ktrace_uninit(void)
  * ktrace_alloc()
  *
  * Allocate a ktrace header and enough buffering for the given
- * number of entries.
+ * number of entries. Round the number of entries up to a
+ * power of 2 so we can do fast masking to get the index from
+ * the atomic index counter.
  */
 ktrace_t *
 ktrace_alloc(int nentries, unsigned int __nocast sleep)
 {
         ktrace_t        *ktp;
         ktrace_entry_t  *ktep;
+        int             entries;
 
         ktp = (ktrace_t*)kmem_zone_alloc(ktrace_hdr_zone, sleep);
 
@@ -70,11 +73,12 @@ ktrace_alloc(int nentries, unsigned int __nocast sleep)
         /*
          * Special treatment for buffers with the ktrace_zentries entries
          */
-        if (nentries == ktrace_zentries) {
+        entries = roundup_pow_of_two(nentries);
+        if (entries == ktrace_zentries) {
                 ktep = (ktrace_entry_t*)kmem_zone_zalloc(ktrace_ent_zone,
                                                         sleep);
         } else {
-                ktep = (ktrace_entry_t*)kmem_zalloc((nentries * sizeof(*ktep)),
+                ktep = (ktrace_entry_t*)kmem_zalloc((entries * sizeof(*ktep)),
                                                         sleep | KM_LARGE);
         }
 
@@ -91,7 +95,9 @@ ktrace_alloc(int nentries, unsigned int __nocast sleep)
         }
 
         ktp->kt_entries = ktep;
-        ktp->kt_nentries = nentries;
+        ktp->kt_nentries = entries;
+        ASSERT(is_power_of_2(entries));
+        ktp->kt_index_mask = entries - 1;
         atomic_set(&ktp->kt_index, 0);
         ktp->kt_rollover = 0;
         return ktp;
@@ -160,7 +166,7 @@ ktrace_enter(
          * Grab an entry by pushing the index up to the next one.
          */
         index = atomic_add_return(1, &ktp->kt_index);
-        index = (index - 1) % ktp->kt_nentries;
+        index = (index - 1) & ktp->kt_index_mask;
         if (!ktp->kt_rollover && index == ktp->kt_nentries - 1)
                 ktp->kt_rollover = 1;
 
@@ -197,7 +203,7 @@ ktrace_nentries(
         if (ktp == NULL)
                 return 0;
 
-        index = atomic_read(&ktp->kt_index) % ktp->kt_nentries;
+        index = atomic_read(&ktp->kt_index) & ktp->kt_index_mask;
         return (ktp->kt_rollover ? ktp->kt_nentries : index);
 }
 
@@ -223,7 +229,7 @@ ktrace_first(ktrace_t *ktp, ktrace_snap_t *ktsp)
         int             nentries;
 
         if (ktp->kt_rollover)
-                index = atomic_read(&ktp->kt_index) % ktp->kt_nentries;
+                index = atomic_read(&ktp->kt_index) & ktp->kt_index_mask;
         else
                 index = 0;
 
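For readers who want the shape of the patched allocation and logging paths in one place, here is a simplified userspace model. It is not the kernel code: C11 atomics stand in for the kernel's atomic_t, and trace_alloc()/trace_enter() are hypothetical names mirroring ktrace_alloc()/ktrace_enter():

#include <stdatomic.h>
#include <stdlib.h>

struct trace_buf {
        size_t          nentries;       /* always a power of two */
        size_t          index_mask;     /* nentries - 1 */
        atomic_ulong    index;          /* free-running entry counter */
        void            **entries;
};

/* Hypothetical allocator mirroring the rounding now done in ktrace_alloc(). */
static struct trace_buf *trace_alloc(size_t wanted)
{
        struct trace_buf *tb = calloc(1, sizeof(*tb));
        size_t n = 1;

        if (!tb)
                return NULL;
        while (n < wanted)              /* roundup_pow_of_two() equivalent */
                n <<= 1;
        tb->nentries = n;
        tb->index_mask = n - 1;
        tb->entries = calloc(n, sizeof(*tb->entries));
        if (!tb->entries) {
                free(tb);
                return NULL;
        }
        atomic_init(&tb->index, 0);
        return tb;
}

/*
 * Mirrors the patched ktrace_enter(): claim the next slot with an atomic
 * increment, then mask the counter down to a buffer index.  Note that
 * atomic_fetch_add() returns the pre-increment value, so the "- 1"
 * adjustment needed with the kernel's atomic_add_return() is not used here.
 */
static void trace_enter(struct trace_buf *tb, void *val)
{
        size_t slot = atomic_fetch_add(&tb->index, 1) & tb->index_mask;

        tb->entries[slot] = val;
}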