author     Linus Torvalds <torvalds@linux-foundation.org>   2008-04-18 11:39:39 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2008-04-18 11:39:39 -0400
commit     253ba4e79edc695b2925bd2ef34de06ff4d4070c (patch)
tree       259667140ca702d6a218cc54f4df275fbbda747b /fs/xfs/support
parent     188da98800893691e47eea9335a234378e32aceb (diff)
parent     65e67f5165c8a156b34ee7adf65d5ed3b16a910d (diff)

Merge branch 'for-linus' of git://oss.sgi.com:8090/xfs/xfs-2.6
* 'for-linus' of git://oss.sgi.com:8090/xfs/xfs-2.6: (87 commits)
[XFS] Fix merge failure
[XFS] The forward declarations for the xfs_ioctl() helpers and the
[XFS] Update XFS documentation for noikeep/ikeep.
[XFS] Update XFS Documentation for ikeep and ihashsize
[XFS] Remove unused HAVE_SPLICE macro.
[XFS] Remove CONFIG_XFS_SECURITY.
[XFS] xfs_bmap_compute_maxlevels should be based on di_forkoff
[XFS] Always use di_forkoff when checking for attr space.
[XFS] Ensure the inode is joined in xfs_itruncate_finish
[XFS] Remove periodic logging of in-core superblock counters.
[XFS] fix logic error in xfs_alloc_ag_vextent_near()
[XFS] Don't error out on good I/Os.
[XFS] Catch log unmount failures.
[XFS] Sanitise xfs_log_force error checking.
[XFS] Check for errors when changing buffer pointers.
[XFS] Don't allow silent errors in xfs_inactive().
[XFS] Catch errors from xfs_imap().
[XFS] xfs_bulkstat_one_dinode() never returns an error.
[XFS] xfs_iflush_fork() never returns an error.
[XFS] Catch unwritten extent conversion errors.
...
Diffstat (limited to 'fs/xfs/support')
-rw-r--r--  fs/xfs/support/ktrace.c  37
-rw-r--r--  fs/xfs/support/ktrace.h   3
2 files changed, 21 insertions, 19 deletions

diff --git a/fs/xfs/support/ktrace.c b/fs/xfs/support/ktrace.c
index 129067cfcb86..0b75d302508f 100644
--- a/fs/xfs/support/ktrace.c
+++ b/fs/xfs/support/ktrace.c
@@ -24,7 +24,7 @@ static int ktrace_zentries;
 void __init
 ktrace_init(int zentries)
 {
-	ktrace_zentries = zentries;
+	ktrace_zentries = roundup_pow_of_two(zentries);
 
 	ktrace_hdr_zone = kmem_zone_init(sizeof(ktrace_t),
 					"ktrace_hdr");
@@ -47,13 +47,16 @@ ktrace_uninit(void)
  * ktrace_alloc()
  *
  * Allocate a ktrace header and enough buffering for the given
- * number of entries.
+ * number of entries. Round the number of entries up to a
+ * power of 2 so we can do fast masking to get the index from
+ * the atomic index counter.
  */
 ktrace_t *
 ktrace_alloc(int nentries, unsigned int __nocast sleep)
 {
 	ktrace_t	*ktp;
 	ktrace_entry_t	*ktep;
+	int		entries;
 
 	ktp = (ktrace_t*)kmem_zone_alloc(ktrace_hdr_zone, sleep);
 
@@ -70,11 +73,12 @@ ktrace_alloc(int nentries, unsigned int __nocast sleep)
 	/*
 	 * Special treatment for buffers with the ktrace_zentries entries
 	 */
-	if (nentries == ktrace_zentries) {
+	entries = roundup_pow_of_two(nentries);
+	if (entries == ktrace_zentries) {
 		ktep = (ktrace_entry_t*)kmem_zone_zalloc(ktrace_ent_zone,
 							    sleep);
 	} else {
-		ktep = (ktrace_entry_t*)kmem_zalloc((nentries * sizeof(*ktep)),
+		ktep = (ktrace_entry_t*)kmem_zalloc((entries * sizeof(*ktep)),
 							    sleep | KM_LARGE);
 	}
 
@@ -91,8 +95,10 @@ ktrace_alloc(int nentries, unsigned int __nocast sleep)
 	}
 
 	ktp->kt_entries  = ktep;
-	ktp->kt_nentries = nentries;
-	ktp->kt_index    = 0;
+	ktp->kt_nentries = entries;
+	ASSERT(is_power_of_2(entries));
+	ktp->kt_index_mask = entries - 1;
+	atomic_set(&ktp->kt_index, 0);
 	ktp->kt_rollover = 0;
 	return ktp;
 }
@@ -151,8 +157,6 @@ ktrace_enter(
 	void		*val14,
 	void		*val15)
 {
-	static DEFINE_SPINLOCK(wrap_lock);
-	unsigned long	flags;
 	int		index;
 	ktrace_entry_t	*ktep;
 
@@ -161,12 +165,8 @@ ktrace_enter(
 	/*
 	 * Grab an entry by pushing the index up to the next one.
 	 */
-	spin_lock_irqsave(&wrap_lock, flags);
-	index = ktp->kt_index;
-	if (++ktp->kt_index == ktp->kt_nentries)
-		ktp->kt_index = 0;
-	spin_unlock_irqrestore(&wrap_lock, flags);
-
+	index = atomic_add_return(1, &ktp->kt_index);
+	index = (index - 1) & ktp->kt_index_mask;
 	if (!ktp->kt_rollover && index == ktp->kt_nentries - 1)
 		ktp->kt_rollover = 1;
 
@@ -199,11 +199,12 @@ int
 ktrace_nentries(
 	ktrace_t	*ktp)
 {
-	if (ktp == NULL) {
+	int	index;
+	if (ktp == NULL)
 		return 0;
-	}
 
-	return (ktp->kt_rollover ? ktp->kt_nentries : ktp->kt_index);
+	index = atomic_read(&ktp->kt_index) & ktp->kt_index_mask;
+	return (ktp->kt_rollover ? ktp->kt_nentries : index);
 }
 
 /*
@@ -228,7 +229,7 @@ ktrace_first(ktrace_t *ktp, ktrace_snap_t *ktsp)
 	int		nentries;
 
 	if (ktp->kt_rollover)
-		index = ktp->kt_index;
+		index = atomic_read(&ktp->kt_index) & ktp->kt_index_mask;
 	else
 		index = 0;
 
diff --git a/fs/xfs/support/ktrace.h b/fs/xfs/support/ktrace.h
index 56e72b40a859..741d6947ca60 100644
--- a/fs/xfs/support/ktrace.h
+++ b/fs/xfs/support/ktrace.h
@@ -30,7 +30,8 @@ typedef struct ktrace_entry {
  */
 typedef struct ktrace {
 	int		kt_nentries;	/* number of entries in trace buf */
-	int		kt_index;	/* current index in entries */
+	atomic_t	kt_index;	/* current index in entries */
+	unsigned int	kt_index_mask;
 	int		kt_rollover;
 	ktrace_entry_t	*kt_entries;	/* buffer of entries */
 } ktrace_t;
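
The ktrace.c hunks above drop the wrap_lock spinlock and let each trace event claim a slot with a single atomic increment: because ktrace_alloc() now rounds the buffer size up to a power of two, the free-running counter only needs to be masked with kt_index_mask to land on a valid slot. Below is a minimal userspace sketch of that pattern using C11 atomics; it is not the kernel code, the names (trace_buf, trace_enter, roundup_pow2) are made up for illustration, and atomic_fetch_add(), which returns the old value, stands in for the kernel's atomic_add_return(1, ...) followed by the "- 1" adjustment.

/* Sketch only: lockless ring-buffer index via power-of-two masking. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct trace_record {
	unsigned long	tag;
	void		*val;
};

struct trace_buf {
	unsigned int		nentries;	/* rounded up to a power of two */
	unsigned int		index_mask;	/* nentries - 1 */
	atomic_uint		index;		/* free-running, never wrapped back */
	struct trace_record	*entries;
};

/* Round up to the next power of two (stand-in for roundup_pow_of_two()). */
static unsigned int roundup_pow2(unsigned int n)
{
	unsigned int p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

static struct trace_buf *trace_alloc(unsigned int nentries)
{
	struct trace_buf *tb = malloc(sizeof(*tb));

	tb->nentries = roundup_pow2(nentries);
	tb->index_mask = tb->nentries - 1;
	atomic_init(&tb->index, 0);
	tb->entries = calloc(tb->nentries, sizeof(*tb->entries));
	return tb;
}

/* Claim the next slot with one atomic add; the mask does the wrap. */
static void trace_enter(struct trace_buf *tb, unsigned long tag, void *val)
{
	/* fetch_add returns the old value, so the first caller gets slot 0. */
	unsigned int slot = atomic_fetch_add(&tb->index, 1) & tb->index_mask;

	tb->entries[slot].tag = tag;
	tb->entries[slot].val = val;
}

int main(void)
{
	struct trace_buf *tb = trace_alloc(1000);	/* rounded up to 1024 */

	for (unsigned long i = 0; i < 3000; i++)
		trace_enter(tb, i, NULL);

	/* 3000 & 1023 == 952: the counter keeps growing, the slot wraps. */
	printf("nentries=%u mask=%#x next slot=%u\n",
	       tb->nentries, tb->index_mask,
	       atomic_load(&tb->index) & tb->index_mask);
	free(tb->entries);
	free(tb);
	return 0;
}

The masking trick is why ktrace_init() and ktrace_alloc() round sizes up with roundup_pow_of_two(): with a non-power-of-two size, a simple AND could not replace the "wrap back to zero" branch that the old spinlock protected.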