author    Steven Whitehouse <swhiteho@redhat.com>    2009-02-17 09:13:35 -0500
committer Steven Whitehouse <steve@dolmen.chygwyn.com>    2009-03-24 07:21:22 -0400
commit    223b2b889f379dcea9cef722336a57e8b398bc95
tree      a377d9be5eb756bac3d1866cbb6171cae6ef7dde /fs/gfs2/rgrp.c
parent    64d576ba23bfd9b770cbb0279200f479272eb859
GFS2: Fix alignment issue and tidy gfs2_bitfit
An alignment issue with the existing bitfit algorithm was reported on IA64. This patch attempts to fix that, and also to tidy up the code a bit. There is now more documentation about how this works and it has survived a number of different tests.

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
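For readers new to the trick the patch relies on, here is a minimal standalone sketch (plain userspace C, not part of the patch; the helpers pack_state() and match_mask() are invented for illustration) of the two-bits-per-block search that the new gfs2_bit_search() below documents: XOR the word with the bitwise opposite of the wanted state, AND it with itself shifted right by one, then keep only the even bit positions.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical helper: store 2-bit state 's' for block 'blk' in a
 * 64-bit bitmap word (32 blocks per word). */
static uint64_t pack_state(uint64_t word, unsigned int blk, unsigned int s)
{
	word &= ~(3ULL << (blk * 2));
	return word | ((uint64_t)s << (blk * 2));
}

/* The search trick the patch documents: XOR with the bitwise opposite of
 * the wanted state so matching bit-pairs become 11, AND the result with
 * itself shifted right by one so each match leaves a 1 in its even bit
 * position, then mask with 0x5555... to drop the odd positions. */
static uint64_t match_mask(uint64_t word, unsigned int state)
{
	static const uint64_t search[] = {
		0xffffffffffffffffULL,	/* state 0 (binary 00) */
		0xaaaaaaaaaaaaaaaaULL,	/* state 1 (binary 01) */
		0x5555555555555555ULL,	/* state 2 (binary 10) */
		0x0000000000000000ULL,	/* state 3 (binary 11) */
	};
	uint64_t tmp = word ^ search[state];
	tmp &= (tmp >> 1);
	return tmp & 0x5555555555555555ULL;
}

int main(void)
{
	uint64_t word = 0;			/* 32 blocks, all in state 0 */
	word = pack_state(word, 3, 1);		/* block 3 -> state 1 */
	word = pack_state(word, 7, 1);		/* block 7 -> state 1 */

	uint64_t m = match_mask(word, 1);	/* expect bits 6 and 14 set */
	printf("match mask = 0x%016llx\n", (unsigned long long)m);
	if (m)
		printf("lowest matching block = %d\n", __builtin_ctzll(m) / 2);
	return 0;
}

The kernel version differs in that it indexes its search[] table by the GFS2_BLKST_* state values and converts the on-disk little-endian words with le64_to_cpu() first.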
Diffstat (limited to 'fs/gfs2/rgrp.c')
-rw-r--r--	fs/gfs2/rgrp.c	132
1 file changed, 70 insertions, 62 deletions
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index a068ac940de1..c0abe698af82 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -132,81 +132,89 @@ static inline unsigned char gfs2_testbit(struct gfs2_rgrpd *rgd,
 }
 
 /**
+ * gfs2_bit_search
+ * @ptr: Pointer to bitmap data
+ * @mask: Mask to use (normally 0x55555.... but adjusted for search start)
+ * @state: The state we are searching for
+ *
+ * We xor the bitmap data with a pattern which is the bitwise opposite
+ * of what we are looking for, this gives rise to a pattern of ones
+ * wherever there is a match. Since we have two bits per entry, we
+ * take this pattern, shift it down by one place and then and it with
+ * the original. All the even bit positions (0,2,4, etc) then represent
+ * successful matches, so we mask with 0x55555..... to remove the unwanted
+ * odd bit positions.
+ *
+ * This allows searching of a whole u64 at once (32 blocks) with a
+ * single test (on 64 bit arches).
+ */
+
+static inline u64 gfs2_bit_search(const __le64 *ptr, u64 mask, u8 state)
+{
+	u64 tmp;
+	static const u64 search[] = {
+		[0] = 0xffffffffffffffff,
+		[1] = 0xaaaaaaaaaaaaaaaa,
+		[2] = 0x5555555555555555,
+		[3] = 0x0000000000000000,
+	};
+	tmp = le64_to_cpu(*ptr) ^ search[state];
+	tmp &= (tmp >> 1);
+	tmp &= mask;
+	return tmp;
+}
+
+/**
  * gfs2_bitfit - Search an rgrp's bitmap buffer to find a bit-pair representing
  *	a block in a given allocation state.
  * @buffer: the buffer that holds the bitmaps
- * @buflen: the length (in bytes) of the buffer
+ * @len: the length (in bytes) of the buffer
  * @goal: start search at this block's bit-pair (within @buffer)
- * @old_state: GFS2_BLKST_XXX the state of the block we're looking for.
+ * @state: GFS2_BLKST_XXX the state of the block we're looking for.
  *
  * Scope of @goal and returned block number is only within this bitmap buffer,
  * not entire rgrp or filesystem. @buffer will be offset from the actual
- * beginning of a bitmap block buffer, skipping any header structures.
+ * beginning of a bitmap block buffer, skipping any header structures, but
+ * headers are always a multiple of 64 bits long so that the buffer is
+ * always aligned to a 64 bit boundary.
+ *
+ * The size of the buffer is in bytes, but it is assumed that it is
+ * always ok to read a complete multiple of 64 bits at the end
+ * of the block in case the end is not aligned to a natural boundary.
  *
  * Return: the block number (bitmap buffer scope) that was found
  */
 
-static u32 gfs2_bitfit(const u8 *buffer, unsigned int buflen, u32 goal,
-		       u8 old_state)
+static u32 gfs2_bitfit(const u8 *buf, const unsigned int len, u32 goal, u8 state)
 {
-	const u8 *byte, *start, *end;
-	int bit, startbit;
-	u32 g1, g2, misaligned;
-	unsigned long *plong;
-	unsigned long lskipval;
-
-	lskipval = (old_state & GFS2_BLKST_USED) ? LBITSKIP00 : LBITSKIP55;
-	g1 = (goal / GFS2_NBBY);
-	start = buffer + g1;
-	byte = start;
-	end = buffer + buflen;
-	g2 = ALIGN(g1, sizeof(unsigned long));
-	plong = (unsigned long *)(buffer + g2);
-	startbit = bit = (goal % GFS2_NBBY) * GFS2_BIT_SIZE;
-	misaligned = g2 - g1;
-	if (!misaligned)
-		goto ulong_aligned;
-/* parse the bitmap a byte at a time */
-misaligned:
-	while (byte < end) {
-		if (((*byte >> bit) & GFS2_BIT_MASK) == old_state) {
-			return goal +
-				(((byte - start) * GFS2_NBBY) +
-				 ((bit - startbit) >> 1));
-		}
-		bit += GFS2_BIT_SIZE;
-		if (bit >= GFS2_NBBY * GFS2_BIT_SIZE) {
-			bit = 0;
-			byte++;
-			misaligned--;
-			if (!misaligned) {
-				plong = (unsigned long *)byte;
-				goto ulong_aligned;
-			}
-		}
-	}
-	return BFITNOENT;
-
-/* parse the bitmap a unsigned long at a time */
-ulong_aligned:
-	/* Stop at "end - 1" or else prefetch can go past the end and segfault.
-	   We could "if" it but we'd lose some of the performance gained.
-	   This way will only slow down searching the very last 4/8 bytes
-	   depending on architecture. I've experimented with several ways
-	   of writing this section such as using an else before the goto
-	   but this one seems to be the fastest. */
-	while ((unsigned char *)plong < end - sizeof(unsigned long)) {
-		prefetch(plong + 1);
-		if (((*plong) & LBITMASK) != lskipval)
-			break;
-		plong++;
-	}
-	if ((unsigned char *)plong < end) {
-		byte = (const u8 *)plong;
-		misaligned += sizeof(unsigned long) - 1;
-		goto misaligned;
+	u32 spoint = (goal << 1) & ((8*sizeof(u64)) - 1);
+	const __le64 *ptr = ((__le64 *)buf) + (goal >> 5);
+	const __le64 *end = (__le64 *)(buf + ALIGN(len, sizeof(u64)));
+	u64 tmp;
+	u64 mask = 0x5555555555555555;
+	u32 bit;
+
+	BUG_ON(state > 3);
+
+	/* Mask off bits we don't care about at the start of the search */
+	mask <<= spoint;
+	tmp = gfs2_bit_search(ptr, mask, state);
+	ptr++;
+	while(tmp == 0 && ptr < end) {
+		tmp = gfs2_bit_search(ptr, 0x5555555555555555, state);
+		ptr++;
 	}
-	return BFITNOENT;
+	/* Mask off any bits which are more than len bytes from the start */
+	if (ptr == end && (len & (sizeof(u64) - 1)))
+		tmp &= (((u64)~0) >> (64 - 8*(len & (sizeof(u64) - 1))));
+	/* Didn't find anything, so return */
+	if (tmp == 0)
+		return BFITNOENT;
+	ptr--;
+	bit = fls64(tmp);
+	bit--;	/* fls64 always adds one to the bit count */
+	bit /= 2;	/* two bits per entry in the bitmap */
+	return (((const unsigned char *)ptr - buf) * GFS2_NBBY) + bit;
 }
 
 /**
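As a companion to the hunk above, here is a standalone sketch (userspace C, not part of the patch; the goal and len values are arbitrary) showing how the start mask and the end-of-buffer mask used by the new gfs2_bitfit() are derived, following the comment in the patch that the buffer may be read a full 64 bits at a time even when its length is not a multiple of 8 bytes.

#include <stdio.h>
#include <stdint.h>

/* Worked example (not kernel code) of the two masks the new gfs2_bitfit()
 * uses: a start mask hiding bit-pairs before the goal block, and an end
 * mask hiding bit-pairs past the buffer length when the last 64-bit word
 * is read in full. */
int main(void)
{
	unsigned int goal = 37;		/* start searching at block 37 */
	unsigned int len = 10;		/* bitmap buffer length in bytes */

	unsigned int word = goal >> 5;		/* 64-bit word holding the goal: 1 */
	unsigned int spoint = (goal << 1) & 63;	/* bit offset inside that word: 10 */

	/* Start mask: 0x5555... shifted so even bit positions below the goal
	 * no longer count as matches. */
	uint64_t start_mask = 0x5555555555555555ULL << spoint;

	/* End mask: when len is not a multiple of 8, the final word is still
	 * read whole, so matches beyond len bytes must be masked off. */
	uint64_t end_mask = ~(uint64_t)0;
	if (len & 7)
		end_mask >>= 64 - 8 * (len & 7);	/* keeps the low 16 bits here */

	printf("goal %u -> word %u, start bit %u\n", goal, word, spoint);
	printf("start mask 0x%016llx\n", (unsigned long long)start_mask);
	printf("end mask   0x%016llx\n", (unsigned long long)end_mask);
	return 0;
}

Masking rather than testing each bit-pair in a loop is what lets the new code check 32 blocks per iteration.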