aboutsummaryrefslogtreecommitdiffstats
path: root/fs/xfs
diff options
context:
space:
mode:
authorDave Chinner <david@fromorbit.com>2009-08-31 19:56:51 -0400
committerFelix Blyakher <felixb@sgi.com>2009-09-01 13:44:27 -0400
commit85c0b2ab5e69ca6133380ead1c50e0840d136b39 (patch)
tree915fe86af550398d131071456c772053917e298c /fs/xfs
parent1da1daed813c534263a87ffc36d5b775e65231ad (diff)
xfs: factor out inode initialisation
Factor out code to initialize new inode clusters into a function of its own. This keeps xfs_ialloc_ag_alloc smaller and better structured and enables a future inode cluster initialization transaction. Also initialize the agno variable earlier in xfs_ialloc_ag_alloc to avoid repeated byte swaps. [hch: The original patch is from Dave from his unpublished inode create transaction patch series, with some modifications by me to apply stand-alone] Signed-off-by: Dave Chinner <david@fromorbit.com> Signed-off-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Alex Elder <aelder@sgi.com> Signed-off-by: Felix Blyakher <felixb@sgi.com>
Diffstat (limited to 'fs/xfs')
-rw-r--r--fs/xfs/xfs_ialloc.c175
1 files changed, 95 insertions, 80 deletions
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c
index 3120a3a5e20f..ce9edf7f4cb4 100644
--- a/fs/xfs/xfs_ialloc.c
+++ b/fs/xfs/xfs_ialloc.c
@@ -153,6 +153,87 @@ xfs_inobt_get_rec(
153} 153}
154 154
155/* 155/*
156 * Initialise a new set of inodes.
157 */
158STATIC void
159xfs_ialloc_inode_init(
160 struct xfs_mount *mp,
161 struct xfs_trans *tp,
162 xfs_agnumber_t agno,
163 xfs_agblock_t agbno,
164 xfs_agblock_t length,
165 unsigned int gen)
166{
167 struct xfs_buf *fbuf;
168 struct xfs_dinode *free;
169 int blks_per_cluster, nbufs, ninodes;
170 int version;
171 int i, j;
172 xfs_daddr_t d;
173
174 /*
175 * Loop over the new block(s), filling in the inodes.
176 * For small block sizes, manipulate the inodes in buffers
177 * which are multiples of the blocks size.
178 */
179 if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) {
180 blks_per_cluster = 1;
181 nbufs = length;
182 ninodes = mp->m_sb.sb_inopblock;
183 } else {
184 blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) /
185 mp->m_sb.sb_blocksize;
186 nbufs = length / blks_per_cluster;
187 ninodes = blks_per_cluster * mp->m_sb.sb_inopblock;
188 }
189
190 /*
191 * Figure out what version number to use in the inodes we create.
192 * If the superblock version has caught up to the one that supports
193 * the new inode format, then use the new inode version. Otherwise
194 * use the old version so that old kernels will continue to be
195 * able to use the file system.
196 */
197 if (xfs_sb_version_hasnlink(&mp->m_sb))
198 version = 2;
199 else
200 version = 1;
201
202 for (j = 0; j < nbufs; j++) {
203 /*
204 * Get the block.
205 */
206 d = XFS_AGB_TO_DADDR(mp, agno, agbno + (j * blks_per_cluster));
207 fbuf = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
208 mp->m_bsize * blks_per_cluster,
209 XFS_BUF_LOCK);
210 ASSERT(fbuf);
211 ASSERT(!XFS_BUF_GETERROR(fbuf));
212
213 /*
214 * Initialize all inodes in this buffer and then log them.
215 *
216 * XXX: It would be much better if we had just one transaction
217 * to log a whole cluster of inodes instead of all the
218 * individual transactions causing a lot of log traffic.
219 */
220 xfs_biozero(fbuf, 0, ninodes << mp->m_sb.sb_inodelog);
221 for (i = 0; i < ninodes; i++) {
222 int ioffset = i << mp->m_sb.sb_inodelog;
223 uint isize = sizeof(struct xfs_dinode);
224
225 free = xfs_make_iptr(mp, fbuf, i);
226 free->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
227 free->di_version = version;
228 free->di_gen = cpu_to_be32(gen);
229 free->di_next_unlinked = cpu_to_be32(NULLAGINO);
230 xfs_trans_log_buf(tp, fbuf, ioffset, ioffset + isize - 1);
231 }
232 xfs_trans_inode_alloc_buf(tp, fbuf);
233 }
234}
235
236/*
156 * Allocate new inodes in the allocation group specified by agbp. 237 * Allocate new inodes in the allocation group specified by agbp.
157 * Return 0 for success, else error code. 238 * Return 0 for success, else error code.
158 */ 239 */
@@ -164,24 +245,15 @@ xfs_ialloc_ag_alloc(
164{ 245{
165 xfs_agi_t *agi; /* allocation group header */ 246 xfs_agi_t *agi; /* allocation group header */
166 xfs_alloc_arg_t args; /* allocation argument structure */ 247 xfs_alloc_arg_t args; /* allocation argument structure */
167 int blks_per_cluster; /* fs blocks per inode cluster */
168 xfs_btree_cur_t *cur; /* inode btree cursor */ 248 xfs_btree_cur_t *cur; /* inode btree cursor */
169 xfs_daddr_t d; /* disk addr of buffer */
170 xfs_agnumber_t agno; 249 xfs_agnumber_t agno;
171 int error; 250 int error;
172 xfs_buf_t *fbuf; /* new free inodes' buffer */ 251 int i;
173 xfs_dinode_t *free; /* new free inode structure */
174 int i; /* inode counter */
175 int j; /* block counter */
176 int nbufs; /* num bufs of new inodes */
177 xfs_agino_t newino; /* new first inode's number */ 252 xfs_agino_t newino; /* new first inode's number */
178 xfs_agino_t newlen; /* new number of inodes */ 253 xfs_agino_t newlen; /* new number of inodes */
179 int ninodes; /* num inodes per buf */
180 xfs_agino_t thisino; /* current inode number, for loop */ 254 xfs_agino_t thisino; /* current inode number, for loop */
181 int version; /* inode version number to use */
182 int isaligned = 0; /* inode allocation at stripe unit */ 255 int isaligned = 0; /* inode allocation at stripe unit */
183 /* boundary */ 256 /* boundary */
184 unsigned int gen;
185 257
186 args.tp = tp; 258 args.tp = tp;
187 args.mp = tp->t_mountp; 259 args.mp = tp->t_mountp;
@@ -202,12 +274,12 @@ xfs_ialloc_ag_alloc(
202 */ 274 */
203 agi = XFS_BUF_TO_AGI(agbp); 275 agi = XFS_BUF_TO_AGI(agbp);
204 newino = be32_to_cpu(agi->agi_newino); 276 newino = be32_to_cpu(agi->agi_newino);
277 agno = be32_to_cpu(agi->agi_seqno);
205 args.agbno = XFS_AGINO_TO_AGBNO(args.mp, newino) + 278 args.agbno = XFS_AGINO_TO_AGBNO(args.mp, newino) +
206 XFS_IALLOC_BLOCKS(args.mp); 279 XFS_IALLOC_BLOCKS(args.mp);
207 if (likely(newino != NULLAGINO && 280 if (likely(newino != NULLAGINO &&
208 (args.agbno < be32_to_cpu(agi->agi_length)))) { 281 (args.agbno < be32_to_cpu(agi->agi_length)))) {
209 args.fsbno = XFS_AGB_TO_FSB(args.mp, 282 args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
210 be32_to_cpu(agi->agi_seqno), args.agbno);
211 args.type = XFS_ALLOCTYPE_THIS_BNO; 283 args.type = XFS_ALLOCTYPE_THIS_BNO;
212 args.mod = args.total = args.wasdel = args.isfl = 284 args.mod = args.total = args.wasdel = args.isfl =
213 args.userdata = args.minalignslop = 0; 285 args.userdata = args.minalignslop = 0;
@@ -258,8 +330,7 @@ xfs_ialloc_ag_alloc(
258 * For now, just allocate blocks up front. 330 * For now, just allocate blocks up front.
259 */ 331 */
260 args.agbno = be32_to_cpu(agi->agi_root); 332 args.agbno = be32_to_cpu(agi->agi_root);
261 args.fsbno = XFS_AGB_TO_FSB(args.mp, 333 args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
262 be32_to_cpu(agi->agi_seqno), args.agbno);
263 /* 334 /*
264 * Allocate a fixed-size extent of inodes. 335 * Allocate a fixed-size extent of inodes.
265 */ 336 */
@@ -282,8 +353,7 @@ xfs_ialloc_ag_alloc(
282 if (isaligned && args.fsbno == NULLFSBLOCK) { 353 if (isaligned && args.fsbno == NULLFSBLOCK) {
283 args.type = XFS_ALLOCTYPE_NEAR_BNO; 354 args.type = XFS_ALLOCTYPE_NEAR_BNO;
284 args.agbno = be32_to_cpu(agi->agi_root); 355 args.agbno = be32_to_cpu(agi->agi_root);
285 args.fsbno = XFS_AGB_TO_FSB(args.mp, 356 args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
286 be32_to_cpu(agi->agi_seqno), args.agbno);
287 args.alignment = xfs_ialloc_cluster_alignment(&args); 357 args.alignment = xfs_ialloc_cluster_alignment(&args);
288 if ((error = xfs_alloc_vextent(&args))) 358 if ((error = xfs_alloc_vextent(&args)))
289 return error; 359 return error;
@@ -294,85 +364,30 @@ xfs_ialloc_ag_alloc(
294 return 0; 364 return 0;
295 } 365 }
296 ASSERT(args.len == args.minlen); 366 ASSERT(args.len == args.minlen);
297 /*
298 * Convert the results.
299 */
300 newino = XFS_OFFBNO_TO_AGINO(args.mp, args.agbno, 0);
301 /*
302 * Loop over the new block(s), filling in the inodes.
303 * For small block sizes, manipulate the inodes in buffers
304 * which are multiples of the blocks size.
305 */
306 if (args.mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(args.mp)) {
307 blks_per_cluster = 1;
308 nbufs = (int)args.len;
309 ninodes = args.mp->m_sb.sb_inopblock;
310 } else {
311 blks_per_cluster = XFS_INODE_CLUSTER_SIZE(args.mp) /
312 args.mp->m_sb.sb_blocksize;
313 nbufs = (int)args.len / blks_per_cluster;
314 ninodes = blks_per_cluster * args.mp->m_sb.sb_inopblock;
315 }
316 /*
317 * Figure out what version number to use in the inodes we create.
318 * If the superblock version has caught up to the one that supports
319 * the new inode format, then use the new inode version. Otherwise
320 * use the old version so that old kernels will continue to be
321 * able to use the file system.
322 */
323 if (xfs_sb_version_hasnlink(&args.mp->m_sb))
324 version = 2;
325 else
326 version = 1;
327 367
328 /* 368 /*
369 * Stamp and write the inode buffers.
370 *
329 * Seed the new inode cluster with a random generation number. This 371 * Seed the new inode cluster with a random generation number. This
330 * prevents short-term reuse of generation numbers if a chunk is 372 * prevents short-term reuse of generation numbers if a chunk is
331 * freed and then immediately reallocated. We use random numbers 373 * freed and then immediately reallocated. We use random numbers
332 * rather than a linear progression to prevent the next generation 374 * rather than a linear progression to prevent the next generation
333 * number from being easily guessable. 375 * number from being easily guessable.
334 */ 376 */
335 gen = random32(); 377 xfs_ialloc_inode_init(args.mp, tp, agno, args.agbno, args.len,
336 for (j = 0; j < nbufs; j++) { 378 random32());
337 /*
338 * Get the block.
339 */
340 d = XFS_AGB_TO_DADDR(args.mp, be32_to_cpu(agi->agi_seqno),
341 args.agbno + (j * blks_per_cluster));
342 fbuf = xfs_trans_get_buf(tp, args.mp->m_ddev_targp, d,
343 args.mp->m_bsize * blks_per_cluster,
344 XFS_BUF_LOCK);
345 ASSERT(fbuf);
346 ASSERT(!XFS_BUF_GETERROR(fbuf));
347 379
348 /* 380 /*
349 * Initialize all inodes in this buffer and then log them. 381 * Convert the results.
350 * 382 */
351 * XXX: It would be much better if we had just one transaction to 383 newino = XFS_OFFBNO_TO_AGINO(args.mp, args.agbno, 0);
352 * log a whole cluster of inodes instead of all the individual
353 * transactions causing a lot of log traffic.
354 */
355 xfs_biozero(fbuf, 0, ninodes << args.mp->m_sb.sb_inodelog);
356 for (i = 0; i < ninodes; i++) {
357 int ioffset = i << args.mp->m_sb.sb_inodelog;
358 uint isize = sizeof(struct xfs_dinode);
359
360 free = xfs_make_iptr(args.mp, fbuf, i);
361 free->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
362 free->di_version = version;
363 free->di_gen = cpu_to_be32(gen);
364 free->di_next_unlinked = cpu_to_be32(NULLAGINO);
365 xfs_trans_log_buf(tp, fbuf, ioffset, ioffset + isize - 1);
366 }
367 xfs_trans_inode_alloc_buf(tp, fbuf);
368 }
369 be32_add_cpu(&agi->agi_count, newlen); 384 be32_add_cpu(&agi->agi_count, newlen);
370 be32_add_cpu(&agi->agi_freecount, newlen); 385 be32_add_cpu(&agi->agi_freecount, newlen);
371 agno = be32_to_cpu(agi->agi_seqno);
372 down_read(&args.mp->m_peraglock); 386 down_read(&args.mp->m_peraglock);
373 args.mp->m_perag[agno].pagi_freecount += newlen; 387 args.mp->m_perag[agno].pagi_freecount += newlen;
374 up_read(&args.mp->m_peraglock); 388 up_read(&args.mp->m_peraglock);
375 agi->agi_newino = cpu_to_be32(newino); 389 agi->agi_newino = cpu_to_be32(newino);
390
376 /* 391 /*
377 * Insert records describing the new inode chunk into the btree. 392 * Insert records describing the new inode chunk into the btree.
378 */ 393 */