Diffstat (limited to 'fs/xfs')
-rw-r--r--	fs/xfs/xfs_bmap_util.c	 72
-rw-r--r--	fs/xfs/xfs_itable.c	250
-rw-r--r--	fs/xfs/xfs_itable.h	 16
3 files changed, 142 insertions(+), 196 deletions(-)
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 92e8f99a5857..281002689d64 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -1338,7 +1338,10 @@ xfs_free_file_space(
 	goto out;
 }
 
-
+/*
+ * Preallocate and zero a range of a file. This mechanism has the allocation
+ * semantics of fallocate and in addition converts data in the range to zeroes.
+ */
 int
 xfs_zero_file_space(
 	struct xfs_inode	*ip,
@@ -1346,65 +1349,30 @@ xfs_zero_file_space(
 	xfs_off_t		len)
 {
 	struct xfs_mount	*mp = ip->i_mount;
-	uint			granularity;
-	xfs_off_t		start_boundary;
-	xfs_off_t		end_boundary;
+	uint			blksize;
 	int			error;
 
 	trace_xfs_zero_file_space(ip);
 
-	granularity = max_t(uint, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);
+	blksize = 1 << mp->m_sb.sb_blocklog;
 
 	/*
-	 * Round the range of extents we are going to convert inwards. If the
-	 * offset is aligned, then it doesn't get changed so we zero from the
-	 * start of the block offset points to.
+	 * Punch a hole and prealloc the range. We use hole punch rather than
+	 * unwritten extent conversion for two reasons:
+	 *
+	 * 1.) Hole punch handles partial block zeroing for us.
+	 *
+	 * 2.) If prealloc returns ENOSPC, the file range is still zero-valued
+	 * by virtue of the hole punch.
 	 */
-	start_boundary = round_up(offset, granularity);
-	end_boundary = round_down(offset + len, granularity);
-
-	ASSERT(start_boundary >= offset);
-	ASSERT(end_boundary <= offset + len);
-
-	if (start_boundary < end_boundary - 1) {
-		/*
-		 * Writeback the range to ensure any inode size updates due to
-		 * appending writes make it to disk (otherwise we could just
-		 * punch out the delalloc blocks).
-		 */
-		error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
-				start_boundary, end_boundary - 1);
-		if (error)
-			goto out;
-		truncate_pagecache_range(VFS_I(ip), start_boundary,
-					 end_boundary - 1);
-
-		/* convert the blocks */
-		error = xfs_alloc_file_space(ip, start_boundary,
-					end_boundary - start_boundary - 1,
-					XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT);
-		if (error)
-			goto out;
-
-		/* We've handled the interior of the range, now for the edges */
-		if (start_boundary != offset) {
-			error = xfs_iozero(ip, offset, start_boundary - offset);
-			if (error)
-				goto out;
-		}
-
-		if (end_boundary != offset + len)
-			error = xfs_iozero(ip, end_boundary,
-					   offset + len - end_boundary);
-
-	} else {
-		/*
-		 * It's either a sub-granularity range or the range spanned lies
-		 * partially across two adjacent blocks.
-		 */
-		error = xfs_iozero(ip, offset, len);
-	}
+	error = xfs_free_file_space(ip, offset, len);
+	if (error)
+		goto out;
 
+	error = xfs_alloc_file_space(ip, round_down(offset, blksize),
+				     round_up(offset + len, blksize) -
+				     round_down(offset, blksize),
+				     XFS_BMAPI_PREALLOC);
 out:
 	return error;
 
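The new comment above captures the whole design: zero the range by punching a hole first, then preallocate it, so even a failed preallocation leaves zero-valued data behind. The same two-step pattern can be exercised from userspace through fallocate(2). Below is a minimal sketch, assuming a Linux kernel and filesystem with FALLOC_FL_PUNCH_HOLE support; zero_range_sketch() is a hypothetical helper for illustration, not code from this patch.

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>		/* fallocate() */
#include <linux/falloc.h>	/* FALLOC_FL_* flags */

static int zero_range_sketch(int fd, off_t offset, off_t len)
{
	/* 1.) hole punch zeroes the range, partial blocks included */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      offset, len) < 0)
		return -errno;

	/* 2.) preallocate; even on ENOSPC the range still reads as zeroes */
	if (fallocate(fd, FALLOC_FL_KEEP_SIZE, offset, len) < 0 &&
	    errno != ENOSPC)
		return -errno;

	return 0;
}

Note that fallocate(2) requires FALLOC_FL_PUNCH_HOLE to be ORed with FALLOC_FL_KEEP_SIZE, so the sketch keeps the file size unchanged in both steps.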
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index f1deb961a296..894924a5129b 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -236,8 +236,10 @@ xfs_bulkstat_grab_ichunk(
 	XFS_WANT_CORRUPTED_RETURN(stat == 1);
 
 	/* Check if the record contains the inode in request */
-	if (irec->ir_startino + XFS_INODES_PER_CHUNK <= agino)
-		return -EINVAL;
+	if (irec->ir_startino + XFS_INODES_PER_CHUNK <= agino) {
+		*icount = 0;
+		return 0;
+	}
 
 	idx = agino - irec->ir_startino + 1;
 	if (idx < XFS_INODES_PER_CHUNK &&
@@ -262,75 +264,76 @@ xfs_bulkstat_grab_ichunk(
 
 #define XFS_BULKSTAT_UBLEFT(ubleft)	((ubleft) >= statstruct_size)
 
+struct xfs_bulkstat_agichunk {
+	char		__user **ac_ubuffer;/* pointer into user's buffer */
+	int		ac_ubleft;	/* bytes left in user's buffer */
+	int		ac_ubelem;	/* spaces used in user's buffer */
+};
+
 /*
  * Process inodes in chunk with a pointer to a formatter function
  * that will iget the inode and fill in the appropriate structure.
  */
-int
+static int
 xfs_bulkstat_ag_ichunk(
 	struct xfs_mount	*mp,
 	xfs_agnumber_t		agno,
 	struct xfs_inobt_rec_incore *irbp,
 	bulkstat_one_pf		formatter,
 	size_t			statstruct_size,
-	struct xfs_bulkstat_agichunk *acp)
+	struct xfs_bulkstat_agichunk *acp,
+	xfs_agino_t		*last_agino)
 {
-	xfs_ino_t		lastino = acp->ac_lastino;
 	char			__user **ubufp = acp->ac_ubuffer;
-	int			ubleft = acp->ac_ubleft;
-	int			ubelem = acp->ac_ubelem;
-	int			chunkidx, clustidx;
+	int			chunkidx;
 	int			error = 0;
-	xfs_agino_t		agino;
+	xfs_agino_t		agino = irbp->ir_startino;
 
-	for (agino = irbp->ir_startino, chunkidx = clustidx = 0;
-	     XFS_BULKSTAT_UBLEFT(ubleft) &&
-	     irbp->ir_freecount < XFS_INODES_PER_CHUNK;
-	     chunkidx++, clustidx++, agino++) {
-		int		fmterror;	/* bulkstat formatter result */
+	for (chunkidx = 0; chunkidx < XFS_INODES_PER_CHUNK;
+	     chunkidx++, agino++) {
+		int		fmterror;
 		int		ubused;
-		xfs_ino_t	ino = XFS_AGINO_TO_INO(mp, agno, agino);
 
-		ASSERT(chunkidx < XFS_INODES_PER_CHUNK);
+		/* inode won't fit in buffer, we are done */
+		if (acp->ac_ubleft < statstruct_size)
+			break;
 
 		/* Skip if this inode is free */
-		if (XFS_INOBT_MASK(chunkidx) & irbp->ir_free) {
-			lastino = ino;
+		if (XFS_INOBT_MASK(chunkidx) & irbp->ir_free)
 			continue;
-		}
-
-		/*
-		 * Count used inodes as free so we can tell when the
-		 * chunk is used up.
-		 */
-		irbp->ir_freecount++;
 
 		/* Get the inode and fill in a single buffer */
 		ubused = statstruct_size;
-		error = formatter(mp, ino, *ubufp, ubleft, &ubused, &fmterror);
-		if (fmterror == BULKSTAT_RV_NOTHING) {
-			if (error && error != -ENOENT && error != -EINVAL) {
-				ubleft = 0;
-				break;
-			}
-			lastino = ino;
-			continue;
-		}
-		if (fmterror == BULKSTAT_RV_GIVEUP) {
-			ubleft = 0;
+		error = formatter(mp, XFS_AGINO_TO_INO(mp, agno, agino),
+				  *ubufp, acp->ac_ubleft, &ubused, &fmterror);
+
+		if (fmterror == BULKSTAT_RV_GIVEUP ||
+		    (error && error != -ENOENT && error != -EINVAL)) {
+			acp->ac_ubleft = 0;
 			ASSERT(error);
 			break;
 		}
-		if (*ubufp)
-			*ubufp += ubused;
-		ubleft -= ubused;
-		ubelem++;
-		lastino = ino;
+
+		/* be careful not to leak error if at end of chunk */
+		if (fmterror == BULKSTAT_RV_NOTHING || error) {
+			error = 0;
+			continue;
+		}
+
+		*ubufp += ubused;
+		acp->ac_ubleft -= ubused;
+		acp->ac_ubelem++;
 	}
 
-	acp->ac_lastino = lastino;
-	acp->ac_ubleft = ubleft;
-	acp->ac_ubelem = ubelem;
+	/*
+	 * Post-update *last_agino. At this point, agino will always point one
+	 * inode past the last inode we processed successfully. Hence we
+	 * subtract that inode when setting the *last_agino cursor so that we
+	 * return the correct cookie to userspace. On the next bulkstat call,
+	 * the inode under the lastino cookie will be skipped as we have already
+	 * processed it here.
+	 */
+	*last_agino = agino - 1;
 
 	return error;
 }
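The *last_agino post-update above defines the resume contract for bulkstat: the cookie handed back to userspace names the last inode actually processed, and the next call skips it and continues one past. A minimal consumer sketch, assuming the xfs_fsop_bulkreq and XFS_IOC_FSBULKSTAT definitions from the xfsprogs <xfs/xfs.h> userspace header; walk_all_inodes() is a hypothetical helper, not part of this patch.

#include <sys/ioctl.h>
#include <xfs/xfs.h>	/* xfsprogs: struct xfs_fsop_bulkreq et al. */

static int walk_all_inodes(int fd)
{
	struct xfs_bstat	buf[64];
	__u64			lastino = 0;	/* cookie: 0 = whole fs */
	__s32			ocount = 0;
	struct xfs_fsop_bulkreq	req = {
		.lastip	 = &lastino,	/* updated by the kernel */
		.icount	 = 64,
		.ubuffer = buf,
		.ocount	 = &ocount,
	};

	/* each call resumes one inode past the returned cookie */
	while (ioctl(fd, XFS_IOC_FSBULKSTAT, &req) == 0 && ocount > 0) {
		/* consume buf[0 .. ocount - 1] here */
	}

	return 0;
}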
@@ -353,45 +356,33 @@ xfs_bulkstat(
 	xfs_agino_t		agino;	/* inode # in allocation group */
 	xfs_agnumber_t		agno;	/* allocation group number */
 	xfs_btree_cur_t		*cur;	/* btree cursor for ialloc btree */
-	int			end_of_ag; /* set if we've seen the ag end */
-	int			error;	/* error code */
-	int			fmterror;/* bulkstat formatter result */
-	int			i;	/* loop index */
-	int			icount;	/* count of inodes good in irbuf */
 	size_t			irbsize; /* size of irec buffer in bytes */
-	xfs_ino_t		ino;	/* inode number (filesystem) */
-	xfs_inobt_rec_incore_t	*irbp;	/* current irec buffer pointer */
 	xfs_inobt_rec_incore_t	*irbuf;	/* start of irec buffer */
-	xfs_inobt_rec_incore_t	*irbufend; /* end of good irec buffer entries */
-	xfs_ino_t		lastino; /* last inode number returned */
 	int			nirbuf;	/* size of irbuf */
-	int			rval;	/* return value error code */
-	int			tmp;	/* result value from btree calls */
 	int			ubcount; /* size of user's buffer */
-	int			ubleft;	/* bytes left in user's buffer */
-	char			__user *ubufp;	/* pointer into user's buffer */
-	int			ubelem;	/* spaces used in user's buffer */
+	struct xfs_bulkstat_agichunk ac;
+	int			error = 0;
 
 	/*
 	 * Get the last inode value, see if there's nothing to do.
 	 */
-	ino = (xfs_ino_t)*lastinop;
-	lastino = ino;
-	agno = XFS_INO_TO_AGNO(mp, ino);
-	agino = XFS_INO_TO_AGINO(mp, ino);
+	agno = XFS_INO_TO_AGNO(mp, *lastinop);
+	agino = XFS_INO_TO_AGINO(mp, *lastinop);
 	if (agno >= mp->m_sb.sb_agcount ||
-	    ino != XFS_AGINO_TO_INO(mp, agno, agino)) {
+	    *lastinop != XFS_AGINO_TO_INO(mp, agno, agino)) {
 		*done = 1;
 		*ubcountp = 0;
 		return 0;
 	}
 
 	ubcount = *ubcountp; /* statstruct's */
-	ubleft = ubcount * statstruct_size; /* bytes */
-	*ubcountp = ubelem = 0;
+	ac.ac_ubuffer = &ubuffer;
+	ac.ac_ubleft = ubcount * statstruct_size; /* bytes */
+	ac.ac_ubelem = 0;
+
+	*ubcountp = 0;
 	*done = 0;
-	fmterror = 0;
-	ubufp = ubuffer;
+
 	irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4);
 	if (!irbuf)
 		return -ENOMEM;
@@ -402,9 +393,13 @@ xfs_bulkstat(
 	 * Loop over the allocation groups, starting from the last
 	 * inode returned; 0 means start of the allocation group.
 	 */
-	rval = 0;
-	while (XFS_BULKSTAT_UBLEFT(ubleft) && agno < mp->m_sb.sb_agcount) {
-		cond_resched();
+	while (agno < mp->m_sb.sb_agcount) {
+		struct xfs_inobt_rec_incore	*irbp = irbuf;
+		struct xfs_inobt_rec_incore	*irbufend = irbuf + nirbuf;
+		bool				end_of_ag = false;
+		int				icount = 0;
+		int				stat;
+
 		error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
 		if (error)
 			break;
@@ -414,10 +409,6 @@ xfs_bulkstat(
 		 */
 		cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno,
 					    XFS_BTNUM_INO);
-		irbp = irbuf;
-		irbufend = irbuf + nirbuf;
-		end_of_ag = 0;
-		icount = 0;
 		if (agino > 0) {
 			/*
 			 * In the middle of an allocation group, we need to get
@@ -427,22 +418,23 @@ xfs_bulkstat(
 
 			error = xfs_bulkstat_grab_ichunk(cur, agino, &icount, &r);
 			if (error)
-				break;
+				goto del_cursor;
 			if (icount) {
 				irbp->ir_startino = r.ir_startino;
 				irbp->ir_freecount = r.ir_freecount;
 				irbp->ir_free = r.ir_free;
 				irbp++;
-				agino = r.ir_startino + XFS_INODES_PER_CHUNK;
 			}
 			/* Increment to the next record */
-			error = xfs_btree_increment(cur, 0, &tmp);
+			error = xfs_btree_increment(cur, 0, &stat);
 		} else {
 			/* Start of ag. Lookup the first inode chunk */
-			error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &tmp);
+			error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &stat);
+		}
+		if (error || stat == 0) {
+			end_of_ag = true;
+			goto del_cursor;
 		}
-		if (error)
-			break;
 
 		/*
 		 * Loop through inode btree records in this ag,
@@ -451,10 +443,10 @@ xfs_bulkstat(
 		while (irbp < irbufend && icount < ubcount) {
 			struct xfs_inobt_rec_incore	r;
 
-			error = xfs_inobt_get_rec(cur, &r, &i);
-			if (error || i == 0) {
-				end_of_ag = 1;
-				break;
+			error = xfs_inobt_get_rec(cur, &r, &stat);
+			if (error || stat == 0) {
+				end_of_ag = true;
+				goto del_cursor;
 			}
 
 			/*
@@ -469,77 +461,79 @@ xfs_bulkstat(
 			irbp++;
 			icount += XFS_INODES_PER_CHUNK - r.ir_freecount;
 		}
-		/*
-		 * Set agino to after this chunk and bump the cursor.
-		 */
-		agino = r.ir_startino + XFS_INODES_PER_CHUNK;
-		error = xfs_btree_increment(cur, 0, &tmp);
+		error = xfs_btree_increment(cur, 0, &stat);
+		if (error || stat == 0) {
+			end_of_ag = true;
+			goto del_cursor;
+		}
 		cond_resched();
 	}
+
 	/*
-	 * Drop the btree buffers and the agi buffer.
-	 * We can't hold any of the locks these represent
-	 * when calling iget.
+	 * Drop the btree buffers and the agi buffer as we can't hold any
+	 * of the locks these represent when calling iget. If there is a
+	 * pending error, then we are done.
 	 */
+del_cursor:
 	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
 	xfs_buf_relse(agbp);
+	if (error)
+		break;
 	/*
-	 * Now format all the good inodes into the user's buffer.
+	 * Now format all the good inodes into the user's buffer. The
+	 * call to xfs_bulkstat_ag_ichunk() sets up the agino pointer
+	 * for the next loop iteration.
 	 */
 	irbufend = irbp;
 	for (irbp = irbuf;
-	     irbp < irbufend && XFS_BULKSTAT_UBLEFT(ubleft); irbp++) {
-		struct xfs_bulkstat_agichunk ac;
-
-		ac.ac_lastino = lastino;
-		ac.ac_ubuffer = &ubuffer;
-		ac.ac_ubleft = ubleft;
-		ac.ac_ubelem = ubelem;
+	     irbp < irbufend && ac.ac_ubleft >= statstruct_size;
+	     irbp++) {
 		error = xfs_bulkstat_ag_ichunk(mp, agno, irbp,
-				formatter, statstruct_size, &ac);
+				formatter, statstruct_size, &ac,
+				&agino);
 		if (error)
-			rval = error;
-
-		lastino = ac.ac_lastino;
-		ubleft = ac.ac_ubleft;
-		ubelem = ac.ac_ubelem;
+			break;
 
 		cond_resched();
 	}
+
 	/*
-	 * Set up for the next loop iteration.
+	 * If we've run out of space or had a formatting error, we
+	 * are now done
 	 */
-	if (XFS_BULKSTAT_UBLEFT(ubleft)) {
-		if (end_of_ag) {
-			agno++;
-			agino = 0;
-		} else
-			agino = XFS_INO_TO_AGINO(mp, lastino);
-	} else
+	if (ac.ac_ubleft < statstruct_size || error)
 		break;
+
+		if (end_of_ag) {
+			agno++;
+			agino = 0;
+		}
 	}
 	/*
 	 * Done, we're either out of filesystem or space to put the data.
 	 */
 	kmem_free(irbuf);
-	*ubcountp = ubelem;
+	*ubcountp = ac.ac_ubelem;
+
 	/*
-	 * Found some inodes, return them now and return the error next time.
+	 * We found some inodes, so clear the error status and return them.
+	 * The lastino pointer will point directly at the inode that triggered
+	 * any error that occurred, so on the next call the error will be
+	 * triggered again and propagated to userspace as there will be no
+	 * formatted inodes in the buffer.
 	 */
-	if (ubelem)
-		rval = 0;
-	if (agno >= mp->m_sb.sb_agcount) {
-		/*
-		 * If we ran out of filesystem, mark lastino as off
-		 * the end of the filesystem, so the next call
-		 * will return immediately.
-		 */
-		*lastinop = (xfs_ino_t)XFS_AGINO_TO_INO(mp, agno, 0);
-		*done = 1;
-	} else
-		*lastinop = (xfs_ino_t)lastino;
+	if (ac.ac_ubelem)
+		error = 0;
+
+	/*
+	 * If we ran out of filesystem, lastino will point off the end of
+	 * the filesystem so the next call will return immediately.
+	 */
+	*lastinop = XFS_AGINO_TO_INO(mp, agno, agino);
+	if (agno >= mp->m_sb.sb_agcount)
+		*done = 1;
 
-	return rval;
+	return error;
 }
 
 int
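The closing comment spells out a deferred-error contract: a call that trips over a bad inode still returns every entry formatted before it, with the error cleared, and because *lastinop is left pointing at the offending inode the next call makes no progress and surfaces the error instead. Seen from the caller's side, a single call looks like this; bulkstat_once() is a hypothetical wrapper, again assuming the xfsprogs userspace definitions from the earlier sketch.

#include <errno.h>
#include <sys/ioctl.h>
#include <xfs/xfs.h>

static int bulkstat_once(int fd, __u64 *lastino,
			 struct xfs_bstat *buf, __s32 nbuf, __s32 *ocount)
{
	struct xfs_fsop_bulkreq	req = {
		.lastip	 = lastino,
		.icount	 = nbuf,
		.ubuffer = buf,
		.ocount	 = ocount,
	};

	if (ioctl(fd, XFS_IOC_FSBULKSTAT, &req) < 0) {
		/* no entries returned; the cookie stays parked on the
		 * bad inode, so retrying yields the same errno */
		return -errno;
	}

	/* *ocount entries are valid; any error was deferred */
	return 0;
}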
diff --git a/fs/xfs/xfs_itable.h b/fs/xfs/xfs_itable.h
index aaed08022eb9..6ea8b3912fa4 100644
--- a/fs/xfs/xfs_itable.h
+++ b/fs/xfs/xfs_itable.h
@@ -30,22 +30,6 @@ typedef int (*bulkstat_one_pf)(struct xfs_mount *mp,
 	int		*ubused,
 	int		*stat);
 
-struct xfs_bulkstat_agichunk {
-	xfs_ino_t	ac_lastino;	/* last inode returned */
-	char		__user **ac_ubuffer;/* pointer into user's buffer */
-	int		ac_ubleft;	/* bytes left in user's buffer */
-	int		ac_ubelem;	/* spaces used in user's buffer */
-};
-
-int
-xfs_bulkstat_ag_ichunk(
-	struct xfs_mount	*mp,
-	xfs_agnumber_t		agno,
-	struct xfs_inobt_rec_incore *irbp,
-	bulkstat_one_pf		formatter,
-	size_t			statstruct_size,
-	struct xfs_bulkstat_agichunk *acp);
-
 /*
  * Values for stat return value.
  */