aboutsummaryrefslogtreecommitdiffstats
path: root/fs/xfs/libxfs/xfs_bmap.c
diff options
context:
space:
mode:
Diffstat (limited to 'fs/xfs/libxfs/xfs_bmap.c')
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c172
1 file changed, 120 insertions(+), 52 deletions(-)
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index ef00156f4f96..041b6948aecc 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -477,10 +477,7 @@ xfs_bmap_check_leaf_extents(
477 } 477 }
478 block = XFS_BUF_TO_BLOCK(bp); 478 block = XFS_BUF_TO_BLOCK(bp);
479 } 479 }
480 if (bp_release) { 480
481 bp_release = 0;
482 xfs_trans_brelse(NULL, bp);
483 }
484 return; 481 return;
485 482
486error0: 483error0:
@@ -912,7 +909,7 @@ xfs_bmap_local_to_extents(
912 * We don't want to deal with the case of keeping inode data inline yet. 909 * We don't want to deal with the case of keeping inode data inline yet.
913 * So sending the data fork of a regular inode is invalid. 910 * So sending the data fork of a regular inode is invalid.
914 */ 911 */
915 ASSERT(!(S_ISREG(ip->i_d.di_mode) && whichfork == XFS_DATA_FORK)); 912 ASSERT(!(S_ISREG(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK));
916 ifp = XFS_IFORK_PTR(ip, whichfork); 913 ifp = XFS_IFORK_PTR(ip, whichfork);
917 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL); 914 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
918 915
@@ -1079,7 +1076,7 @@ xfs_bmap_add_attrfork_local(
1079 if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip)) 1076 if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip))
1080 return 0; 1077 return 0;
1081 1078
1082 if (S_ISDIR(ip->i_d.di_mode)) { 1079 if (S_ISDIR(VFS_I(ip)->i_mode)) {
1083 memset(&dargs, 0, sizeof(dargs)); 1080 memset(&dargs, 0, sizeof(dargs));
1084 dargs.geo = ip->i_mount->m_dir_geo; 1081 dargs.geo = ip->i_mount->m_dir_geo;
1085 dargs.dp = ip; 1082 dargs.dp = ip;
@@ -1091,7 +1088,7 @@ xfs_bmap_add_attrfork_local(
1091 return xfs_dir2_sf_to_block(&dargs); 1088 return xfs_dir2_sf_to_block(&dargs);
1092 } 1089 }
1093 1090
1094 if (S_ISLNK(ip->i_d.di_mode)) 1091 if (S_ISLNK(VFS_I(ip)->i_mode))
1095 return xfs_bmap_local_to_extents(tp, ip, firstblock, 1, 1092 return xfs_bmap_local_to_extents(tp, ip, firstblock, 1,
1096 flags, XFS_DATA_FORK, 1093 flags, XFS_DATA_FORK,
1097 xfs_symlink_local_to_remote); 1094 xfs_symlink_local_to_remote);
@@ -4721,6 +4718,66 @@ error0:
4721} 4718}
4722 4719
4723/* 4720/*
4721 * When a delalloc extent is split (e.g., due to a hole punch), the original
4722 * indlen reservation must be shared across the two new extents that are left
4723 * behind.
4724 *
4725 * Given the original reservation and the worst case indlen for the two new
4726 * extents (as calculated by xfs_bmap_worst_indlen()), split the original
4727 * reservation fairly across the two new extents. If necessary, steal available
4728 * blocks from a deleted extent to make up a reservation deficiency (e.g., if
4729 * ores == 1). The number of stolen blocks is returned. The availability and
4730 * subsequent accounting of stolen blocks is the responsibility of the caller.
4731 */
4732static xfs_filblks_t
4733xfs_bmap_split_indlen(
4734 xfs_filblks_t ores, /* original res. */
4735 xfs_filblks_t *indlen1, /* ext1 worst indlen */
4736 xfs_filblks_t *indlen2, /* ext2 worst indlen */
4737 xfs_filblks_t avail) /* stealable blocks */
4738{
4739 xfs_filblks_t len1 = *indlen1;
4740 xfs_filblks_t len2 = *indlen2;
4741 xfs_filblks_t nres = len1 + len2; /* new total res. */
4742 xfs_filblks_t stolen = 0;
4743
4744 /*
4745 * Steal as many blocks as we can to try and satisfy the worst case
4746 * indlen for both new extents.
4747 */
4748 while (nres > ores && avail) {
4749 nres--;
4750 avail--;
4751 stolen++;
4752 }
4753
4754 /*
4755 * The only blocks available are those reserved for the original
4756 * extent and what we can steal from the extent being removed.
4757 * If this still isn't enough to satisfy the combined
4758 * requirements for the two new extents, skim blocks off of each
4759 * of the new reservations until they match what is available.
4760 */
4761 while (nres > ores) {
4762 if (len1) {
4763 len1--;
4764 nres--;
4765 }
4766 if (nres == ores)
4767 break;
4768 if (len2) {
4769 len2--;
4770 nres--;
4771 }
4772 }
4773
4774 *indlen1 = len1;
4775 *indlen2 = len2;
4776
4777 return stolen;
4778}
4779
4780/*
4724 * Called by xfs_bmapi to update file extent records and the btree 4781 * Called by xfs_bmapi to update file extent records and the btree
4725 * after removing space (or undoing a delayed allocation). 4782 * after removing space (or undoing a delayed allocation).
4726 */ 4783 */
@@ -4984,28 +5041,29 @@ xfs_bmap_del_extent(
4984 XFS_IFORK_NEXT_SET(ip, whichfork, 5041 XFS_IFORK_NEXT_SET(ip, whichfork,
4985 XFS_IFORK_NEXTENTS(ip, whichfork) + 1); 5042 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
4986 } else { 5043 } else {
5044 xfs_filblks_t stolen;
4987 ASSERT(whichfork == XFS_DATA_FORK); 5045 ASSERT(whichfork == XFS_DATA_FORK);
4988 temp = xfs_bmap_worst_indlen(ip, temp); 5046
5047 /*
5048 * Distribute the original indlen reservation across the
5049 * two new extents. Steal blocks from the deleted extent
5050 * if necessary. Stealing blocks simply fudges the
5051 * fdblocks accounting in xfs_bunmapi().
5052 */
5053 temp = xfs_bmap_worst_indlen(ip, got.br_blockcount);
5054 temp2 = xfs_bmap_worst_indlen(ip, new.br_blockcount);
5055 stolen = xfs_bmap_split_indlen(da_old, &temp, &temp2,
5056 del->br_blockcount);
5057 da_new = temp + temp2 - stolen;
5058 del->br_blockcount -= stolen;
5059
5060 /*
5061 * Set the reservation for each extent. Warn if either
5062 * is zero as this can lead to delalloc problems.
5063 */
5064 WARN_ON_ONCE(!temp || !temp2);
4989 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); 5065 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
4990 temp2 = xfs_bmap_worst_indlen(ip, temp2);
4991 new.br_startblock = nullstartblock((int)temp2); 5066 new.br_startblock = nullstartblock((int)temp2);
4992 da_new = temp + temp2;
4993 while (da_new > da_old) {
4994 if (temp) {
4995 temp--;
4996 da_new--;
4997 xfs_bmbt_set_startblock(ep,
4998 nullstartblock((int)temp));
4999 }
5000 if (da_new == da_old)
5001 break;
5002 if (temp2) {
5003 temp2--;
5004 da_new--;
5005 new.br_startblock =
5006 nullstartblock((int)temp2);
5007 }
5008 }
5009 } 5067 }
5010 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); 5068 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
5011 xfs_iext_insert(ip, *idx + 1, 1, &new, state); 5069 xfs_iext_insert(ip, *idx + 1, 1, &new, state);
@@ -5210,7 +5268,7 @@ xfs_bunmapi(
5210 * This is better than zeroing it. 5268 * This is better than zeroing it.
5211 */ 5269 */
5212 ASSERT(del.br_state == XFS_EXT_NORM); 5270 ASSERT(del.br_state == XFS_EXT_NORM);
5213 ASSERT(xfs_trans_get_block_res(tp) > 0); 5271 ASSERT(tp->t_blk_res > 0);
5214 /* 5272 /*
5215 * If this spans a realtime extent boundary, 5273 * If this spans a realtime extent boundary,
5216 * chop it back to the start of the one we end at. 5274 * chop it back to the start of the one we end at.
@@ -5241,7 +5299,7 @@ xfs_bunmapi(
5241 del.br_startblock += mod; 5299 del.br_startblock += mod;
5242 } else if ((del.br_startoff == start && 5300 } else if ((del.br_startoff == start &&
5243 (del.br_state == XFS_EXT_UNWRITTEN || 5301 (del.br_state == XFS_EXT_UNWRITTEN ||
5244 xfs_trans_get_block_res(tp) == 0)) || 5302 tp->t_blk_res == 0)) ||
5245 !xfs_sb_version_hasextflgbit(&mp->m_sb)) { 5303 !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
5246 /* 5304 /*
5247 * Can't make it unwritten. There isn't 5305 * Can't make it unwritten. There isn't
@@ -5296,9 +5354,37 @@ xfs_bunmapi(
5296 goto nodelete; 5354 goto nodelete;
5297 } 5355 }
5298 } 5356 }
5357
5358 /*
5359 * If it's the case where the directory code is running
5360 * with no block reservation, and the deleted block is in
5361 * the middle of its extent, and the resulting insert
5362 * of an extent would cause transformation to btree format,
5363 * then reject it. The calling code will then swap
5364 * blocks around instead.
5365 * We have to do this now, rather than waiting for the
5366 * conversion to btree format, since the transaction
5367 * will be dirty.
5368 */
5369 if (!wasdel && tp->t_blk_res == 0 &&
5370 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
5371 XFS_IFORK_NEXTENTS(ip, whichfork) >= /* Note the >= */
5372 XFS_IFORK_MAXEXT(ip, whichfork) &&
5373 del.br_startoff > got.br_startoff &&
5374 del.br_startoff + del.br_blockcount <
5375 got.br_startoff + got.br_blockcount) {
5376 error = -ENOSPC;
5377 goto error0;
5378 }
5379
5380 /*
5381 * Unreserve quota and update realtime free space, if
5382 * appropriate. If delayed allocation, update the inode delalloc
5383 * counter now and wait to update the sb counters as
5384 * xfs_bmap_del_extent() might need to borrow some blocks.
5385 */
5299 if (wasdel) { 5386 if (wasdel) {
5300 ASSERT(startblockval(del.br_startblock) > 0); 5387 ASSERT(startblockval(del.br_startblock) > 0);
5301 /* Update realtime/data freespace, unreserve quota */
5302 if (isrt) { 5388 if (isrt) {
5303 xfs_filblks_t rtexts; 5389 xfs_filblks_t rtexts;
5304 5390
@@ -5309,8 +5395,6 @@ xfs_bunmapi(
5309 ip, -((long)del.br_blockcount), 0, 5395 ip, -((long)del.br_blockcount), 0,
5310 XFS_QMOPT_RES_RTBLKS); 5396 XFS_QMOPT_RES_RTBLKS);
5311 } else { 5397 } else {
5312 xfs_mod_fdblocks(mp, (int64_t)del.br_blockcount,
5313 false);
5314 (void)xfs_trans_reserve_quota_nblks(NULL, 5398 (void)xfs_trans_reserve_quota_nblks(NULL,
5315 ip, -((long)del.br_blockcount), 0, 5399 ip, -((long)del.br_blockcount), 0,
5316 XFS_QMOPT_RES_REGBLKS); 5400 XFS_QMOPT_RES_REGBLKS);
@@ -5321,32 +5405,16 @@ xfs_bunmapi(
5321 XFS_BTCUR_BPRV_WASDEL; 5405 XFS_BTCUR_BPRV_WASDEL;
5322 } else if (cur) 5406 } else if (cur)
5323 cur->bc_private.b.flags &= ~XFS_BTCUR_BPRV_WASDEL; 5407 cur->bc_private.b.flags &= ~XFS_BTCUR_BPRV_WASDEL;
5324 /* 5408
5325 * If it's the case where the directory code is running
5326 * with no block reservation, and the deleted block is in
5327 * the middle of its extent, and the resulting insert
5328 * of an extent would cause transformation to btree format,
5329 * then reject it. The calling code will then swap
5330 * blocks around instead.
5331 * We have to do this now, rather than waiting for the
5332 * conversion to btree format, since the transaction
5333 * will be dirty.
5334 */
5335 if (!wasdel && xfs_trans_get_block_res(tp) == 0 &&
5336 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
5337 XFS_IFORK_NEXTENTS(ip, whichfork) >= /* Note the >= */
5338 XFS_IFORK_MAXEXT(ip, whichfork) &&
5339 del.br_startoff > got.br_startoff &&
5340 del.br_startoff + del.br_blockcount <
5341 got.br_startoff + got.br_blockcount) {
5342 error = -ENOSPC;
5343 goto error0;
5344 }
5345 error = xfs_bmap_del_extent(ip, tp, &lastx, flist, cur, &del, 5409 error = xfs_bmap_del_extent(ip, tp, &lastx, flist, cur, &del,
5346 &tmp_logflags, whichfork); 5410 &tmp_logflags, whichfork);
5347 logflags |= tmp_logflags; 5411 logflags |= tmp_logflags;
5348 if (error) 5412 if (error)
5349 goto error0; 5413 goto error0;
5414
5415 if (!isrt && wasdel)
5416 xfs_mod_fdblocks(mp, (int64_t)del.br_blockcount, false);
5417
5350 bno = del.br_startoff - 1; 5418 bno = del.br_startoff - 1;
5351nodelete: 5419nodelete:
5352 /* 5420 /*