path: root/fs/xfs/xfs_vnodeops.c
author     Glenn Elliott <gelliott@cs.unc.edu>  2012-03-04 19:47:13 -0500
committer  Glenn Elliott <gelliott@cs.unc.edu>  2012-03-04 19:47:13 -0500
commit     c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree       ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /fs/xfs/xfs_vnodeops.c
parent     ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent     6a00f206debf8a5c8899055726ad127dbeeed098 (diff)
Merge branch 'mpi-master' into wip-k-fmlp
Conflicts: litmus/sched_cedf.c
Diffstat (limited to 'fs/xfs/xfs_vnodeops.c')
-rw-r--r--  fs/xfs/xfs_vnodeops.c  212
1 file changed, 106 insertions(+), 106 deletions(-)
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index 4c7c7bfb2b2f..619720705bc6 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -114,7 +114,7 @@ xfs_setattr(
                 */
                ASSERT(udqp == NULL);
                ASSERT(gdqp == NULL);
-               code = xfs_qm_vop_dqalloc(ip, uid, gid, ip->i_d.di_projid,
+               code = xfs_qm_vop_dqalloc(ip, uid, gid, xfs_get_projid(ip),
                                          qflags, &udqp, &gdqp);
                if (code)
                        return code;
@@ -184,8 +184,11 @@ xfs_setattr(
                    ip->i_size == 0 && ip->i_d.di_nextents == 0) {
                        xfs_iunlock(ip, XFS_ILOCK_EXCL);
                        lock_flags &= ~XFS_ILOCK_EXCL;
-                       if (mask & ATTR_CTIME)
-                               xfs_ichgtime(ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+                       if (mask & ATTR_CTIME) {
+                               inode->i_mtime = inode->i_ctime =
+                                       current_fs_time(inode->i_sb);
+                               xfs_mark_inode_dirty_sync(ip);
+                       }
                        code = 0;
                        goto error_return;
                }
@@ -950,40 +953,62 @@ xfs_release(
                 * If we previously truncated this file and removed old data
                 * in the process, we want to initiate "early" writeout on
                 * the last close. This is an attempt to combat the notorious
-                * NULL files problem which is particularly noticable from a
+                * NULL files problem which is particularly noticeable from a
                 * truncate down, buffered (re-)write (delalloc), followed by
                 * a crash. What we are effectively doing here is
                 * significantly reducing the time window where we'd otherwise
                 * be exposed to that problem.
                 */
                truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
-               if (truncated && VN_DIRTY(VFS_I(ip)) && ip->i_delayed_blks > 0)
-                       xfs_flush_pages(ip, 0, -1, XBF_ASYNC, FI_NONE);
+               if (truncated) {
+                       xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE);
+                       if (VN_DIRTY(VFS_I(ip)) && ip->i_delayed_blks > 0)
+                               xfs_flush_pages(ip, 0, -1, XBF_ASYNC, FI_NONE);
+               }
        }
 
-       if (ip->i_d.di_nlink != 0) {
-               if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) &&
-                    ((ip->i_size > 0) || (VN_CACHED(VFS_I(ip)) > 0 ||
-                      ip->i_delayed_blks > 0)) &&
-                    (ip->i_df.if_flags & XFS_IFEXTENTS)) &&
-                   (!(ip->i_d.di_flags &
-                               (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))) {
+       if (ip->i_d.di_nlink == 0)
+               return 0;
 
-                       /*
-                        * If we can't get the iolock just skip truncating
-                        * the blocks past EOF because we could deadlock
-                        * with the mmap_sem otherwise. We'll get another
-                        * chance to drop them once the last reference to
-                        * the inode is dropped, so we'll never leak blocks
-                        * permanently.
-                        */
-                       error = xfs_free_eofblocks(mp, ip,
-                                                  XFS_FREE_EOF_TRYLOCK);
-                       if (error)
-                               return error;
-               }
-       }
+       if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) &&
+            ((ip->i_size > 0) || (VN_CACHED(VFS_I(ip)) > 0 ||
+              ip->i_delayed_blks > 0)) &&
+            (ip->i_df.if_flags & XFS_IFEXTENTS)) &&
+           (!(ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))) {
+
+               /*
+                * If we can't get the iolock just skip truncating the blocks
+                * past EOF because we could deadlock with the mmap_sem
+                * otherwise. We'll get another chance to drop them once the
+                * last reference to the inode is dropped, so we'll never leak
+                * blocks permanently.
+                *
+                * Further, check if the inode is being opened, written and
+                * closed frequently and we have delayed allocation blocks
+                * outstanding (e.g. streaming writes from the NFS server),
+                * truncating the blocks past EOF will cause fragmentation to
+                * occur.
+                *
+                * In this case don't do the truncation, either, but we have to
+                * be careful how we detect this case. Blocks beyond EOF show
+                * up as i_delayed_blks even when the inode is clean, so we
+                * need to truncate them away first before checking for a dirty
+                * release. Hence on the first dirty close we will still remove
+                * the speculative allocation, but after that we will leave it
+                * in place.
+                */
+               if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
+                       return 0;
+
+               error = xfs_free_eofblocks(mp, ip,
+                                          XFS_FREE_EOF_TRYLOCK);
+               if (error)
+                       return error;
 
+               /* delalloc blocks after truncation means it really is dirty */
+               if (ip->i_delayed_blks)
+                       xfs_iflags_set(ip, XFS_IDIRTY_RELEASE);
+       }
        return 0;
 }
 
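The hunk above turns the unconditional EOF-block trim in xfs_release() into a once-per-dirty-cycle heuristic: the flag is cleared when the file is truncated, checked before the trim, and set again if delayed-allocation blocks survive the trim, so a file that is repeatedly opened, written and closed keeps its speculative preallocation. A minimal standalone sketch of that control flow (the demo_* names are hypothetical stand-ins, not the real XFS flag helpers):

#include <stdbool.h>
#include <stdio.h>

#define DEMO_IDIRTY_RELEASE 0x1   /* stand-in for XFS_IDIRTY_RELEASE */

struct demo_inode {
        unsigned int flags;       /* inode flag word */
        long delayed_blks;        /* delalloc blocks beyond EOF */
};

/* Called on each "close"; returns true if the EOF trim was performed. */
static bool demo_release(struct demo_inode *ip)
{
        /* After the first dirty close, leave the preallocation in place. */
        if (ip->flags & DEMO_IDIRTY_RELEASE)
                return false;

        /* Trim speculative blocks past EOF (modelled as dropping two). */
        ip->delayed_blks = ip->delayed_blks > 2 ? ip->delayed_blks - 2 : 0;

        /* Delalloc blocks remaining after the trim mean the file is still
         * being written, so remember that and skip the trim next time. */
        if (ip->delayed_blks)
                ip->flags |= DEMO_IDIRTY_RELEASE;
        return true;
}

int main(void)
{
        struct demo_inode ip = { .flags = 0, .delayed_blks = 5 };

        printf("first close trimmed:  %d\n", demo_release(&ip));
        printf("second close trimmed: %d\n", demo_release(&ip));
        return 0;
}

The second call shows the effect of the flag: once a close has seen outstanding delalloc blocks, later closes stop shrinking the preallocation, which is what avoids the fragmentation described in the added comment.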
@@ -1167,9 +1192,8 @@ xfs_inactive(
                 * inode might be lost for a long time or forever.
                 */
                if (!XFS_FORCED_SHUTDOWN(mp)) {
-                       cmn_err(CE_NOTE,
-               "xfs_inactive: xfs_ifree() returned an error = %d on %s",
-                               error, mp->m_fsname);
+                       xfs_notice(mp, "%s: xfs_ifree returned error %d",
+                               __func__, error);
                        xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
                }
                xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
@@ -1186,12 +1210,12 @@ xfs_inactive(
                 */
                error = xfs_bmap_finish(&tp, &free_list, &committed);
                if (error)
-                       xfs_fs_cmn_err(CE_NOTE, mp, "xfs_inactive: "
-                               "xfs_bmap_finish() returned error %d", error);
+                       xfs_notice(mp, "%s: xfs_bmap_finish returned error %d",
+                               __func__, error);
                error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
                if (error)
-                       xfs_fs_cmn_err(CE_NOTE, mp, "xfs_inactive: "
-                               "xfs_trans_commit() returned error %d", error);
+                       xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
+                               __func__, error);
        }
 
        /*
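Both hunks above replace the split cmn_err()/xfs_fs_cmn_err() calls with single xfs_notice() calls that take the mount and a printf-style format, and let __func__ supply the function name instead of spelling it out in each string. A small sketch of what such a printf-style notice helper looks like (demo_notice() is a hypothetical stand-in, not the kernel implementation):

#include <stdarg.h>
#include <stdio.h>

/* Hypothetical stand-in for xfs_notice(): prefix the message with the
 * filesystem name, then format the caller's printf-style arguments. */
static void demo_notice(const char *fsname, const char *fmt, ...)
{
        va_list args;

        fprintf(stderr, "XFS (%s): ", fsname);
        va_start(args, fmt);
        vfprintf(stderr, fmt, args);
        va_end(args);
        fputc('\n', stderr);
}

int main(void)
{
        int error = -117;

        /* One call site per message; __func__ supplies the function name
         * that the old messages spelled out by hand. */
        demo_notice("sda1", "%s: xfs_ifree returned error %d", __func__, error);
        return 0;
}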
@@ -1253,8 +1277,7 @@ xfs_create(
        struct xfs_name         *name,
        mode_t                  mode,
        xfs_dev_t               rdev,
-       xfs_inode_t             **ipp,
-       cred_t                  *credp)
+       xfs_inode_t             **ipp)
 {
        int                     is_dir = S_ISDIR(mode);
        struct xfs_mount        *mp = dp->i_mount;
@@ -1266,7 +1289,7 @@ xfs_create(
        boolean_t               unlock_dp_on_error = B_FALSE;
        uint                    cancel_flags;
        int                     committed;
-       xfs_prid_t              prid;
+       prid_t                  prid;
        struct xfs_dquot        *udqp = NULL;
        struct xfs_dquot        *gdqp = NULL;
        uint                    resblks;
@@ -1279,9 +1302,9 @@ xfs_create(
                return XFS_ERROR(EIO);
 
        if (dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
-               prid = dp->i_d.di_projid;
+               prid = xfs_get_projid(dp);
        else
-               prid = dfltprid;
+               prid = XFS_PROJID_DEFAULT;
 
        /*
         * Make sure that we have allocated dquot(s) on disk.
@@ -1289,7 +1312,7 @@ xfs_create(
        error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid,
                        XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp);
        if (error)
-               goto std_return;
+               return error;
 
        if (is_dir) {
                rdev = 0;
@@ -1360,7 +1383,7 @@ xfs_create(
         * entry pointing to them, but a directory also the "." entry
         * pointing to itself.
         */
-       error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev, credp,
+       error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev,
                        prid, resblks > 0, &ip, &committed);
        if (error) {
                if (error == ENOSPC)
@@ -1369,12 +1392,6 @@ xfs_create(
        }
 
        /*
-        * At this point, we've gotten a newly allocated inode.
-        * It is locked (and joined to the transaction).
-        */
-       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
-
-       /*
         * Now we join the directory inode to the transaction. We do not do it
         * earlier because xfs_dir_ialloc might commit the previous transaction
         * (and release all the locks). An error from here on will result in
@@ -1391,7 +1408,7 @@ xfs_create(
                ASSERT(error != ENOSPC);
                goto out_trans_abort;
        }
-       xfs_ichgtime(dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+       xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
        xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
 
        if (is_dir) {
@@ -1419,22 +1436,13 @@ xfs_create(
         */
        xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp);
 
-       /*
-        * xfs_trans_commit normally decrements the vnode ref count
-        * when it unlocks the inode. Since we want to return the
-        * vnode to the caller, we bump the vnode ref count now.
-        */
-       IHOLD(ip);
-
        error = xfs_bmap_finish(&tp, &free_list, &committed);
        if (error)
-               goto out_abort_rele;
+               goto out_bmap_cancel;
 
        error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
-       if (error) {
-               IRELE(ip);
-               goto out_dqrele;
-       }
+       if (error)
+               goto out_release_inode;
 
        xfs_qm_dqrele(udqp);
        xfs_qm_dqrele(gdqp);
@@ -1448,27 +1456,21 @@ xfs_create(
        cancel_flags |= XFS_TRANS_ABORT;
  out_trans_cancel:
        xfs_trans_cancel(tp, cancel_flags);
- out_dqrele:
+ out_release_inode:
+       /*
+        * Wait until after the current transaction is aborted to
+        * release the inode. This prevents recursive transactions
+        * and deadlocks from xfs_inactive.
+        */
+       if (ip)
+               IRELE(ip);
+
        xfs_qm_dqrele(udqp);
        xfs_qm_dqrele(gdqp);
 
        if (unlock_dp_on_error)
                xfs_iunlock(dp, XFS_ILOCK_EXCL);
- std_return:
        return error;
-
- out_abort_rele:
-       /*
-        * Wait until after the current transaction is aborted to
-        * release the inode. This prevents recursive transactions
-        * and deadlocks from xfs_inactive.
-        */
-       xfs_bmap_cancel(&free_list);
-       cancel_flags |= XFS_TRANS_ABORT;
-       xfs_trans_cancel(tp, cancel_flags);
-       IRELE(ip);
-       unlock_dp_on_error = B_FALSE;
-       goto out_dqrele;
 }
 
 #ifdef DEBUG
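The xfs_create() error paths above collapse into a single unwind chain (out_bmap_cancel, out_trans_cancel, out_release_inode), with the inode reference dropped only after the transaction has been cancelled. A generic sketch of that goto-unwind shape, using made-up resources rather than the XFS calls:

#include <stdio.h>
#include <stdlib.h>

/* Made-up resources standing in for the transaction, the free-block list
 * and the newly allocated inode that xfs_create() must unwind on failure. */
static int demo_create(int fail_at)
{
        char *trans, *blocks = NULL, *inode = NULL;
        int error = 0;

        trans = malloc(16);                     /* "reserve a transaction" */
        if (!trans)
                return -1;

        blocks = (fail_at == 1) ? NULL : malloc(16);
        if (!blocks) {                          /* block list setup failed */
                error = -1;
                goto out_trans_cancel;
        }

        inode = (fail_at == 2) ? NULL : malloc(16);
        if (!inode) {                           /* inode allocation failed */
                error = -1;
                goto out_blocks_cancel;
        }

        free(blocks);                           /* "commit": success path */
        free(trans);
        free(inode);                            /* real code returns this */
        return 0;

out_blocks_cancel:
        free(blocks);                           /* like out_bmap_cancel */
out_trans_cancel:
        free(trans);                            /* like out_trans_cancel */
        /*
         * Drop the inode only after the "transaction" is gone, the same
         * ordering out_release_inode enforces so that dropping the last
         * reference cannot recurse into transaction teardown.
         */
        free(inode);
        return error;
}

int main(void)
{
        printf("ok=%d fail1=%d fail2=%d\n",
               demo_create(0), demo_create(1), demo_create(2));
        return 0;
}

Each failure point jumps to the label that undoes only what has already been set up, so the success path stays linear and no cleanup code is duplicated.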
@@ -1742,7 +1744,7 @@ xfs_remove(
                ASSERT(error != ENOENT);
                goto out_bmap_cancel;
        }
-       xfs_ichgtime(dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+       xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
 
        if (is_dir) {
                /*
@@ -1880,7 +1882,7 @@ xfs_link(
         * the tree quota mechanism could be circumvented.
         */
        if (unlikely((tdp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
-                    (tdp->i_d.di_projid != sip->i_d.di_projid))) {
+                    (xfs_get_projid(tdp) != xfs_get_projid(sip)))) {
                error = XFS_ERROR(EXDEV);
                goto error_return;
        }
@@ -1895,7 +1897,7 @@ xfs_link(
                                        &first_block, &free_list, resblks);
        if (error)
                goto abort_return;
-       xfs_ichgtime(tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+       xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
        xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE);
 
        error = xfs_bumplink(tp, sip);
@@ -1933,8 +1935,7 @@ xfs_symlink(
        struct xfs_name         *link_name,
        const char              *target_path,
        mode_t                  mode,
-       xfs_inode_t             **ipp,
-       cred_t                  *credp)
+       xfs_inode_t             **ipp)
 {
        xfs_mount_t             *mp = dp->i_mount;
        xfs_trans_t             *tp;
@@ -1955,7 +1956,7 @@ xfs_symlink(
        int                     byte_cnt;
        int                     n;
        xfs_buf_t               *bp;
-       xfs_prid_t              prid;
+       prid_t                  prid;
        struct xfs_dquot        *udqp, *gdqp;
        uint                    resblks;
 
@@ -1978,9 +1979,9 @@ xfs_symlink(
 
        udqp = gdqp = NULL;
        if (dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
-               prid = dp->i_d.di_projid;
+               prid = xfs_get_projid(dp);
        else
-               prid = (xfs_prid_t)dfltprid;
+               prid = XFS_PROJID_DEFAULT;
 
        /*
         * Make sure that we have allocated dquot(s) on disk.
@@ -2046,8 +2047,8 @@ xfs_symlink(
        /*
         * Allocate an inode for the symlink.
         */
-       error = xfs_dir_ialloc(&tp, dp, S_IFLNK | (mode & ~S_IFMT),
-                              1, 0, credp, prid, resblks > 0, &ip, NULL);
+       error = xfs_dir_ialloc(&tp, dp, S_IFLNK | (mode & ~S_IFMT), 1, 0,
+                              prid, resblks > 0, &ip, NULL);
        if (error) {
                if (error == ENOSPC)
                        goto error_return;
@@ -2094,9 +2095,8 @@ xfs_symlink(
                                  XFS_BMAPI_WRITE | XFS_BMAPI_METADATA,
                                  &first_block, resblks, mval, &nmaps,
                                  &free_list);
-               if (error) {
-                       goto error1;
-               }
+               if (error)
+                       goto error2;
 
                if (resblks)
                        resblks -= fs_blocks;
@@ -2128,8 +2128,8 @@ xfs_symlink(
        error = xfs_dir_createname(tp, dp, link_name, ip->i_ino,
                                        &first_block, &free_list, resblks);
        if (error)
-               goto error1;
-       xfs_ichgtime(dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+               goto error2;
+       xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
        xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
 
        /*
@@ -2141,13 +2141,6 @@ xfs_symlink(
                xfs_trans_set_sync(tp);
        }
 
-       /*
-        * xfs_trans_commit normally decrements the vnode ref count
-        * when it unlocks the inode. Since we want to return the
-        * vnode to the caller, we bump the vnode ref count now.
-        */
-       IHOLD(ip);
-
        error = xfs_bmap_finish(&tp, &free_list, &committed);
        if (error) {
                goto error2;
@@ -2272,7 +2265,7 @@ xfs_alloc_file_space(
        count = len;
        imapp = &imaps[0];
        nimaps = 1;
-       bmapi_flag = XFS_BMAPI_WRITE | (alloc_type ? XFS_BMAPI_PREALLOC : 0);
+       bmapi_flag = XFS_BMAPI_WRITE | alloc_type;
        startoffset_fsb = XFS_B_TO_FSBT(mp, offset);
        allocatesize_fsb = XFS_B_TO_FSB(mp, count);
 
@@ -2431,9 +2424,9 @@ xfs_zero_remaining_bytes(
        if (endoff > ip->i_size)
                endoff = ip->i_size;
 
-       bp = xfs_buf_get_noaddr(mp->m_sb.sb_blocksize,
-                               XFS_IS_REALTIME_INODE(ip) ?
-                               mp->m_rtdev_targp : mp->m_ddev_targp);
+       bp = xfs_buf_get_uncached(XFS_IS_REALTIME_INODE(ip) ?
+                                       mp->m_rtdev_targp : mp->m_ddev_targp,
+                                 mp->m_sb.sb_blocksize, XBF_DONT_BLOCK);
        if (!bp)
                return XFS_ERROR(ENOMEM);
 
@@ -2459,7 +2452,7 @@ xfs_zero_remaining_bytes(
                XFS_BUF_READ(bp);
                XFS_BUF_SET_ADDR(bp, xfs_fsb_to_db(ip, imap.br_startblock));
                xfsbdstrat(mp, bp);
-               error = xfs_iowait(bp);
+               error = xfs_buf_iowait(bp);
                if (error) {
                        xfs_ioerror_alert("xfs_zero_remaining_bytes(read)",
                                          mp, bp, XFS_BUF_ADDR(bp));
@@ -2472,7 +2465,7 @@ xfs_zero_remaining_bytes(
                XFS_BUF_UNREAD(bp);
                XFS_BUF_WRITE(bp);
                xfsbdstrat(mp, bp);
-               error = xfs_iowait(bp);
+               error = xfs_buf_iowait(bp);
                if (error) {
                        xfs_ioerror_alert("xfs_zero_remaining_bytes(write)",
                                          mp, bp, XFS_BUF_ADDR(bp));
@@ -2711,6 +2704,7 @@ xfs_change_file_space(
        xfs_off_t       llen;
        xfs_trans_t     *tp;
        struct iattr    iattr;
+       int             prealloc_type;
 
        if (!S_ISREG(ip->i_d.di_mode))
                return XFS_ERROR(EINVAL);
@@ -2753,12 +2747,17 @@ xfs_change_file_space(
         * size to be changed.
         */
        setprealloc = clrprealloc = 0;
+       prealloc_type = XFS_BMAPI_PREALLOC;
 
        switch (cmd) {
+       case XFS_IOC_ZERO_RANGE:
+               prealloc_type |= XFS_BMAPI_CONVERT;
+               xfs_tosspages(ip, startoffset, startoffset + bf->l_len, 0);
+               /* FALLTHRU */
        case XFS_IOC_RESVSP:
        case XFS_IOC_RESVSP64:
                error = xfs_alloc_file_space(ip, startoffset, bf->l_len,
-                                               1, attr_flags);
+                                               prealloc_type, attr_flags);
                if (error)
                        return error;
                setprealloc = 1;
@@ -2827,7 +2826,7 @@ xfs_change_file_space(
                if (ip->i_d.di_mode & S_IXGRP)
                        ip->i_d.di_mode &= ~S_ISGID;
 
-               xfs_ichgtime(ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+               xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
        }
        if (setprealloc)
                ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
@@ -2835,7 +2834,8 @@ xfs_change_file_space(
                ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;
 
        xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
-       xfs_trans_set_sync(tp);
+       if (attr_flags & XFS_ATTR_SYNC)
+               xfs_trans_set_sync(tp);
 
        error = xfs_trans_commit(tp, 0);
 
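The last hunks wire the new XFS_IOC_ZERO_RANGE case into the existing preallocation path by OR-ing an extra flag into prealloc_type and deliberately falling through to the RESVSP cases, and they make the final commit synchronous only when the caller asked for it. A compact sketch of that fallthrough flag-building shape (demo constants, not the XFS ioctl values):

#include <stdio.h>

/* Demo stand-ins for the ioctl commands and allocation flags. */
enum demo_cmd { DEMO_RESVSP, DEMO_ZERO_RANGE };
#define DEMO_PREALLOC 0x1
#define DEMO_CONVERT  0x2

static int demo_space_flags(enum demo_cmd cmd)
{
        int prealloc_type = DEMO_PREALLOC;

        switch (cmd) {
        case DEMO_ZERO_RANGE:
                prealloc_type |= DEMO_CONVERT;  /* also convert existing extents */
                /* FALLTHRU: zero-range shares the reservation path below */
        case DEMO_RESVSP:
                return prealloc_type;           /* passed to the allocator */
        }
        return 0;
}

int main(void)
{
        printf("resvsp=%#x zero_range=%#x\n",
               demo_space_flags(DEMO_RESVSP),
               demo_space_flags(DEMO_ZERO_RANGE));
        return 0;
}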