author     Christoph Hellwig <hch@lst.de>    2011-07-08 08:34:30 -0400
committer  Christoph Hellwig <hch@lst.de>    2011-07-08 08:34:30 -0400
commit     857b9778d86ccba7d7b42c9d8aeecde794ec8a6b (patch)
tree       8e336f66e4e712da3f6d3f9a2f87bd2e0a8ca781 /fs/xfs/xfs_inode.c
parent     681b120018e3c7e2680c93e8188c5ee34215df2f (diff)
xfs: kill xfs_itruncate_start
xfs_itruncate_start is a rather lengthy wrapper that evaluates to a call
to xfs_ioend_wait and xfs_tosspages, and only has two callers.
Instead of using the complicated checks left over from IRIX for where we
can truncate the pagecache, just call xfs_tosspages
(aka truncate_inode_pages) directly: we want to get rid of all data
after i_size, and truncate_inode_pages handles incorrect alignments
and too-large offsets just fine.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Alex Elder <aelder@sgi.com>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
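
For context, the sketch below shows what the removed wrapper reduces to once
the IRIX-era range checks are dropped. It is illustrative only, not code from
this commit or from the callers it touches; it assumes the 2011-era interfaces
named in the commit message (xfs_ioend_wait, truncate_inode_pages, VFS_I), and
the helper name example_truncate_pagecache is hypothetical.

```c
/*
 * Illustrative sketch, not from this commit: what a caller of the old
 * xfs_itruncate_start() effectively needs after its removal.
 */
static void
example_truncate_pagecache(
	struct xfs_inode	*ip,
	xfs_fsize_t		new_size)
{
	/* Barrier against in-flight direct I/O, as the old wrapper did. */
	xfs_ioend_wait(ip);

	/*
	 * Toss all cached data from new_size onwards.  truncate_inode_pages()
	 * copes with unaligned starts and offsets beyond EOF by itself, so
	 * the old IRIX-derived "where can we truncate" checks are not needed.
	 */
	truncate_inode_pages(VFS_I(ip)->i_mapping, new_size);
}
```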
Diffstat (limited to 'fs/xfs/xfs_inode.c')
-rw-r--r--  fs/xfs/xfs_inode.c  163
1 file changed, 3 insertions, 160 deletions
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index a098a20ca63e..82a282ab63dc 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -1217,165 +1217,8 @@ xfs_isize_check(
 #endif /* DEBUG */
 
 /*
- * Calculate the last possible buffered byte in a file. This must
- * include data that was buffered beyond the EOF by the write code.
- * This also needs to deal with overflowing the xfs_fsize_t type
- * which can happen for sizes near the limit.
- *
- * We also need to take into account any blocks beyond the EOF. It
- * may be the case that they were buffered by a write which failed.
- * In that case the pages will still be in memory, but the inode size
- * will never have been updated.
- */
-STATIC xfs_fsize_t
-xfs_file_last_byte(
-        xfs_inode_t     *ip)
-{
-        xfs_mount_t     *mp;
-        xfs_fsize_t     last_byte;
-        xfs_fileoff_t   last_block;
-        xfs_fileoff_t   size_last_block;
-        int             error;
-
-        ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED));
-
-        mp = ip->i_mount;
-        /*
-         * Only check for blocks beyond the EOF if the extents have
-         * been read in. This eliminates the need for the inode lock,
-         * and it also saves us from looking when it really isn't
-         * necessary.
-         */
-        if (ip->i_df.if_flags & XFS_IFEXTENTS) {
-                xfs_ilock(ip, XFS_ILOCK_SHARED);
-                error = xfs_bmap_last_offset(NULL, ip, &last_block,
-                        XFS_DATA_FORK);
-                xfs_iunlock(ip, XFS_ILOCK_SHARED);
-                if (error) {
-                        last_block = 0;
-                }
-        } else {
-                last_block = 0;
-        }
-        size_last_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)ip->i_size);
-        last_block = XFS_FILEOFF_MAX(last_block, size_last_block);
-
-        last_byte = XFS_FSB_TO_B(mp, last_block);
-        if (last_byte < 0) {
-                return XFS_MAXIOFFSET(mp);
-        }
-        last_byte += (1 << mp->m_writeio_log);
-        if (last_byte < 0) {
-                return XFS_MAXIOFFSET(mp);
-        }
-        return last_byte;
-}
-
-/*
- * Start the truncation of the file to new_size. The new size
- * must be smaller than the current size. This routine will
- * clear the buffer and page caches of file data in the removed
- * range, and xfs_itruncate_finish() will remove the underlying
- * disk blocks.
- *
- * The inode must have its I/O lock locked EXCLUSIVELY, and it
- * must NOT have the inode lock held at all. This is because we're
- * calling into the buffer/page cache code and we can't hold the
- * inode lock when we do so.
- *
- * We need to wait for any direct I/Os in flight to complete before we
- * proceed with the truncate. This is needed to prevent the extents
- * being read or written by the direct I/Os from being removed while the
- * I/O is in flight as there is no other method of synchronising
- * direct I/O with the truncate operation. Also, because we hold
- * the IOLOCK in exclusive mode, we prevent new direct I/Os from being
- * started until the truncate completes and drops the lock. Essentially,
- * the xfs_ioend_wait() call forms an I/O barrier that provides strict
- * ordering between direct I/Os and the truncate operation.
- *
- * The flags parameter can have either the value XFS_ITRUNC_DEFINITE
- * or XFS_ITRUNC_MAYBE. The XFS_ITRUNC_MAYBE value should be used
- * in the case that the caller is locking things out of order and
- * may not be able to call xfs_itruncate_finish() with the inode lock
- * held without dropping the I/O lock. If the caller must drop the
- * I/O lock before calling xfs_itruncate_finish(), then xfs_itruncate_start()
- * must be called again with all the same restrictions as the initial
- * call.
- */
-int
-xfs_itruncate_start(
-        xfs_inode_t     *ip,
-        uint            flags,
-        xfs_fsize_t     new_size)
-{
-        xfs_fsize_t     last_byte;
-        xfs_off_t       toss_start;
-        xfs_mount_t     *mp;
-        int             error = 0;
-
-        ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
-        ASSERT((new_size == 0) || (new_size <= ip->i_size));
-        ASSERT((flags == XFS_ITRUNC_DEFINITE) ||
-               (flags == XFS_ITRUNC_MAYBE));
-
-        mp = ip->i_mount;
-
-        /* wait for the completion of any pending DIOs */
-        if (new_size == 0 || new_size < ip->i_size)
-                xfs_ioend_wait(ip);
-
-        /*
-         * Call toss_pages or flushinval_pages to get rid of pages
-         * overlapping the region being removed. We have to use
-         * the less efficient flushinval_pages in the case that the
-         * caller may not be able to finish the truncate without
-         * dropping the inode's I/O lock. Make sure
-         * to catch any pages brought in by buffers overlapping
-         * the EOF by searching out beyond the isize by our
-         * block size. We round new_size up to a block boundary
-         * so that we don't toss things on the same block as
-         * new_size but before it.
-         *
-         * Before calling toss_page or flushinval_pages, make sure to
-         * call remapf() over the same region if the file is mapped.
-         * This frees up mapped file references to the pages in the
-         * given range and for the flushinval_pages case it ensures
-         * that we get the latest mapped changes flushed out.
-         */
-        toss_start = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
-        toss_start = XFS_FSB_TO_B(mp, toss_start);
-        if (toss_start < 0) {
-                /*
-                 * The place to start tossing is beyond our maximum
-                 * file size, so there is no way that the data extended
-                 * out there.
-                 */
-                return 0;
-        }
-        last_byte = xfs_file_last_byte(ip);
-        trace_xfs_itruncate_start(ip, new_size, flags, toss_start, last_byte);
-        if (last_byte > toss_start) {
-                if (flags & XFS_ITRUNC_DEFINITE) {
-                        xfs_tosspages(ip, toss_start,
-                                        -1, FI_REMAPF_LOCKED);
-                } else {
-                        error = xfs_flushinval_pages(ip, toss_start,
-                                        -1, FI_REMAPF_LOCKED);
-                }
-        }
-
-#ifdef DEBUG
-        if (new_size == 0) {
-                ASSERT(VN_CACHED(VFS_I(ip)) == 0);
-        }
-#endif
-        return error;
-}
-
-/*
- * Shrink the file to the given new_size. The new size must be smaller than
- * the current size. This will free up the underlying blocks in the removed
- * range after a call to xfs_itruncate_start() or xfs_atruncate_start().
+ * Free up the underlying blocks past new_size. The new size must be
+ * smaller than the current size.
  *
  * The transaction passed to this routine must have made a permanent log
  * reservation of at least XFS_ITRUNCATE_LOG_RES. This routine may commit the
@@ -1387,7 +1230,7 @@ xfs_itruncate_start(
  * will be "held" within the returned transaction. This routine does NOT
  * require any disk space to be reserved for it within the transaction.
  *
- * The fork parameter must be either xfs_attr_fork or xfs_data_fork, and it
+ * The fork parameter must be either XFS_ATTR_FORK or XFS_DATA_FORK, and it
  * indicates the fork which is to be truncated. For the attribute fork we only
  * support truncation to size 0.
  *