about summary refs log tree commit diff stats
path: root/fs/xfs
diff options
context:
space:
mode:
authorDave Chinner <dchinner@redhat.com>2015-04-16 07:58:21 -0400
committerDave Chinner <david@fromorbit.com>2015-04-16 07:58:21 -0400
commit1fdca9c21198b2c2869086ac3629612492476f28 (patch)
treeb2ec1608fd65a331eb518c13420bc75c4f02ee52 /fs/xfs
parenta719370be52d1152a5f6e19c4af3b73280e25475 (diff)
xfs: move DIO mapping size calculation
The mapping size calculation is done last in __xfs_get_blocks(), but we are going to need the actual mapping size we will use to map the direct IO correctly in xfs_map_direct(). Factor out the calculation for code clarity, and move the call to be the first operation in mapping the extent to the returned buffer. Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Brian Foster <bfoster@redhat.com> Signed-off-by: Dave Chinner <david@fromorbit.com>
Diffstat (limited to 'fs/xfs')
-rw-r--r--fs/xfs/xfs_aops.c79
1 file changed, 46 insertions(+), 33 deletions(-)
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 489ed200bbbb..4a29399ed549 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -1249,6 +1249,47 @@ xfs_map_direct(
1249 } 1249 }
1250} 1250}
1251 1251
1252
/*
 * If this is O_DIRECT or the mpage code calling tell them how large the mapping
 * is, so that we can avoid repeated get_blocks calls.
 *
 * If the mapping spans EOF, then we have to break the mapping up as the mapping
 * for blocks beyond EOF must be marked new so that sub block regions can be
 * correctly zeroed. We can't do this for mappings within EOF unless the mapping
 * was just allocated or is unwritten, otherwise the callers would overwrite
 * existing data with zeros. Hence we have to split the mapping into a range up
 * to and including EOF, and a second mapping for beyond EOF.
 */
static void
xfs_map_trim_size(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset,
	ssize_t			size)
{
	xfs_off_t		mapping_size;

	/*
	 * Blocks remaining in the extent from the requested block (iblock)
	 * to the end of the mapping, converted from filesystem blocks to
	 * bytes via the inode's block-size shift.
	 */
	mapping_size = imap->br_startoff + imap->br_blockcount - iblock;
	mapping_size <<= inode->i_blkbits;

	ASSERT(mapping_size > 0);
	/* Never report a mapping larger than the caller asked for. */
	if (mapping_size > size)
		mapping_size = size;
	if (offset < i_size_read(inode) &&
	    offset + mapping_size >= i_size_read(inode)) {
		/*
		 * limit mapping to block that spans EOF: round the in-EOF
		 * remainder up to a whole filesystem block so the split
		 * point lands on a block boundary (see comment above on
		 * why mappings straddling EOF must be broken up).
		 */
		mapping_size = roundup_64(i_size_read(inode) - offset,
					  1 << inode->i_blkbits);
	}
	/*
	 * Clamp so the value fits when assigned to bh_result->b_size —
	 * NOTE(review): assumes b_size is at most long-sized; confirm
	 * against the buffer_head definition for this kernel version.
	 */
	if (mapping_size > LONG_MAX)
		mapping_size = LONG_MAX;

	bh_result->b_size = mapping_size;
}
1292
1252STATIC int 1293STATIC int
1253__xfs_get_blocks( 1294__xfs_get_blocks(
1254 struct inode *inode, 1295 struct inode *inode,
@@ -1347,6 +1388,11 @@ __xfs_get_blocks(
1347 goto out_unlock; 1388 goto out_unlock;
1348 } 1389 }
1349 1390
1391 /* trim mapping down to size requested */
1392 if (direct || size > (1 << inode->i_blkbits))
1393 xfs_map_trim_size(inode, iblock, bh_result,
1394 &imap, offset, size);
1395
1350 /* 1396 /*
1351 * For unwritten extents do not report a disk address in the buffered 1397 * For unwritten extents do not report a disk address in the buffered
1352 * read case (treat as if we're reading into a hole). 1398 * read case (treat as if we're reading into a hole).
@@ -1392,39 +1438,6 @@ __xfs_get_blocks(
1392 } 1438 }
1393 } 1439 }
1394 1440
1395 /*
1396 * If this is O_DIRECT or the mpage code calling tell them how large
1397 * the mapping is, so that we can avoid repeated get_blocks calls.
1398 *
1399 * If the mapping spans EOF, then we have to break the mapping up as the
1400 * mapping for blocks beyond EOF must be marked new so that sub block
1401 * regions can be correctly zeroed. We can't do this for mappings within
1402 * EOF unless the mapping was just allocated or is unwritten, otherwise
1403 * the callers would overwrite existing data with zeros. Hence we have
1404 * to split the mapping into a range up to and including EOF, and a
1405 * second mapping for beyond EOF.
1406 */
1407 if (direct || size > (1 << inode->i_blkbits)) {
1408 xfs_off_t mapping_size;
1409
1410 mapping_size = imap.br_startoff + imap.br_blockcount - iblock;
1411 mapping_size <<= inode->i_blkbits;
1412
1413 ASSERT(mapping_size > 0);
1414 if (mapping_size > size)
1415 mapping_size = size;
1416 if (offset < i_size_read(inode) &&
1417 offset + mapping_size >= i_size_read(inode)) {
1418 /* limit mapping to block that spans EOF */
1419 mapping_size = roundup_64(i_size_read(inode) - offset,
1420 1 << inode->i_blkbits);
1421 }
1422 if (mapping_size > LONG_MAX)
1423 mapping_size = LONG_MAX;
1424
1425 bh_result->b_size = mapping_size;
1426 }
1427
1428 return 0; 1441 return 0;
1429 1442
1430out_unlock: 1443out_unlock: