Diffstat (limited to 'fs/xfs/linux-2.6/xfs_aops.c')
-rw-r--r--	fs/xfs/linux-2.6/xfs_aops.c	67
1 file changed, 31 insertions(+), 36 deletions(-)
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index c02f7c5b7462..6cbbd165c60d 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -372,7 +372,7 @@ static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
  * assumes that all buffers on the page are started at the same time.
  *
  * The fix is two passes across the ioend list - one to start writeback on the
- * bufferheads, and then the second one submit them for I/O.
+ * buffer_heads, and then submit them for I/O on the second pass.
  */
 STATIC void
 xfs_submit_ioend(
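
In outline, the two-pass structure that comment describes might look like the sketch below; the helper names are assumptions for illustration, and the ioend/buffer walk is simplified from the real xfs_submit_ioend:

STATIC void
sketch_submit_ioend(
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*head = ioend;
	struct buffer_head	*bh;

	/*
	 * Pass 1: put every buffer_head under writeback before any I/O
	 * is issued, so an early completion cannot end page writeback
	 * while later buffers on the same page are still being started.
	 */
	for (; ioend; ioend = ioend->io_list)
		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private)
			sketch_start_buffer_writeback(bh);	/* assumed helper */

	/* Pass 2: only now is it safe to build and submit the bios */
	for (ioend = head; ioend; ioend = ioend->io_list)
		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private)
			sketch_submit_buffer(ioend, bh);	/* assumed helper */
}
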
@@ -699,7 +699,7 @@ xfs_convert_page(
 
 	/*
 	 * page_dirty is initially a count of buffers on the page before
-	 * EOF and is decrememted as we move each into a cleanable state.
+	 * EOF and is decremented as we move each into a cleanable state.
 	 *
 	 * Derivation:
 	 *
@@ -842,7 +842,7 @@ xfs_cluster_write(
  * page if possible.
  * The bh->b_state's cannot know if any of the blocks or which block for
  * that matter are dirty due to mmap writes, and therefore bh uptodate is
- * only vaild if the page itself isn't completely uptodate. Some layers
+ * only valid if the page itself isn't completely uptodate. Some layers
  * may clear the page dirty flag prior to calling write page, under the
  * assumption the entire page will be written out; by not writing out the
  * whole page the page can be reused before all valid dirty data is
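
As a minimal sketch of the rule in that comment (hypothetical helper, not code from this file): per-buffer state can only be trusted while the page as a whole is not uptodate, because mmap writes dirty the page without updating bh->b_state:

#include <linux/mm.h>
#include <linux/buffer_head.h>

static int sketch_must_write_buffer(struct page *page,
				    struct buffer_head *bh)
{
	/*
	 * Once the page is fully uptodate, mmap may have dirtied any
	 * block without touching its buffer_head, so per-buffer flags
	 * cannot prove a block clean - write every block.
	 */
	if (PageUptodate(page))
		return 1;
	/* otherwise the per-buffer state is meaningful */
	return buffer_dirty(bh) && buffer_uptodate(bh);
}
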
@@ -892,7 +892,7 @@ xfs_page_state_convert(
 
 	/*
 	 * page_dirty is initially a count of buffers on the page before
-	 * EOF and is decrememted as we move each into a cleanable state.
+	 * EOF and is decremented as we move each into a cleanable state.
 	 *
 	 * Derivation:
 	 *
@@ -1223,10 +1223,9 @@ free_buffers:
 }
 
 STATIC int
-__xfs_get_block(
+__xfs_get_blocks(
 	struct inode		*inode,
 	sector_t		iblock,
-	unsigned long		blocks,
 	struct buffer_head	*bh_result,
 	int			create,
 	int			direct,
@@ -1236,22 +1235,17 @@ __xfs_get_block(
 	xfs_iomap_t		iomap;
 	xfs_off_t		offset;
 	ssize_t			size;
-	int			retpbbm = 1;
+	int			niomap = 1;
 	int			error;
 
 	offset = (xfs_off_t)iblock << inode->i_blkbits;
-	if (blocks)
-		size = (ssize_t) min_t(xfs_off_t, LONG_MAX,
-					(xfs_off_t)blocks << inode->i_blkbits);
-	else
-		size = 1 << inode->i_blkbits;
-
+	ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
+	size = bh_result->b_size;
 	VOP_BMAP(vp, offset, size,
-		create ? flags : BMAPI_READ, &iomap, &retpbbm, error);
+		create ? flags : BMAPI_READ, &iomap, &niomap, error);
 	if (error)
 		return -error;
-
-	if (retpbbm == 0)
+	if (niomap == 0)
 		return 0;
 
 	if (iomap.iomap_bn != IOMAP_DADDR_NULL) {
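
With the explicit blocks argument gone, the mapping request now travels through bh_result->b_size: the caller sets it to the maximum byte count it wants mapped, and the callee may trim it to the length actually covered by the extent. A hypothetical caller-side sketch of that in/out contract (probe_mapping is illustrative only):

#include <linux/buffer_head.h>

static ssize_t probe_mapping(struct inode *inode, sector_t iblock,
			     size_t max_bytes)
{
	struct buffer_head	bh = { };
	int			error;

	bh.b_size = max_bytes;		/* in: map at most this many bytes */
	error = xfs_get_blocks(inode, iblock, &bh, 0);
	if (error)
		return error;
	if (!buffer_mapped(&bh))
		return 0;		/* a hole - nothing mapped here */
	return bh.b_size;		/* out: bytes actually mapped */
}
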
@@ -1271,12 +1265,16 @@ __xfs_get_block(
 		}
 	}
 
-	/* If this is a realtime file, data might be on a new device */
+	/*
+	 * If this is a realtime file, data may be on a different device
+	 * to that pointed to from the buffer_head b_bdev currently.
+	 */
 	bh_result->b_bdev = iomap.iomap_target->bt_bdev;
 
-	/* If we previously allocated a block out beyond eof and
-	 * we are now coming back to use it then we will need to
-	 * flag it as new even if it has a disk address.
+	/*
+	 * If we previously allocated a block out beyond eof and we are
+	 * now coming back to use it then we will need to flag it as new
+	 * even if it has a disk address.
 	 */
 	if (create &&
 	    ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
@@ -1292,26 +1290,24 @@ __xfs_get_block(
 		}
 	}
 
-	if (blocks) {
+	if (direct || size > (1 << inode->i_blkbits)) {
 		ASSERT(iomap.iomap_bsize - iomap.iomap_delta > 0);
 		offset = min_t(xfs_off_t,
-			iomap.iomap_bsize - iomap.iomap_delta,
-			(xfs_off_t)blocks << inode->i_blkbits);
-		bh_result->b_size = (u32) min_t(xfs_off_t, UINT_MAX, offset);
+			iomap.iomap_bsize - iomap.iomap_delta, size);
+		bh_result->b_size = (ssize_t)min_t(xfs_off_t, LONG_MAX, offset);
 	}
 
 	return 0;
 }
 
 int
-xfs_get_block(
+xfs_get_blocks(
 	struct inode		*inode,
 	sector_t		iblock,
 	struct buffer_head	*bh_result,
 	int			create)
 {
-	return __xfs_get_block(inode, iblock,
-			bh_result->b_size >> inode->i_blkbits,
+	return __xfs_get_blocks(inode, iblock,
 			bh_result, create, 0, BMAPI_WRITE);
 }
 
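
The clamp above trims the reported mapping to what the iomap still covers past iomap_delta, bounded by LONG_MAX now that b_size is treated as a signed byte count rather than the old (u32)/UINT_MAX pairing. A standalone illustration with made-up numbers:

#include <limits.h>
#include <stdio.h>

typedef long long xfs_off_t;	/* stand-in for the kernel type */

static long clamp_bsize(xfs_off_t extent_remaining, xfs_off_t requested)
{
	xfs_off_t mapped = extent_remaining < requested ?
				extent_remaining : requested;
	/* mirrors (ssize_t)min_t(xfs_off_t, LONG_MAX, offset) above */
	return (long)(mapped < LONG_MAX ? mapped : LONG_MAX);
}

int main(void)
{
	/* extent has 1MB left past the delta, caller asked for 64k: 64k */
	printf("%ld\n", clamp_bsize(1 << 20, 64 << 10));
	/* extent has only 4k left, caller asked for 64k: trimmed to 4k */
	printf("%ld\n", clamp_bsize(4 << 10, 64 << 10));
	return 0;
}
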
@@ -1322,8 +1318,7 @@ xfs_get_blocks_direct(
 	struct buffer_head	*bh_result,
 	int			create)
 {
-	return __xfs_get_block(inode, iblock,
-			bh_result->b_size >> inode->i_blkbits,
+	return __xfs_get_blocks(inode, iblock,
 			bh_result, create, 1, BMAPI_WRITE|BMAPI_DIRECT);
 }
 
@@ -1339,9 +1334,9 @@ xfs_end_io_direct(
 	/*
 	 * Non-NULL private data means we need to issue a transaction to
 	 * convert a range from unwritten to written extents. This needs
-	 * to happen from process contect but aio+dio I/O completion
+	 * to happen from process context but aio+dio I/O completion
 	 * happens from irq context so we need to defer it to a workqueue.
-	 * This is not nessecary for synchronous direct I/O, but we do
+	 * This is not necessary for synchronous direct I/O, but we do
 	 * it anyway to keep the code uniform and simpler.
 	 *
 	 * The core direct I/O code might be changed to always call the
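
A minimal sketch of the deferral pattern that comment describes, using the 2.6-era three-argument INIT_WORK; the struct and function names are illustrative, not the actual xfs ioend code:

#include <linux/workqueue.h>
#include <linux/slab.h>

struct sketch_convert_work {
	struct work_struct	work;
	loff_t			offset;		/* range to convert */
	size_t			size;
};

/*
 * Runs later in process context: safe to sleep, take locks, and
 * issue the unwritten-to-written conversion transaction.
 */
static void sketch_convert_worker(void *data)
{
	struct sketch_convert_work *cw = data;

	/* ...convert cw->offset/cw->size, then release the work item... */
	kfree(cw);
}

/* called from the irq-context I/O completion handler */
static void sketch_defer_conversion(struct sketch_convert_work *cw)
{
	INIT_WORK(&cw->work, sketch_convert_worker, cw);
	schedule_work(&cw->work);
}
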
@@ -1358,7 +1353,7 @@ xfs_end_io_direct(
 	}
 
 	/*
-	 * blockdev_direct_IO can return an error even afer the I/O
+	 * blockdev_direct_IO can return an error even after the I/O
 	 * completion handler was called. Thus we need to protect
 	 * against double-freeing.
 	 */
@@ -1405,7 +1400,7 @@ xfs_vm_prepare_write(
 	unsigned int		from,
 	unsigned int		to)
 {
-	return block_prepare_write(page, from, to, xfs_get_block);
+	return block_prepare_write(page, from, to, xfs_get_blocks);
 }
 
 STATIC sector_t
@@ -1422,7 +1417,7 @@ xfs_vm_bmap(
 	VOP_RWLOCK(vp, VRWLOCK_READ);
 	VOP_FLUSH_PAGES(vp, (xfs_off_t)0, -1, 0, FI_REMAPF, error);
 	VOP_RWUNLOCK(vp, VRWLOCK_READ);
-	return generic_block_bmap(mapping, block, xfs_get_block);
+	return generic_block_bmap(mapping, block, xfs_get_blocks);
 }
 
 STATIC int
@@ -1430,7 +1425,7 @@ xfs_vm_readpage(
 	struct file		*unused,
 	struct page		*page)
 {
-	return mpage_readpage(page, xfs_get_block);
+	return mpage_readpage(page, xfs_get_blocks);
 }
 
 STATIC int
@@ -1440,7 +1435,7 @@ xfs_vm_readpages(
 	struct list_head	*pages,
 	unsigned		nr_pages)
 {
-	return mpage_readpages(mapping, pages, nr_pages, xfs_get_block);
+	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
 }
 
 STATIC void