Diffstat (limited to 'fs/buffer.c')
-rw-r--r--	fs/buffer.c	86
1 file changed, 38 insertions(+), 48 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index 9614adc7e754..20805db2c987 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -128,21 +128,15 @@ __clear_page_buffers(struct page *page)
 	page_cache_release(page);
 }
 
-
-static int quiet_error(struct buffer_head *bh)
-{
-	if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
-		return 0;
-	return 1;
-}
-
-
-static void buffer_io_error(struct buffer_head *bh)
+static void buffer_io_error(struct buffer_head *bh, char *msg)
 {
 	char b[BDEVNAME_SIZE];
-	printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
+
+	if (!test_bit(BH_Quiet, &bh->b_state))
+		printk_ratelimited(KERN_ERR
+			"Buffer I/O error on dev %s, logical block %llu%s\n",
 			bdevname(bh->b_bdev, b),
-			(unsigned long long)bh->b_blocknr);
+			(unsigned long long)bh->b_blocknr, msg);
 }
 
 /*
@@ -177,17 +171,10 @@ EXPORT_SYMBOL(end_buffer_read_sync);
 
 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
 {
-	char b[BDEVNAME_SIZE];
-
 	if (uptodate) {
 		set_buffer_uptodate(bh);
 	} else {
-		if (!quiet_error(bh)) {
-			buffer_io_error(bh);
-			printk(KERN_WARNING "lost page write due to "
-					"I/O error on %s\n",
-				       bdevname(bh->b_bdev, b));
-		}
+		buffer_io_error(bh, ", lost sync page write");
 		set_buffer_write_io_error(bh);
 		clear_buffer_uptodate(bh);
 	}
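
With the consolidated helper, the failure path above emits a single ratelimited line instead of two separate printks. For a hypothetical device "sda1" and block 1234 (illustrative values, not from this patch), the output would look like:

	Buffer I/O error on dev sda1, logical block 1234, lost sync page write

where the trailing fragment is the msg string each caller passes in.
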
@@ -304,8 +291,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
 		set_buffer_uptodate(bh);
 	} else {
 		clear_buffer_uptodate(bh);
-		if (!quiet_error(bh))
-			buffer_io_error(bh);
+		buffer_io_error(bh, ", async page read");
 		SetPageError(page);
 	}
 
@@ -353,7 +339,6 @@ still_busy:
  */
 void end_buffer_async_write(struct buffer_head *bh, int uptodate)
 {
-	char b[BDEVNAME_SIZE];
 	unsigned long flags;
 	struct buffer_head *first;
 	struct buffer_head *tmp;
@@ -365,12 +350,7 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate)
 	if (uptodate) {
 		set_buffer_uptodate(bh);
 	} else {
-		if (!quiet_error(bh)) {
-			buffer_io_error(bh);
-			printk(KERN_WARNING "lost page write due to "
-					"I/O error on %s\n",
-			       bdevname(bh->b_bdev, b));
-		}
+		buffer_io_error(bh, ", lost async page write");
 		set_bit(AS_EIO, &page->mapping->flags);
 		set_buffer_write_io_error(bh);
 		clear_buffer_uptodate(bh);
@@ -993,7 +973,7 @@ init_page_buffers(struct page *page, struct block_device *bdev,
  */
 static int
 grow_dev_page(struct block_device *bdev, sector_t block,
-		pgoff_t index, int size, int sizebits)
+	      pgoff_t index, int size, int sizebits, gfp_t gfp)
 {
 	struct inode *inode = bdev->bd_inode;
 	struct page *page;
@@ -1002,8 +982,8 @@ grow_dev_page(struct block_device *bdev, sector_t block,
 	int ret = 0;		/* Will call free_more_memory() */
 	gfp_t gfp_mask;
 
-	gfp_mask = mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS;
-	gfp_mask |= __GFP_MOVABLE;
+	gfp_mask = (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS) | gfp;
+
 	/*
 	 * XXX: __getblk_slow() can not really deal with failure and
 	 * will endlessly loop on improvised global reclaim.  Prefer
@@ -1060,7 +1040,7 @@ failed:
  * that page was dirty, the buffers are set dirty also.
  */
 static int
-grow_buffers(struct block_device *bdev, sector_t block, int size)
+grow_buffers(struct block_device *bdev, sector_t block, int size, gfp_t gfp)
 {
 	pgoff_t index;
 	int sizebits;
@@ -1087,11 +1067,12 @@ grow_buffers(struct block_device *bdev, sector_t block, int size)
 	}
 
 	/* Create a page with the proper size buffers.. */
-	return grow_dev_page(bdev, block, index, size, sizebits);
+	return grow_dev_page(bdev, block, index, size, sizebits, gfp);
 }
 
-static struct buffer_head *
-__getblk_slow(struct block_device *bdev, sector_t block, int size)
+struct buffer_head *
+__getblk_slow(struct block_device *bdev, sector_t block,
+	     unsigned size, gfp_t gfp)
 {
 	/* Size must be multiple of hard sectorsize */
 	if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
@@ -1113,13 +1094,14 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
 		if (bh)
 			return bh;
 
-		ret = grow_buffers(bdev, block, size);
+		ret = grow_buffers(bdev, block, size, gfp);
 		if (ret < 0)
 			return NULL;
 		if (ret == 0)
 			free_more_memory();
 	}
 }
+EXPORT_SYMBOL(__getblk_slow);
 
 /*
  * The relationship between dirty buffers and dirty pages:
@@ -1373,24 +1355,25 @@ __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
 EXPORT_SYMBOL(__find_get_block);
 
 /*
- * __getblk will locate (and, if necessary, create) the buffer_head
+ * __getblk_gfp() will locate (and, if necessary, create) the buffer_head
  * which corresponds to the passed block_device, block and size. The
  * returned buffer has its reference count incremented.
  *
- * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
- * attempt is failing.  FIXME, perhaps?
+ * __getblk_gfp() will lock up the machine if grow_dev_page's
+ * try_to_free_buffers() attempt is failing.  FIXME, perhaps?
  */
 struct buffer_head *
-__getblk(struct block_device *bdev, sector_t block, unsigned size)
+__getblk_gfp(struct block_device *bdev, sector_t block,
+	     unsigned size, gfp_t gfp)
 {
 	struct buffer_head *bh = __find_get_block(bdev, block, size);
 
 	might_sleep();
 	if (bh == NULL)
-		bh = __getblk_slow(bdev, block, size);
+		bh = __getblk_slow(bdev, block, size, gfp);
 	return bh;
 }
-EXPORT_SYMBOL(__getblk);
+EXPORT_SYMBOL(__getblk_gfp);
 
 /*
  * Do async read-ahead on a buffer..
@@ -1406,24 +1389,28 @@ void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
 EXPORT_SYMBOL(__breadahead);
 
 /**
- *  __bread() - reads a specified block and returns the bh
+ *  __bread_gfp() - reads a specified block and returns the bh
  *  @bdev: the block_device to read from
  *  @block: number of block
  *  @size: size (in bytes) to read
+ *  @gfp: page allocation flag
  *
  *  Reads a specified block, and returns buffer head that contains it.
+ *  The page cache can be allocated from non-movable area
+ *  not to prevent page migration if you set gfp to zero.
  *  It returns NULL if the block was unreadable.
  */
 struct buffer_head *
-__bread(struct block_device *bdev, sector_t block, unsigned size)
+__bread_gfp(struct block_device *bdev, sector_t block,
+		   unsigned size, gfp_t gfp)
 {
-	struct buffer_head *bh = __getblk(bdev, block, size);
+	struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp);
 
 	if (likely(bh) && !buffer_uptodate(bh))
 		bh = __bread_slow(bh);
 	return bh;
 }
-EXPORT_SYMBOL(__bread);
+EXPORT_SYMBOL(__bread_gfp);
 
 /*
  * invalidate_bh_lrus() is called rarely - but not only at unmount.
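
Taken together with the __getblk_slow() change above, callers can now steer the placement of block-cache pages instead of always getting __GFP_MOVABLE forced in grow_dev_page(); whatever the caller passes as gfp is ORed into the mapping's mask. A minimal usage sketch (these call sites are illustrative, not part of this patch):

	/* Old behaviour: movable page cache, as __getblk()/__bread() allocated. */
	bh = __getblk_gfp(bdev, block, size, __GFP_MOVABLE);

	/* gfp == 0: allocate from the non-movable area, so the cached
	 * page can never stand in the way of page migration. */
	bh = __bread_gfp(bdev, block, size, 0);
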
@@ -2082,6 +2069,7 @@ int generic_write_end(struct file *file, struct address_space *mapping,
 			struct page *page, void *fsdata)
 {
 	struct inode *inode = mapping->host;
+	loff_t old_size = inode->i_size;
 	int i_size_changed = 0;
 
 	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
@@ -2101,6 +2089,8 @@ int generic_write_end(struct file *file, struct address_space *mapping,
 	unlock_page(page);
 	page_cache_release(page);
 
+	if (old_size < pos)
+		pagecache_isize_extended(inode, old_size, pos);
 	/*
 	 * Don't mark the inode dirty under page lock. First, it unnecessarily
 	 * makes the holding time of page lock longer. Second, it forces lock
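
The two generic_write_end() hunks work as a pair: the old i_size is sampled before block_write_end() can update it, and pagecache_isize_extended() runs only after the page lock has been dropped. A condensed sketch of the resulting ordering (paraphrased from the hunks above, not complete kernel code):

	loff_t old_size = inode->i_size;	/* sampled before the update */

	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
	/* ... i_size may be updated here ... */
	unlock_page(page);
	page_cache_release(page);

	if (old_size < pos)			/* the write began past the old EOF */
		pagecache_isize_extended(inode, old_size, pos);
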