Diffstat (limited to 'fs/buffer.c')
-rw-r--r--  fs/buffer.c  94
1 file changed, 40 insertions(+), 54 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index c7062c896d7c..58e2e7b77372 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -914,7 +914,7 @@ link_dev_buffers(struct page *page, struct buffer_head *head)
 /*
  * Initialise the state of a blockdev page's buffers.
  */
-static void
+static sector_t
 init_page_buffers(struct page *page, struct block_device *bdev,
 		sector_t block, int size)
 {
@@ -936,33 +936,41 @@ init_page_buffers(struct page *page, struct block_device *bdev,
 		block++;
 		bh = bh->b_this_page;
 	} while (bh != head);
+
+	/*
+	 * Caller needs to validate requested block against end of device.
+	 */
+	return end_block;
 }
 
 /*
  * Create the page-cache page that contains the requested block.
  *
- * This is user purely for blockdev mappings.
+ * This is used purely for blockdev mappings.
  */
-static struct page *
+static int
 grow_dev_page(struct block_device *bdev, sector_t block,
-		pgoff_t index, int size)
+		pgoff_t index, int size, int sizebits)
 {
 	struct inode *inode = bdev->bd_inode;
 	struct page *page;
 	struct buffer_head *bh;
+	sector_t end_block;
+	int ret = 0;		/* Will call free_more_memory() */
 
 	page = find_or_create_page(inode->i_mapping, index,
 		(mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
 	if (!page)
-		return NULL;
+		return ret;
 
 	BUG_ON(!PageLocked(page));
 
 	if (page_has_buffers(page)) {
 		bh = page_buffers(page);
 		if (bh->b_size == size) {
-			init_page_buffers(page, bdev, block, size);
-			return page;
+			end_block = init_page_buffers(page, bdev,
+						index << sizebits, size);
+			goto done;
 		}
 		if (!try_to_free_buffers(page))
 			goto failed;
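
[Editor's note] With init_page_buffers() now returning end_block, grow_dev_page() can tell whether the requested block exists on the device at all. The check itself is plain shift arithmetic; here is a minimal standalone sketch of the idea (dev_bytes, blkbits and block_in_range are illustrative names, not from this patch):

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative only: the "caller needs to validate requested block
 * against end of device" rule. end_block is the number of blocks the
 * device holds, so any valid block number must be strictly below it.
 */
static int block_in_range(uint64_t dev_bytes, unsigned int blkbits,
			  uint64_t block)
{
	uint64_t end_block = dev_bytes >> blkbits;

	return block < end_block;	/* 1: exists, 0: beyond end (-ENXIO) */
}

int main(void)
{
	/* 1 MiB device with 4096-byte blocks: valid blocks are 0..255 */
	printf("%d\n", block_in_range(1 << 20, 12, 255));	/* 1 */
	printf("%d\n", block_in_range(1 << 20, 12, 256));	/* 0 */
	return 0;
}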
@@ -982,14 +990,14 @@ grow_dev_page(struct block_device *bdev, sector_t block,
 	 */
 	spin_lock(&inode->i_mapping->private_lock);
 	link_dev_buffers(page, bh);
-	init_page_buffers(page, bdev, block, size);
+	end_block = init_page_buffers(page, bdev, index << sizebits, size);
 	spin_unlock(&inode->i_mapping->private_lock);
-	return page;
-
+done:
+	ret = (block < end_block) ? 1 : -ENXIO;
 failed:
 	unlock_page(page);
 	page_cache_release(page);
-	return NULL;
+	return ret;
 }
 
 /*
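
[Editor's note] Taken together, the two hunks above turn grow_dev_page() from a page-pointer function into a tri-state one. A hedged summary of the new contract, written the way a caller would consume it (this mirrors the __getblk_slow() loop further down; it is a sketch, not a quote from the patch):

/*
 * Sketch of the new grow_dev_page() return values:
 *   1       requested block lies within the device; buffers are set up
 *   0       page/buffer allocation failed; free_more_memory() and retry
 *   -ENXIO  requested block lies beyond the end of the device
 */
int ret = grow_dev_page(bdev, block, index, size, sizebits);
if (ret < 0)
	return NULL;		/* no such block: give up, don't retry */
if (ret == 0)
	free_more_memory();	/* transient: reclaim and try again */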
@@ -999,7 +1007,6 @@ failed:
 static int
 grow_buffers(struct block_device *bdev, sector_t block, int size)
 {
-	struct page *page;
 	pgoff_t index;
 	int sizebits;
 
@@ -1023,22 +1030,14 @@ grow_buffers(struct block_device *bdev, sector_t block, int size)
 			bdevname(bdev, b));
 		return -EIO;
 	}
-	block = index << sizebits;
+
 	/* Create a page with the proper size buffers.. */
-	page = grow_dev_page(bdev, block, index, size);
-	if (!page)
-		return 0;
-	unlock_page(page);
-	page_cache_release(page);
-	return 1;
+	return grow_dev_page(bdev, block, index, size, sizebits);
 }
 
 static struct buffer_head *
 __getblk_slow(struct block_device *bdev, sector_t block, int size)
 {
-	int ret;
-	struct buffer_head *bh;
-
 	/* Size must be multiple of hard sectorsize */
 	if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
 			(size < 512 || size > PAGE_SIZE))) {
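
[Editor's note] Passing sizebits down lets grow_dev_page() recompute the first block backed by the page (index << sizebits) while the caller's original block stays intact for the end-of-device comparison. The index/block mapping is shift arithmetic; a standalone sketch (the 4096-byte PAGE_SIZE and the sample numbers are illustrative):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

int main(void)
{
	unsigned int size = 1024;	/* buffer size: 4 buffers per page */
	unsigned int sizebits = 0;

	/* same effect as the sizebits computation in grow_buffers() */
	while ((size << sizebits) != PAGE_SIZE)
		sizebits++;

	uint64_t block = 1030;			/* requested block */
	uint64_t index = block >> sizebits;	/* page-cache index: 257 */
	uint64_t first = index << sizebits;	/* first block on page: 1028 */

	printf("sizebits=%u index=%llu first=%llu\n", sizebits,
	       (unsigned long long)index, (unsigned long long)first);
	return 0;
}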
@@ -1051,21 +1050,20 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
 		return NULL;
 	}
 
-retry:
-	bh = __find_get_block(bdev, block, size);
-	if (bh)
-		return bh;
-
-	ret = grow_buffers(bdev, block, size);
-	if (ret == 0) {
-		free_more_memory();
-		goto retry;
-	} else if (ret > 0) {
+	for (;;) {
+		struct buffer_head *bh;
+		int ret;
+
 		bh = __find_get_block(bdev, block, size);
 		if (bh)
 			return bh;
+
+		ret = grow_buffers(bdev, block, size);
+		if (ret < 0)
+			return NULL;
+		if (ret == 0)
+			free_more_memory();
 	}
-	return NULL;
 }
 
 /*
@@ -1321,10 +1319,6 @@ EXPORT_SYMBOL(__find_get_block);
  * which corresponds to the passed block_device, block and size. The
  * returned buffer has its reference count incremented.
  *
- * __getblk() cannot fail - it just keeps trying. If you pass it an
- * illegal block number, __getblk() will happily return a buffer_head
- * which represents the non-existent block. Very weird.
- *
  * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
  * attempt is failing. FIXME, perhaps?
  */
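
[Editor's note] With the "cannot fail" paragraph gone, __getblk() may now return NULL when asked for a block past the end of the device. A hedged caller-side sketch (the choice of -ENXIO here is illustrative, not mandated by the patch):

struct buffer_head *bh = __getblk(bdev, block, size);
if (!bh)
	return -ENXIO;	/* e.g. block beyond the end of the device */
/* ... read or modify the buffer ... */
brelse(bh);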
@@ -2306,8 +2300,8 @@ EXPORT_SYMBOL(block_commit_write);
  * beyond EOF, then the page is guaranteed safe against truncation until we
  * unlock the page.
  *
- * Direct callers of this function should call vfs_check_frozen() so that page
- * fault does not busyloop until the fs is thawed.
+ * Direct callers of this function should protect against filesystem freezing
+ * using sb_start_write() - sb_end_write() functions.
  */
 int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
 			 get_block_t get_block)
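
[Editor's note] A sketch of what the updated comment asks of direct callers, mirroring the pairing that block_page_mkwrite() itself adopts at the end of this diff (myfs_page_mkwrite and myfs_get_block are hypothetical names):

/* Hypothetical ->page_mkwrite handler calling __block_page_mkwrite()
 * directly: it must hold freeze protection across the call. */
static int myfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct super_block *sb = vma->vm_file->f_path.dentry->d_inode->i_sb;
	int ret;

	sb_start_pagefault(sb);		/* blocks while the fs is frozen */
	ret = __block_page_mkwrite(vma, vmf, myfs_get_block);
	sb_end_pagefault(sb);
	return block_page_mkwrite_return(ret);
}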
@@ -2318,6 +2312,12 @@ int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
 	loff_t size;
 	int ret;
 
+	/*
+	 * Update file times before taking page lock. We may end up failing the
+	 * fault so this update may be superfluous but who really cares...
+	 */
+	file_update_time(vma->vm_file);
+
 	lock_page(page);
 	size = i_size_read(inode);
 	if ((page->mapping != inode->i_mapping) ||
@@ -2339,18 +2339,7 @@ int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
 
 	if (unlikely(ret < 0))
 		goto out_unlock;
-	/*
-	 * Freezing in progress? We check after the page is marked dirty and
-	 * with page lock held so if the test here fails, we are sure freezing
-	 * code will wait during syncing until the page fault is done - at that
-	 * point page will be dirty and unlocked so freezing code will write it
-	 * and writeprotect it again.
-	 */
 	set_page_dirty(page);
-	if (inode->i_sb->s_frozen != SB_UNFROZEN) {
-		ret = -EAGAIN;
-		goto out_unlock;
-	}
 	wait_on_page_writeback(page);
 	return 0;
 out_unlock:
@@ -2365,12 +2354,9 @@ int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
 	int ret;
 	struct super_block *sb = vma->vm_file->f_path.dentry->d_inode->i_sb;
 
-	/*
-	 * This check is racy but catches the common case. The check in
-	 * __block_page_mkwrite() is reliable.
-	 */
-	vfs_check_frozen(sb, SB_FREEZE_WRITE);
+	sb_start_pagefault(sb);
 	ret = __block_page_mkwrite(vma, vmf, get_block);
+	sb_end_pagefault(sb);
 	return block_page_mkwrite_return(ret);
 }
 EXPORT_SYMBOL(block_page_mkwrite);