Diffstat (limited to 'fs/buffer.c')
 fs/buffer.c | 83 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++----------------------------
 1 file changed, 55 insertions(+), 28 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index 5287be18633b..b9bb7ad6897b 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -26,6 +26,7 @@
 #include <linux/percpu.h>
 #include <linux/slab.h>
 #include <linux/smp_lock.h>
+#include <linux/capability.h>
 #include <linux/blkdev.h>
 #include <linux/file.h>
 #include <linux/quotaops.h>
@@ -153,14 +154,8 @@ int sync_blockdev(struct block_device *bdev)
 {
 	int ret = 0;
 
-	if (bdev) {
-		int err;
-
-		ret = filemap_fdatawrite(bdev->bd_inode->i_mapping);
-		err = filemap_fdatawait(bdev->bd_inode->i_mapping);
-		if (!ret)
-			ret = err;
-	}
+	if (bdev)
+		ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
 	return ret;
 }
 EXPORT_SYMBOL(sync_blockdev);
@@ -358,11 +353,11 @@ static long do_fsync(unsigned int fd, int datasync)
 	 * We need to protect against concurrent writers,
 	 * which could cause livelocks in fsync_buffers_list
 	 */
-	down(&mapping->host->i_sem);
+	mutex_lock(&mapping->host->i_mutex);
 	err = file->f_op->fsync(file, file->f_dentry, datasync);
 	if (!ret)
 		ret = err;
-	up(&mapping->host->i_sem);
+	mutex_unlock(&mapping->host->i_mutex);
 	err = filemap_fdatawait(mapping);
 	if (!ret)
 		ret = err;
@@ -1768,7 +1763,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
 	 * handle that here by just cleaning them.
 	 */
 
-	block = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
 	head = page_buffers(page);
 	bh = head;
 
@@ -2160,11 +2155,12 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
  * truncates. Uses prepare/commit_write to allow the filesystem to
  * deal with the hole.
  */
-int generic_cont_expand(struct inode *inode, loff_t size)
+static int __generic_cont_expand(struct inode *inode, loff_t size,
+				pgoff_t index, unsigned int offset)
 {
 	struct address_space *mapping = inode->i_mapping;
 	struct page *page;
-	unsigned long index, offset, limit;
+	unsigned long limit;
 	int err;
 
 	err = -EFBIG;
@@ -2176,24 +2172,24 @@ int generic_cont_expand(struct inode *inode, loff_t size)
 	if (size > inode->i_sb->s_maxbytes)
 		goto out;
 
-	offset = (size & (PAGE_CACHE_SIZE-1)); /* Within page */
-
-	/* ugh. in prepare/commit_write, if from==to==start of block, we
-	** skip the prepare. make sure we never send an offset for the start
-	** of a block
-	*/
-	if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
-		offset++;
-	}
-	index = size >> PAGE_CACHE_SHIFT;
 	err = -ENOMEM;
 	page = grab_cache_page(mapping, index);
 	if (!page)
 		goto out;
 	err = mapping->a_ops->prepare_write(NULL, page, offset, offset);
-	if (!err) {
-		err = mapping->a_ops->commit_write(NULL, page, offset, offset);
+	if (err) {
+		/*
+		 * ->prepare_write() may have instantiated a few blocks
+		 * outside i_size. Trim these off again.
+		 */
+		unlock_page(page);
+		page_cache_release(page);
+		vmtruncate(inode, inode->i_size);
+		goto out;
 	}
+
+	err = mapping->a_ops->commit_write(NULL, page, offset, offset);
+
 	unlock_page(page);
 	page_cache_release(page);
 	if (err > 0)
@@ -2202,6 +2198,36 @@ out:
 	return err;
 }
 
+int generic_cont_expand(struct inode *inode, loff_t size)
+{
+	pgoff_t index;
+	unsigned int offset;
+
+	offset = (size & (PAGE_CACHE_SIZE - 1)); /* Within page */
+
+	/* ugh. in prepare/commit_write, if from==to==start of block, we
+	** skip the prepare. make sure we never send an offset for the start
+	** of a block
+	*/
+	if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
+		/* caller must handle this extra byte. */
+		offset++;
+	}
+	index = size >> PAGE_CACHE_SHIFT;
+
+	return __generic_cont_expand(inode, size, index, offset);
+}
+
+int generic_cont_expand_simple(struct inode *inode, loff_t size)
+{
+	loff_t pos = size - 1;
+	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+	unsigned int offset = (pos & (PAGE_CACHE_SIZE - 1)) + 1;
+
+	/* prepare/commit_write can handle even if from==to==start of block. */
+	return __generic_cont_expand(inode, size, index, offset);
+}
+
 /*
  * For moronic filesystems that do not allow holes in file.
  * We may have to extend the file.
@@ -2313,7 +2339,7 @@ int generic_commit_write(struct file *file, struct page *page,
 	__block_commit_write(inode,page,from,to);
 	/*
 	 * No need to use i_size_read() here, the i_size
-	 * cannot change under us because we hold i_sem.
+	 * cannot change under us because we hold i_mutex.
 	 */
 	if (pos > inode->i_size) {
 		i_size_write(inode, pos);
@@ -2610,7 +2636,7 @@ int block_truncate_page(struct address_space *mapping,
 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
 	unsigned blocksize;
-	pgoff_t iblock;
+	sector_t iblock;
 	unsigned length, pos;
 	struct inode *inode = mapping->host;
 	struct page *page;
@@ -2626,7 +2652,7 @@ int block_truncate_page(struct address_space *mapping,
 		return 0;
 
 	length = blocksize - length;
-	iblock = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
 
 	page = grab_cache_page(mapping, index);
 	err = -ENOMEM;
@@ -3145,6 +3171,7 @@ EXPORT_SYMBOL(fsync_bdev);
 EXPORT_SYMBOL(generic_block_bmap);
 EXPORT_SYMBOL(generic_commit_write);
 EXPORT_SYMBOL(generic_cont_expand);
+EXPORT_SYMBOL(generic_cont_expand_simple);
 EXPORT_SYMBOL(init_buffer);
 EXPORT_SYMBOL(invalidate_bdev);
 EXPORT_SYMBOL(ll_rw_block);
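
For reference, a hedged sketch of how a filesystem that cannot represent holes might call the newly exported generic_cont_expand_simple() when a truncate() grows a file. The myfs_setattr() function and its wiring are hypothetical illustrations, not part of this patch; only generic_cont_expand_simple() (declared in <linux/buffer_head.h>) comes from the diff above.

#include <linux/fs.h>
#include <linux/buffer_head.h>

static int myfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int err = 0;

	if ((attr->ia_valid & ATTR_SIZE) &&
	    attr->ia_size > i_size_read(inode)) {
		/*
		 * Zero-length prepare/commit_write at the new EOF: the
		 * filesystem's get_block() fills in blocks up to
		 * attr->ia_size and commit_write updates i_size.  Unlike
		 * generic_cont_expand(), no "offset++" fixup is needed
		 * when the new size lands on a block boundary.
		 */
		err = generic_cont_expand_simple(inode, attr->ia_size);
		if (err)
			return err;
	}

	return inode_setattr(inode, attr);
}

Callers of the older generic_cont_expand() still have to cope with the extra byte hinted at by the "caller must handle this extra byte" comment; the _simple variant avoids that quirk by always passing a non-zero in-page offset.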
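
The two (sector_t) casts in __block_write_full_page() and block_truncate_page() guard against 32-bit overflow when a block number is computed from a page index. A minimal userspace sketch of the arithmetic, assuming 4 KiB pages (PAGE_CACHE_SHIFT = 12) and 1 KiB filesystem blocks (i_blkbits = 10), so the shift count is 2; the numbers are illustrative only:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t index = 0x40000000u;            /* page index at a 4 TiB file offset */
	uint32_t wrong = index << 2;             /* 32-bit shift wraps to 0 */
	uint64_t right = (uint64_t)index << 2;   /* what the (sector_t) cast yields: 0x100000000 */

	printf("without cast: %u\nwith cast: %llu\n",
	       wrong, (unsigned long long)right);
	return 0;
}

Without the cast, the shift is performed in the width of page->index (an unsigned long, 32 bits on 32-bit kernels) and the high bits are lost before the result is widened to sector_t.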