aboutsummaryrefslogtreecommitdiffstats
path: root/fs/buffer.c
diff options
context:
space:
mode:
authorChristoph Hellwig <hch@lst.de>2016-06-20 19:23:11 -0400
committerDave Chinner <david@fromorbit.com>2016-06-20 19:23:11 -0400
commitae259a9c8593f98aa60d045df978a5482a67c53f (patch)
treea3c07fa9fb8c61475ff85f4d8812d83c287258ff /fs/buffer.c
parent199a31c6d93ba9dc6f831fa1e77d9926f34f4e8a (diff)
fs: introduce iomap infrastructure
Add infrastructure for multipage buffered writes. This is implemented using a main iterator that applies an actor function to a range that can be written. This infrastructure is used to implement a buffered write helper, one to zero file ranges and one to implement the ->page_mkwrite VM operation. All of them borrow a fair amount of code from fs/buffer.c for now by using an internal version of __block_write_begin that gets passed an iomap and builds the corresponding buffer head. The file system gets a set of paired ->iomap_begin and ->iomap_end calls which allow it to map/reserve a range and get a notification once the write code is finished with it. Based on earlier code from Dave Chinner. Signed-off-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Bob Peterson <rpeterso@redhat.com> Signed-off-by: Dave Chinner <david@fromorbit.com>
Diffstat (limited to 'fs/buffer.c')
-rw-r--r--fs/buffer.c76
1 file changed, 71 insertions, 5 deletions
diff --git a/fs/buffer.c b/fs/buffer.c
index 754813a6962b..228288a7de38 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -21,6 +21,7 @@
21#include <linux/kernel.h> 21#include <linux/kernel.h>
22#include <linux/syscalls.h> 22#include <linux/syscalls.h>
23#include <linux/fs.h> 23#include <linux/fs.h>
24#include <linux/iomap.h>
24#include <linux/mm.h> 25#include <linux/mm.h>
25#include <linux/percpu.h> 26#include <linux/percpu.h>
26#include <linux/slab.h> 27#include <linux/slab.h>
@@ -1891,8 +1892,62 @@ void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1891} 1892}
1892EXPORT_SYMBOL(page_zero_new_buffers); 1893EXPORT_SYMBOL(page_zero_new_buffers);
1893 1894
1894int __block_write_begin(struct page *page, loff_t pos, unsigned len, 1895static void
1895 get_block_t *get_block) 1896iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
1897 struct iomap *iomap)
1898{
1899 loff_t offset = block << inode->i_blkbits;
1900
1901 bh->b_bdev = iomap->bdev;
1902
1903 /*
1904 * Block points to offset in file we need to map, iomap contains
1905 * the offset at which the map starts. If the map ends before the
1906 * current block, then do not map the buffer and let the caller
1907 * handle it.
1908 */
1909 BUG_ON(offset >= iomap->offset + iomap->length);
1910
1911 switch (iomap->type) {
1912 case IOMAP_HOLE:
1913 /*
1914 * If the buffer is not up to date or beyond the current EOF,
1915 * we need to mark it as new to ensure sub-block zeroing is
1916 * executed if necessary.
1917 */
1918 if (!buffer_uptodate(bh) ||
1919 (offset >= i_size_read(inode)))
1920 set_buffer_new(bh);
1921 break;
1922 case IOMAP_DELALLOC:
1923 if (!buffer_uptodate(bh) ||
1924 (offset >= i_size_read(inode)))
1925 set_buffer_new(bh);
1926 set_buffer_uptodate(bh);
1927 set_buffer_mapped(bh);
1928 set_buffer_delay(bh);
1929 break;
1930 case IOMAP_UNWRITTEN:
1931 /*
1932 * For unwritten regions, we always need to ensure that
1933 * sub-block writes cause the regions in the block we are not
1934 * writing to are zeroed. Set the buffer as new to ensure this.
1935 */
1936 set_buffer_new(bh);
1937 set_buffer_unwritten(bh);
1938 /* FALLTHRU */
1939 case IOMAP_MAPPED:
1940 if (offset >= i_size_read(inode))
1941 set_buffer_new(bh);
1942 bh->b_blocknr = (iomap->blkno >> (inode->i_blkbits - 9)) +
1943 ((offset - iomap->offset) >> inode->i_blkbits);
1944 set_buffer_mapped(bh);
1945 break;
1946 }
1947}
1948
1949int __block_write_begin_int(struct page *page, loff_t pos, unsigned len,
1950 get_block_t *get_block, struct iomap *iomap)
1896{ 1951{
1897 unsigned from = pos & (PAGE_SIZE - 1); 1952 unsigned from = pos & (PAGE_SIZE - 1);
1898 unsigned to = from + len; 1953 unsigned to = from + len;
@@ -1928,9 +1983,14 @@ int __block_write_begin(struct page *page, loff_t pos, unsigned len,
1928 clear_buffer_new(bh); 1983 clear_buffer_new(bh);
1929 if (!buffer_mapped(bh)) { 1984 if (!buffer_mapped(bh)) {
1930 WARN_ON(bh->b_size != blocksize); 1985 WARN_ON(bh->b_size != blocksize);
1931 err = get_block(inode, block, bh, 1); 1986 if (get_block) {
1932 if (err) 1987 err = get_block(inode, block, bh, 1);
1933 break; 1988 if (err)
1989 break;
1990 } else {
1991 iomap_to_bh(inode, block, bh, iomap);
1992 }
1993
1934 if (buffer_new(bh)) { 1994 if (buffer_new(bh)) {
1935 unmap_underlying_metadata(bh->b_bdev, 1995 unmap_underlying_metadata(bh->b_bdev,
1936 bh->b_blocknr); 1996 bh->b_blocknr);
@@ -1971,6 +2031,12 @@ int __block_write_begin(struct page *page, loff_t pos, unsigned len,
1971 page_zero_new_buffers(page, from, to); 2031 page_zero_new_buffers(page, from, to);
1972 return err; 2032 return err;
1973} 2033}
2034
2035int __block_write_begin(struct page *page, loff_t pos, unsigned len,
2036 get_block_t *get_block)
2037{
2038 return __block_write_begin_int(page, pos, len, get_block, NULL);
2039}
1974EXPORT_SYMBOL(__block_write_begin); 2040EXPORT_SYMBOL(__block_write_begin);
1975 2041
1976static int __block_commit_write(struct inode *inode, struct page *page, 2042static int __block_commit_write(struct inode *inode, struct page *page,