summary | refs | log | tree | commit | diff | stats
path: root/fs/dax.c
diff options
context:
space:
mode:
author: Jan Kara <jack@suse.cz>	2016-05-11 05:58:51 -0400
committer: Vishal Verma <vishal.l.verma@intel.com>	2016-05-17 02:44:09 -0400
commit: 069c77bc9eaee70fa9ecbd942372b1693b8cdeb0 (patch)
tree: bfaebbeaf789904038ea2b24bf61f0f02cd411b1 /fs/dax.c
parent: 2b10945c532c165a824f541df76a15ed0be04d78 (diff)
dax: Remove zeroing from dax_io()
All the filesystems are now zeroing blocks themselves for DAX IO to avoid races between dax_io() and dax_fault(). Remove the zeroing code from dax_io() and add a warning to catch the case where somebody unexpectedly returns a new or unwritten buffer.

Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Vishal Verma <vishal.l.verma@intel.com>
Diffstat (limited to 'fs/dax.c')
-rw-r--r--	fs/dax.c	28
1 file changed, 10 insertions(+), 18 deletions(-)
diff --git a/fs/dax.c b/fs/dax.c
index ccb8bc399d78..7c0036dd1570 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -119,18 +119,6 @@ int dax_clear_sectors(struct block_device *bdev, sector_t _sector, long _size)
119} 119}
120EXPORT_SYMBOL_GPL(dax_clear_sectors); 120EXPORT_SYMBOL_GPL(dax_clear_sectors);
121 121
122/* the clear_pmem() calls are ordered by a wmb_pmem() in the caller */
123static void dax_new_buf(void __pmem *addr, unsigned size, unsigned first,
124 loff_t pos, loff_t end)
125{
126 loff_t final = end - pos + first; /* The final byte of the buffer */
127
128 if (first > 0)
129 clear_pmem(addr, first);
130 if (final < size)
131 clear_pmem(addr + final, size - final);
132}
133
134static bool buffer_written(struct buffer_head *bh) 122static bool buffer_written(struct buffer_head *bh)
135{ 123{
136 return buffer_mapped(bh) && !buffer_unwritten(bh); 124 return buffer_mapped(bh) && !buffer_unwritten(bh);
@@ -169,6 +157,9 @@ static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
169 struct blk_dax_ctl dax = { 157 struct blk_dax_ctl dax = {
170 .addr = (void __pmem *) ERR_PTR(-EIO), 158 .addr = (void __pmem *) ERR_PTR(-EIO),
171 }; 159 };
160 unsigned blkbits = inode->i_blkbits;
161 sector_t file_blks = (i_size_read(inode) + (1 << blkbits) - 1)
162 >> blkbits;
172 163
173 if (rw == READ) 164 if (rw == READ)
174 end = min(end, i_size_read(inode)); 165 end = min(end, i_size_read(inode));
@@ -176,7 +167,6 @@ static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
176 while (pos < end) { 167 while (pos < end) {
177 size_t len; 168 size_t len;
178 if (pos == max) { 169 if (pos == max) {
179 unsigned blkbits = inode->i_blkbits;
180 long page = pos >> PAGE_SHIFT; 170 long page = pos >> PAGE_SHIFT;
181 sector_t block = page << (PAGE_SHIFT - blkbits); 171 sector_t block = page << (PAGE_SHIFT - blkbits);
182 unsigned first = pos - (block << blkbits); 172 unsigned first = pos - (block << blkbits);
@@ -192,6 +182,13 @@ static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
192 bh->b_size = 1 << blkbits; 182 bh->b_size = 1 << blkbits;
193 bh_max = pos - first + bh->b_size; 183 bh_max = pos - first + bh->b_size;
194 bdev = bh->b_bdev; 184 bdev = bh->b_bdev;
185 /*
186 * We allow uninitialized buffers for writes
187 * beyond EOF as those cannot race with faults
188 */
189 WARN_ON_ONCE(
190 (buffer_new(bh) && block < file_blks) ||
191 (rw == WRITE && buffer_unwritten(bh)));
195 } else { 192 } else {
196 unsigned done = bh->b_size - 193 unsigned done = bh->b_size -
197 (bh_max - (pos - first)); 194 (bh_max - (pos - first));
@@ -211,11 +208,6 @@ static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
211 rc = map_len; 208 rc = map_len;
212 break; 209 break;
213 } 210 }
214 if (buffer_unwritten(bh) || buffer_new(bh)) {
215 dax_new_buf(dax.addr, map_len, first,
216 pos, end);
217 need_wmb = true;
218 }
219 dax.addr += first; 211 dax.addr += first;
220 size = map_len - first; 212 size = map_len - first;
221 } 213 }