author		Matthew Wilcox <matthew.r.wilcox@intel.com>	2015-02-16 18:59:02 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-02-16 20:56:03 -0500
commit		f7ca90b160307d63aaedab8bd451c24a182db20f (patch)
tree		687eb94acbc8ebfab6d5e12a57dc336ce21b7c64 /fs/dax.c
parent		289c6aedac981533331428bc933fff21ae332c9e (diff)
dax,ext2: replace the XIP page fault handler with the DAX page fault handler
Instead of calling aops->get_xip_mem from the fault handler, the
filesystem passes a get_block_t that is used to find the appropriate
blocks.

This requires that all architectures implement copy_user_page().  At the
time of writing, mips and arm do not.  Patches exist and are in progress.

[akpm@linux-foundation.org: remap_file_pages went away]
Signed-off-by: Matthew Wilcox <matthew.r.wilcox@intel.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Cc: Andreas Dilger <andreas.dilger@intel.com>
Cc: Boaz Harrosh <boaz@plexistor.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Cc: Theodore Ts'o <tytso@mit.edu>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Ralf Baechle <ralf@linux-mips.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
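For context, the ext2 side of this patch (not covered by the diffstat
below) wires the new helper up by handing its existing get_block_t to
dax_fault().  A minimal sketch of that glue, assuming ext2 names from
the rest of the series (ext2_get_block, ext2_dax_vm_ops); treat it as
illustrative rather than a verbatim copy of the ext2 hunks:

	/* Illustrative glue: the filesystem passes its block-mapping
	 * callback to the generic DAX fault handler instead of
	 * implementing ->get_xip_mem. */
	static int ext2_dax_fault(struct vm_area_struct *vma,
				  struct vm_fault *vmf)
	{
		return dax_fault(vma, vmf, ext2_get_block);
	}

	static const struct vm_operations_struct ext2_dax_vm_ops = {
		.fault	= ext2_dax_fault,
	};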
Diffstat (limited to 'fs/dax.c')
-rw-r--r--	fs/dax.c	241
1 file changed, 241 insertions(+), 0 deletions(-)
diff --git a/fs/dax.c b/fs/dax.c
index 69c3126a05b4..553e55b93495 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -19,9 +19,13 @@
 #include <linux/buffer_head.h>
 #include <linux/fs.h>
 #include <linux/genhd.h>
+#include <linux/highmem.h>
+#include <linux/memcontrol.h>
+#include <linux/mm.h>
 #include <linux/mutex.h>
 #include <linux/sched.h>
 #include <linux/uio.h>
+#include <linux/vmstat.h>
 
 int dax_clear_blocks(struct inode *inode, sector_t block, long size)
 {
@@ -221,3 +225,240 @@ ssize_t dax_do_io(int rw, struct kiocb *iocb, struct inode *inode,
 	return retval;
 }
 EXPORT_SYMBOL_GPL(dax_do_io);
+
+/*
+ * The user has performed a load from a hole in the file.  Allocating
+ * a new page in the file would cause excessive storage usage for
+ * workloads with sparse files.  We allocate a page cache page instead.
+ * We'll kick it out of the page cache if it's ever written to,
+ * otherwise it will simply fall out of the page cache under memory
+ * pressure without ever having been dirtied.
+ */
+static int dax_load_hole(struct address_space *mapping, struct page *page,
+							struct vm_fault *vmf)
+{
+	unsigned long size;
+	struct inode *inode = mapping->host;
+	if (!page)
+		page = find_or_create_page(mapping, vmf->pgoff,
+						GFP_KERNEL | __GFP_ZERO);
+	if (!page)
+		return VM_FAULT_OOM;
+	/* Recheck i_size under page lock to avoid truncate race */
+	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	if (vmf->pgoff >= size) {
+		unlock_page(page);
+		page_cache_release(page);
+		return VM_FAULT_SIGBUS;
+	}
+
+	vmf->page = page;
+	return VM_FAULT_LOCKED;
+}
+
+static int copy_user_bh(struct page *to, struct buffer_head *bh,
+			unsigned blkbits, unsigned long vaddr)
+{
+	void *vfrom, *vto;
+	if (dax_get_addr(bh, &vfrom, blkbits) < 0)
+		return -EIO;
+	vto = kmap_atomic(to);
+	copy_user_page(vto, vfrom, vaddr, to);
+	kunmap_atomic(vto);
+	return 0;
+}
+
+static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
+			struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct address_space *mapping = inode->i_mapping;
+	sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);
+	unsigned long vaddr = (unsigned long)vmf->virtual_address;
+	void *addr;
+	unsigned long pfn;
+	pgoff_t size;
+	int error;
+
+	i_mmap_lock_read(mapping);
+
+	/*
+	 * Check truncate didn't happen while we were allocating a block.
+	 * If it did, this block may or may not be still allocated to the
+	 * file.  We can't tell the filesystem to free it because we can't
+	 * take i_mutex here.  In the worst case, the file still has blocks
+	 * allocated past the end of the file.
+	 */
+	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	if (unlikely(vmf->pgoff >= size)) {
+		error = -EIO;
+		goto out;
+	}
+
+	error = bdev_direct_access(bh->b_bdev, sector, &addr, &pfn, bh->b_size);
+	if (error < 0)
+		goto out;
+	if (error < PAGE_SIZE) {
+		error = -EIO;
+		goto out;
+	}
+
+	if (buffer_unwritten(bh) || buffer_new(bh))
+		clear_page(addr);
+
+	error = vm_insert_mixed(vma, vaddr, pfn);
+
+ out:
+	i_mmap_unlock_read(mapping);
+
+	if (bh->b_end_io)
+		bh->b_end_io(bh, 1);
+
+	return error;
+}
+
+static int do_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
+			get_block_t get_block)
+{
+	struct file *file = vma->vm_file;
+	struct address_space *mapping = file->f_mapping;
+	struct inode *inode = mapping->host;
+	struct page *page;
+	struct buffer_head bh;
+	unsigned long vaddr = (unsigned long)vmf->virtual_address;
+	unsigned blkbits = inode->i_blkbits;
+	sector_t block;
+	pgoff_t size;
+	int error;
+	int major = 0;
+
+	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	if (vmf->pgoff >= size)
+		return VM_FAULT_SIGBUS;
+
+	memset(&bh, 0, sizeof(bh));
+	block = (sector_t)vmf->pgoff << (PAGE_SHIFT - blkbits);
+	bh.b_size = PAGE_SIZE;
+
+ repeat:
+	page = find_get_page(mapping, vmf->pgoff);
+	if (page) {
+		if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
+			page_cache_release(page);
+			return VM_FAULT_RETRY;
+		}
+		if (unlikely(page->mapping != mapping)) {
+			unlock_page(page);
+			page_cache_release(page);
+			goto repeat;
+		}
+		size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
+		if (unlikely(vmf->pgoff >= size)) {
+			/*
+			 * We have a struct page covering a hole in the file
+			 * from a read fault and we've raced with a truncate
+			 */
+			error = -EIO;
+			goto unlock_page;
+		}
+	}
+
+	error = get_block(inode, block, &bh, 0);
+	if (!error && (bh.b_size < PAGE_SIZE))
+		error = -EIO;		/* fs corruption? */
+	if (error)
+		goto unlock_page;
+
+	if (!buffer_mapped(&bh) && !buffer_unwritten(&bh) && !vmf->cow_page) {
+		if (vmf->flags & FAULT_FLAG_WRITE) {
+			error = get_block(inode, block, &bh, 1);
+			count_vm_event(PGMAJFAULT);
+			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
+			major = VM_FAULT_MAJOR;
+			if (!error && (bh.b_size < PAGE_SIZE))
+				error = -EIO;
+			if (error)
+				goto unlock_page;
+		} else {
+			return dax_load_hole(mapping, page, vmf);
+		}
+	}
+
+	if (vmf->cow_page) {
+		struct page *new_page = vmf->cow_page;
+		if (buffer_written(&bh))
+			error = copy_user_bh(new_page, &bh, blkbits, vaddr);
+		else
+			clear_user_highpage(new_page, vaddr);
+		if (error)
+			goto unlock_page;
+		vmf->page = page;
+		if (!page) {
+			i_mmap_lock_read(mapping);
+			/* Check we didn't race with truncate */
+			size = (i_size_read(inode) + PAGE_SIZE - 1) >>
+								PAGE_SHIFT;
+			if (vmf->pgoff >= size) {
+				i_mmap_unlock_read(mapping);
+				error = -EIO;
+				goto out;
+			}
+		}
+		return VM_FAULT_LOCKED;
+	}
+
+	/* Check we didn't race with a read fault installing a new page */
+	if (!page && major)
+		page = find_lock_page(mapping, vmf->pgoff);
+
+	if (page) {
+		unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
+							PAGE_CACHE_SIZE, 0);
+		delete_from_page_cache(page);
+		unlock_page(page);
+		page_cache_release(page);
+	}
+
+	error = dax_insert_mapping(inode, &bh, vma, vmf);
+
+ out:
+	if (error == -ENOMEM)
+		return VM_FAULT_OOM | major;
+	/* -EBUSY is fine, somebody else faulted on the same PTE */
+	if ((error < 0) && (error != -EBUSY))
+		return VM_FAULT_SIGBUS | major;
+	return VM_FAULT_NOPAGE | major;
+
+ unlock_page:
+	if (page) {
+		unlock_page(page);
+		page_cache_release(page);
+	}
+	goto out;
+}
+
+/**
+ * dax_fault - handle a page fault on a DAX file
+ * @vma: The virtual memory area where the fault occurred
+ * @vmf: The description of the fault
+ * @get_block: The filesystem method used to translate file offsets to blocks
+ *
+ * When a page fault occurs, filesystems may call this helper in their
+ * fault handler for DAX files.
+ */
+int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
+			get_block_t get_block)
+{
+	int result;
+	struct super_block *sb = file_inode(vma->vm_file)->i_sb;
+
+	if (vmf->flags & FAULT_FLAG_WRITE) {
+		sb_start_pagefault(sb);
+		file_update_time(vma->vm_file);
+	}
+	result = do_dax_fault(vma, vmf, get_block);
+	if (vmf->flags & FAULT_FLAG_WRITE)
+		sb_end_pagefault(sb);
+
+	return result;
+}
+EXPORT_SYMBOL_GPL(dax_fault);
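
The vm_ops sketched above only take effect once the filesystem's mmap
implementation installs them and marks the VMA VM_MIXEDMAP, which
vm_insert_mixed() requires.  A hedged sketch of that wiring, again
modelled on the ext2 changes in this series (IS_DAX() comes from an
earlier patch in the series; the exact ext2 hunks may differ):

	static int ext2_file_mmap(struct file *file, struct vm_area_struct *vma)
	{
		/* Fall back to the page cache path on non-DAX inodes */
		if (!IS_DAX(file_inode(file)))
			return generic_file_mmap(file, vma);

		file_accessed(file);
		vma->vm_ops = &ext2_dax_vm_ops;
		vma->vm_flags |= VM_MIXEDMAP;	/* needed by vm_insert_mixed() */
		return 0;
	}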