author    Ross Zwisler <ross.zwisler@linux.intel.com>  2017-09-06 19:18:39 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2017-09-06 20:27:24 -0400
commit    e30331ff05f689f8f2faeb51664299c4d7841f15 (patch)
tree      62cd31e0a819980b117885d883b38ace91f602ca /fs/dax.c
parent    b2770da6425406cf3f6d3fddbf9086b1db0106a1 (diff)
dax: relocate some dax functions
dax_load_hole() will soon need to call dax_insert_mapping_entry(), so it
needs to be moved lower in dax.c so the definition exists.

dax_wake_mapping_entry_waiter() will soon be removed from dax.h and be
made static to dax.c, so we need to move its definition above all its
callers.

Link: http://lkml.kernel.org/r/20170724170616.25810-3-ross.zwisler@linux.intel.com
Signed-off-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Cc: "Darrick J. Wong" <darrick.wong@oracle.com>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Andreas Dilger <adilger.kernel@dilger.ca>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Matthew Wilcox <mawilcox@microsoft.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
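The ordering constraint behind the second half of the changelog is plain C visibility: once dax_wake_mapping_entry_waiter() loses its prototype in dax.h and becomes static, its definition must appear in dax.c above every caller (the alternative would be a forward declaration). A minimal standalone illustration of the same rule, with made-up names unrelated to this patch:

#include <stdio.h>

/* With no forward declaration anywhere, this static function must be
 * defined above its first caller; moving the definition below main()
 * would make the call an error under C99 and later. */
static int twice(int x)
{
        return x * 2;
}

int main(void)
{
        printf("%d\n", twice(21));      /* prints 42 */
        return 0;
}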
Diffstat (limited to 'fs/dax.c')
 fs/dax.c | 138 +++++++++++++++++++++++++++++++------------------------------
 1 file changed, 69 insertions(+), 69 deletions(-)
diff --git a/fs/dax.c b/fs/dax.c
index ab925dc6647a..b8882b5ce6ed 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -121,6 +121,31 @@ static int wake_exceptional_entry_func(wait_queue_entry_t *wait, unsigned int mo
 }
 
 /*
+ * We do not necessarily hold the mapping->tree_lock when we call this
+ * function so it is possible that 'entry' is no longer a valid item in the
+ * radix tree. This is okay because all we really need to do is to find the
+ * correct waitqueue where tasks might be waiting for that old 'entry' and
+ * wake them.
+ */
+void dax_wake_mapping_entry_waiter(struct address_space *mapping,
+		pgoff_t index, void *entry, bool wake_all)
+{
+	struct exceptional_entry_key key;
+	wait_queue_head_t *wq;
+
+	wq = dax_entry_waitqueue(mapping, index, entry, &key);
+
+	/*
+	 * Checking for locked entry and prepare_to_wait_exclusive() happens
+	 * under mapping->tree_lock, ditto for entry handling in our callers.
+	 * So at this point all tasks that could have seen our entry locked
+	 * must be in the waitqueue and the following check will see them.
+	 */
+	if (waitqueue_active(wq))
+		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
+}
+
+/*
  * Check whether the given slot is locked. The function must be called with
  * mapping->tree_lock held
  */
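In the relocated function, __wake_up() with an nr_exclusive argument of 1 wakes a single exclusive waiter, while 0 wakes them all; the key lets waiters for other entries sharing the same hashed waitqueue filter themselves out. As a rough userspace analogy only (POSIX condition variables, not the kernel waitqueue API), the wake-one vs. wake-all choice looks like this:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static bool entry_locked = true;

/* Waiter side: sleep until the (hypothetical) entry is unlocked. */
static void wait_for_entry(void)
{
        pthread_mutex_lock(&lock);
        while (entry_locked)
                pthread_cond_wait(&cond, &lock);
        pthread_mutex_unlock(&lock);
}

/* Waker side: wake_all mirrors the kernel argument -- wake everyone
 * when the entry is gone for good and all waiters must revalidate,
 * otherwise wake just one, which can take over the entry and wake the
 * next waiter in turn. */
static void wake_entry_waiters(bool wake_all)
{
        pthread_mutex_lock(&lock);
        entry_locked = false;
        if (wake_all)
                pthread_cond_broadcast(&cond);
        else
                pthread_cond_signal(&cond);
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        /* Single-threaded smoke test: unlock first, then waiting returns
         * immediately. */
        wake_entry_waiters(false);
        wait_for_entry();
        return 0;
}

The waitqueue_active() fast path in the kernel version has no equivalent here; it is safe only because, as the comment explains, prepare_to_wait_exclusive() runs under mapping->tree_lock, so no waiter can be missed.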
@@ -392,31 +417,6 @@ restart:
 	return entry;
 }
 
-/*
- * We do not necessarily hold the mapping->tree_lock when we call this
- * function so it is possible that 'entry' is no longer a valid item in the
- * radix tree. This is okay because all we really need to do is to find the
- * correct waitqueue where tasks might be waiting for that old 'entry' and
- * wake them.
- */
-void dax_wake_mapping_entry_waiter(struct address_space *mapping,
-		pgoff_t index, void *entry, bool wake_all)
-{
-	struct exceptional_entry_key key;
-	wait_queue_head_t *wq;
-
-	wq = dax_entry_waitqueue(mapping, index, entry, &key);
-
-	/*
-	 * Checking for locked entry and prepare_to_wait_exclusive() happens
-	 * under mapping->tree_lock, ditto for entry handling in our callers.
-	 * So at this point all tasks that could have seen our entry locked
-	 * must be in the waitqueue and the following check will see them.
-	 */
-	if (waitqueue_active(wq))
-		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
-}
-
 static int __dax_invalidate_mapping_entry(struct address_space *mapping,
 		pgoff_t index, bool trunc)
 {
@@ -468,50 +468,6 @@ int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
 	return __dax_invalidate_mapping_entry(mapping, index, false);
 }
 
-/*
- * The user has performed a load from a hole in the file. Allocating
- * a new page in the file would cause excessive storage usage for
- * workloads with sparse files. We allocate a page cache page instead.
- * We'll kick it out of the page cache if it's ever written to,
- * otherwise it will simply fall out of the page cache under memory
- * pressure without ever having been dirtied.
- */
-static int dax_load_hole(struct address_space *mapping, void **entry,
-		struct vm_fault *vmf)
-{
-	struct inode *inode = mapping->host;
-	struct page *page;
-	int ret;
-
-	/* Hole page already exists? Return it... */
-	if (!radix_tree_exceptional_entry(*entry)) {
-		page = *entry;
-		goto finish_fault;
-	}
-
-	/* This will replace locked radix tree entry with a hole page */
-	page = find_or_create_page(mapping, vmf->pgoff,
-				   vmf->gfp_mask | __GFP_ZERO);
-	if (!page) {
-		ret = VM_FAULT_OOM;
-		goto out;
-	}
-
-finish_fault:
-	vmf->page = page;
-	ret = finish_fault(vmf);
-	vmf->page = NULL;
-	*entry = page;
-	if (!ret) {
-		/* Grab reference for PTE that is now referencing the page */
-		get_page(page);
-		ret = VM_FAULT_NOPAGE;
-	}
-out:
-	trace_dax_load_hole(inode, vmf, ret);
-	return ret;
-}
-
 static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
 		sector_t sector, size_t size, struct page *to,
 		unsigned long vaddr)
@@ -941,6 +897,50 @@ int dax_pfn_mkwrite(struct vm_fault *vmf)
 }
 EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
 
+/*
+ * The user has performed a load from a hole in the file. Allocating
+ * a new page in the file would cause excessive storage usage for
+ * workloads with sparse files. We allocate a page cache page instead.
+ * We'll kick it out of the page cache if it's ever written to,
+ * otherwise it will simply fall out of the page cache under memory
+ * pressure without ever having been dirtied.
+ */
+static int dax_load_hole(struct address_space *mapping, void **entry,
+		struct vm_fault *vmf)
+{
+	struct inode *inode = mapping->host;
+	struct page *page;
+	int ret;
+
+	/* Hole page already exists? Return it... */
+	if (!radix_tree_exceptional_entry(*entry)) {
+		page = *entry;
+		goto finish_fault;
+	}
+
+	/* This will replace locked radix tree entry with a hole page */
+	page = find_or_create_page(mapping, vmf->pgoff,
+				   vmf->gfp_mask | __GFP_ZERO);
+	if (!page) {
+		ret = VM_FAULT_OOM;
+		goto out;
+	}
+
+finish_fault:
+	vmf->page = page;
+	ret = finish_fault(vmf);
+	vmf->page = NULL;
+	*entry = page;
+	if (!ret) {
+		/* Grab reference for PTE that is now referencing the page */
+		get_page(page);
+		ret = VM_FAULT_NOPAGE;
+	}
+out:
+	trace_dax_load_hole(inode, vmf, ret);
+	return ret;
+}
+
 static bool dax_range_is_aligned(struct block_device *bdev,
 		unsigned int offset, unsigned int length)
 {
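The comment block above dax_load_hole() relies on standard sparse-file semantics: a hole reads back as zeros and consumes no blocks until it is written. A small self-contained demonstration of that behavior (plain POSIX calls, nothing DAX-specific):

#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
        char buf[4096];
        struct stat st;

        /* Make a sparse file: write one byte a megabyte past the start. */
        int fd = open("sparse.bin", O_CREAT | O_RDWR | O_TRUNC, 0644);
        if (fd < 0)
                return 1;
        if (pwrite(fd, "x", 1, 1024 * 1024) != 1)
                return 1;

        /* Reading inside the hole returns zeros. */
        if (pread(fd, buf, sizeof(buf), 0) != (ssize_t)sizeof(buf))
                return 1;
        printf("first byte of hole: %d\n", buf[0]);     /* prints 0 */

        /* On filesystems that support holes, st_blocks stays tiny even
         * though st_size is over 1 MiB. */
        if (fstat(fd, &st) == 0)
                printf("size=%lld blocks=%lld\n",
                       (long long)st.st_size, (long long)st.st_blocks);
        close(fd);
        return 0;
}

dax_load_hole() applies the same economy to page faults: the hole is backed by a zeroed page-cache page (find_or_create_page() with __GFP_ZERO) rather than by newly allocated file blocks, and that page is evicted again if the hole is ever written to.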