author		Dan Williams <dan.j.williams@intel.com>	2017-10-14 20:13:45 -0400
committer	Dan Williams <dan.j.williams@intel.com>	2018-03-30 14:34:54 -0400
commit		3fe0791c295cfd3cd735de7a32cc0780949c009f (patch)
tree		fee362b87ce6ff4289d511e154fadd05667ca184
parent		6b2bb7265f0b62605e8caee3613449ed0db270b9 (diff)
dax: store pfns in the radix
In preparation for examining the busy state of dax pages in the
truncate path, switch from sectors to pfns in the radix.

Cc: Jeff Moyer <jmoyer@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Matthew Wilcox <mawilcox@microsoft.com>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
-rw-r--r--	drivers/dax/super.c	15
-rw-r--r--	fs/dax.c		83
2 files changed, 43 insertions, 55 deletions
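Not part of the patch: a minimal user-space C sketch of the entry encoding this commit switches to, i.e. a pfn packed above the exceptional-entry flag bits and recovered with a right shift. The constant values below are stand-ins chosen for illustration only (the real RADIX_TREE_EXCEPTIONAL_* and RADIX_DAX_* definitions live in the kernel headers and in fs/dax.c); only the shift-and-flags layout mirrors dax_radix_locked_entry() and dax_radix_pfn() in the diff below.

#include <assert.h>
#include <stdio.h>

/* Stand-in values for illustration only; the real constants come from
 * include/linux/radix-tree.h and fs/dax.c. */
#define RADIX_TREE_EXCEPTIONAL_ENTRY	2UL		/* marks a non-page entry */
#define RADIX_DAX_ENTRY_LOCK		(1UL << 2)	/* assumed lock bit */
#define RADIX_DAX_SHIFT			6		/* flag bits live below this shift */

/* Pack a pfn plus flags into a locked radix entry (same shape as
 * dax_radix_locked_entry() after this patch). */
static void *dax_radix_locked_entry(unsigned long pfn, unsigned long flags)
{
	return (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY | flags |
			(pfn << RADIX_DAX_SHIFT) | RADIX_DAX_ENTRY_LOCK);
}

/* Recover the pfn by shifting the flag bits away (same shape as
 * dax_radix_pfn() after this patch). */
static unsigned long dax_radix_pfn(void *entry)
{
	return (unsigned long)entry >> RADIX_DAX_SHIFT;
}

int main(void)
{
	unsigned long pfn = 0x12345;
	void *entry = dax_radix_locked_entry(pfn, 0);

	assert(dax_radix_pfn(entry) == pfn);	/* pfn round-trips intact */
	printf("entry %p -> pfn 0x%lx\n", entry, dax_radix_pfn(entry));
	return 0;
}

The round trip only holds while every flag (lock bit included) sits below RADIX_DAX_SHIFT, which is why the shift must stay above the highest flag bit.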
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index ecdc292aa4e4..2b2332b605e4 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -124,10 +124,19 @@ int __bdev_dax_supported(struct super_block *sb, int blocksize)
 		return len < 0 ? len : -EIO;
 	}
 
-	if ((IS_ENABLED(CONFIG_FS_DAX_LIMITED) && pfn_t_special(pfn))
-			|| pfn_t_devmap(pfn))
+	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED) && pfn_t_special(pfn)) {
+		/*
+		 * An arch that has enabled the pmem api should also
+		 * have its drivers support pfn_t_devmap()
+		 *
+		 * This is a developer warning and should not trigger in
+		 * production. dax_flush() will crash since it depends
+		 * on being able to do (page_address(pfn_to_page())).
+		 */
+		WARN_ON(IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API));
+	} else if (pfn_t_devmap(pfn)) {
 		/* pass */;
-	else {
+	} else {
 		pr_debug("VFS (%s): error: dax support not enabled\n",
 				sb->s_id);
 		return -EOPNOTSUPP;
diff --git a/fs/dax.c b/fs/dax.c
index 0276df90e86c..b646a46e4d12 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -73,16 +73,15 @@ fs_initcall(init_dax_wait_table);
 #define RADIX_DAX_ZERO_PAGE	(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2))
 #define RADIX_DAX_EMPTY		(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 3))
 
-static unsigned long dax_radix_sector(void *entry)
+static unsigned long dax_radix_pfn(void *entry)
 {
 	return (unsigned long)entry >> RADIX_DAX_SHIFT;
 }
 
-static void *dax_radix_locked_entry(sector_t sector, unsigned long flags)
+static void *dax_radix_locked_entry(unsigned long pfn, unsigned long flags)
 {
 	return (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY | flags |
-			((unsigned long)sector << RADIX_DAX_SHIFT) |
-			RADIX_DAX_ENTRY_LOCK);
+			(pfn << RADIX_DAX_SHIFT) | RADIX_DAX_ENTRY_LOCK);
 }
 
 static unsigned int dax_radix_order(void *entry)
@@ -526,12 +525,13 @@ static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
  */
 static void *dax_insert_mapping_entry(struct address_space *mapping,
 				      struct vm_fault *vmf,
-				      void *entry, sector_t sector,
+				      void *entry, pfn_t pfn_t,
 				      unsigned long flags, bool dirty)
 {
 	struct radix_tree_root *page_tree = &mapping->page_tree;
-	void *new_entry;
+	unsigned long pfn = pfn_t_to_pfn(pfn_t);
 	pgoff_t index = vmf->pgoff;
+	void *new_entry;
 
 	if (dirty)
 		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
@@ -546,7 +546,7 @@ static void *dax_insert_mapping_entry(struct address_space *mapping,
 	}
 
 	spin_lock_irq(&mapping->tree_lock);
-	new_entry = dax_radix_locked_entry(sector, flags);
+	new_entry = dax_radix_locked_entry(pfn, flags);
 
 	if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
 		/*
@@ -657,17 +657,14 @@ unlock_pte:
 	i_mmap_unlock_read(mapping);
 }
 
-static int dax_writeback_one(struct block_device *bdev,
-		struct dax_device *dax_dev, struct address_space *mapping,
-		pgoff_t index, void *entry)
+static int dax_writeback_one(struct dax_device *dax_dev,
+		struct address_space *mapping, pgoff_t index, void *entry)
 {
 	struct radix_tree_root *page_tree = &mapping->page_tree;
-	void *entry2, **slot, *kaddr;
-	long ret = 0, id;
-	sector_t sector;
-	pgoff_t pgoff;
+	void *entry2, **slot;
+	unsigned long pfn;
+	long ret = 0;
 	size_t size;
-	pfn_t pfn;
 
 	/*
 	 * A page got tagged dirty in DAX mapping? Something is seriously
@@ -683,10 +680,10 @@ static int dax_writeback_one(struct block_device *bdev,
 		goto put_unlocked;
 	/*
 	 * Entry got reallocated elsewhere? No need to writeback. We have to
-	 * compare sectors as we must not bail out due to difference in lockbit
+	 * compare pfns as we must not bail out due to difference in lockbit
 	 * or entry type.
 	 */
-	if (dax_radix_sector(entry2) != dax_radix_sector(entry))
+	if (dax_radix_pfn(entry2) != dax_radix_pfn(entry))
 		goto put_unlocked;
 	if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
 				dax_is_zero_entry(entry))) {
@@ -712,33 +709,15 @@ static int dax_writeback_one(struct block_device *bdev,
 	/*
 	 * Even if dax_writeback_mapping_range() was given a wbc->range_start
 	 * in the middle of a PMD, the 'index' we are given will be aligned to
-	 * the start index of the PMD, as will the sector we pull from
-	 * 'entry'. This allows us to flush for PMD_SIZE and not have to
-	 * worry about partial PMD writebacks.
+	 * the start index of the PMD, as will the pfn we pull from 'entry'.
+	 * This allows us to flush for PMD_SIZE and not have to worry about
+	 * partial PMD writebacks.
 	 */
-	sector = dax_radix_sector(entry);
+	pfn = dax_radix_pfn(entry);
 	size = PAGE_SIZE << dax_radix_order(entry);
 
-	id = dax_read_lock();
-	ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
-	if (ret)
-		goto dax_unlock;
-
-	/*
-	 * dax_direct_access() may sleep, so cannot hold tree_lock over
-	 * its invocation.
-	 */
-	ret = dax_direct_access(dax_dev, pgoff, size / PAGE_SIZE, &kaddr, &pfn);
-	if (ret < 0)
-		goto dax_unlock;
-
-	if (WARN_ON_ONCE(ret < size / PAGE_SIZE)) {
-		ret = -EIO;
-		goto dax_unlock;
-	}
-
-	dax_mapping_entry_mkclean(mapping, index, pfn_t_to_pfn(pfn));
-	dax_flush(dax_dev, kaddr, size);
+	dax_mapping_entry_mkclean(mapping, index, pfn);
+	dax_flush(dax_dev, page_address(pfn_to_page(pfn)), size);
 	/*
 	 * After we have flushed the cache, we can clear the dirty tag. There
 	 * cannot be new dirty data in the pfn after the flush has completed as
@@ -749,8 +728,6 @@ static int dax_writeback_one(struct block_device *bdev,
 	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_DIRTY);
 	spin_unlock_irq(&mapping->tree_lock);
 	trace_dax_writeback_one(mapping->host, index, size >> PAGE_SHIFT);
- dax_unlock:
-	dax_read_unlock(id);
 	put_locked_mapping_entry(mapping, index);
 	return ret;
 
@@ -808,8 +785,8 @@ int dax_writeback_mapping_range(struct address_space *mapping,
 			break;
 		}
 
-		ret = dax_writeback_one(bdev, dax_dev, mapping,
-				indices[i], pvec.pages[i]);
+		ret = dax_writeback_one(dax_dev, mapping, indices[i],
+				pvec.pages[i]);
 		if (ret < 0) {
 			mapping_set_error(mapping, ret);
 			goto out;
@@ -877,6 +854,7 @@ static int dax_load_hole(struct address_space *mapping, void *entry,
 	int ret = VM_FAULT_NOPAGE;
 	struct page *zero_page;
 	void *entry2;
+	pfn_t pfn;
 
 	zero_page = ZERO_PAGE(0);
 	if (unlikely(!zero_page)) {
@@ -884,14 +862,15 @@ static int dax_load_hole(struct address_space *mapping, void *entry,
 		goto out;
 	}
 
-	entry2 = dax_insert_mapping_entry(mapping, vmf, entry, 0,
+	pfn = page_to_pfn_t(zero_page);
+	entry2 = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
 			RADIX_DAX_ZERO_PAGE, false);
 	if (IS_ERR(entry2)) {
 		ret = VM_FAULT_SIGBUS;
 		goto out;
 	}
 
-	vm_insert_mixed(vmf->vma, vaddr, page_to_pfn_t(zero_page));
+	vm_insert_mixed(vmf->vma, vaddr, pfn);
 out:
 	trace_dax_load_hole(inode, vmf, ret);
 	return ret;
@@ -1200,8 +1179,7 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
 		if (error < 0)
 			goto error_finish_iomap;
 
-		entry = dax_insert_mapping_entry(mapping, vmf, entry,
-						 dax_iomap_sector(&iomap, pos),
+		entry = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
 						 0, write && !sync);
 		if (IS_ERR(entry)) {
 			error = PTR_ERR(entry);
@@ -1280,13 +1258,15 @@ static int dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
 	void *ret = NULL;
 	spinlock_t *ptl;
 	pmd_t pmd_entry;
+	pfn_t pfn;
 
 	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);
 
 	if (unlikely(!zero_page))
 		goto fallback;
 
-	ret = dax_insert_mapping_entry(mapping, vmf, entry, 0,
+	pfn = page_to_pfn_t(zero_page);
+	ret = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
 			RADIX_DAX_PMD | RADIX_DAX_ZERO_PAGE, false);
 	if (IS_ERR(ret))
 		goto fallback;
@@ -1409,8 +1389,7 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
 		if (error < 0)
 			goto finish_iomap;
 
-		entry = dax_insert_mapping_entry(mapping, vmf, entry,
-						 dax_iomap_sector(&iomap, pos),
+		entry = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
 						 RADIX_DAX_PMD, write && !sync);
 		if (IS_ERR(entry))
 			goto finish_iomap;