summaryrefslogtreecommitdiffstats
path: root/fs/block_dev.c
diff options
context:
space:
mode:
authorDan Williams <dan.j.williams@intel.com>2016-01-28 23:13:39 -0500
committerDan Williams <dan.j.williams@intel.com>2016-01-30 16:35:31 -0500
commit9f4736fe7ca804aa79b5916221bb13dfc6221a0f (patch)
tree1604ce0292634817892790f0eb8cdb0e1821de1e /fs/block_dev.c
parent65f87ee71852a754f7981d0653e7136039b8798a (diff)
block: revert runtime dax control of the raw block device
Dynamically enabling DAX requires that the page cache first be flushed and invalidated. This must occur atomically with the change of DAX mode otherwise we confuse the fsync/msync tracking and violate data durability guarantees. Eliminate the possibility of DAX-disabled to DAX-enabled transitions for now and revisit this for the next cycle. Cc: Jan Kara <jack@suse.com> Cc: Jeff Moyer <jmoyer@redhat.com> Cc: Christoph Hellwig <hch@lst.de> Cc: Dave Chinner <david@fromorbit.com> Cc: Matthew Wilcox <willy@linux.intel.com> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Ross Zwisler <ross.zwisler@linux.intel.com> Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'fs/block_dev.c')
-rw-r--r--fs/block_dev.c28
1 file changed, 0 insertions, 28 deletions
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 7b9cd49622b1..afb437484362 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1736,37 +1736,13 @@ static int blkdev_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
1736 return __dax_pmd_fault(vma, addr, pmd, flags, blkdev_get_block, NULL); 1736 return __dax_pmd_fault(vma, addr, pmd, flags, blkdev_get_block, NULL);
1737} 1737}
1738 1738
1739static void blkdev_vm_open(struct vm_area_struct *vma)
1740{
1741 struct inode *bd_inode = bdev_file_inode(vma->vm_file);
1742 struct block_device *bdev = I_BDEV(bd_inode);
1743
1744 inode_lock(bd_inode);
1745 bdev->bd_map_count++;
1746 inode_unlock(bd_inode);
1747}
1748
1749static void blkdev_vm_close(struct vm_area_struct *vma)
1750{
1751 struct inode *bd_inode = bdev_file_inode(vma->vm_file);
1752 struct block_device *bdev = I_BDEV(bd_inode);
1753
1754 inode_lock(bd_inode);
1755 bdev->bd_map_count--;
1756 inode_unlock(bd_inode);
1757}
1758
1759static const struct vm_operations_struct blkdev_dax_vm_ops = { 1739static const struct vm_operations_struct blkdev_dax_vm_ops = {
1760 .open = blkdev_vm_open,
1761 .close = blkdev_vm_close,
1762 .fault = blkdev_dax_fault, 1740 .fault = blkdev_dax_fault,
1763 .pmd_fault = blkdev_dax_pmd_fault, 1741 .pmd_fault = blkdev_dax_pmd_fault,
1764 .pfn_mkwrite = blkdev_dax_fault, 1742 .pfn_mkwrite = blkdev_dax_fault,
1765}; 1743};
1766 1744
1767static const struct vm_operations_struct blkdev_default_vm_ops = { 1745static const struct vm_operations_struct blkdev_default_vm_ops = {
1768 .open = blkdev_vm_open,
1769 .close = blkdev_vm_close,
1770 .fault = filemap_fault, 1746 .fault = filemap_fault,
1771 .map_pages = filemap_map_pages, 1747 .map_pages = filemap_map_pages,
1772}; 1748};
@@ -1774,18 +1750,14 @@ static const struct vm_operations_struct blkdev_default_vm_ops = {
1774static int blkdev_mmap(struct file *file, struct vm_area_struct *vma) 1750static int blkdev_mmap(struct file *file, struct vm_area_struct *vma)
1775{ 1751{
1776 struct inode *bd_inode = bdev_file_inode(file); 1752 struct inode *bd_inode = bdev_file_inode(file);
1777 struct block_device *bdev = I_BDEV(bd_inode);
1778 1753
1779 file_accessed(file); 1754 file_accessed(file);
1780 inode_lock(bd_inode);
1781 bdev->bd_map_count++;
1782 if (IS_DAX(bd_inode)) { 1755 if (IS_DAX(bd_inode)) {
1783 vma->vm_ops = &blkdev_dax_vm_ops; 1756 vma->vm_ops = &blkdev_dax_vm_ops;
1784 vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE; 1757 vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
1785 } else { 1758 } else {
1786 vma->vm_ops = &blkdev_default_vm_ops; 1759 vma->vm_ops = &blkdev_default_vm_ops;
1787 } 1760 }
1788 inode_unlock(bd_inode);
1789 1761
1790 return 0; 1762 return 0;
1791} 1763}