diff options
author | Jan Kara <jack@suse.cz> | 2016-05-12 12:29:20 -0400 |
---|---|---|
committer | Ross Zwisler <ross.zwisler@linux.intel.com> | 2016-05-19 17:28:40 -0400 |
commit | 4d9a2c8746671efbb0c27d3ae28c7474597a7aad (patch) | |
tree | 34fb766e63a1e503ba48482b34325dbb7bd3aafe /fs | |
parent | bc2466e4257369d0ebee2b6265070d323343fa72 (diff) |
dax: Remove i_mmap_lock protection
Currently faults are protected against truncate by filesystem specific
i_mmap_sem and page lock in case of hole page. Cow faults are protected by
DAX radix tree entry locking. So there's no need for i_mmap_lock in DAX
code. Remove it.
Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Diffstat (limited to 'fs')
-rw-r--r-- | fs/dax.c | 24 |
1 file changed, 5 insertions, 19 deletions
@@ -798,29 +798,19 @@ static int dax_insert_mapping(struct address_space *mapping, | |||
798 | .sector = to_sector(bh, mapping->host), | 798 | .sector = to_sector(bh, mapping->host), |
799 | .size = bh->b_size, | 799 | .size = bh->b_size, |
800 | }; | 800 | }; |
801 | int error; | ||
802 | void *ret; | 801 | void *ret; |
803 | void *entry = *entryp; | 802 | void *entry = *entryp; |
804 | 803 | ||
805 | i_mmap_lock_read(mapping); | 804 | if (dax_map_atomic(bdev, &dax) < 0) |
806 | 805 | return PTR_ERR(dax.addr); | |
807 | if (dax_map_atomic(bdev, &dax) < 0) { | ||
808 | error = PTR_ERR(dax.addr); | ||
809 | goto out; | ||
810 | } | ||
811 | dax_unmap_atomic(bdev, &dax); | 806 | dax_unmap_atomic(bdev, &dax); |
812 | 807 | ||
813 | ret = dax_insert_mapping_entry(mapping, vmf, entry, dax.sector); | 808 | ret = dax_insert_mapping_entry(mapping, vmf, entry, dax.sector); |
814 | if (IS_ERR(ret)) { | 809 | if (IS_ERR(ret)) |
815 | error = PTR_ERR(ret); | 810 | return PTR_ERR(ret); |
816 | goto out; | ||
817 | } | ||
818 | *entryp = ret; | 811 | *entryp = ret; |
819 | 812 | ||
820 | error = vm_insert_mixed(vma, vaddr, dax.pfn); | 813 | return vm_insert_mixed(vma, vaddr, dax.pfn); |
821 | out: | ||
822 | i_mmap_unlock_read(mapping); | ||
823 | return error; | ||
824 | } | 814 | } |
825 | 815 | ||
826 | /** | 816 | /** |
@@ -1058,8 +1048,6 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address, | |||
1058 | truncate_pagecache_range(inode, lstart, lend); | 1048 | truncate_pagecache_range(inode, lstart, lend); |
1059 | } | 1049 | } |
1060 | 1050 | ||
1061 | i_mmap_lock_read(mapping); | ||
1062 | |||
1063 | if (!write && !buffer_mapped(&bh)) { | 1051 | if (!write && !buffer_mapped(&bh)) { |
1064 | spinlock_t *ptl; | 1052 | spinlock_t *ptl; |
1065 | pmd_t entry; | 1053 | pmd_t entry; |
@@ -1148,8 +1136,6 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address, | |||
1148 | } | 1136 | } |
1149 | 1137 | ||
1150 | out: | 1138 | out: |
1151 | i_mmap_unlock_read(mapping); | ||
1152 | |||
1153 | return result; | 1139 | return result; |
1154 | 1140 | ||
1155 | fallback: | 1141 | fallback: |