diff options
author | Jan Kara <jack@suse.cz> | 2016-05-12 12:29:20 -0400 |
---|---|---|
committer | Ross Zwisler <ross.zwisler@linux.intel.com> | 2016-05-19 17:28:40 -0400 |
commit | 4d9a2c8746671efbb0c27d3ae28c7474597a7aad (patch) | |
tree | 34fb766e63a1e503ba48482b34325dbb7bd3aafe | |
parent | bc2466e4257369d0ebee2b6265070d323343fa72 (diff) |
dax: Remove i_mmap_lock protection
Currently faults are protected against truncate by filesystem specific
i_mmap_sem and page lock in case of hole page. CoW faults are protected by
DAX radix tree entry locking. So there's no need for i_mmap_lock in DAX
code. Remove it.
Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Ross Zwisler <ross.zwisler@linux.intel.com>
-rw-r--r-- | fs/dax.c | 24 | ||||
-rw-r--r-- | mm/memory.c | 2 |
2 files changed, 5 insertions, 21 deletions
@@ -798,29 +798,19 @@ static int dax_insert_mapping(struct address_space *mapping, | |||
798 | .sector = to_sector(bh, mapping->host), | 798 | .sector = to_sector(bh, mapping->host), |
799 | .size = bh->b_size, | 799 | .size = bh->b_size, |
800 | }; | 800 | }; |
801 | int error; | ||
802 | void *ret; | 801 | void *ret; |
803 | void *entry = *entryp; | 802 | void *entry = *entryp; |
804 | 803 | ||
805 | i_mmap_lock_read(mapping); | 804 | if (dax_map_atomic(bdev, &dax) < 0) |
806 | 805 | return PTR_ERR(dax.addr); | |
807 | if (dax_map_atomic(bdev, &dax) < 0) { | ||
808 | error = PTR_ERR(dax.addr); | ||
809 | goto out; | ||
810 | } | ||
811 | dax_unmap_atomic(bdev, &dax); | 806 | dax_unmap_atomic(bdev, &dax); |
812 | 807 | ||
813 | ret = dax_insert_mapping_entry(mapping, vmf, entry, dax.sector); | 808 | ret = dax_insert_mapping_entry(mapping, vmf, entry, dax.sector); |
814 | if (IS_ERR(ret)) { | 809 | if (IS_ERR(ret)) |
815 | error = PTR_ERR(ret); | 810 | return PTR_ERR(ret); |
816 | goto out; | ||
817 | } | ||
818 | *entryp = ret; | 811 | *entryp = ret; |
819 | 812 | ||
820 | error = vm_insert_mixed(vma, vaddr, dax.pfn); | 813 | return vm_insert_mixed(vma, vaddr, dax.pfn); |
821 | out: | ||
822 | i_mmap_unlock_read(mapping); | ||
823 | return error; | ||
824 | } | 814 | } |
825 | 815 | ||
826 | /** | 816 | /** |
@@ -1058,8 +1048,6 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address, | |||
1058 | truncate_pagecache_range(inode, lstart, lend); | 1048 | truncate_pagecache_range(inode, lstart, lend); |
1059 | } | 1049 | } |
1060 | 1050 | ||
1061 | i_mmap_lock_read(mapping); | ||
1062 | |||
1063 | if (!write && !buffer_mapped(&bh)) { | 1051 | if (!write && !buffer_mapped(&bh)) { |
1064 | spinlock_t *ptl; | 1052 | spinlock_t *ptl; |
1065 | pmd_t entry; | 1053 | pmd_t entry; |
@@ -1148,8 +1136,6 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address, | |||
1148 | } | 1136 | } |
1149 | 1137 | ||
1150 | out: | 1138 | out: |
1151 | i_mmap_unlock_read(mapping); | ||
1152 | |||
1153 | return result; | 1139 | return result; |
1154 | 1140 | ||
1155 | fallback: | 1141 | fallback: |
diff --git a/mm/memory.c b/mm/memory.c index f09cdb8d48fa..06f552504e79 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -2453,8 +2453,6 @@ void unmap_mapping_range(struct address_space *mapping, | |||
2453 | if (details.last_index < details.first_index) | 2453 | if (details.last_index < details.first_index) |
2454 | details.last_index = ULONG_MAX; | 2454 | details.last_index = ULONG_MAX; |
2455 | 2455 | ||
2456 | |||
2457 | /* DAX uses i_mmap_lock to serialise file truncate vs page fault */ | ||
2458 | i_mmap_lock_write(mapping); | 2456 | i_mmap_lock_write(mapping); |
2459 | if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap))) | 2457 | if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap))) |
2460 | unmap_mapping_range_tree(&mapping->i_mmap, &details); | 2458 | unmap_mapping_range_tree(&mapping->i_mmap, &details); |