summaryrefslogtreecommitdiffstats
path: root/mm/hugetlb.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--  mm/hugetlb.c  22
1 files changed, 6 insertions, 16 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index c33c5cbb67ff..98a3c7c224cb 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3824,8 +3824,7 @@ retry:
				 * handling userfault. Reacquire after handling
				 * fault to make calling code simpler.
				 */
-				hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping,
-								idx, haddr);
+				hash = hugetlb_fault_mutex_hash(h, mapping, idx, haddr);
 				mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 				ret = handle_userfault(&vmf, VM_UFFD_MISSING);
 				mutex_lock(&hugetlb_fault_mutex_table[hash]);
@@ -3933,21 +3932,14 @@ backout_unlocked:
 }
 
 #ifdef CONFIG_SMP
-u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
-			     struct vm_area_struct *vma,
-			     struct address_space *mapping,
-			     pgoff_t idx, unsigned long address)
+u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
+			     pgoff_t idx, unsigned long address)
 {
 	unsigned long key[2];
 	u32 hash;
 
-	if (vma->vm_flags & VM_SHARED) {
-		key[0] = (unsigned long) mapping;
-		key[1] = idx;
-	} else {
-		key[0] = (unsigned long) mm;
-		key[1] = address >> huge_page_shift(h);
-	}
+	key[0] = (unsigned long) mapping;
+	key[1] = idx;
 
 	hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);
 
@@ -3958,9 +3950,7 @@ u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
 * For uniprocesor systems we always use a single mutex, so just
 * return 0 and avoid the hashing overhead.
 */
-u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
-			     struct vm_area_struct *vma,
-			     struct address_space *mapping,
-			     pgoff_t idx, unsigned long address)
+u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
+			     pgoff_t idx, unsigned long address)
 {
 	return 0;
@@ -4005,7 +3995,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
	 * get spurious allocation failures if two CPUs race to instantiate
	 * the same page in the page cache.
	 */
-	hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, haddr);
+	hash = hugetlb_fault_mutex_hash(h, mapping, idx, haddr);
 	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 
 	entry = huge_ptep_get(ptep);