author     Kirill A. Shutemov <kirill.shutemov@linux.intel.com>   2015-09-08 17:59:42 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>        2015-09-08 18:35:28 -0400
commit     46c043ede4711e8d598b9d63c5616c1fedb0605e (patch)
tree       8a559930980f59ef24c25b73b91899106f8f0418 /fs
parent     3fdd1b479dbc03347e98f904f54133a9cef5521f (diff)
mm: take i_mmap_lock in unmap_mapping_range() for DAX
DAX is not so special: we need i_mmap_lock to protect mapping->i_mmap.

__dax_pmd_fault() uses unmap_mapping_range() to shoot out the zero page
from all mappings.  We need to drop i_mmap_lock there to avoid a lock
deadlock.  Re-acquiring the lock should be fine since we check i_size
after that point.
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Matthew Wilcox <willy@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'fs')
-rw-r--r--   fs/dax.c   35
1 file changed, 19 insertions(+), 16 deletions(-)
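For context, the locking pattern this patch switches to can be sketched in plain userspace C. This is an illustration only, not kernel code: struct mapping, unmap_range() and fault_new_block() are hypothetical stand-ins, and a pthread rwlock takes the place of the i_mmap lock used via i_mmap_lock_write()/i_mmap_unlock_write(). The point of the sketch is that a fault path entered with the lock held must drop it before calling a routine that takes the same lock itself, then re-take it and re-check the size, because a truncate can race in the unlocked window.

/*
 * Minimal userspace sketch of the lock-drop pattern -- NOT kernel code.
 * All names below are hypothetical stand-ins for this illustration.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct mapping {
	pthread_rwlock_t i_mmap_lock;   /* stand-in for the i_mmap lock */
	unsigned long    nrpages;       /* stand-in for i_size >> PAGE_SHIFT */
};

/* Stand-in for unmap_mapping_range(): it takes the lock internally,
 * which is exactly why the caller must not hold it. */
static void unmap_range(struct mapping *m, unsigned long pgoff)
{
	pthread_rwlock_wrlock(&m->i_mmap_lock);
	printf("shooting out zero pages at pgoff %lu\n", pgoff);
	pthread_rwlock_unlock(&m->i_mmap_lock);
}

/* Fault path, entered with the lock held for write, as in the patch. */
static bool fault_new_block(struct mapping *m, unsigned long pgoff)
{
	/* Drop the lock so unmap_range() can take it without deadlocking. */
	pthread_rwlock_unlock(&m->i_mmap_lock);
	unmap_range(m, pgoff);
	pthread_rwlock_wrlock(&m->i_mmap_lock);

	/* Re-check the size: a truncate may have raced while the lock
	 * was dropped, so fall back rather than map past EOF. */
	if (pgoff >= m->nrpages)
		return false;
	return true;
}

int main(void)
{
	struct mapping m = { .nrpages = 16 };

	pthread_rwlock_init(&m.i_mmap_lock, NULL);
	pthread_rwlock_wrlock(&m.i_mmap_lock);
	printf("fault %s\n", fault_new_block(&m, 3) ? "succeeds" : "falls back");
	pthread_rwlock_unlock(&m.i_mmap_lock);
	pthread_rwlock_destroy(&m.i_mmap_lock);
	return 0;
}

Build with -lpthread. The trade-off is explicit in the sketch: releasing the lock makes the nested-lock deadlock impossible, but anything validated before the release must be revalidated afterwards, which is why the patch checks i_size only after re-taking the lock.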
@@ -554,6 +554,25 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
 	if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE)
 		goto fallback;
 
+	if (buffer_unwritten(&bh) || buffer_new(&bh)) {
+		int i;
+		for (i = 0; i < PTRS_PER_PMD; i++)
+			clear_page(kaddr + i * PAGE_SIZE);
+		count_vm_event(PGMAJFAULT);
+		mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
+		result |= VM_FAULT_MAJOR;
+	}
+
+	/*
+	 * If we allocated new storage, make sure no process has any
+	 * zero pages covering this hole
+	 */
+	if (buffer_new(&bh)) {
+		i_mmap_unlock_write(mapping);
+		unmap_mapping_range(mapping, pgoff << PAGE_SHIFT, PMD_SIZE, 0);
+		i_mmap_lock_write(mapping);
+	}
+
 	/*
 	 * If a truncate happened while we were allocating blocks, we may
 	 * leave blocks allocated to the file that are beyond EOF.  We can't
@@ -568,13 +587,6 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
 	if ((pgoff | PG_PMD_COLOUR) >= size)
 		goto fallback;
 
-	/*
-	 * If we allocated new storage, make sure no process has any
-	 * zero pages covering this hole
-	 */
-	if (buffer_new(&bh))
-		unmap_mapping_range(mapping, pgoff << PAGE_SHIFT, PMD_SIZE, 0);
-
 	if (!write && !buffer_mapped(&bh) && buffer_uptodate(&bh)) {
 		spinlock_t *ptl;
 		pmd_t entry;
@@ -605,15 +617,6 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
 		if ((length < PMD_SIZE) || (pfn & PG_PMD_COLOUR))
 			goto fallback;
 
-		if (buffer_unwritten(&bh) || buffer_new(&bh)) {
-			int i;
-			for (i = 0; i < PTRS_PER_PMD; i++)
-				clear_page(kaddr + i * PAGE_SIZE);
-			count_vm_event(PGMAJFAULT);
-			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
-			result |= VM_FAULT_MAJOR;
-		}
-
 		result |= vmf_insert_pfn_pmd(vma, address, pmd, pfn, write);
 	}
 