Diffstat (limited to 'mm/memory.c')
-rw-r--r--	mm/memory.c	24
1 file changed, 20 insertions, 4 deletions
diff --git a/mm/memory.c b/mm/memory.c
index e009ce870859..22bfa7a47a0b 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1511,6 +1511,7 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 			unsigned long pfn)
 {
 	int ret;
+	pgprot_t pgprot = vma->vm_page_prot;
 	/*
 	 * Technically, architectures with pte_special can avoid all these
 	 * restrictions (same for remap_pfn_range). However we would like
@@ -1525,10 +1526,10 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 
 	if (addr < vma->vm_start || addr >= vma->vm_end)
 		return -EFAULT;
-	if (track_pfn_vma_new(vma, vma->vm_page_prot, pfn, PAGE_SIZE))
+	if (track_pfn_vma_new(vma, &pgprot, pfn, PAGE_SIZE))
 		return -EINVAL;
 
-	ret = insert_pfn(vma, addr, pfn, vma->vm_page_prot);
+	ret = insert_pfn(vma, addr, pfn, pgprot);
 
 	if (ret)
 		untrack_pfn_vma(vma, pfn, PAGE_SIZE);
@@ -1671,9 +1672,15 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 
 	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
 
-	err = track_pfn_vma_new(vma, prot, pfn, PAGE_ALIGN(size));
-	if (err)
+	err = track_pfn_vma_new(vma, &prot, pfn, PAGE_ALIGN(size));
+	if (err) {
+		/*
+		 * To indicate that track_pfn related cleanup is not
+		 * needed from higher level routine calling unmap_vmas
+		 */
+		vma->vm_flags &= ~(VM_IO | VM_RESERVED | VM_PFNMAP);
 		return -EINVAL;
+	}
 
 	BUG_ON(addr >= end);
 	pfn -= addr >> PAGE_SHIFT;
@@ -3165,6 +3172,15 @@ void print_vma_addr(char *prefix, unsigned long ip)
 #ifdef CONFIG_PROVE_LOCKING
 void might_fault(void)
 {
+	/*
+	 * Some code (nfs/sunrpc) uses socket ops on kernel memory while
+	 * holding the mmap_sem, this is safe because kernel memory doesn't
+	 * get paged out, therefore we'll never actually fault, and the
+	 * below annotations will generate false positives.
+	 */
+	if (segment_eq(get_fs(), KERNEL_DS))
+		return;
+
 	might_sleep();
 	/*
	 * it would be nicer only to annotate paths which are not under
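
Note on the pgprot change: track_pfn_vma_new() now receives the protection by pointer, so the architecture's reservation hook can hand the memory type it requires back to vm_insert_pfn()/remap_pfn_range() before the PTEs are built; on failure, remap_pfn_range() also clears VM_IO | VM_RESERVED | VM_PFNMAP again so the generic unmap path does not attempt track_pfn cleanup for a reservation that never happened. A minimal sketch of such a hook follows; the prototype is inferred from the call sites above, and example_lookup_memtype() and EXAMPLE_CACHE_MASK are made-up names, not the kernel's actual PAT code.

/*
 * Sketch only: a hypothetical track_pfn_vma_new() showing why prot is
 * passed as a pointer.  The reservation step decides which caching bits
 * the physical range must use and writes them back into the caller's
 * pgprot before any PTEs are created.
 */
int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
		      unsigned long pfn, unsigned long size)
{
	unsigned long want;

	/* reserve [pfn, pfn + size) and learn its required memory type */
	if (example_lookup_memtype(pfn, size, &want))
		return -EINVAL;

	/* fold the required caching bits into the caller's protection */
	*prot = __pgprot((pgprot_val(*prot) & ~EXAMPLE_CACHE_MASK) | want);
	return 0;
}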
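
Note on the might_fault() change: the early return skips the lockdep annotation whenever the address limit is KERNEL_DS, because the __user-typed pointers then actually reference kernel memory, which is never paged out and so cannot fault even while mmap_sem is held. A hedged illustration of the kind of caller this protects from false positives (the function name is made up; the nfs/sunrpc case named in the comment does the equivalent around socket ops):

/*
 * Illustrative only: the classic set_fs(KERNEL_DS) pattern.  The buffer
 * handed to the __user-typed API is kernel memory, so no page fault can
 * occur even with mmap_sem held; might_fault() now returns early rather
 * than warning.
 */
static ssize_t example_read_into_kernel_buf(struct file *filp, void *kbuf,
					    size_t len, loff_t *pos)
{
	mm_segment_t oldfs = get_fs();
	ssize_t ret;

	set_fs(KERNEL_DS);	/* uaccess now accepts kernel pointers */
	ret = vfs_read(filp, (char __user *)kbuf, len, pos);
	set_fs(oldfs);
	return ret;
}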