Diffstat (limited to 'mm')
-rw-r--r--  mm/fremap.c    7
-rw-r--r--  mm/hugetlb.c  24
-rw-r--r--  mm/rmap.c     15
3 files changed, 27 insertions, 19 deletions
diff --git a/mm/fremap.c b/mm/fremap.c
index 46f5dacf90a2..ec520c7b28df 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -125,7 +125,6 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
 {
 	struct mm_struct *mm = current->mm;
 	struct address_space *mapping;
-	unsigned long end = start + size;
 	struct vm_area_struct *vma;
 	int err = -EINVAL;
 	int has_write_lock = 0;
@@ -142,6 +141,10 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
 	if (start + size <= start)
 		return err;
 
+	/* Does pgoff wrap? */
+	if (pgoff + (size >> PAGE_SHIFT) < pgoff)
+		return err;
+
 	/* Can we represent this offset inside this architecture's pte's? */
 #if PTE_FILE_MAX_BITS < BITS_PER_LONG
 	if (pgoff + (size >> PAGE_SHIFT) >= (1UL << PTE_FILE_MAX_BITS))
@@ -168,7 +171,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
 	if (!(vma->vm_flags & VM_CAN_NONLINEAR))
 		goto out;
 
-	if (end <= start || start < vma->vm_start || end > vma->vm_end)
+	if (start < vma->vm_start || start + size > vma->vm_end)
 		goto out;
 
 	/* Must set VM_NONLINEAR before any pages are populated. */
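
The wrap check added above relies on unsigned arithmetic: if pgoff plus the page count of the mapping overflows, the sum wraps around and compares less than the original pgoff. A minimal userspace sketch of the same idiom, with PAGE_SHIFT_DEMO and pgoff_would_wrap() as illustrative stand-ins rather than kernel definitions:

#include <stdio.h>

#define PAGE_SHIFT_DEMO 12	/* stand-in for the kernel's PAGE_SHIFT */

/* Mirror the "pgoff + (size >> PAGE_SHIFT) < pgoff" test: unsigned
 * addition wraps modulo 2^BITS_PER_LONG, so a sum smaller than one of
 * its operands means the addition overflowed. */
static int pgoff_would_wrap(unsigned long pgoff, unsigned long size)
{
	return pgoff + (size >> PAGE_SHIFT_DEMO) < pgoff;
}

int main(void)
{
	printf("%d\n", pgoff_would_wrap(16UL, 1UL << 20));	/* 0: fits  */
	printf("%d\n", pgoff_would_wrap(~0UL - 1, 1UL << 20));	/* 1: wraps */
	return 0;
}
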
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index cc5be788a39f..c03273807182 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2324,11 +2324,8 @@ retry_avoidcopy:
 	 * and just make the page writable */
 	avoidcopy = (page_mapcount(old_page) == 1);
 	if (avoidcopy) {
-		if (!trylock_page(old_page)) {
-			if (PageAnon(old_page))
-				page_move_anon_rmap(old_page, vma, address);
-		} else
-			unlock_page(old_page);
+		if (PageAnon(old_page))
+			page_move_anon_rmap(old_page, vma, address);
 		set_huge_ptep_writable(vma, address, ptep);
 		return 0;
 	}
@@ -2404,7 +2401,7 @@ retry_avoidcopy:
 		set_huge_pte_at(mm, address, ptep,
 				make_huge_pte(vma, new_page, 1));
 		page_remove_rmap(old_page);
-		hugepage_add_anon_rmap(new_page, vma, address);
+		hugepage_add_new_anon_rmap(new_page, vma, address);
 		/* Make the old page be freed below */
 		new_page = old_page;
 		mmu_notifier_invalidate_range_end(mm,
@@ -2631,10 +2628,16 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 						vma, address);
 	}
 
-	if (!pagecache_page) {
-		page = pte_page(entry);
+	/*
+	 * hugetlb_cow() requires page locks of pte_page(entry) and
+	 * pagecache_page, so here we need take the former one
+	 * when page != pagecache_page or !pagecache_page.
+	 * Note that locking order is always pagecache_page -> page,
+	 * so no worry about deadlock.
+	 */
+	page = pte_page(entry);
+	if (page != pagecache_page)
 		lock_page(page);
-	}
 
 	spin_lock(&mm->page_table_lock);
 	/* Check for a racing update before calling hugetlb_cow */
@@ -2661,9 +2664,8 @@ out_page_table_lock:
 	if (pagecache_page) {
 		unlock_page(pagecache_page);
 		put_page(pagecache_page);
-	} else {
-		unlock_page(page);
 	}
+	unlock_page(page);
 
 out_mutex:
 	mutex_unlock(&hugetlb_instantiation_mutex);
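
The comment added in hugetlb_fault() above leans on a fixed acquisition order, pagecache_page first and then the faulted page, so that no two paths can each hold one of the locks while waiting for the other. A hedged userspace-style sketch of that ordering rule, with demo_page, lock_pair() and unlock_pair() invented purely for illustration:

#include <pthread.h>

struct demo_page {
	pthread_mutex_t lock;	/* plays the role of the page lock */
};

/* Always take the pagecache page's lock before the faulted page's lock
 * when both are needed; one global order rules out deadlock. */
static void lock_pair(struct demo_page *pagecache_page, struct demo_page *page)
{
	if (pagecache_page && pagecache_page != page)
		pthread_mutex_lock(&pagecache_page->lock);
	pthread_mutex_lock(&page->lock);
}

/* Release in the reverse order. */
static void unlock_pair(struct demo_page *pagecache_page, struct demo_page *page)
{
	pthread_mutex_unlock(&page->lock);
	if (pagecache_page && pagecache_page != page)
		pthread_mutex_unlock(&pagecache_page->lock);
}

int main(void)
{
	struct demo_page cache = { PTHREAD_MUTEX_INITIALIZER };
	struct demo_page pte   = { PTHREAD_MUTEX_INITIALIZER };

	lock_pair(&cache, &pte);	/* pagecache_page -> page */
	unlock_pair(&cache, &pte);
	return 0;
}
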
diff --git a/mm/rmap.c b/mm/rmap.c
index f6f0d2dda2ea..9d2ba01bd4f9 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1564,13 +1564,14 @@ static void __hugepage_set_anon_rmap(struct page *page,
 	struct vm_area_struct *vma, unsigned long address, int exclusive)
 {
 	struct anon_vma *anon_vma = vma->anon_vma;
+
 	BUG_ON(!anon_vma);
-	if (!exclusive) {
-		struct anon_vma_chain *avc;
-		avc = list_entry(vma->anon_vma_chain.prev,
-				 struct anon_vma_chain, same_vma);
-		anon_vma = avc->anon_vma;
-	}
+
+	if (PageAnon(page))
+		return;
+	if (!exclusive)
+		anon_vma = anon_vma->root;
+
 	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
 	page->mapping = (struct address_space *) anon_vma;
 	page->index = linear_page_index(vma, address);
@@ -1581,6 +1582,8 @@ void hugepage_add_anon_rmap(struct page *page,
 {
 	struct anon_vma *anon_vma = vma->anon_vma;
 	int first;
+
+	BUG_ON(!PageLocked(page));
 	BUG_ON(!anon_vma);
 	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
 	first = atomic_inc_and_test(&page->_mapcount);
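
__hugepage_set_anon_rmap() above stores the anon_vma pointer in page->mapping with the PAGE_MAPPING_ANON bit added, the usual low-bit pointer-tagging trick that PageAnon() tests to tell anonymous pages from file-backed ones. A small illustrative sketch of that encoding, where DEMO_MAPPING_ANON and the helpers are stand-ins rather than the kernel's definitions:

#include <stdio.h>
#include <stdint.h>

#define DEMO_MAPPING_ANON 0x1UL	/* low bit of an aligned pointer is otherwise free */

struct demo_anon_vma { int dummy; };

/* Tag the pointer by setting its low bit, as the assignment to
 * page->mapping does with PAGE_MAPPING_ANON. */
static void *tag_anon(struct demo_anon_vma *av)
{
	return (void *)((uintptr_t)av | DEMO_MAPPING_ANON);
}

/* Test the low bit, as PageAnon() does on page->mapping. */
static int is_anon(void *mapping)
{
	return ((uintptr_t)mapping & DEMO_MAPPING_ANON) != 0;
}

/* Strip the tag to get the anon_vma back. */
static struct demo_anon_vma *untag_anon(void *mapping)
{
	return (struct demo_anon_vma *)((uintptr_t)mapping & ~DEMO_MAPPING_ANON);
}

int main(void)
{
	static struct demo_anon_vma av;
	void *mapping = tag_anon(&av);

	printf("anon=%d recovered=%d\n", is_anon(mapping), untag_anon(mapping) == &av);
	return 0;
}
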