author    Hugh Dickins <hugh@veritas.com>  2007-10-04 11:56:06 -0400
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-10-04 13:13:09 -0400
commit    16abfa086096895d438b19198e408ee96da7b508 (patch)
tree      1d82091f35f069d7b2a7636b5f488987671bdade /mm/memory.c
parent    804b3f9a16e446cb023417faec58b6506c834052 (diff)
Fix sys_remap_file_pages BUG at highmem.c:15!
Gurudas Pai reports kernel BUG at arch/i386/mm/highmem.c:15! below
sys_remap_file_pages, while running Oracle database test on x86 in 6GB
RAM: kunmap thinks we're in_interrupt because the preempt count has
wrapped.

That's because __do_fault expected to unmap page_table, but one of its
two callers do_nonlinear_fault already unmapped it: let do_linear_fault
unmap it first too, and then there's no need to pass the page_table arg
down.

Why have we been so slow to notice this?  Probably through forgetting
that the mapping_cap_account_dirty test means that sys_remap_file_pages
nowadays only goes the full nonlinear vma route on a few memory-backed
filesystems like ramfs, tmpfs and hugetlbfs.

[ It also depends on CONFIG_HIGHPTE, so it becomes even harder to
  trigger in practice.  Many who have need of large memory have
  probably migrated to x86-64..  Problem introduced by commit
  d0217ac04ca6591841e5665f518e38064f4e65bd ("mm: fault feedback #1")
  -- Linus ]

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Cc: gurudas pai <gurudas.pai@oracle.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
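A note on the failure mechanism described above: under CONFIG_HIGHPTE,
pte_offset_map() maps the page table with kmap_atomic(), which raises the
preempt count, and pte_unmap() drops it with kunmap_atomic(), which lowers
the count again.  An extra pte_unmap() therefore decrements the counter one
time too many; once it wraps to a huge value, the interrupt-mask bits appear
set, and kunmap()'s in_interrupt() check fires.  The following is a minimal
userspace model of that arithmetic, not the kernel's actual code: the
counter, the mask value and the model_* helpers are illustrative
assumptions that only mirror the increment/decrement pairing.

/*
 * Minimal userspace model of the bug: an unbalanced "unmap"
 * underflows an unsigned counter, so mask tests start passing.
 */
#include <assert.h>
#include <stdio.h>

static unsigned int preempt_count;      /* models the per-thread preempt count */
#define HARDIRQ_MASK 0x000f0000u        /* illustrative stand-in for the irq bits */

static void model_pte_map(void)   { preempt_count += 1; }  /* kmap_atomic side */
static void model_pte_unmap(void) { preempt_count -= 1; }  /* kunmap_atomic side */
static int  model_in_interrupt(void) { return (preempt_count & HARDIRQ_MASK) != 0; }

int main(void)
{
        model_pte_map();        /* fault handler maps the pte */
        model_pte_unmap();      /* do_nonlinear_fault unmaps it ... */
        model_pte_unmap();      /* ... and pre-fix __do_fault unmapped it again */

        /* 0 - 1 wraps to 0xffffffff: every mask test now trips. */
        printf("preempt_count = %#x, in_interrupt() = %d\n",
               preempt_count, model_in_interrupt());
        assert(model_in_interrupt());   /* the analogue of the BUG at highmem.c:15 */
        return 0;
}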
Diffstat (limited to 'mm/memory.c')
 mm/memory.c | 14 ++++++--------
 1 file changed, 6 insertions(+), 8 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index ca8cac11bd2c..c0e7741a98de 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2307,13 +2307,14 @@ oom:
  * do not need to flush old virtual caches or the TLB.
  *
  * We enter with non-exclusive mmap_sem (to exclude vma changes,
- * but allow concurrent faults), and pte mapped but not yet locked.
+ * but allow concurrent faults), and pte neither mapped nor locked.
  * We return with mmap_sem still held, but pte unmapped and unlocked.
  */
 static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
-		unsigned long address, pte_t *page_table, pmd_t *pmd,
+		unsigned long address, pmd_t *pmd,
 		pgoff_t pgoff, unsigned int flags, pte_t orig_pte)
 {
+	pte_t *page_table;
 	spinlock_t *ptl;
 	struct page *page;
 	pte_t entry;
@@ -2327,7 +2328,6 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	vmf.flags = flags;
 	vmf.page = NULL;
 
-	pte_unmap(page_table);
 	BUG_ON(vma->vm_flags & VM_PFNMAP);
 
 	if (likely(vma->vm_ops->fault)) {
@@ -2468,8 +2468,8 @@ static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 			- vma->vm_start) >> PAGE_CACHE_SHIFT) + vma->vm_pgoff;
 	unsigned int flags = (write_access ? FAULT_FLAG_WRITE : 0);
 
-	return __do_fault(mm, vma, address, page_table, pmd, pgoff,
-							flags, orig_pte);
+	pte_unmap(page_table);
+	return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
 }
 
 
@@ -2552,9 +2552,7 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	}
 
 	pgoff = pte_to_pgoff(orig_pte);
-
-	return __do_fault(mm, vma, address, page_table, pmd, pgoff,
-							flags, orig_pte);
+	return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
 }
 
 /*
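For context on how userspace reaches the do_nonlinear_fault path fixed
above: remap_file_pages(2) on a MAP_SHARED, tmpfs-backed mapping makes the
vma nonlinear, so the next fault on it goes through do_nonlinear_fault()
into __do_fault().  The sketch below shows that trigger path only; the file
name and sizes are illustrative assumptions, and actually provoking the
original BUG additionally required a 32-bit highmem kernel built with
CONFIG_HIGHPTE, per the commit message (later kernels emulate
remap_file_pages with plain mmap).

/* Sketch of the userspace trigger path, under the assumptions above. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long psz = sysconf(_SC_PAGESIZE);

        /* tmpfs file: per the commit message, only memory-backed
         * filesystems (ramfs/tmpfs/hugetlbfs) still take the full
         * nonlinear vma route. */
        int fd = open("/dev/shm/nonlinear-test", O_RDWR | O_CREAT | O_TRUNC, 0600);
        if (fd < 0 || ftruncate(fd, 4 * psz) < 0) {
                perror("setup");
                return 1;
        }

        char *map = mmap(NULL, 4 * psz, PROT_READ | PROT_WRITE,
                         MAP_SHARED, fd, 0);
        if (map == MAP_FAILED) {
                perror("mmap");
                return 1;
        }

        /* Point page 0 of the mapping at file page 3: this rearrangement
         * is what marks the vma nonlinear. */
        if (remap_file_pages(map, psz, 0, 3, 0) < 0) {
                perror("remap_file_pages");
                return 1;
        }

        map[0] = 'x';   /* this fault takes do_nonlinear_fault -> __do_fault */
        printf("touched remapped page OK\n");

        munmap(map, 4 * psz);
        close(fd);
        return 0;
}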