author	Chen, Kenneth W <kenneth.w.chen@intel.com>	2006-10-04 05:15:24 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-10-04 10:55:12 -0400
commit	fe1668ae5bf0145014c71797febd9ad5670d5d05
tree	a1ad042a0c9b604653ca24645358fe31d1bbcfd6
parent	e80ee884ae0e3794ef2b65a18a767d502ad712ee
[PATCH] enforce proper tlb flush in unmap_hugepage_range
Spotted by Hugh: a hugetlb page is freed back to the global pool before any TLB flush is performed in unmap_hugepage_range(). This potentially allows threads to exploit a free-alloc race condition.

The generic tlb gather code is unsuitable for use by hugetlb, so I open coded a page gathering list and delayed put_page until the tlb flush is performed.

Cc: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Ken Chen <kenneth.w.chen@intel.com>
Acked-by: William Irwin <wli@holomorphy.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
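For readability, here is a condensed sketch of the flow the patch establishes, pieced together from the diff below; the pte lookup and clearing in the middle of the loop is elided, and the variable names simply follow the patch:

	LIST_HEAD(page_list);
	struct page *page, *tmp;

	spin_lock(&mm->page_table_lock);
	for (address = start; address < end; address += HPAGE_SIZE) {
		/* ... locate and clear the huge pte for 'address' (elided) ... */
		page = pte_page(pte);
		list_add(&page->lru, &page_list);	/* defer the free */
		add_mm_counter(mm, file_rss, (int) -(HPAGE_SIZE / PAGE_SIZE));
	}
	spin_unlock(&mm->page_table_lock);

	flush_tlb_range(vma, start, end);	/* flush stale TLB entries first */
	list_for_each_entry_safe(page, tmp, &page_list, lru) {
		list_del(&page->lru);
		put_page(page);			/* only now may the page be reused */
	}

The point is that put_page() now happens strictly after flush_tlb_range(), so another thread cannot be handed a freshly freed huge page while stale TLB entries for it still exist.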
 mm/hugetlb.c | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 7c7d03dbf73d..1d709ff528e1 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -364,6 +364,8 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 	pte_t *ptep;
 	pte_t pte;
 	struct page *page;
+	struct page *tmp;
+	LIST_HEAD(page_list);
 
 	WARN_ON(!is_vm_hugetlb_page(vma));
 	BUG_ON(start & ~HPAGE_MASK);
@@ -384,12 +386,16 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 			continue;
 
 		page = pte_page(pte);
-		put_page(page);
+		list_add(&page->lru, &page_list);
 		add_mm_counter(mm, file_rss, (int) -(HPAGE_SIZE / PAGE_SIZE));
 	}
 
 	spin_unlock(&mm->page_table_lock);
 	flush_tlb_range(vma, start, end);
+	list_for_each_entry_safe(page, tmp, &page_list, lru) {
+		list_del(&page->lru);
+		put_page(page);
+	}
 }
 
 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,