author    Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>  2010-05-27 20:29:16 -0400
committer Andi Kleen <ak@linux.intel.com>              2010-08-11 03:21:15 -0400
commit    0fe6e20b9c4c53b3e97096ee73a0857f60aad43f (patch)
tree      3014636f2ed66fdebecb6f6bab338b39c3543a07 /include/linux
parent    8edf344c66a3f214d709dad1421c29d678915b3f (diff)
hugetlb, rmap: add reverse mapping for hugepage
This patch adds a reverse mapping feature for hugepages by introducing a mapcount for shared/private-mapped hugepages and an anon_vma for private-mapped hugepages.

While hugepages are not currently swappable, reverse mapping is useful for the memory error handler. Without this patch, the memory error handler can neither identify the processes using a bad hugepage nor unmap it from them. That is:

- for a shared hugepage: we can collect the processes using it through the pagecache, but cannot unmap the hugepage because of the lack of a mapcount.
- for a privately mapped hugepage: we can neither collect the processes nor unmap the hugepage.

This patch solves these problems. It includes the bug fix given by commit 23be7468e8, so it reverts that commit.

Dependency: "hugetlb: move definition of is_vm_hugetlb_page() to hugepage_inline.h"

ChangeLog since May 24:
- create hugetlb_inline.h and move is_vm_hugetlb_page() into it.
- move the functions setting up anon_vma for hugepages into mm/rmap.c.

ChangeLog since May 13:
- rebased to 2.6.34
- fix a logic error (in the case where a private mapping and a shared mapping coexist)
- move is_vm_hugetlb_page() into include/linux/mm.h so it can be used from linear_page_index()
- define and use linear_hugepage_index() instead of compound_order()
- use page_move_anon_rmap() in hugetlb_cow()
- copy the exclusive switch of __set_page_anon_rmap() into the hugepage counterpart.
- revert commit 23be7468 completely

Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Larry Woodman <lwoodman@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Acked-by: Fengguang Wu <fengguang.wu@intel.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andi Kleen <ak@linux.intel.com>
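In concrete terms, the mapcount half of this change means the hugepage's head page tracks its mappers exactly as a small page does. A minimal sketch of that bookkeeping, using hypothetical helper names (the patch itself folds this into the existing rmap functions rather than adding such helpers):

	/* Hypothetical helpers, for illustration only. */
	static inline void hugepage_dup_rmap(struct page *head)
	{
		/* another process maps this hugepage */
		atomic_inc(&head->_mapcount);
	}

	static inline int hugepage_last_unmap(struct page *head)
	{
		/* _mapcount starts at -1; true once the last mapping is gone */
		return atomic_add_negative(-1, &head->_mapcount);
	}

With that count in place, the memory error handler can unmap a bad hugepage from every process and know when the last mapping is gone, just as try_to_unmap() already allows for small pages.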
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/hugetlb.h |  1 +
-rw-r--r--  include/linux/pagemap.h |  8 +++++++-
-rw-r--r--  include/linux/poison.h  |  9 ---------
-rw-r--r--  include/linux/rmap.h    |  5 +++++
4 files changed, 13 insertions(+), 10 deletions(-)
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index d47a7c41745d..e688fd89354d 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -99,6 +99,7 @@ static inline void hugetlb_report_meminfo(struct seq_file *m)
 #define is_hugepage_only_range(mm, addr, len)	0
 #define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
 #define hugetlb_fault(mm, vma, addr, flags)	({ BUG(); 0; })
+#define huge_pte_offset(mm, address)	0
 
 #define hugetlb_change_protection(vma, address, end, newprot)
 
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index b2bd2bae9775..a547d9689170 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -282,10 +282,16 @@ static inline loff_t page_offset(struct page *page)
 	return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
 }
 
+extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
+				     unsigned long address);
+
 static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
 					unsigned long address)
 {
-	pgoff_t pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
+	pgoff_t pgoff;
+	if (unlikely(is_vm_hugetlb_page(vma)))
+		return linear_hugepage_index(vma, address);
+	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
 	pgoff += vma->vm_pgoff;
 	return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
 }
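linear_hugepage_index() itself is defined out of line in mm/hugetlb.c, outside this include/linux diffstat. A simplified sketch of what it computes, assuming an hstate describing the vma's huge page size: the page index in huge-page-sized units rather than PAGE_SIZE units, which is what makes the earlier compound_order() workaround unnecessary:

	pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				      unsigned long address)
	{
		struct hstate *h = hstate_vma(vma);

		/* offset from vm_start in huge pages, plus the mapping's
		   file offset rescaled from PAGE_SIZE to huge-page units */
		return ((address - vma->vm_start) >> huge_page_shift(h)) +
			(vma->vm_pgoff >> huge_page_order(h));
	}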
diff --git a/include/linux/poison.h b/include/linux/poison.h
index 34066ffd893d..2110a81c5e2a 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -48,15 +48,6 @@
 #define POISON_FREE	0x6b	/* for use-after-free poisoning */
 #define POISON_END	0xa5	/* end-byte of poisoning */
 
-/********** mm/hugetlb.c **********/
-/*
- * Private mappings of hugetlb pages use this poisoned value for
- * page->mapping. The core VM should not be doing anything with this mapping
- * but futex requires the existence of some page->mapping value even though it
- * is unused if PAGE_MAPPING_ANON is set.
- */
-#define HUGETLB_POISON	((void *)(0x00300300 + POISON_POINTER_DELTA + PAGE_MAPPING_ANON))
-
 /********** arch/$ARCH/mm/init.c **********/
 #define POISON_FREE_INITMEM	0xcc
 
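HUGETLB_POISON can be dropped because privately mapped hugepages now store a real anon_vma pointer in page->mapping, tagged with PAGE_MAPPING_ANON, so futex and the rest of the VM see an ordinary anonymous mapping instead of a poisoned placeholder. For reference, the existing anonymous-page test that such hugepages now satisfy:

	static inline int PageAnon(struct page *page)
	{
		return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
	}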
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 77216742c178..9d50e7ef5f5a 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -140,6 +140,11 @@ void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
 void page_add_file_rmap(struct page *);
 void page_remove_rmap(struct page *);
 
+void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
+			    unsigned long);
+void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *,
+				unsigned long);
+
 static inline void page_dup_rmap(struct page *page)
 {
 	atomic_inc(&page->_mapcount);
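The bodies of these two hooks live in mm/rmap.c, outside this diffstat. A simplified sketch of what hugepage_add_anon_rmap() plausibly does, modeled on the small-page __page_set_anon_rmap() path (details assumed, not quoted from the patch):

	void hugepage_add_anon_rmap(struct page *page,
				    struct vm_area_struct *vma,
				    unsigned long address)
	{
		struct anon_vma *anon_vma = vma->anon_vma;
		int first;

		BUG_ON(!anon_vma);
		BUG_ON(address < vma->vm_start || address >= vma->vm_end);

		/* _mapcount starts at -1, so the first mapper takes it to 0 */
		first = atomic_inc_and_test(&page->_mapcount);
		if (first) {
			/* tag the anon_vma pointer so PageAnon() is true */
			page->mapping = (struct address_space *)
				((void *)anon_vma + PAGE_MAPPING_ANON);
			page->index = linear_page_index(vma, address);
		}
	}

hugepage_add_new_anon_rmap() is the variant for a freshly allocated hugepage (for example the private copy made in hugetlb_cow()), which can set up the mapping unconditionally because no other task can hold a reference yet.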