author     KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>   2008-10-18 23:27:10 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>     2008-10-20 11:52:32 -0400
commit     4b2e38ad703541f7845c2d766426148b8d1aa329
tree       aaafbec5325d15c38c382c655120fb6492c11f82 /mm/hugetlb.c
parent     e575f111dc0f27044e170580e7de50985ab3e011
hugepage: support ZERO_PAGE()
Presently, hugepages don't use the zero page at all, because the zero page was only used for coredumping and hugepages couldn't be core dumped.

However, hugepage coredumping has now been implemented, so we should implement zero page support for hugepages as well.
Implementation notes:

o Why do we only check VM_SHARED for the zero page? For normal pages, the check is:
	static inline int use_zero_page(struct vm_area_struct *vma)
	{
		if (vma->vm_flags & (VM_LOCKED | VM_SHARED))
			return 0;
		return !vma->vm_ops || !vma->vm_ops->fault;
	}
First, hugepages are never mlock()ed, so we aren't concerned with VM_LOCKED. Second, hugetlbfs is a pseudo filesystem, not a real one, and its mappings have no file backing, so the vm_ops->fault check is meaningless. Only the VM_SHARED test remains (see the first sketch after these notes).
o Why don't we use the zero page if !pte? !pte indicates that the {pud, pmd} doesn't exist or that some error happened, so we shouldn't return the zero page when any error has occurred (the second sketch below shows the resulting behaviour from a caller's point of view).
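
Taken together, these notes mean the hugepage analogue of use_zero_page() would collapse to a single flag test. A minimal sketch under that reasoning (huge_use_zero_page() is a hypothetical name; the patch below folds the same test into huge_zeropage_ok() instead):

	/*
	 * Hypothetical hugetlb analogue of use_zero_page(): hugepages are
	 * never mlock()ed and hugetlbfs mappings have no ->fault file
	 * backing, so only VM_SHARED decides the question.
	 */
	static inline int huge_use_zero_page(struct vm_area_struct *vma)
	{
		return !(vma->vm_flags & VM_SHARED);
	}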
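
For illustration only (not part of the patch): follow_hugetlb_page() is reached via get_user_pages(), so a read-only pin of a private hugetlb region whose huge PTE slot exists but is still huge_pte_none() can now be satisfied by ZERO_PAGE(0) instead of faulting in a real huge page. A sketch against the get_user_pages() signature of this era; addr is assumed to point into such a mapping:

	/*
	 * Illustrative caller: pin one page, read-only, from a private
	 * hugetlb mapping.  If the huge PTE slot is present but empty,
	 * this patch makes pages[0] == ZERO_PAGE(0) rather than forcing
	 * a real huge page to be instantiated.
	 */
	struct page *page;
	int ret;

	down_read(&current->mm->mmap_sem);
	ret = get_user_pages(current, current->mm, addr, 1,
			     0 /* write */, 0 /* force */, &page, NULL);
	up_read(&current->mm->mmap_sem);
	if (ret == 1)
		put_page(page);		/* balance the get_page() in GUP */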
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Adam Litke <agl@us.ibm.com>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Kawai Hidehiro <hidehiro.kawai.ez@hitachi.com>
Cc: Mel Gorman <mel@skynet.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/hugetlb.c')
 mm/hugetlb.c | 22 +++++++++++++++++++---
 1 file changed, 19 insertions(+), 3 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ab79cd4dd23c..ce8cbb29860b 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2071,6 +2071,14 @@ follow_huge_pud(struct mm_struct *mm, unsigned long address,
 	return NULL;
 }
 
+static int huge_zeropage_ok(pte_t *ptep, int write, int shared)
+{
+	if (!ptep || write || shared)
+		return 0;
+	else
+		return huge_pte_none(huge_ptep_get(ptep));
+}
+
 int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			struct page **pages, struct vm_area_struct **vmas,
 			unsigned long *position, int *length, int i,
@@ -2080,6 +2088,8 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	unsigned long vaddr = *position;
 	int remainder = *length;
 	struct hstate *h = hstate_vma(vma);
+	int zeropage_ok = 0;
+	int shared = vma->vm_flags & VM_SHARED;
 
 	spin_lock(&mm->page_table_lock);
 	while (vaddr < vma->vm_end && remainder) {
@@ -2092,8 +2102,11 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		 * first, for the page indexing below to work.
 		 */
 		pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
+		if (huge_zeropage_ok(pte, write, shared))
+			zeropage_ok = 1;
 
-		if (!pte || huge_pte_none(huge_ptep_get(pte)) ||
+		if (!pte ||
+		    (huge_pte_none(huge_ptep_get(pte)) && !zeropage_ok) ||
 		    (write && !pte_write(huge_ptep_get(pte)))) {
 			int ret;
 
@@ -2113,8 +2126,11 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		page = pte_page(huge_ptep_get(pte));
 same_page:
 		if (pages) {
-			get_page(page);
-			pages[i] = page + pfn_offset;
+			if (zeropage_ok)
+				pages[i] = ZERO_PAGE(0);
+			else
+				pages[i] = page + pfn_offset;
+			get_page(pages[i]);
 		}
 
 		if (vmas)