author     David Gibson <david@gibson.dropbear.id.au>  2005-06-21 20:14:44 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>   2005-06-21 21:46:15 -0400
commit     63551ae0feaaa23807ebea60de1901564bbef32e (patch)
tree       f6f97f60f83c3e9813bdfcc6039c499997b1ea10 /arch/i386/mm
parent     1e7e5a9048b30c57ba1ddaa6cdf59b21b65cde99 (diff)
[PATCH] Hugepage consolidation
A lot of the code in arch/*/mm/hugetlbpage.c is quite similar.  This patch
attempts to consolidate a lot of the code across the archs, putting the
combined version in mm/hugetlb.c.  There are a couple of uglyish hacks in
order to convert all the hugepage archs, but the result is a very large
reduction in the total amount of code.  It also means things like hugepage
lazy allocation could be implemented in one place, instead of six.

Tested, at least a little, on ppc64, i386 and x86_64.

Notes:
	- this patch changes the meaning of set_huge_pte() to be more
	  analogous to set_pte()
	- does SH4 need a special huge_ptep_get_and_clear()??

Acked-by: William Lee Irwin <wli@holomorphy.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
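The set_huge_pte() change called out in the notes can be pictured roughly as
below.  This is an illustrative sketch only: the names old_style_set_huge_pte
and new_style_set_huge_pte and the exact argument lists are hypothetical, not
the prototypes used by the patched tree.

/* Before: the arch helper constructed the pte from the page itself
 * (this mirrors the i386 set_huge_pte() removed in the diff below). */
static void old_style_set_huge_pte(struct mm_struct *mm,
                                   struct vm_area_struct *vma,
                                   struct page *page, pte_t *page_table,
                                   int write_access)
{
        pte_t entry = mk_pte(page, vma->vm_page_prot);

        if (write_access)
                entry = pte_mkwrite(pte_mkdirty(entry));
        else
                entry = pte_wrprotect(entry);
        entry = pte_mkyoung(entry);
        mk_pte_huge(entry);
        set_pte(page_table, entry);     /* builds *and* installs the entry */
}

/* After: "analogous to set_pte()" -- the caller (now the shared code in
 * mm/hugetlb.c) builds the pte entry, and the arch helper only installs it. */
static void new_style_set_huge_pte(struct mm_struct *mm, unsigned long addr,
                                   pte_t *ptep, pte_t entry)
{
        set_pte(ptep, entry);
}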
Diffstat (limited to 'arch/i386/mm')
-rw-r--r--   arch/i386/mm/hugetlbpage.c   170
1 file changed, 8 insertions, 162 deletions
diff --git a/arch/i386/mm/hugetlbpage.c b/arch/i386/mm/hugetlbpage.c
index 171fc925e1e4..5aa06001a4bd 100644
--- a/arch/i386/mm/hugetlbpage.c
+++ b/arch/i386/mm/hugetlbpage.c
@@ -18,7 +18,7 @@
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>
 
-static pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
+pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
 {
         pgd_t *pgd;
         pud_t *pud;
@@ -30,7 +30,7 @@ static pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
         return (pte_t *) pmd;
 }
 
-static pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 {
         pgd_t *pgd;
         pud_t *pud;
@@ -42,21 +42,6 @@ static pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
         return (pte_t *) pmd;
 }
 
-static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma, struct page *page, pte_t * page_table, int write_access)
-{
-        pte_t entry;
-
-        add_mm_counter(mm, rss, HPAGE_SIZE / PAGE_SIZE);
-        if (write_access) {
-                entry =
-                    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
-        } else
-                entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
-        entry = pte_mkyoung(entry);
-        mk_pte_huge(entry);
-        set_pte(page_table, entry);
-}
-
 /*
  * This function checks for proper alignment of input addr and len parameters.
  */
@@ -69,77 +54,6 @@ int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
         return 0;
 }
 
-int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
-                        struct vm_area_struct *vma)
-{
-        pte_t *src_pte, *dst_pte, entry;
-        struct page *ptepage;
-        unsigned long addr = vma->vm_start;
-        unsigned long end = vma->vm_end;
-
-        while (addr < end) {
-                dst_pte = huge_pte_alloc(dst, addr);
-                if (!dst_pte)
-                        goto nomem;
-                src_pte = huge_pte_offset(src, addr);
-                entry = *src_pte;
-                ptepage = pte_page(entry);
-                get_page(ptepage);
-                set_pte(dst_pte, entry);
-                add_mm_counter(dst, rss, HPAGE_SIZE / PAGE_SIZE);
-                addr += HPAGE_SIZE;
-        }
-        return 0;
-
-nomem:
-        return -ENOMEM;
-}
-
-int
-follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
-                        struct page **pages, struct vm_area_struct **vmas,
-                        unsigned long *position, int *length, int i)
-{
-        unsigned long vpfn, vaddr = *position;
-        int remainder = *length;
-
-        WARN_ON(!is_vm_hugetlb_page(vma));
-
-        vpfn = vaddr/PAGE_SIZE;
-        while (vaddr < vma->vm_end && remainder) {
-
-                if (pages) {
-                        pte_t *pte;
-                        struct page *page;
-
-                        pte = huge_pte_offset(mm, vaddr);
-
-                        /* hugetlb should be locked, and hence, prefaulted */
-                        WARN_ON(!pte || pte_none(*pte));
-
-                        page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];
-
-                        WARN_ON(!PageCompound(page));
-
-                        get_page(page);
-                        pages[i] = page;
-                }
-
-                if (vmas)
-                        vmas[i] = vma;
-
-                vaddr += PAGE_SIZE;
-                ++vpfn;
-                --remainder;
-                ++i;
-        }
-
-        *length = remainder;
-        *position = vaddr;
-
-        return i;
-}
-
 #if 0 /* This is just for testing */
 struct page *
 follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
@@ -204,83 +118,15 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 }
 #endif
 
-void unmap_hugepage_range(struct vm_area_struct *vma,
-                unsigned long start, unsigned long end)
+void hugetlb_clean_stale_pgtable(pte_t *pte)
 {
-        struct mm_struct *mm = vma->vm_mm;
-        unsigned long address;
-        pte_t pte, *ptep;
+        pmd_t *pmd = (pmd_t *) pte;
         struct page *page;
 
-        BUG_ON(start & (HPAGE_SIZE - 1));
-        BUG_ON(end & (HPAGE_SIZE - 1));
-
-        for (address = start; address < end; address += HPAGE_SIZE) {
-                ptep = huge_pte_offset(mm, address);
-                if (!ptep)
-                        continue;
-                pte = ptep_get_and_clear(mm, address, ptep);
-                if (pte_none(pte))
-                        continue;
-                page = pte_page(pte);
-                put_page(page);
-        }
-        add_mm_counter(mm ,rss, -((end - start) >> PAGE_SHIFT));
-        flush_tlb_range(vma, start, end);
-}
-
-int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
-{
-        struct mm_struct *mm = current->mm;
-        unsigned long addr;
-        int ret = 0;
-
-        BUG_ON(vma->vm_start & ~HPAGE_MASK);
-        BUG_ON(vma->vm_end & ~HPAGE_MASK);
-
-        spin_lock(&mm->page_table_lock);
-        for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
-                unsigned long idx;
-                pte_t *pte = huge_pte_alloc(mm, addr);
-                struct page *page;
-
-                if (!pte) {
-                        ret = -ENOMEM;
-                        goto out;
-                }
-
-                if (!pte_none(*pte))
-                        continue;
-
-                idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
-                        + (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
-                page = find_get_page(mapping, idx);
-                if (!page) {
-                        /* charge the fs quota first */
-                        if (hugetlb_get_quota(mapping)) {
-                                ret = -ENOMEM;
-                                goto out;
-                        }
-                        page = alloc_huge_page();
-                        if (!page) {
-                                hugetlb_put_quota(mapping);
-                                ret = -ENOMEM;
-                                goto out;
-                        }
-                        ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
-                        if (! ret) {
-                                unlock_page(page);
-                        } else {
-                                hugetlb_put_quota(mapping);
-                                free_huge_page(page);
-                                goto out;
-                        }
-                }
-                set_huge_pte(mm, vma, page, pte, vma->vm_flags & VM_WRITE);
-        }
-out:
-        spin_unlock(&mm->page_table_lock);
-        return ret;
-}
+        page = pmd_page(*pmd);
+        pmd_clear(pmd);
+        dec_page_state(nr_page_table_pages);
+        page_cache_release(page);
 }
 
 /* x86_64 also uses this file */